| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
from htcondor_executor import HTCondorExecutor
import dask
import dask.array as da
import numpy as np
def test_works_as_dask_executor():
    with HTCondorExecutor() as pool:
        with dask.config.set(pool=pool):
            x = da.sum(da.ones(5)) ** 2
            y = x.compute()
            assert y == 25
|
[
"dask.array.ones",
"dask.config.set",
"htcondor_executor.HTCondorExecutor"
] |
[((149, 167), 'htcondor_executor.HTCondorExecutor', 'HTCondorExecutor', ([], {}), '()\n', (165, 167), False, 'from htcondor_executor import HTCondorExecutor\n'), ((190, 216), 'dask.config.set', 'dask.config.set', ([], {'pool': 'pool'}), '(pool=pool)\n', (205, 216), False, 'import dask\n'), ((241, 251), 'dask.array.ones', 'da.ones', (['(5)'], {}), '(5)\n', (248, 251), True, 'import dask.array as da\n')]
|
import datetime
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
from infographics import Figure, Infographic
from utils import timex
from covid19 import epid
BASE_IMAGE_FILE = 'src/covid19/assets/lk_map.png'
FONT_FILE = 'src/covid19/assets/Arial.ttf'
POPULATION = 21_800_000
PADDING = 0.12
WINDOW_DAYS_AND_COLOR = [(7, 'green'), (28, 'orange'), (112, 'red')]
class PlotVaxProjection(Figure.Figure):
def __init__(
self,
left_bottom=(PADDING, PADDING),
width_height=(1 - PADDING * 2, 1 - PADDING * 2),
figure_text='',
):
super().__init__(
left_bottom=left_bottom,
width_height=width_height,
figure_text=figure_text,
)
self.__data__ = PlotVaxProjection.__prep_data__(self)
def __prep_data__(self):
timeseries = epid.load_timeseries()
last_item = timeseries[-1]
date = last_item['date']
last_ut = timex.parse_time(date, '%Y-%m-%d')
date_id = timex.get_date_id(last_ut)
t = list(
map(
lambda d: d['ut'],
timeseries,
)
)
x = list(
map(
lambda ti: datetime.datetime.fromtimestamp(ti),
t,
)
)
y = list(
map(
lambda d: d['cum_total'] / (POPULATION * 4 / 3),
timeseries,
)
)
last_cum_total_dose2 = y[-1]
MAX_PROJECTION_DAYS = 1000
x_proj = [
datetime.datetime.fromtimestamp(last_ut + i * timex.SECONDS_IN.DAY)
for i in range(0, MAX_PROJECTION_DAYS)
]
window_data = []
for (window_days, color) in WINDOW_DAYS_AND_COLOR:
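# per-day rate of progress toward the goal, measured over the trailing window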
rate = (
(y[-1] - y[-1 - window_days])
/ (t[-1] - t[-1 - window_days])
* timex.SECONDS_IN.DAY
)
y_proj = [
last_cum_total_dose2 + rate * i
for i in range(0, MAX_PROJECTION_DAYS)
]
y_proj_filtered = list(
filter(
lambda y: y < 1,
y_proj,
)
)
x_proj_filtered = x_proj[: len(y_proj_filtered)]
days_to_goal = (1 - last_cum_total_dose2) / rate
goal_ut = last_ut + days_to_goal * timex.SECONDS_IN.DAY
window_data.append(
(
window_days,
color,
rate,
x_proj_filtered,
y_proj_filtered,
days_to_goal,
goal_ut,
)
)
return (date, date_id, x, y, window_data)
def draw(self):
super().draw()
(date, date_id, x, y, window_data) = self.__data__
ax = plt.axes(self.left_bottom + self.width_height)
plt.plot(x, y, color='green')
legend_items = ['Actual Vaccinations']
for (
window_days,
color,
rate,
x_proj_filtered,
y_proj_filtered,
days_to_goal,
goal_ut,
) in window_data:
plt.plot(
x_proj_filtered,
y_proj_filtered,
color=color,
linestyle='dashed',
)
plt.text(
x_proj_filtered[-1],
y_proj_filtered[-1],
timex.format_time(goal_ut, '%b %d,\n%Y'),
color=color,
fontsize=8,
ha='center',
va='bottom',
)
legend_items.append(
'Projection (based on %d-day rate)' % window_days
)
plt.legend(
legend_items,
loc='lower right',
)
plt.ylabel('Progress to Goal (Vaccinate everyone over the age of 20)')
plt.grid()
ax.get_yaxis().set_major_formatter(
tkr.FuncFormatter(lambda x, p: format(float(x), ',.1%'))
)
def get_data(self):
return self.__data__
def _plot(is_banner_image=False):
plot = PlotVaxProjection()
(date, date_id, x, y, window_data) = plot.get_data()
size = (16, 9)
banner_label = ''
if is_banner_image:
size = (27, 9)
banner_label = '.banner'
image_file = '/tmp/covid19.plot.%s.vax_projection%s.png' % (
date_id,
banner_label,
)
Infographic.Infographic(
title='Progress to Goal and Projections',
subtitle='COVID19 Vaccinations in Sri Lanka (as of %s)' % date,
footer_text='\n'.join(
['Data from https://www.epid.gov.lk', 'Visualization by @nuuuwan']
),
children=[plot],
size=size,
).save(image_file)
return image_file
if __name__ == '__main__':
_plot(is_banner_image=False)
_plot(is_banner_image=True)
|
[
"utils.timex.get_date_id",
"matplotlib.pyplot.plot",
"utils.timex.parse_time",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.legend",
"covid19.epid.load_timeseries",
"utils.timex.format_time",
"datetime.datetime.fromtimestamp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid"
] |
[((843, 865), 'covid19.epid.load_timeseries', 'epid.load_timeseries', ([], {}), '()\n', (863, 865), False, 'from covid19 import epid\n'), ((952, 986), 'utils.timex.parse_time', 'timex.parse_time', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (968, 986), False, 'from utils import timex\n'), ((1005, 1031), 'utils.timex.get_date_id', 'timex.get_date_id', (['last_ut'], {}), '(last_ut)\n', (1022, 1031), False, 'from utils import timex\n'), ((2895, 2941), 'matplotlib.pyplot.axes', 'plt.axes', (['(self.left_bottom + self.width_height)'], {}), '(self.left_bottom + self.width_height)\n', (2903, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2950, 2979), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""green"""'}), "(x, y, color='green')\n", (2958, 2979), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3849), 'matplotlib.pyplot.legend', 'plt.legend', (['legend_items'], {'loc': '"""lower right"""'}), "(legend_items, loc='lower right')\n", (3816, 3849), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3963), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Progress to Goal (Vaccinate everyone over the age of 20)"""'], {}), "('Progress to Goal (Vaccinate everyone over the age of 20)')\n", (3903, 3963), True, 'import matplotlib.pyplot as plt\n'), ((3972, 3982), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3980, 3982), True, 'import matplotlib.pyplot as plt\n'), ((1552, 1619), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(last_ut + i * timex.SECONDS_IN.DAY)'], {}), '(last_ut + i * timex.SECONDS_IN.DAY)\n', (1583, 1619), False, 'import datetime\n'), ((3247, 3322), 'matplotlib.pyplot.plot', 'plt.plot', (['x_proj_filtered', 'y_proj_filtered'], {'color': 'color', 'linestyle': '"""dashed"""'}), "(x_proj_filtered, y_proj_filtered, color=color, linestyle='dashed')\n", (3255, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3514, 3554), 'utils.timex.format_time', 'timex.format_time', (['goal_ut', '"""%b %d,\n%Y"""'], {}), "(goal_ut, '%b %d,\\n%Y')\n", (3531, 3554), False, 'from utils import timex\n'), ((1217, 1252), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ti'], {}), '(ti)\n', (1248, 1252), False, 'import datetime\n')]
|
import unittest
import pathlib
import os, sys, traceback
import yaml
import pickledb
from os.path import dirname, abspath
from shutil import copyfile
from flashlexiot.backend.thread import BasicPubsubThread, ExpireMessagesThread
from flashlexiot.sdk import FlashlexSDK
def loadConfig(configFile):
cfg = None
with open(configFile, 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
return cfg
class TestFlashlexSDK(unittest.TestCase):
def setUp(self):
fn = pathlib.Path(__file__).parent / 'test-config.yml'
#get defaults for data and keys
dir_path = os.path.dirname(os.path.realpath(__file__))
config = loadConfig("{0}/test-config.yml".format(dir_path))
self.sdk = FlashlexSDK(config)
config = self.sdk.getConfig()
config["flashlex"]["app"]["db"]["dataPath"] = pathlib.Path(__file__).parent
config["flashlex"]["app"]["db"]["subscriptionData"] = 'data/subscription2.db'
self.sdk.setConfig(config)
message1 = {
"pk": "b300d03c-7bd4-4110-9489-5ef59abb1981",
"timestamp": 1553978975.5232847,
"datetime": "2019-03-30 13:49:35",
"message": {
"topic": "pubsub/foobar30",
"payload": {
"message": "Sending a basic message...",
"sequence": 151
},
"pos": 1,
"retain": 0,
"mid": 1
}
}
message2 = {
"pk": "a300d03c-7bd4-4110-9489-5ef59abb1981",
"timestamp": 1553978976.5232847,
"datetime": "2019-03-30 13:49:35",
"message": {
"topic": "pubsub/foobar30",
"payload": {
"message": "Sending a basic message...",
"sequence": 151
},
"pos": 1,
"retain": 0,
"mid": 1
}
}
subscriptionDataPath = "{0}/{1}".format(
config["flashlex"]["app"]["db"]["dataPath"],
config["flashlex"]["app"]["db"]["subscriptionData"])
subscriptionDb = pickledb.load(subscriptionDataPath, False)
subscriptionDb.set(message1['pk'], message1)
subscriptionDb.set(message2['pk'], message2)
subscriptionDb.dump()
def test_load_config(self):
self.assertEqual('testThing1', self.sdk.getConfig()["flashlex"]["thing"]["name"])
def test_get_messages(self):
messages = self.sdk.getSubscribedMessages()
self.assertEqual(2,len(messages))
def test_remove_message(self):
config = self.sdk.getConfig()
messages = self.sdk.getSubscribedMessages()
self.sdk.removeMessageFromStore(messages[0])
messages = self.sdk.getSubscribedMessages()
self.assertEqual(1,len(messages))
def tearDown(self):
print('tear down')
config = self.sdk.getConfig()
subscriptionDataPath = "{0}/{1}".format(
config["flashlex"]["app"]["db"]["dataPath"],
config["flashlex"]["app"]["db"]["subscriptionData"])
os.remove(subscriptionDataPath)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"yaml.load",
"os.remove",
"pickledb.load",
"os.path.realpath",
"pathlib.Path",
"flashlexiot.sdk.FlashlexSDK"
] |
[((3243, 3258), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3256, 3258), False, 'import unittest\n'), ((371, 413), 'yaml.load', 'yaml.load', (['ymlfile'], {'Loader': 'yaml.FullLoader'}), '(ymlfile, Loader=yaml.FullLoader)\n', (380, 413), False, 'import yaml\n'), ((750, 769), 'flashlexiot.sdk.FlashlexSDK', 'FlashlexSDK', (['config'], {}), '(config)\n', (761, 769), False, 'from flashlexiot.sdk import FlashlexSDK\n'), ((2202, 2244), 'pickledb.load', 'pickledb.load', (['subscriptionDataPath', '(False)'], {}), '(subscriptionDataPath, False)\n', (2215, 2244), False, 'import pickledb\n'), ((3177, 3208), 'os.remove', 'os.remove', (['subscriptionDataPath'], {}), '(subscriptionDataPath)\n', (3186, 3208), False, 'import os, sys, traceback\n'), ((634, 660), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (650, 660), False, 'import os, sys, traceback\n'), ((863, 885), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (875, 885), False, 'import pathlib\n'), ((507, 529), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (519, 529), False, 'import pathlib\n')]
|
"""
Helpers/utils for working with tornado asynchronous stuff
"""
import contextlib
import logging
import sys
import threading
import salt.ext.tornado.concurrent
import salt.ext.tornado.ioloop
log = logging.getLogger(__name__)
@contextlib.contextmanager
def current_ioloop(io_loop):
"""
A context manager that will set the current ioloop to io_loop for the context
"""
orig_loop = salt.ext.tornado.ioloop.IOLoop.current()
io_loop.make_current()
try:
yield
finally:
orig_loop.make_current()
class SyncWrapper:
"""
A wrapper to make Async classes synchronous
This is used as a simple wrapper, for example:
asynchronous = AsyncClass()
# this method would regularly return a future
future = asynchronous.async_method()
sync = SyncWrapper(async_factory_method, (arg1, arg2), {'kwarg1': 'val'})
# the sync wrapper will automatically wait on the future
ret = sync.async_method()
"""
def __init__(
self,
cls,
args=None,
kwargs=None,
async_methods=None,
close_methods=None,
loop_kwarg=None,
):
self.io_loop = salt.ext.tornado.ioloop.IOLoop()
if args is None:
args = []
if kwargs is None:
kwargs = {}
if async_methods is None:
async_methods = []
if close_methods is None:
close_methods = []
self.loop_kwarg = loop_kwarg
self.cls = cls
if loop_kwarg:
kwargs[self.loop_kwarg] = self.io_loop
self.obj = cls(*args, **kwargs)
self._async_methods = list(
set(async_methods + getattr(self.obj, "async_methods", []))
)
self._close_methods = list(
set(close_methods + getattr(self.obj, "close_methods", []))
)
def _populate_async_methods(self):
"""
We need the '_coroutines' attribute on classes until we can deprecate
tornado<4.5. After that 'is_coroutine_function' will always be
available.
"""
if hasattr(self.obj, "_coroutines"):
self._async_methods += self.obj._coroutines
def __repr__(self):
return "<SyncWrapper(cls={})".format(self.cls)
def close(self):
for method in self._close_methods:
if method in self._async_methods:
method = self._wrap(method)
else:
try:
method = getattr(self.obj, method)
except AttributeError:
log.error("No sync method %s on object %r", method, self.obj)
continue
try:
method()
except AttributeError:
log.error("No async method %s on object %r", method, self.obj)
except Exception: # pylint: disable=broad-except
log.exception("Exception encountered while running stop method")
io_loop = self.io_loop
io_loop.stop()
io_loop.close(all_fds=True)
def __getattr__(self, key):
if key in self._async_methods:
return self._wrap(key)
return getattr(self.obj, key)
def _wrap(self, key):
def wrap(*args, **kwargs):
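# Run the wrapped coroutine to completion on this wrapper's private io_loop in a worker
# thread; `results` collects (success_flag, return_value_or_exc_info).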
results = []
thread = threading.Thread(
target=self._target, args=(key, args, kwargs, results, self.io_loop),
)
thread.start()
thread.join()
if results[0]:
return results[1]
else:
exc_info = results[1]
raise exc_info[1].with_traceback(exc_info[2])
return wrap
def _target(self, key, args, kwargs, results, io_loop):
try:
result = io_loop.run_sync(lambda: getattr(self.obj, key)(*args, **kwargs))
results.append(True)
results.append(result)
except Exception as exc: # pylint: disable=broad-except
results.append(False)
results.append(sys.exc_info())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
self.close()
|
[
"threading.Thread",
"logging.getLogger",
"sys.exc_info"
] |
[((203, 230), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (220, 230), False, 'import logging\n'), ((3291, 3381), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._target', 'args': '(key, args, kwargs, results, self.io_loop)'}), '(target=self._target, args=(key, args, kwargs, results,\n self.io_loop))\n', (3307, 3381), False, 'import threading\n'), ((4017, 4031), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4029, 4031), False, 'import sys\n')]
|
import pytest
from dbt.tests.util import run_dbt, get_manifest
my_model_sql = """
select 1 as fun
"""
@pytest.fixture(scope="class")
def models():
    return {"my_model.sql": my_model_sql}
def test_basic(project):
    # Tests that a project with a single model works
    results = run_dbt(["run"])
    assert len(results) == 1
    manifest = get_manifest(project.project_root)
    assert "model.test.my_model" in manifest.nodes
|
[
"dbt.tests.util.run_dbt",
"pytest.fixture",
"dbt.tests.util.get_manifest"
] |
[((109, 138), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (123, 138), False, 'import pytest\n'), ((289, 305), 'dbt.tests.util.run_dbt', 'run_dbt', (["['run']"], {}), "(['run'])\n", (296, 305), False, 'from dbt.tests.util import run_dbt, get_manifest\n'), ((350, 384), 'dbt.tests.util.get_manifest', 'get_manifest', (['project.project_root'], {}), '(project.project_root)\n', (362, 384), False, 'from dbt.tests.util import run_dbt, get_manifest\n')]
|
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
import numpy as np
from ssa_sim_v2.policies.policy import Policy
from ssa_sim_v2.simulator.action import Action
from ssa_sim_v2.tools import dhl
from ssa_sim_v2.simulator.action import Action, ActionSet
from ssa_sim_v2.simulator.attribute import AttrSet
from ssa_sim_v2.simulator.state import StateSet
# ------------------------------------------------------------
class PolicyThompsonSamplingSI(Policy):
"""
State-independent (SI) Thompson sampling policy.
:ivar list state_set: A list of states.
:ivar list action_set: A list of actions.
:ivar object rng: Random number generator.
:ivar int seed: Seed for the random number generator.
:ivar Policy.UDP udp: User-defined params.
:ivar Policy.HTP htp: Hard-tunable params.
:ivar Policy.STP stp: Soft-tunable params.
:ivar Policy.IP ip: Inner params.
"""
class UDP(Policy.UDP):
"""
A class for storing user-defined params -- hard-coded overwrite on all
other parameters.
:ivar float min_bid: Minimal allowed bid.
:ivar float max_bid: Maximal allowed bid.
"""
def __init__(self):
Policy.UDP.__init__(self)
# Min and max bid
self.min_bid = None
self.max_bid = None
class STP(Policy.STP):
"""
A class for storing soft-tunable params -- tuned externally
for a specific bidding entity (possibly based on a larger dataset
than inner parameters).
:ivar float mu_init: Initial belief for reward value.
:ivar float sigma_init: Initial uncertainty for the belief.
:ivar float sigma_measure: Measurement uncertainty.
"""
def __init__(self):
Policy.STP.__init__(self)
self.mu_init = 0.0
self.sigma_init = 1000.0
self.sigma_measure = 1.0
class IP(Policy.IP):
"""
A class for storing inner params -- all parameters trained within
the policy (based on data for a given bidding entity).
:ivar np.ndarray mu: Array of beliefs for the reward for every action.
:ivar np.ndarray sigma: Array of uncertainties for the reward for every
action.
"""
def __init__(self):
Policy.IP.__init__(self)
self.mu = None
self.sigma = None
def __init__(self, state_set, action_set, attr_set, seed=12345, save_history=False):
"""
:param StateSet state_set: State set.
:param ActionSet action_set: Action set.
:param AttrSet attr_set: Attribute set.
:param int seed: Seed for the random number generator.
:param bool save_history: Indicates if policy history should be saved
in the history attribute.
"""
Policy.__init__(self, state_set, action_set, attr_set, seed, save_history)
self.udp = self.UDP()
self.htp = self.HTP()
self.stp = self.STP()
self.ip = self.IP()
def initialize(self, params):
"""
Initializes the policy using a policy initializer (dependency injection
pattern). The policy initializer may be used to test many policy
parameters settings -- it is enough that the simulator provides
appropriate policy initializers which set the params.
:param dict params: Initialization parameters set by the policy initializer.
"""
Policy.initialize(self, params)
# Apply bounds if defined
if self.udp.min_bid is not None:
action_set_temp = []
for action in self.action_set:
if action.bid >= self.udp.min_bid:
action_set_temp.append(action)
self.action_set = action_set_temp
if self.udp.max_bid is not None:
action_set_temp = []
for action in self.action_set:
if action.bid <= self.udp.max_bid:
action_set_temp.append(action)
self.action_set = action_set_temp
# handy functions to discretize bids
self.bid_amount_to_index = lambda x: round(x)
self.index_to_bid_amount = lambda x: float(x)
# Initialize beliefs
self.ip.mu = np.array([self.stp.mu_init]*self.bid_amount_to_index(self.action_set.max_bid))
self.ip.sigma = np.array([self.stp.sigma_init]*self.bid_amount_to_index(self.action_set.max_bid))
def learn(self, state, data):
"""
A method that allows the policy to learn based on observations provided
by the simulator.
:param StateSet.State state: The state in the previous turn.
:param Dict data: Dictionary with the following fields:
* action -- Your original action used in the previous turn.
* effective_action -- Actual action used by the simulator.
The original action may need to be adjusted (base bid or modifiers
clipped to bounds) to be valid.
* reward -- Overall reward obtained in the previous turn.
* info -- A dictionary with overall data for the policy:
* auctions -- number of auctions,
* clicks -- number of clicks,
* conversions -- number of conversions,
* click_probability -- click probability (clicks / auctions),
* cvr -- conversion rate (conversions / clicks),
* rpc -- revenue per click (revenue / clicks),
* cpc -- cost per click (cost / clicks),
* rpv -- revenue per conversion (revenue / conversions),
* revenue -- revenue from all conversions,
* cost -- cost for all clicks,
* profit -- revenue - cost.
* attr_info: A dict with data per segment, e.g.
{
"gender": {"M": info_for_gender_M, "F": info_for_gender_F, ...},
"age": {"18-24": info_for_age_18-24, "25-34": info_for_age_25-34, ...},
...
},
where info_for... has the same form as info but contains data
only for a given segment.
"""
Policy.learn(self, state, data)
if data["action"] is None:
return
#idx = self.action_set.index(data["effective_action"])
idx = round(data['effective_action'].bid)
obs_mu = self._get_observation(data)
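# Gaussian belief update for the chosen bid: precision-weighted average of the prior mean
# and the new observation, followed by the matching shrinkage of sigma.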
self.ip.mu[idx] = ((1.0 / self.ip.sigma[idx] ** 2 * self.ip.mu[idx]
+ 1.0 / self.stp.sigma_measure ** 2 * obs_mu)
/ (1.0 / self.ip.sigma[idx] ** 2 + 1.0 / self.stp.sigma_measure ** 2))
self.ip.sigma[idx] = (self.ip.sigma[idx] * self.stp.sigma_measure) \
/ np.sqrt(self.ip.sigma[idx] ** 2 + self.stp.sigma_measure ** 2)
def act(self, state, data=None):
"""
Returns an action given state.
:param State state: The current state.
:param Union[pd.DataFrame, dict] data: Input data.
:return: An action chosen by the policy.
:rtype: Action
"""
Policy.act(self, state, data)
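# Thompson sampling: draw one sample from each action's Gaussian belief and act greedily on the sampled values.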
try:
randomized_mu = np.array([self.rng.normal(self.ip.mu[idx], self.ip.sigma[idx])
for idx in range(round(self.action_set.max_bid))])
except Exception:
print('error !!!!!')
print(self.action_set.max_bid)
print(len(self.ip.mu))
print(len(self.ip.sigma))
print('length of mu = {}, length of sigma = {}'.format(len(self.ip.mu), len(self.ip.sigma)))
action_index = dhl.randargmax(randomized_mu, rng=self.rng)
base_bid_amount = self.index_to_bid_amount(action_index)
action_inc = Action(base_bid_amount) # note: underspecified action (modifiers not defined at all)
# action_inc = Action(base_bid_amount, {'gender': {'M': 1.1, 'F': 1.2}}) # underspecified modifiers
action = self.action_set.validify_action(action_inc) # this function fills in unspecified modifiers
self.history.update({"bid": action.bid}) # TODO: we should keep not just the base bid, but the entire bid for all attrs
return action
def _get_observation(self, data):
return data["info"]["profit"] / data["info"]["auctions"] \
if data["info"]["auctions"] != 0 else 0.0
class PolicyThompsonSamplingPPASI(PolicyThompsonSamplingSI):
"""
State-independent (SI) Thompson sampling policy optimizing
the profit per auction (PPA).
"""
def _get_observation(self, data):
return data["info"]["profit"] / data["info"]["auctions"] \
if data["info"]["auctions"] != 0 else 0.0
class PolicyThompsonSamplingPSI(PolicyThompsonSamplingSI):
"""
State-independent (SI) Thompson sampling policy optimizing
the total profit (P).
"""
def _get_observation(self, data):
return data["info"]["profit"]
def test_01_setup():
"""
Sample initialization of the attribute, state, and action spaces.
:return:
"""
names = ['gender', 'age']
vals = {'gender': ['M', 'F', 'U'],
'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']}
attr_set = AttrSet(names, vals)
state_set = StateSet(['date', 'how'], ['discrete', 'discrete'],
[['2018-01-01', '2018-01-02'], list(range(168))])
act_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)
return attr_set, state_set, act_set
def test_one_policy_run():
# init attr, state, action space
attr_set, state_set, act_set = test_01_setup()
# get first state
s = state_set.make_state({'date': '2018-01-01', 'how': 12})
# initialize policy
pol = PolicyThompsonSamplingSI(state_set, act_set, attr_set, seed=9292)
pol.initialize({"stp": {"cvr_default": 0.02, "rpv_default": 300.0}})
a = pol.act(s)
print(a)
if __name__ == "__main__":
test_01_setup()
test_one_policy_run()
|
[
"ssa_sim_v2.policies.policy.Policy.STP.__init__",
"_fix_paths.fix_paths",
"ssa_sim_v2.policies.policy.Policy.learn",
"ssa_sim_v2.policies.policy.Policy.__init__",
"ssa_sim_v2.simulator.attribute.AttrSet",
"ssa_sim_v2.simulator.action.ActionSet",
"ssa_sim_v2.policies.policy.Policy.UDP.__init__",
"ssa_sim_v2.policies.policy.Policy.IP.__init__",
"ssa_sim_v2.tools.dhl.randargmax",
"ssa_sim_v2.policies.policy.Policy.initialize",
"ssa_sim_v2.simulator.action.Action",
"ssa_sim_v2.policies.policy.Policy.act",
"numpy.sqrt"
] |
[((137, 148), '_fix_paths.fix_paths', 'fix_paths', ([], {}), '()\n', (146, 148), False, 'from _fix_paths import fix_paths\n'), ((9549, 9569), 'ssa_sim_v2.simulator.attribute.AttrSet', 'AttrSet', (['names', 'vals'], {}), '(names, vals)\n', (9556, 9569), False, 'from ssa_sim_v2.simulator.attribute import AttrSet\n'), ((9729, 9802), 'ssa_sim_v2.simulator.action.ActionSet', 'ActionSet', (['attr_set'], {'max_bid': '(9.99)', 'min_bid': '(0.01)', 'max_mod': '(9.0)', 'min_mod': '(0.1)'}), '(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)\n', (9738, 9802), False, 'from ssa_sim_v2.simulator.action import Action, ActionSet\n'), ((3077, 3151), 'ssa_sim_v2.policies.policy.Policy.__init__', 'Policy.__init__', (['self', 'state_set', 'action_set', 'attr_set', 'seed', 'save_history'], {}), '(self, state_set, action_set, attr_set, seed, save_history)\n', (3092, 3151), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((3699, 3730), 'ssa_sim_v2.policies.policy.Policy.initialize', 'Policy.initialize', (['self', 'params'], {}), '(self, params)\n', (3716, 3730), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((6487, 6518), 'ssa_sim_v2.policies.policy.Policy.learn', 'Policy.learn', (['self', 'state', 'data'], {}), '(self, state, data)\n', (6499, 6518), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((7426, 7455), 'ssa_sim_v2.policies.policy.Policy.act', 'Policy.act', (['self', 'state', 'data'], {}), '(self, state, data)\n', (7436, 7455), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((7944, 7987), 'ssa_sim_v2.tools.dhl.randargmax', 'dhl.randargmax', (['randomized_mu'], {'rng': 'self.rng'}), '(randomized_mu, rng=self.rng)\n', (7958, 7987), False, 'from ssa_sim_v2.tools import dhl\n'), ((8074, 8097), 'ssa_sim_v2.simulator.action.Action', 'Action', (['base_bid_amount'], {}), '(base_bid_amount)\n', (8080, 8097), False, 'from ssa_sim_v2.simulator.action import Action, ActionSet\n'), ((1430, 1455), 'ssa_sim_v2.policies.policy.Policy.UDP.__init__', 'Policy.UDP.__init__', (['self'], {}), '(self)\n', (1449, 1455), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((2009, 2034), 'ssa_sim_v2.policies.policy.Policy.STP.__init__', 'Policy.STP.__init__', (['self'], {}), '(self)\n', (2028, 2034), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((2549, 2573), 'ssa_sim_v2.policies.policy.Policy.IP.__init__', 'Policy.IP.__init__', (['self'], {}), '(self)\n', (2567, 2573), False, 'from ssa_sim_v2.policies.policy import Policy\n'), ((7074, 7136), 'numpy.sqrt', 'np.sqrt', (['(self.ip.sigma[idx] ** 2 + self.stp.sigma_measure ** 2)'], {}), '(self.ip.sigma[idx] ** 2 + self.stp.sigma_measure ** 2)\n', (7081, 7136), True, 'import numpy as np\n')]
|
import torch
from torch_geometric.nn.reshape import Reshape
def test_reshape():
    x = torch.randn(10, 4)
    op = Reshape(5, 2, 4)
    assert op.__repr__() == 'Reshape(5, 2, 4)'
    assert op(x).size() == (5, 2, 4)
    assert op(x).view(10, 4).tolist() == x.tolist()
|
[
"torch_geometric.nn.reshape.Reshape",
"torch.randn"
] |
[((90, 108), 'torch.randn', 'torch.randn', (['(10)', '(4)'], {}), '(10, 4)\n', (101, 108), False, 'import torch\n'), ((118, 134), 'torch_geometric.nn.reshape.Reshape', 'Reshape', (['(5)', '(2)', '(4)'], {}), '(5, 2, 4)\n', (125, 134), False, 'from torch_geometric.nn.reshape import Reshape\n')]
|
# from distutils.core import setup
from setuptools import setup
import pathlib
current_location = pathlib.Path(__file__).parent
readme = (current_location / "README.md").read_text()
setup(
    name = 'chattingtransformer',
    packages = ['chattingtransformer'],
    version = '1.0.3',
    license='Apache 2.0',
    description = "GPT2 text generation with just two lines of code!",
    long_description= readme,
    long_description_content_type='text/markdown',
    author = "Vennify Inc",
    author_email = '<EMAIL>',
    url = 'https://github.com/Vennify-Inc/chatting-transformer',
    keywords = ["gpt2", "artificial", "intelligence", "ai", "text", "generation", "chatting", "vennify", "gpt", "transformer", "transformers", "nlp", "nlu", "natural", "language", "processing", "understanding"],
    install_requires=[
        'transformers>=3.1.0',
        'torch>=1.6.0',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        "Intended Audience :: Science/Research",
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        "Topic :: Text Processing :: Linguistic",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Natural Language :: English"
    ],
)
|
[
"pathlib.Path",
"setuptools.setup"
] |
[((185, 1268), 'setuptools.setup', 'setup', ([], {'name': '"""chattingtransformer"""', 'packages': "['chattingtransformer']", 'version': '"""1.0.3"""', 'license': '"""Apache 2.0"""', 'description': '"""GPT2 text generation with just two lines of code!"""', 'long_description': 'readme', 'long_description_content_type': '"""text/markdown"""', 'author': '"""Vennify Inc"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/Vennify-Inc/chatting-transformer"""', 'keywords': "['gpt2', 'artificial', 'intelligence', 'ai', 'text', 'generation',\n 'chatting', 'vennify', 'gpt', 'transformer', 'transformers', 'nlp',\n 'nlu', 'natural', 'language', 'processing', 'understanding']", 'install_requires': "['transformers>=3.1.0', 'torch>=1.6.0']", 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Natural Language :: English']"}), "(name='chattingtransformer', packages=['chattingtransformer'], version\n ='1.0.3', license='Apache 2.0', description=\n 'GPT2 text generation with just two lines of code!', long_description=\n readme, long_description_content_type='text/markdown', author=\n 'Vennify Inc', author_email='<EMAIL>', url=\n 'https://github.com/Vennify-Inc/chatting-transformer', keywords=['gpt2',\n 'artificial', 'intelligence', 'ai', 'text', 'generation', 'chatting',\n 'vennify', 'gpt', 'transformer', 'transformers', 'nlp', 'nlu',\n 'natural', 'language', 'processing', 'understanding'], install_requires\n =['transformers>=3.1.0', 'torch>=1.6.0'], classifiers=[\n 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Natural Language :: English'])\n", (190, 1268), False, 'from setuptools import setup\n'), ((100, 122), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (112, 122), False, 'import pathlib\n')]
|
#!/usr/bin/python
# Import library functions we need
import sys
import time
try:
from rpi_ws281x import __version__, PixelStrip, Adafruit_NeoPixel, Color
except ImportError:
from neopixel import Adafruit_NeoPixel as PixelStrip, Color
__version__ = "legacy"
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
# LED strip configuration:
LED_COUNT = 8 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # PWM channel
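# 256-entry gamma-correction lookup table: maps linear 0-255 input levels to perceptually corrected output levels.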
LED_GAMMA = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11,
11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18,
19, 19, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, 27, 27, 28,
29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40,
40, 41, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 88, 89,
90, 91, 93, 94, 95, 96, 98, 99,100,102,103,104,106,107,109,110,
111,113,114,116,117,119,120,121,123,124,126,128,129,131,132,134,
135,137,138,140,142,143,145,146,148,150,151,153,155,157,158,160,
162,163,165,167,169,170,172,174,176,178,179,181,183,185,187,189,
191,193,194,196,198,200,202,204,206,208,210,212,214,216,218,220,
222,224,227,229,231,233,235,237,239,241,244,246,248,250,252,255]
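# Override the defaults above from command-line arguments: LED count, wait time (ms), mode, brightness (%), and whether to apply gamma correction.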
LED_COUNT = max(0,int(sys.argv[1]))
WAIT_MS = max(0,int(sys.argv[2]))
MODE = sys.argv[3]
LED_BRIGHTNESS = min(255,int(max(0,float(sys.argv[4])) * 255 / 100))
if (sys.argv[5].lower() != "true"):
LED_GAMMA = range(256)
def getRGBfromI(RGBint):
blue = RGBint & 255
green = (RGBint >> 8) & 255
red = (RGBint >> 16) & 255
return red, green, blue
# Define functions which animate LEDs in various ways.
def setPixel(strip, i, color):
"""Set a single pixel"""
strip.setPixelColor(i, color)
strip.show()
def setPixels(strip, s, e, color, wait_ms=30):
"""Set pixels from s(tart) to e(nd)"""
if (wait_ms > 0):
for i in range(s, e+1):
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms/1000.0)
else:
for i in range(s, e+1):
strip.setPixelColor(i, color)
strip.show()
def setBrightness(strip, brightness, wait_ms=30):
"""Set overall brighness"""
strip.setBrightness(brightness)
strip.show()
time.sleep(wait_ms/1000.0)
def colorWipe(strip, color, wait_ms=30):
"""Wipe color across display a pixel at a time."""
if (wait_ms > 0):
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms/1000.0)
else:
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
def shiftUp(strip, color, wait_ms=30):
"""Shift all pixels one way."""
oldcolour = strip.getPixelColor(0)
strip.setPixelColor(0, color)
strip.show()
if (wait_ms > 0):
time.sleep(wait_ms/1000.0)
for i in range(1,LED_COUNT):
newcolour = oldcolour
oldcolour = strip.getPixelColor(i)
strip.setPixelColor(i, newcolour)
strip.show()
time.sleep(wait_ms/1000.0)
else:
for i in range(1,LED_COUNT):
newcolour = oldcolour
oldcolour = strip.getPixelColor(i)
strip.setPixelColor(i, newcolour)
strip.show()
def shiftDown(strip, color, wait_ms=30):
"""Shift all pixels the other way."""
oldcolour = strip.getPixelColor(LED_COUNT-1)
strip.setPixelColor(LED_COUNT-1, color)
strip.show()
if (wait_ms > 0):
time.sleep(wait_ms/1000.0)
for i in range(LED_COUNT-2,-1,-1):
newcolour = oldcolour
oldcolour = strip.getPixelColor(i)
strip.setPixelColor(i, newcolour)
strip.show()
time.sleep(wait_ms/1000.0)
else:
for i in range(LED_COUNT-2,-1,-1):
newcolour = oldcolour
oldcolour = strip.getPixelColor(i)
strip.setPixelColor(i, newcolour)
strip.show()
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
def rainbow(strip, wait_ms=20, iterations=2):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def rainbowCycle(strip, wait_ms=20, iterations=2):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel(((i * 256 / strip.numPixels()) + j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
# Main loop:
if __name__ == '__main__':
# Create NeoPixel object with appropriate configuration.
#strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
if __version__ == "legacy":
strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
else:
strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_GAMMA)
# Initialize the library (must be called once before other functions).
strip.begin()
## Color wipe animations.
colorWipe(strip, Color(127, 0, 0), WAIT_MS) # Red wipe
colorWipe(strip, Color(0, 127, 0), WAIT_MS) # Green wipe
colorWipe(strip, Color(0, 0, 127), WAIT_MS) # Blue wipe
colorWipe(strip, Color(0, 0, 0), WAIT_MS) # Off wipe
## Rainbow animations.
#rainbow(strip)
#rainbowCycle(strip)
#colorWipe(strip, Color(0, 0, 0)) # Off wipe
while True:
try:
data = raw_input()
bits = data.split(',')
if len(bits) == 2:
if bits[0] == "brightness":
setBrightness(strip, min(255,max(0,int(bits[1]))), WAIT_MS)
if len(bits) == 3:
if MODE == "shiftu":
shiftUp(strip, Color(int(bits[0]), int(bits[1]), int(bits[2])), WAIT_MS)
elif MODE == "shiftd":
shiftDown(strip, Color(int(bits[0]), int(bits[1]), int(bits[2])), WAIT_MS)
else:
colorWipe(strip, Color(int(bits[0]), int(bits[1]), int(bits[2])), WAIT_MS)
if (MODE[0] == 'p' and len(bits) == 4):
setPixel(strip, int(bits[0]), Color(int(bits[1]), int(bits[2]), int(bits[3]) ))
if (MODE[0] == 'p' and len(bits) == 5):
setPixels(strip, int(bits[0]), int(bits[1]), Color(int(bits[2]), int(bits[3]), int(bits[4]) ), WAIT_MS)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
print(ex)
|
[
"neopixel.Adafruit_NeoPixel",
"neopixel.Color",
"sys.exit",
"time.sleep"
] |
[((2885, 2913), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (2895, 2913), False, 'import time\n'), ((3492, 3520), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (3502, 3520), False, 'import time\n'), ((4166, 4194), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (4176, 4194), False, 'import time\n'), ((4735, 4767), 'neopixel.Color', 'Color', (['(pos * 3)', '(255 - pos * 3)', '(0)'], {}), '(pos * 3, 255 - pos * 3, 0)\n', (4740, 4767), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((5202, 5230), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (5212, 5230), False, 'import time\n'), ((5550, 5578), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (5560, 5578), False, 'import time\n'), ((5828, 5925), 'neopixel.Adafruit_NeoPixel', 'PixelStrip', (['LED_COUNT', 'LED_PIN', 'LED_FREQ_HZ', 'LED_DMA', 'LED_INVERT', 'LED_BRIGHTNESS', 'LED_CHANNEL'], {}), '(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT,\n LED_BRIGHTNESS, LED_CHANNEL)\n', (5838, 5925), True, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((5948, 6056), 'neopixel.Adafruit_NeoPixel', 'PixelStrip', (['LED_COUNT', 'LED_PIN', 'LED_FREQ_HZ', 'LED_DMA', 'LED_INVERT', 'LED_BRIGHTNESS', 'LED_CHANNEL', 'LED_GAMMA'], {}), '(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT,\n LED_BRIGHTNESS, LED_CHANNEL, LED_GAMMA)\n', (5958, 6056), True, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((6193, 6209), 'neopixel.Color', 'Color', (['(127)', '(0)', '(0)'], {}), '(127, 0, 0)\n', (6198, 6209), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((6253, 6269), 'neopixel.Color', 'Color', (['(0)', '(127)', '(0)'], {}), '(0, 127, 0)\n', (6258, 6269), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((6315, 6331), 'neopixel.Color', 'Color', (['(0)', '(0)', '(127)'], {}), '(0, 0, 127)\n', (6320, 6331), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((6376, 6390), 'neopixel.Color', 'Color', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6381, 6390), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((2613, 2641), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (2623, 2641), False, 'import time\n'), ((3153, 3181), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (3163, 3181), False, 'import time\n'), ((3720, 3748), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (3730, 3748), False, 'import time\n'), ((4400, 4428), 'time.sleep', 'time.sleep', (['(wait_ms / 1000.0)'], {}), '(wait_ms / 1000.0)\n', (4410, 4428), False, 'import time\n'), ((4821, 4853), 'neopixel.Color', 'Color', (['(255 - pos * 3)', '(0)', '(pos * 3)'], {}), '(255 - pos * 3, 0, pos * 3)\n', (4826, 4853), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((4898, 4930), 'neopixel.Color', 'Color', (['(0)', '(pos * 3)', '(255 - pos * 3)'], {}), '(0, pos * 3, 255 - pos * 3)\n', (4903, 4930), False, 'from neopixel import Adafruit_NeoPixel as PixelStrip, Color\n'), ((7626, 7637), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7634, 7637), False, 'import sys\n')]
|
#!/usr/bin/env python3
"""A python script to perform watermark embedding/detection
in the wavelet domain."""
# Copyright (C) 2020 by <NAME>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from scipy.io import wavfile
from scipy.signal import windows
import pywt
HOST_SIGNAL_FILE = "bass_half.wav" # 透かし埋め込み先のファイル
WATERMARK_SIGNAL_FILE = "wmed_signal.wav" # 透かしを埋め込んだファイル
PSEUDO_RAND_FILE = 'pseudo_rand.dat' # 疑似乱数列のファイル
WATERMARK_ORIGINAL_FILE = 'watermark_ori.dat' # オリジナルの透かし信号
REP_CODE = True # 繰り返し埋め込みを使う
FRAME_LENGTH = 2048 # フレーム長
CONTROL_STRENGTH = 1000 # 埋め込み強度
OVERLAP = 0.5 # フレーム分析のオーバーラップ率 (固定)
NUM_REPS = 3 # 埋め込みの繰り返し数
WAVELET_BASIS = 'db4'
WAVELET_LEVEL = 3
WAVELET_MODE = 'symmetric'
THRESHOLD = 0.0
def fix(xs):
"""
An emulation of the MATLAB 'fix' function.
Borrowed from https://ideone.com/YjJwOh
"""
# res = [np.floor(e) if e >= 0 else np.ceil(e) for e in xs]
if xs >= 0:
res = np.floor(xs)
else:
res = np.ceil(xs)
return res
def embed():
"""
perform embedding.
"""
# host signal
sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
signal_len = len(host_signal)
# frame shift (hop size)
frame_shift = int(FRAME_LENGTH * (1 - OVERLAP))
# overlap length with the adjacent frame
overlap_length = int(FRAME_LENGTH * OVERLAP)
# total number of bits to embed
embed_nbit = fix((signal_len - overlap_length) / frame_shift)
if REP_CODE:
# effective number of embeddable bits
effective_nbit = np.floor(embed_nbit / NUM_REPS)
embed_nbit = effective_nbit * NUM_REPS
else:
effective_nbit = embed_nbit
# cast to integers
frame_shift = int(frame_shift)
effective_nbit = int(effective_nbit)
embed_nbit = int(embed_nbit)
# create the original watermark signal (a bit string of 0s and 1s)
wmark_original = np.random.randint(2, size=int(effective_nbit))
# save the original watermark signal
with open(WATERMARK_ORIGINAL_FILE, 'w') as f:
for d in wmark_original:
f.write("%d\n" % d)
# extend the watermark signal by repetition
if REP_CODE:
wmark_extended = np.repeat(wmark_original, NUM_REPS)
else:
wmark_extended = wmark_original
# watermark embedding strength
alpha = CONTROL_STRENGTH
# generate the watermarked signal in the wavelet domain
pointer = 0
count = 0
wmed_signal = np.zeros((frame_shift * embed_nbit)) # watermarked signal
prev = np.zeros((FRAME_LENGTH))
for i in range(embed_nbit):
frame = host_signal[pointer: pointer + FRAME_LENGTH]
# compute the wavelet coefficients
coeffs = pywt.wavedec(data=frame, wavelet=WAVELET_BASIS,
level=WAVELET_LEVEL, mode=WAVELET_MODE)
# (adaptive variant) set the embedding strength to the same order of magnitude as the mean:
# coef_size = int(np.log10(np.abs(np.mean(coeffs[0])))) + 1
# alpha = 10 ** coef_size
# embed the watermark bit
if wmark_extended[count] == 1:
coeffs[0] = coeffs[0] - np.mean(coeffs[0]) + alpha
else:
coeffs[0] = coeffs[0] - np.mean(coeffs[0]) - alpha
# reconstruct the frame
wmarked_frame = pywt.waverec(coeffs=coeffs, wavelet=WAVELET_BASIS,
mode=WAVELET_MODE)
# apply a Hann window
wmarked_frame = wmarked_frame * windows.hann(FRAME_LENGTH)
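# overlap-add: blend the tail of the previous windowed frame with the head of the current one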
wmed_signal[frame_shift * i: frame_shift * (i+1)] = \
np.concatenate((prev[frame_shift:FRAME_LENGTH] +
wmarked_frame[0:overlap_length],
wmarked_frame[overlap_length:frame_shift]))
prev = wmarked_frame
count = count + 1
pointer = pointer + frame_shift
# concatenate with the remainder of the host signal
wmed_signal = np.concatenate(
(wmed_signal, host_signal[len(wmed_signal): signal_len]))
# save the watermarked signal as a wav file
wmed_signal = wmed_signal.astype(np.int16) # convert float into integer
wavfile.write(WATERMARK_SIGNAL_FILE, sr, wmed_signal)
def detect():
"""
perform detection.
"""
sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
# open the watermarked audio file
_, eval_signal = wavfile.read(WATERMARK_SIGNAL_FILE)
signal_len = len(eval_signal)
# load the original watermark signal
with open(WATERMARK_ORIGINAL_FILE, 'r') as f:
wmark_original = f.readlines()
wmark_original = np.array([float(w.rstrip()) for w in wmark_original])
# frame shift (hop size)
frame_shift = int(FRAME_LENGTH * (1 - OVERLAP))
# number of embedded bits
embed_nbit = fix((signal_len - FRAME_LENGTH * OVERLAP) / frame_shift)
if REP_CODE:
# effective number of embeddable bits
effective_nbit = np.floor(embed_nbit / NUM_REPS)
embed_nbit = effective_nbit * NUM_REPS
else:
effective_nbit = embed_nbit
frame_shift = int(frame_shift)
effective_nbit = int(effective_nbit)
embed_nbit = int(embed_nbit)
# load the original watermark signal
with open(WATERMARK_ORIGINAL_FILE, 'r') as f:
wmark_original = f.readlines()
wmark_original = np.array([int(w.rstrip()) for w in wmark_original])
# detect the watermark bits
pointer = 0
detected_bit = np.zeros(embed_nbit)
for i in range(embed_nbit):
wmarked_frame = eval_signal[pointer: pointer + FRAME_LENGTH]
# wavelet decomposition
wmarked_coeffs = pywt.wavedec(
data=wmarked_frame, wavelet=WAVELET_BASIS, level=WAVELET_LEVEL,
mode=WAVELET_MODE)
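# decide the bit from the sum of the approximation coefficients (embedding shifted their mean by +/- alpha)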
thres = np.sum(wmarked_coeffs[0])
if thres >= THRESHOLD:
detected_bit[i] = 1
else:
detected_bit[i] = 0
pointer = pointer + frame_shift
if REP_CODE:
count = 0
wmark_recovered = np.zeros(effective_nbit)
for i in range(effective_nbit):
# aggregate the repeated bits (average)
ave = np.sum(detected_bit[count:count + NUM_REPS]) / NUM_REPS
if ave >= 0.5:  # majority
wmark_recovered[i] = 1
else:
wmark_recovered[i] = 0
count = count + NUM_REPS
else:
wmark_recovered = detected_bit
# display the bit error rate
denom = int(np.sum(np.abs(wmark_recovered - wmark_original)))
BER = np.sum(np.abs(wmark_recovered - wmark_original)) / \
effective_nbit * 100
print(f'BER = {BER} % ({denom} / {effective_nbit})')
# display the SNR
SNR = 10 * np.log10(
np.sum(np.square(host_signal.astype(np.float32)))
/ np.sum(np.square(host_signal.astype(np.float32)
- eval_signal.astype(np.float32))))
print(f'SNR = {SNR:.2f} dB')
# display the payload rate in bps
print('BPS = {:.2f} bps'.format(embed_nbit / (len(host_signal) / sr)))
def main():
"""Main routine. """
embed()
detect()
if __name__ == '__main__':
main()
|
[
"numpy.sum",
"numpy.ceil",
"numpy.abs",
"pywt.wavedec",
"numpy.floor",
"numpy.zeros",
"scipy.io.wavfile.write",
"scipy.io.wavfile.read",
"pywt.waverec",
"numpy.mean",
"scipy.signal.windows.hann",
"numpy.concatenate",
"numpy.repeat"
] |
[((1801, 1831), 'scipy.io.wavfile.read', 'wavfile.read', (['HOST_SIGNAL_FILE'], {}), '(HOST_SIGNAL_FILE)\n', (1813, 1831), False, 'from scipy.io import wavfile\n'), ((2944, 2978), 'numpy.zeros', 'np.zeros', (['(frame_shift * embed_nbit)'], {}), '(frame_shift * embed_nbit)\n', (2952, 2978), True, 'import numpy as np\n'), ((3014, 3036), 'numpy.zeros', 'np.zeros', (['FRAME_LENGTH'], {}), '(FRAME_LENGTH)\n', (3022, 3036), True, 'import numpy as np\n'), ((4465, 4518), 'scipy.io.wavfile.write', 'wavfile.write', (['WATERMARK_SIGNAL_FILE', 'sr', 'wmed_signal'], {}), '(WATERMARK_SIGNAL_FILE, sr, wmed_signal)\n', (4478, 4518), False, 'from scipy.io import wavfile\n'), ((4596, 4626), 'scipy.io.wavfile.read', 'wavfile.read', (['HOST_SIGNAL_FILE'], {}), '(HOST_SIGNAL_FILE)\n', (4608, 4626), False, 'from scipy.io import wavfile\n'), ((4672, 4707), 'scipy.io.wavfile.read', 'wavfile.read', (['WATERMARK_SIGNAL_FILE'], {}), '(WATERMARK_SIGNAL_FILE)\n', (4684, 4707), False, 'from scipy.io import wavfile\n'), ((5627, 5647), 'numpy.zeros', 'np.zeros', (['embed_nbit'], {}), '(embed_nbit)\n', (5635, 5647), True, 'import numpy as np\n'), ((1648, 1660), 'numpy.floor', 'np.floor', (['xs'], {}), '(xs)\n', (1656, 1660), True, 'import numpy as np\n'), ((1685, 1696), 'numpy.ceil', 'np.ceil', (['xs'], {}), '(xs)\n', (1692, 1696), True, 'import numpy as np\n'), ((2159, 2190), 'numpy.floor', 'np.floor', (['(embed_nbit / NUM_REPS)'], {}), '(embed_nbit / NUM_REPS)\n', (2167, 2190), True, 'import numpy as np\n'), ((2702, 2737), 'numpy.repeat', 'np.repeat', (['wmark_original', 'NUM_REPS'], {}), '(wmark_original, NUM_REPS)\n', (2711, 2737), True, 'import numpy as np\n'), ((3173, 3265), 'pywt.wavedec', 'pywt.wavedec', ([], {'data': 'frame', 'wavelet': 'WAVELET_BASIS', 'level': 'WAVELET_LEVEL', 'mode': 'WAVELET_MODE'}), '(data=frame, wavelet=WAVELET_BASIS, level=WAVELET_LEVEL, mode=\n WAVELET_MODE)\n', (3185, 3265), False, 'import pywt\n'), ((3678, 3747), 'pywt.waverec', 'pywt.waverec', ([], {'coeffs': 'coeffs', 'wavelet': 'WAVELET_BASIS', 'mode': 'WAVELET_MODE'}), '(coeffs=coeffs, wavelet=WAVELET_BASIS, mode=WAVELET_MODE)\n', (3690, 3747), False, 'import pywt\n'), ((3958, 4088), 'numpy.concatenate', 'np.concatenate', (['(prev[frame_shift:FRAME_LENGTH] + wmarked_frame[0:overlap_length],\n wmarked_frame[overlap_length:frame_shift])'], {}), '((prev[frame_shift:FRAME_LENGTH] + wmarked_frame[0:\n overlap_length], wmarked_frame[overlap_length:frame_shift]))\n', (3972, 4088), True, 'import numpy as np\n'), ((5155, 5186), 'numpy.floor', 'np.floor', (['(embed_nbit / NUM_REPS)'], {}), '(embed_nbit / NUM_REPS)\n', (5163, 5186), True, 'import numpy as np\n'), ((5807, 5906), 'pywt.wavedec', 'pywt.wavedec', ([], {'data': 'wmarked_frame', 'wavelet': 'WAVELET_BASIS', 'level': 'WAVELET_LEVEL', 'mode': 'WAVELET_MODE'}), '(data=wmarked_frame, wavelet=WAVELET_BASIS, level=WAVELET_LEVEL,\n mode=WAVELET_MODE)\n', (5819, 5906), False, 'import pywt\n'), ((5945, 5970), 'numpy.sum', 'np.sum', (['wmarked_coeffs[0]'], {}), '(wmarked_coeffs[0])\n', (5951, 5970), True, 'import numpy as np\n'), ((6184, 6208), 'numpy.zeros', 'np.zeros', (['effective_nbit'], {}), '(effective_nbit)\n', (6192, 6208), True, 'import numpy as np\n'), ((3856, 3882), 'scipy.signal.windows.hann', 'windows.hann', (['FRAME_LENGTH'], {}), '(FRAME_LENGTH)\n', (3868, 3882), False, 'from scipy.signal import windows\n'), ((6616, 6656), 'numpy.abs', 'np.abs', (['(wmark_recovered - wmark_original)'], {}), '(wmark_recovered - wmark_original)\n', (6622, 6656), True, 'import numpy as np\n'), 
((6295, 6339), 'numpy.sum', 'np.sum', (['detected_bit[count:count + NUM_REPS]'], {}), '(detected_bit[count:count + NUM_REPS])\n', (6301, 6339), True, 'import numpy as np\n'), ((6676, 6716), 'numpy.abs', 'np.abs', (['(wmark_recovered - wmark_original)'], {}), '(wmark_recovered - wmark_original)\n', (6682, 6716), True, 'import numpy as np\n'), ((3535, 3553), 'numpy.mean', 'np.mean', (['coeffs[0]'], {}), '(coeffs[0])\n', (3542, 3553), True, 'import numpy as np\n'), ((3612, 3630), 'numpy.mean', 'np.mean', (['coeffs[0]'], {}), '(coeffs[0])\n', (3619, 3630), True, 'import numpy as np\n')]
|
import torch.nn as nn
import spaghettini
from spaghettini import register, quick_register, load, check
quick_register(nn.Linear)
register("relu")(nn.ReLU)
quick_register(nn.Sequential)
print(check())
net = load("assets/pytorch.yaml")
print(net)
|
[
"spaghettini.quick_register",
"spaghettini.load",
"spaghettini.check",
"spaghettini.register"
] |
[((104, 129), 'spaghettini.quick_register', 'quick_register', (['nn.Linear'], {}), '(nn.Linear)\n', (118, 129), False, 'from spaghettini import register, quick_register, load, check\n'), ((156, 185), 'spaghettini.quick_register', 'quick_register', (['nn.Sequential'], {}), '(nn.Sequential)\n', (170, 185), False, 'from spaghettini import register, quick_register, load, check\n'), ((208, 235), 'spaghettini.load', 'load', (['"""assets/pytorch.yaml"""'], {}), "('assets/pytorch.yaml')\n", (212, 235), False, 'from spaghettini import register, quick_register, load, check\n'), ((130, 146), 'spaghettini.register', 'register', (['"""relu"""'], {}), "('relu')\n", (138, 146), False, 'from spaghettini import register, quick_register, load, check\n'), ((192, 199), 'spaghettini.check', 'check', ([], {}), '()\n', (197, 199), False, 'from spaghettini import register, quick_register, load, check\n')]
|
import argparse
import logging
from flowlib import flow_pb2
from flowlib.flowd_utils import get_flowd_connection
__help__ = 'force health check probe on all workflows (default) or specified workflow ID\'s'
def __refine_args__(parser: argparse.ArgumentParser):
    parser.add_argument(
        '-o',
        '--output',
        action='store_true',
        help='Output response data to stdout.'
    )
    parser.add_argument(
        'ids',
        nargs='*',
        type=str,
        help='Specific workflow deployment ID\'s to probe.'
    )
    return parser
def probe_action(namespace: argparse.Namespace, *args, **kws):
    response = None
    with get_flowd_connection(namespace.flowd_host, namespace.flowd_port) as flowd:
        request = flow_pb2.ProbeRequest(
            ids=namespace.ids
        )
        response = flowd.ProbeWorkflow(request)
    status = response.status
    if status < 0:
        logging.error(
            f'Error from server: {response.status}, "{response.message}"'
        )
    else:
        logging.info(
            f'Got response: {response.status}, "{response.message}", {response.data}'
        )
        if namespace.output:
            print(response.data)
    return status
|
[
"flowlib.flowd_utils.get_flowd_connection",
"logging.info",
"flowlib.flow_pb2.ProbeRequest",
"logging.error"
] |
[((662, 726), 'flowlib.flowd_utils.get_flowd_connection', 'get_flowd_connection', (['namespace.flowd_host', 'namespace.flowd_port'], {}), '(namespace.flowd_host, namespace.flowd_port)\n', (682, 726), False, 'from flowlib.flowd_utils import get_flowd_connection\n'), ((755, 795), 'flowlib.flow_pb2.ProbeRequest', 'flow_pb2.ProbeRequest', ([], {'ids': 'namespace.ids'}), '(ids=namespace.ids)\n', (776, 795), False, 'from flowlib import flow_pb2\n'), ((922, 998), 'logging.error', 'logging.error', (['f"""Error from server: {response.status}, "{response.message}\\""""'], {}), '(f\'Error from server: {response.status}, "{response.message}"\')\n', (935, 998), False, 'import logging\n'), ((1039, 1131), 'logging.info', 'logging.info', (['f"""Got response: {response.status}, "{response.message}", {response.data}"""'], {}), '(\n f\'Got response: {response.status}, "{response.message}", {response.data}\')\n', (1051, 1131), False, 'import logging\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tools/targets_from_recon_ng.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import csv
import os
import random
import re
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import king_phisher.color as color
import king_phisher.version as version
PROG_DESCRIPTION = """King Phisher Recon-ng CSV Converter
This tool is used to convert the output from the recon-ng reporting/csv module
to a CSV file for use with King Phisher.
"""
PROG_EPILOG = """The format string uses Python's native .format syntax.
Format string examples:
first initial followed by the last name (default)
{first:.1}{last}
first name dot last name
{first}.{last}
"""
RECON_NG_CONTACTS_TABLE = (
'first_name',
'middle_name',
'last_name',
'email',
'title',
'region',
'country',
'module'
)
def main():
parser = argparse.ArgumentParser(
conflict_handler='resolve',
description=PROG_DESCRIPTION,
epilog=PROG_EPILOG,
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-f', '--format', dest='email_format', default='{first:.1}{last}', help='the email format string to use')
parser.add_argument('-n', '--number', dest='limit', type=int, help='only process the specified number of contacts')
parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
parser.add_argument('--shuffle', action='store_true', default=False, help='shuffle the contacts to randomize their order')
parser.add_argument('--filter-region', action='store', default=r'.*', help='a regex to use to filter contacts by region')
parser.add_argument('domain', help='the domain to append to emails')
parser.add_argument('in_file', type=argparse.FileType('r'), help='the csv file of contacts from recon-ng')
parser.add_argument('out_file', type=argparse.FileType('w'), help='the target csv file to create for')
arguments = parser.parse_args()
filtered = 0
targets = []
color.print_status('reading contacts from: ' + os.path.abspath(arguments.in_file.name))
for row in csv.DictReader(arguments.in_file, fieldnames=RECON_NG_CONTACTS_TABLE):
if re.match(arguments.filter_region, row['region']) is None:
filtered += 1
continue
targets.append((row['first_name'], row['last_name']))
arguments.in_file.close()
color.print_status("read in {0:,} contacts from recon-ng csv output".format(len(targets) + filtered))
if filtered:
color.print_status(" {0:,} contacts filtered".format(filtered))
color.print_status(" {0:,} contacts remain".format(len(targets)))
if arguments.shuffle:
random.shuffle(targets)
color.print_status('shuffled the list of contacts')
color.print_status('writing the results to: ' + os.path.abspath(arguments.out_file.name))
writer = csv.writer(arguments.out_file)
for first_name, last_name in targets[:arguments.limit]:
email_address = arguments.email_format.format(first=first_name.lower(), last=last_name.lower())
email_address += '@' + arguments.domain
writer.writerow([first_name, last_name, email_address])
arguments.out_file.close()
if __name__ == '__main__':
sys.exit(main())
|
[
"os.path.abspath",
"csv.writer",
"argparse.ArgumentParser",
"csv.DictReader",
"random.shuffle",
"os.path.dirname",
"re.match",
"king_phisher.color.print_status",
"argparse.FileType"
] |
[((2372, 2530), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'conflict_handler': '"""resolve"""', 'description': 'PROG_DESCRIPTION', 'epilog': 'PROG_EPILOG', 'formatter_class': 'argparse.RawTextHelpFormatter'}), "(conflict_handler='resolve', description=\n PROG_DESCRIPTION, epilog=PROG_EPILOG, formatter_class=argparse.\n RawTextHelpFormatter)\n", (2395, 2530), False, 'import argparse\n'), ((3580, 3649), 'csv.DictReader', 'csv.DictReader', (['arguments.in_file'], {'fieldnames': 'RECON_NG_CONTACTS_TABLE'}), '(arguments.in_file, fieldnames=RECON_NG_CONTACTS_TABLE)\n', (3594, 3649), False, 'import csv\n'), ((4286, 4316), 'csv.writer', 'csv.writer', (['arguments.out_file'], {}), '(arguments.out_file)\n', (4296, 4316), False, 'import csv\n'), ((4106, 4129), 'random.shuffle', 'random.shuffle', (['targets'], {}), '(targets)\n', (4120, 4129), False, 'import random\n'), ((4132, 4183), 'king_phisher.color.print_status', 'color.print_status', (['"""shuffled the list of contacts"""'], {}), "('shuffled the list of contacts')\n", (4150, 4183), True, 'import king_phisher.color as color\n'), ((1705, 1730), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1720, 1730), False, 'import os\n'), ((3242, 3264), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (3259, 3264), False, 'import argparse\n'), ((3351, 3373), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (3368, 3373), False, 'import argparse\n'), ((3527, 3566), 'os.path.abspath', 'os.path.abspath', (['arguments.in_file.name'], {}), '(arguments.in_file.name)\n', (3542, 3566), False, 'import os\n'), ((3656, 3704), 're.match', 're.match', (['arguments.filter_region', "row['region']"], {}), "(arguments.filter_region, row['region'])\n", (3664, 3704), False, 'import re\n'), ((4234, 4274), 'os.path.abspath', 'os.path.abspath', (['arguments.out_file.name'], {}), '(arguments.out_file.name)\n', (4249, 4274), False, 'import os\n')]
|
'''latlong.py - simple command line tool to generate a random destination from
starting coordinates
Warning: You might have to swim
'''
import math
import random
import sys
EARTH_RADIUS = 6378.1
MIN_DIST = 1
MAX_DIST = 16 # destination radius in KM
def plot_location(latitude, longitude, bearing, distance):
'''Plot a new location based on starting point, bearing and distance'''
bearing_rad = math.radians(bearing)
lat1 = math.radians(latitude)
lon1 = math.radians(longitude)
d_over_r = distance/EARTH_RADIUS
lat2 = math.asin(math.sin(lat1)*math.cos(d_over_r) +
math.cos(lat1)*math.sin(d_over_r)*math.cos(bearing_rad))
lon2 = lon1 + math.atan2(math.sin(bearing_rad)*math.sin(d_over_r)*math.cos(lat1),
math.cos(d_over_r)-math.sin(lat1)*math.sin(lat2))
lat2 = round(math.degrees(lat2), 6)
lon2 = round(math.degrees(lon2), 6)
return [lat2,lon2]
def get_random_location(latitude, longitude, radius_km):
'''Return coordinates for a random location based on starting point and radius'''
# get random destination and distance
bearing = random.randint(0,360)
distance_km = round(radius_km * random.random(),3)
print(f"Bearing: {str(bearing)}, Distance (km): {str(distance_km)}")
# calculate the new latitude and longitude
return plot_location(latitude, longitude, bearing, distance_km)
def main():
'''Get a location and find a random destination within 10 miles'''
if len(sys.argv) != 2:
exit("Expecting a lat,lon argument on command line")
coord_list = sys.argv[1].split(',')
latitude = float(coord_list[0])
longitude = float(coord_list[1])
new_coords = get_random_location(latitude, longitude, MAX_DIST)
print(f"New coords: {str(new_coords[0])},{str(new_coords[1])}")
if __name__ == "__main__":
main()
|
[
"random.randint",
"math.radians",
"math.sin",
"random.random",
"math.cos",
"math.degrees"
] |
[((417, 438), 'math.radians', 'math.radians', (['bearing'], {}), '(bearing)\n', (429, 438), False, 'import math\n'), ((452, 474), 'math.radians', 'math.radians', (['latitude'], {}), '(latitude)\n', (464, 474), False, 'import math\n'), ((486, 509), 'math.radians', 'math.radians', (['longitude'], {}), '(longitude)\n', (498, 509), False, 'import math\n'), ((1130, 1152), 'random.randint', 'random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (1144, 1152), False, 'import random\n'), ((842, 860), 'math.degrees', 'math.degrees', (['lat2'], {}), '(lat2)\n', (854, 860), False, 'import math\n'), ((882, 900), 'math.degrees', 'math.degrees', (['lon2'], {}), '(lon2)\n', (894, 900), False, 'import math\n'), ((1188, 1203), 'random.random', 'random.random', ([], {}), '()\n', (1201, 1203), False, 'import random\n'), ((570, 584), 'math.sin', 'math.sin', (['lat1'], {}), '(lat1)\n', (578, 584), False, 'import math\n'), ((585, 603), 'math.cos', 'math.cos', (['d_over_r'], {}), '(d_over_r)\n', (593, 603), False, 'import math\n'), ((648, 669), 'math.cos', 'math.cos', (['bearing_rad'], {}), '(bearing_rad)\n', (656, 669), False, 'import math\n'), ((742, 756), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (750, 756), False, 'import math\n'), ((774, 792), 'math.cos', 'math.cos', (['d_over_r'], {}), '(d_over_r)\n', (782, 792), False, 'import math\n'), ((614, 628), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (622, 628), False, 'import math\n'), ((629, 647), 'math.sin', 'math.sin', (['d_over_r'], {}), '(d_over_r)\n', (637, 647), False, 'import math\n'), ((701, 722), 'math.sin', 'math.sin', (['bearing_rad'], {}), '(bearing_rad)\n', (709, 722), False, 'import math\n'), ((723, 741), 'math.sin', 'math.sin', (['d_over_r'], {}), '(d_over_r)\n', (731, 741), False, 'import math\n'), ((793, 807), 'math.sin', 'math.sin', (['lat1'], {}), '(lat1)\n', (801, 807), False, 'import math\n'), ((808, 822), 'math.sin', 'math.sin', (['lat2'], {}), '(lat2)\n', (816, 822), False, 'import math\n')]
|
import json
import os
import shutil
import tempfile
import unittest
import ayeaye
PROJECT_TEST_PATH = os.path.dirname(os.path.abspath(__file__))
EXAMPLE_CSV_PATH = os.path.join(PROJECT_TEST_PATH, 'data', 'deadly_creatures.csv')
class FakeModel(ayeaye.Model):
animals = ayeaye.Connect(engine_url=f"csv://{EXAMPLE_CSV_PATH}")
def build(self):
for a in self.animals:
self.log(a.common_name)
class TestModels(unittest.TestCase):
def setUp(self):
self._working_directory = None
def tearDown(self):
if self._working_directory and os.path.isdir(self._working_directory):
shutil.rmtree(self._working_directory)
def working_directory(self):
self._working_directory = tempfile.mkdtemp()
return self._working_directory
def test_go_closes_dataset_connections(self):
m = FakeModel()
m.log_to_stdout = False
m.go()
self.assertEqual(None, m.animals.file_handle, "File handle should be closed")
def test_double_usage(self):
"""
Use Connect as a callable to make a second iterable. Make the cartesian product to
demonstrate they are independent.
"""
class AnimalsModel(ayeaye.Model):
animals_a = ayeaye.Connect(engine_url="csv://" + EXAMPLE_CSV_PATH)
animals_b = animals_a.clone()
animals_output = ayeaye.Connect(access=ayeaye.AccessMode.WRITE)
def build(self):
cartesian = []
for a in self.animals_a:
for b in self.animals_b:
cartesian.append(f"{a.common_name}_{b.common_name}")
self.animals_output.data = cartesian
self.assertNotEqual(id(AnimalsModel.animals_a), id(AnimalsModel.animals_b))
m = AnimalsModel()
output_file = "{}/animals_summary.json".format(self.working_directory())
m.animals_output.update(engine_url=f"json://{output_file};indent=4")
output_encoding = m.animals_output.encoding
m.go()
with open(output_file, 'r', encoding=output_encoding) as f:
output_data = json.load(f)
expected_data = ['Crown of thorns starfish_Crown of thorns starfish',
'Crown of thorns starfish_Golden dart frog'
]
self.assertEqual(expected_data, output_data)
|
[
"os.path.abspath",
"json.load",
"os.path.isdir",
"tempfile.mkdtemp",
"ayeaye.Connect",
"shutil.rmtree",
"os.path.join"
] |
[((166, 229), 'os.path.join', 'os.path.join', (['PROJECT_TEST_PATH', '"""data"""', '"""deadly_creatures.csv"""'], {}), "(PROJECT_TEST_PATH, 'data', 'deadly_creatures.csv')\n", (178, 229), False, 'import os\n'), ((120, 145), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (135, 145), False, 'import os\n'), ((277, 331), 'ayeaye.Connect', 'ayeaye.Connect', ([], {'engine_url': 'f"""csv://{EXAMPLE_CSV_PATH}"""'}), "(engine_url=f'csv://{EXAMPLE_CSV_PATH}')\n", (291, 331), False, 'import ayeaye\n'), ((744, 762), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (760, 762), False, 'import tempfile\n'), ((585, 623), 'os.path.isdir', 'os.path.isdir', (['self._working_directory'], {}), '(self._working_directory)\n', (598, 623), False, 'import os\n'), ((637, 675), 'shutil.rmtree', 'shutil.rmtree', (['self._working_directory'], {}), '(self._working_directory)\n', (650, 675), False, 'import shutil\n'), ((1269, 1323), 'ayeaye.Connect', 'ayeaye.Connect', ([], {'engine_url': "('csv://' + EXAMPLE_CSV_PATH)"}), "(engine_url='csv://' + EXAMPLE_CSV_PATH)\n", (1283, 1323), False, 'import ayeaye\n'), ((1395, 1441), 'ayeaye.Connect', 'ayeaye.Connect', ([], {'access': 'ayeaye.AccessMode.WRITE'}), '(access=ayeaye.AccessMode.WRITE)\n', (1409, 1441), False, 'import ayeaye\n'), ((2154, 2166), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2163, 2166), False, 'import json\n')]
|
# Copyright 2014 - Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy
from solum.common import exception
from solum.objects import plan as abstract
from solum.objects.sqlalchemy import models as sql
from solum.openstack.common.db.sqlalchemy import session as db_session
class Plan(sql.Base, abstract.Plan):
"""Represent a plan in sqlalchemy."""
__tablename__ = 'plan'
__table_args__ = sql.table_args()
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True,
autoincrement=True)
uuid = sqlalchemy.Column(sqlalchemy.String(36))
project_id = sqlalchemy.Column(sqlalchemy.String(36))
user_id = sqlalchemy.Column(sqlalchemy.String(36))
glance_id = sqlalchemy.Column(sqlalchemy.String(36))
name = sqlalchemy.Column(sqlalchemy.String(255))
raw_content = sqlalchemy.Column(sqlalchemy.Text)
@classmethod
def _raise_duplicate_object(cls, e, self):
raise exception.PlanExists()
@classmethod
def get_by_uuid(cls, context, item_uuid):
query = db_session.get_session().query(cls).filter_by(uuid=item_uuid)
result = query.first()
if not result:
cls._raise_not_found(item_uuid)
return result
class PlanList(abstract.PlanList):
"""Represent a list of plans in sqlalchemy."""
@classmethod
def get_all(cls, context):
return PlanList(sql.model_query(context, Plan))
|
[
"solum.common.exception.PlanExists",
"solum.openstack.common.db.sqlalchemy.session.get_session",
"solum.objects.sqlalchemy.models.table_args",
"solum.objects.sqlalchemy.models.model_query",
"sqlalchemy.Column",
"sqlalchemy.String"
] |
[((931, 947), 'solum.objects.sqlalchemy.models.table_args', 'sql.table_args', ([], {}), '()\n', (945, 947), True, 'from solum.objects.sqlalchemy import models as sql\n'), ((958, 1033), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(sqlalchemy.Integer, primary_key=True, autoincrement=True)\n', (975, 1033), False, 'import sqlalchemy\n'), ((1354, 1388), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.Text'], {}), '(sqlalchemy.Text)\n', (1371, 1388), False, 'import sqlalchemy\n'), ((1090, 1111), 'sqlalchemy.String', 'sqlalchemy.String', (['(36)'], {}), '(36)\n', (1107, 1111), False, 'import sqlalchemy\n'), ((1148, 1169), 'sqlalchemy.String', 'sqlalchemy.String', (['(36)'], {}), '(36)\n', (1165, 1169), False, 'import sqlalchemy\n'), ((1203, 1224), 'sqlalchemy.String', 'sqlalchemy.String', (['(36)'], {}), '(36)\n', (1220, 1224), False, 'import sqlalchemy\n'), ((1260, 1281), 'sqlalchemy.String', 'sqlalchemy.String', (['(36)'], {}), '(36)\n', (1277, 1281), False, 'import sqlalchemy\n'), ((1312, 1334), 'sqlalchemy.String', 'sqlalchemy.String', (['(255)'], {}), '(255)\n', (1329, 1334), False, 'import sqlalchemy\n'), ((1468, 1490), 'solum.common.exception.PlanExists', 'exception.PlanExists', ([], {}), '()\n', (1488, 1490), False, 'from solum.common import exception\n'), ((1914, 1944), 'solum.objects.sqlalchemy.models.model_query', 'sql.model_query', (['context', 'Plan'], {}), '(context, Plan)\n', (1929, 1944), True, 'from solum.objects.sqlalchemy import models as sql\n'), ((1571, 1595), 'solum.openstack.common.db.sqlalchemy.session.get_session', 'db_session.get_session', ([], {}), '()\n', (1593, 1595), True, 'from solum.openstack.common.db.sqlalchemy import session as db_session\n')]
|
import abc
import os
from smartva.data_prep import Prep
class GrapherPrep(Prep):
__metaclass__ = abc.ABCMeta
def __init__(self, working_dir_path):
super(GrapherPrep, self).__init__(working_dir_path)
self.output_dir_path = os.path.join(self.input_dir_path, 'figures')
def run(self):
super(GrapherPrep, self).run()
self._update_status()
graph_data = self._read_graph_data()
self._make_graphs(graph_data)
@abc.abstractmethod
def _update_status(self):
pass
@abc.abstractmethod
def _read_graph_data(self):
pass
@abc.abstractmethod
def _make_graphs(self, graph_data):
pass
|
[
"os.path.join"
] |
[((250, 294), 'os.path.join', 'os.path.join', (['self.input_dir_path', '"""figures"""'], {}), "(self.input_dir_path, 'figures')\n", (262, 294), False, 'import os\n')]
|
import pytest
from arc import CLI, Context, errors, callback
class CallbackException(Exception):
"""Used to assert that callbacks are actually running"""
def __init__(self, ctx: Context, **kwargs):
self.ctx = ctx
self.kwargs = kwargs
def test_execute(cli: CLI):
@callback.create()
def cb(_args, ctx):
raise CallbackException(ctx)
@cb
@cli.command()
def test():
return 10
with pytest.raises(CallbackException):
cli("test")
cb.remove(test)
assert cli("test") == 10
def test_exception(cli: CLI):
@callback.create()
def cb(_args, ctx):
try:
yield
except errors.ExecutionError:
raise CallbackException(ctx)
@cb
@cli.command()
def test():
raise errors.ExecutionError()
with pytest.raises(CallbackException):
cli("test")
def test_final(cli: CLI):
@callback.create()
def cb(_args, ctx):
try:
yield
finally:
raise CallbackException(ctx)
@cb
@cli.command()
def test1():
raise errors.ExecutionError()
@cb
@cli.command()
def test2():
...
with pytest.raises(CallbackException):
cli("test1")
with pytest.raises(CallbackException):
cli("test2")
def test_inheritance(cli: CLI):
@callback.create()
def cb(_args, ctx):
raise CallbackException(ctx)
@callback.create(inherit=False)
def cb2(_args, ctx):
raise CallbackException(ctx)
@cb
@cb2
@cli.subcommand()
def command():
...
@command.subcommand()
def sub1():
...
assert cb in sub1.callbacks
assert cb2 not in sub1.callbacks
with pytest.raises(CallbackException):
cli("command:sub1")
@callback.remove(cb)
@command.subcommand()
def sub2():
...
assert cb not in sub2.callbacks
cli("command:sub2")
@cb.remove
@command.subcommand()
def sub3():
...
assert cb not in sub3.callbacks
cli("command:sub3")
def test_missing_yield(cli: CLI):
@callback.create()
def cb(_args, ctx):
...
@cb
@cli.subcommand()
def command():
...
with pytest.raises(errors.CallbackError):
cli("command")
def test_callback_alias(cli: CLI):
@cli.callback()
def cb(_args, ctx):
raise CallbackException(ctx)
@cli.command()
def command():
...
with pytest.raises(CallbackException):
cli("command")
|
[
"pytest.raises",
"arc.callback.create",
"arc.callback.remove",
"arc.errors.ExecutionError"
] |
[((296, 313), 'arc.callback.create', 'callback.create', ([], {}), '()\n', (311, 313), False, 'from arc import CLI, Context, errors, callback\n'), ((588, 605), 'arc.callback.create', 'callback.create', ([], {}), '()\n', (603, 605), False, 'from arc import CLI, Context, errors, callback\n'), ((919, 936), 'arc.callback.create', 'callback.create', ([], {}), '()\n', (934, 936), False, 'from arc import CLI, Context, errors, callback\n'), ((1359, 1376), 'arc.callback.create', 'callback.create', ([], {}), '()\n', (1374, 1376), False, 'from arc import CLI, Context, errors, callback\n'), ((1444, 1474), 'arc.callback.create', 'callback.create', ([], {'inherit': '(False)'}), '(inherit=False)\n', (1459, 1474), False, 'from arc import CLI, Context, errors, callback\n'), ((1810, 1829), 'arc.callback.remove', 'callback.remove', (['cb'], {}), '(cb)\n', (1825, 1829), False, 'from arc import CLI, Context, errors, callback\n'), ((2117, 2134), 'arc.callback.create', 'callback.create', ([], {}), '()\n', (2132, 2134), False, 'from arc import CLI, Context, errors, callback\n'), ((447, 479), 'pytest.raises', 'pytest.raises', (['CallbackException'], {}), '(CallbackException)\n', (460, 479), False, 'import pytest\n'), ((798, 821), 'arc.errors.ExecutionError', 'errors.ExecutionError', ([], {}), '()\n', (819, 821), False, 'from arc import CLI, Context, errors, callback\n'), ((832, 864), 'pytest.raises', 'pytest.raises', (['CallbackException'], {}), '(CallbackException)\n', (845, 864), False, 'import pytest\n'), ((1109, 1132), 'arc.errors.ExecutionError', 'errors.ExecutionError', ([], {}), '()\n', (1130, 1132), False, 'from arc import CLI, Context, errors, callback\n'), ((1200, 1232), 'pytest.raises', 'pytest.raises', (['CallbackException'], {}), '(CallbackException)\n', (1213, 1232), False, 'import pytest\n'), ((1265, 1297), 'pytest.raises', 'pytest.raises', (['CallbackException'], {}), '(CallbackException)\n', (1278, 1297), False, 'import pytest\n'), ((1742, 1774), 'pytest.raises', 'pytest.raises', (['CallbackException'], {}), '(CallbackException)\n', (1755, 1774), False, 'import pytest\n'), ((2243, 2278), 'pytest.raises', 'pytest.raises', (['errors.CallbackError'], {}), '(errors.CallbackError)\n', (2256, 2278), False, 'import pytest\n'), ((2482, 2514), 'pytest.raises', 'pytest.raises', (['CallbackException'], {}), '(CallbackException)\n', (2495, 2514), False, 'import pytest\n')]
|
import os
import json
import copy
import pyblish.api
class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"""Collect ftrack component data (not integrate yet).
Add ftrack component list to instance.
"""
order = pyblish.api.IntegratorOrder + 0.48
label = "Integrate Ftrack Component"
families = ["ftrack"]
family_mapping = {
"camera": "cam",
"look": "look",
"mayaascii": "scene",
"model": "geo",
"rig": "rig",
"setdress": "setdress",
"pointcache": "cache",
"render": "render",
"render2d": "render",
"nukescript": "comp",
"write": "render",
"review": "mov",
"plate": "img",
"audio": "audio",
"workfile": "scene",
"animation": "cache",
"image": "img",
"reference": "reference"
}
def process(self, instance):
self.log.debug("instance {}".format(instance))
instance_version = instance.data.get("version")
if instance_version is None:
raise ValueError("Instance version not set")
version_number = int(instance_version)
family = instance.data["family"]
family_low = instance.data["family"].lower()
asset_type = instance.data.get("ftrackFamily")
if not asset_type and family_low in self.family_mapping:
asset_type = self.family_mapping[family_low]
        # Ignore this instance if neither "ftrackFamily" nor a family mapping is
        # found.
if not asset_type:
self.log.info((
"Family \"{}\" does not match any asset type mapping"
).format(family))
return
instance_repres = instance.data.get("representations")
if not instance_repres:
self.log.info((
"Skipping instance. Does not have any representations {}"
).format(str(instance)))
return
# Prepare FPS
instance_fps = instance.data.get("fps")
if instance_fps is None:
instance_fps = instance.context.data["fps"]
# Base of component item data
# - create a copy of this object when want to use it
base_component_item = {
"assettype_data": {
"short": asset_type,
},
"asset_data": {
"name": instance.data["subset"],
},
"assetversion_data": {
"version": version_number,
"comment": instance.context.data.get("comment") or ""
},
"component_overwrite": False,
# This can be change optionally
"thumbnail": False,
# These must be changed for each component
"component_data": None,
"component_path": None,
"component_location": None
}
ft_session = instance.context.data["ftrackSession"]
# Filter types of representations
review_representations = []
thumbnail_representations = []
other_representations = []
for repre in instance_repres:
self.log.debug("Representation {}".format(repre))
repre_tags = repre.get("tags") or []
if repre.get("thumbnail") or "thumbnail" in repre_tags:
thumbnail_representations.append(repre)
elif "ftrackreview" in repre_tags:
review_representations.append(repre)
else:
other_representations.append(repre)
# Prepare ftrack locations
unmanaged_location = ft_session.query(
"Location where name is \"ftrack.unmanaged\""
).one()
ftrack_server_location = ft_session.query(
"Location where name is \"ftrack.server\""
).one()
# Components data
component_list = []
# Components that will be duplicated to unmanaged location
src_components_to_add = []
# Create thumbnail components
        # TODO what if there are multiple thumbnails?
first_thumbnail_component = None
for repre in thumbnail_representations:
published_path = repre.get("published_path")
if not published_path:
comp_files = repre["files"]
if isinstance(comp_files, (tuple, list, set)):
filename = comp_files[0]
else:
filename = comp_files
published_path = os.path.join(
repre["stagingDir"], filename
)
if not os.path.exists(published_path):
continue
repre["published_path"] = published_path
# Create copy of base comp item and append it
thumbnail_item = copy.deepcopy(base_component_item)
thumbnail_item["component_path"] = repre["published_path"]
thumbnail_item["component_data"] = {
"name": "thumbnail"
}
thumbnail_item["thumbnail"] = True
# Create copy of item before setting location
src_components_to_add.append(copy.deepcopy(thumbnail_item))
# Create copy of first thumbnail
if first_thumbnail_component is None:
first_thumbnail_component = copy.deepcopy(thumbnail_item)
# Set location
thumbnail_item["component_location"] = ftrack_server_location
# Add item to component list
component_list.append(thumbnail_item)
# Create review components
# Change asset name of each new component for review
is_first_review_repre = True
not_first_components = []
for repre in review_representations:
frame_start = repre.get("frameStartFtrack")
frame_end = repre.get("frameEndFtrack")
if frame_start is None or frame_end is None:
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
# Frame end of uploaded video file should be duration in frames
# - frame start is always 0
# - frame end is duration in frames
duration = frame_end - frame_start + 1
fps = repre.get("fps")
if fps is None:
fps = instance_fps
# Create copy of base comp item and append it
review_item = copy.deepcopy(base_component_item)
# Change location
review_item["component_path"] = repre["published_path"]
# Change component data
review_item["component_data"] = {
# Default component name is "main".
"name": "ftrackreview-mp4",
"metadata": {
"ftr_meta": json.dumps({
"frameIn": 0,
"frameOut": int(duration),
"frameRate": float(fps)
})
}
}
# Create copy of item before setting location or changing asset
src_components_to_add.append(copy.deepcopy(review_item))
if is_first_review_repre:
is_first_review_repre = False
else:
# Add representation name to asset name of "not first" review
asset_name = review_item["asset_data"]["name"]
review_item["asset_data"]["name"] = "_".join(
(asset_name, repre["name"])
)
not_first_components.append(review_item)
# Set location
review_item["component_location"] = ftrack_server_location
# Add item to component list
component_list.append(review_item)
# Duplicate thumbnail component for all not first reviews
if first_thumbnail_component is not None:
for component_item in not_first_components:
asset_name = component_item["asset_data"]["name"]
new_thumbnail_component = copy.deepcopy(
first_thumbnail_component
)
new_thumbnail_component["asset_data"]["name"] = asset_name
new_thumbnail_component["component_location"] = (
ftrack_server_location
)
component_list.append(new_thumbnail_component)
        # Add source components for review and thumbnail components
for copy_src_item in src_components_to_add:
# Make sure thumbnail is disabled
copy_src_item["thumbnail"] = False
# Set location
copy_src_item["component_location"] = unmanaged_location
# Modify name of component to have suffix "_src"
component_data = copy_src_item["component_data"]
component_name = component_data["name"]
component_data["name"] = component_name + "_src"
component_list.append(copy_src_item)
# Add others representations as component
for repre in other_representations:
published_path = repre.get("published_path")
if not published_path:
continue
# Create copy of base comp item and append it
other_item = copy.deepcopy(base_component_item)
other_item["component_data"] = {
"name": repre["name"]
}
other_item["component_location"] = unmanaged_location
other_item["component_path"] = published_path
component_list.append(other_item)
def json_obj_parser(obj):
return str(obj)
self.log.debug("Components list: {}".format(
json.dumps(
component_list,
sort_keys=True,
indent=4,
default=json_obj_parser
)
))
instance.data["ftrackComponentsList"] = component_list
|
[
"copy.deepcopy",
"os.path.exists",
"os.path.join",
"json.dumps"
] |
[((4804, 4838), 'copy.deepcopy', 'copy.deepcopy', (['base_component_item'], {}), '(base_component_item)\n', (4817, 4838), False, 'import copy\n'), ((6437, 6471), 'copy.deepcopy', 'copy.deepcopy', (['base_component_item'], {}), '(base_component_item)\n', (6450, 6471), False, 'import copy\n'), ((9289, 9323), 'copy.deepcopy', 'copy.deepcopy', (['base_component_item'], {}), '(base_component_item)\n', (9302, 9323), False, 'import copy\n'), ((4493, 4536), 'os.path.join', 'os.path.join', (["repre['stagingDir']", 'filename'], {}), "(repre['stagingDir'], filename)\n", (4505, 4536), False, 'import os\n'), ((5155, 5184), 'copy.deepcopy', 'copy.deepcopy', (['thumbnail_item'], {}), '(thumbnail_item)\n', (5168, 5184), False, 'import copy\n'), ((5325, 5354), 'copy.deepcopy', 'copy.deepcopy', (['thumbnail_item'], {}), '(thumbnail_item)\n', (5338, 5354), False, 'import copy\n'), ((7132, 7158), 'copy.deepcopy', 'copy.deepcopy', (['review_item'], {}), '(review_item)\n', (7145, 7158), False, 'import copy\n'), ((8056, 8096), 'copy.deepcopy', 'copy.deepcopy', (['first_thumbnail_component'], {}), '(first_thumbnail_component)\n', (8069, 8096), False, 'import copy\n'), ((9720, 9797), 'json.dumps', 'json.dumps', (['component_list'], {'sort_keys': '(True)', 'indent': '(4)', 'default': 'json_obj_parser'}), '(component_list, sort_keys=True, indent=4, default=json_obj_parser)\n', (9730, 9797), False, 'import json\n'), ((4598, 4628), 'os.path.exists', 'os.path.exists', (['published_path'], {}), '(published_path)\n', (4612, 4628), False, 'import os\n')]
|
import process_operations as po
import module_tableau_materials
def process_entry(processor, txt_file, entry, index):
output_list = ["tab_%s %d %s %d %d %d %d %d %d" % entry[0:9]]
output_list.extend(processor.process_block(entry[9], entry[0]))
output_list.append("\r\n")
txt_file.write("".join(output_list))
export = po.make_export(data=module_tableau_materials.tableaus, data_name="tableau_materials",
tag="tableau", header_format="%d\r\n", process_entry=process_entry)
|
[
"process_operations.make_export"
] |
[((336, 498), 'process_operations.make_export', 'po.make_export', ([], {'data': 'module_tableau_materials.tableaus', 'data_name': '"""tableau_materials"""', 'tag': '"""tableau"""', 'header_format': "'%d\\r\\n'", 'process_entry': 'process_entry'}), "(data=module_tableau_materials.tableaus, data_name=\n 'tableau_materials', tag='tableau', header_format='%d\\r\\n',\n process_entry=process_entry)\n", (350, 498), True, 'import process_operations as po\n')]
|
# Author: Hologram <<EMAIL>>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
# LICENSE: Distributed under the terms of the MIT License
#
# test_Cellular.py - This file implements unit tests for the Cellular class.
import sys
import pytest
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram.Network import Cellular
class TestCellular(object):
def test_invalid_cellular_type(self):
pass
|
[
"sys.path.append"
] |
[((240, 260), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (255, 260), False, 'import sys\n'), ((261, 282), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (276, 282), False, 'import sys\n'), ((283, 307), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (298, 307), False, 'import sys\n')]
|
# Generated by Django 3.0.6 on 2020-05-27 10:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_celery_beat', '0012_periodictask_expire_seconds'),
('silviacontrol', '0004_auto_20180915_1228'),
]
operations = [
migrations.AlterField(
model_name='schedulemodel',
name='name',
field=models.CharField(default='Schedule reinvigoration', max_length=20),
),
migrations.AlterField(
model_name='schedulemodel',
name='schedule_off',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='schedule_off', to='django_celery_beat.PeriodicTask'),
),
migrations.AlterField(
model_name='schedulemodel',
name='schedule_on',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='schedule_on', to='django_celery_beat.PeriodicTask'),
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((448, 514), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Schedule reinvigoration"""', 'max_length': '(20)'}), "(default='Schedule reinvigoration', max_length=20)\n", (464, 514), False, 'from django.db import migrations, models\n'), ((649, 811), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""schedule_off"""', 'to': '"""django_celery_beat.PeriodicTask"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='schedule_off', to=\n 'django_celery_beat.PeriodicTask')\n", (666, 811), False, 'from django.db import migrations, models\n'), ((935, 1096), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""schedule_on"""', 'to': '"""django_celery_beat.PeriodicTask"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='schedule_on', to=\n 'django_celery_beat.PeriodicTask')\n", (952, 1096), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/python
import sys
import json
import requests
import time
import base64
import datetime
baseUrl = 'http://radio.pw-sat.pl'
headers = {'content-type': 'application/json'}
def authenticate(credentials_path):
credentials = loadCredentials(credentials_path)
url = baseUrl+'/api/authenticate'
response = requests.post(url, data=json.dumps(credentials), headers=headers)
return response.cookies;
def putPacket(cookies, full_frame):
url = baseUrl+'/communication/frame'
payload = { 'frame': full_frame[2],
'timestamp': int(time.mktime(datetime.datetime.strptime(full_frame[0],
"%Y-%m-%d_%H:%M:%S:%f").timetuple()))*1000,
'traffic': 'Rx' if full_frame[1] == 'D' else 'Tx'}
response = requests.put(url, data=json.dumps(payload), headers=headers, cookies=cookies)
return response.text;
def loadCredentials(path):
with open(path) as f:
credentials = json.load(f)
return credentials
credentials_path = sys.argv[1]
file_path = sys.argv[2]
with open(file_path, "rb") as f:
for line in f:
full_frame = line.split(',')
print(putPacket(authenticate(credentials_path), full_frame))
|
[
"datetime.datetime.strptime",
"json.load",
"json.dumps"
] |
[((955, 967), 'json.load', 'json.load', (['f'], {}), '(f)\n', (964, 967), False, 'import json\n'), ((353, 376), 'json.dumps', 'json.dumps', (['credentials'], {}), '(credentials)\n', (363, 376), False, 'import json\n'), ((797, 816), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (807, 816), False, 'import json\n'), ((589, 654), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['full_frame[0]', '"""%Y-%m-%d_%H:%M:%S:%f"""'], {}), "(full_frame[0], '%Y-%m-%d_%H:%M:%S:%f')\n", (615, 654), False, 'import datetime\n')]
|
from collections import Counter
class Solution:
def judgeCircle(self, moves: str) -> bool:
counter = Counter(moves)
return counter['U'] == counter['D'] and counter['R'] == counter['L']
|
[
"collections.Counter"
] |
[((115, 129), 'collections.Counter', 'Counter', (['moves'], {}), '(moves)\n', (122, 129), False, 'from collections import Counter\n')]
|
from mutation import AddConnectionMutation,AddNodeMutation,ChangeNodeMutation,ChangeConnectionMutation,ToggleConnectionMutation,ToggleNodeMutation,tests
from gene import ConnectionGene, NodeGene, PseudoGene, tests
from genome import Genome, tests
from phenome import Phenome, tests
from fitness import Fitness, tests
from organism import Organism, tests
from pressure import Pressure, tests
from environment import Environment, tests
print('hello')
env1 = Environment(1, 2)
env1
p1 = Pressure(lambda:True, [], lambda x: x , ['loss'], 10, 'minimize cross entropy loss')
p2 = Pressure(lambda x: x>20 and x%3 == 0, ['generation'], lambda x: -x, ['network-size'], 5, 'decrease network size')
env1.add_pressure(p1)
env1.add_pressure(p2)
env1.generate_initial_population(100)
env1.population
env1.active_pressures
env1.assemble()
env1.active_pressures
env1.size
|
[
"pressure.Pressure",
"environment.Environment"
] |
[((458, 475), 'environment.Environment', 'Environment', (['(1)', '(2)'], {}), '(1, 2)\n', (469, 475), False, 'from environment import Environment, tests\n'), ((486, 575), 'pressure.Pressure', 'Pressure', (['(lambda : True)', '[]', '(lambda x: x)', "['loss']", '(10)', '"""minimize cross entropy loss"""'], {}), "(lambda : True, [], lambda x: x, ['loss'], 10,\n 'minimize cross entropy loss')\n", (494, 575), False, 'from pressure import Pressure, tests\n'), ((576, 698), 'pressure.Pressure', 'Pressure', (['(lambda x: x > 20 and x % 3 == 0)', "['generation']", '(lambda x: -x)', "['network-size']", '(5)', '"""decrease network size"""'], {}), "(lambda x: x > 20 and x % 3 == 0, ['generation'], lambda x: -x, [\n 'network-size'], 5, 'decrease network size')\n", (584, 698), False, 'from pressure import Pressure, tests\n')]
|
import itertools
import json
import os
import tempfile
import pytest
from dagger.dag import DAG
from dagger.input import FromNodeOutput, FromParam
from dagger.output import FromReturnValue
from dagger.runtime.cli.cli import invoke
from dagger.runtime.cli.locations import (
PARTITION_MANIFEST_FILENAME,
store_output_in_location,
)
from dagger.runtime.local import PartitionedOutput
from dagger.serializer import AsPickle
from dagger.task import Task
def test__invoke__whole_dag():
dag = DAG(
nodes=dict(
double=Task(
lambda x: x * 2,
inputs=dict(x=FromParam()),
outputs=dict(x_doubled=FromReturnValue()),
),
square=Task(
lambda x: x ** 2,
inputs=dict(x=FromNodeOutput("double", "x_doubled")),
outputs=dict(x_squared=FromReturnValue()),
),
),
inputs=dict(x=FromParam()),
outputs=dict(x_doubled_and_squared=FromNodeOutput("square", "x_squared")),
)
with tempfile.TemporaryDirectory() as tmp:
x_input = os.path.join(tmp, "x_input")
x_output = os.path.join(tmp, "x_output")
with open(x_input, "wb") as f:
f.write(b"4")
invoke(
dag,
argv=itertools.chain(
*[
["--input", "x", x_input],
[
"--output",
"x_doubled_and_squared",
x_output,
],
]
),
)
with open(x_output, "rb") as f:
assert f.read() == b"64"
def test__invoke__selecting_a_node_that_does_not_exist():
dag = DAG(
{
"single-node": Task(lambda: 1),
}
)
with pytest.raises(ValueError) as e:
invoke(dag, argv=["--node-name", "missing-node"])
assert (
str(e.value)
== "You selected node 'missing-node'. However, this DAG does not contain any node with such a name. These are the names the DAG contains: ['single-node']"
)
def test__invoke__selecting_a_node():
dag = DAG(
nodes=dict(
square=Task(
lambda x: x ** 2,
inputs=dict(x=FromParam()),
outputs=dict(x_squared=FromReturnValue()),
),
),
inputs=dict(x=FromParam()),
outputs=dict(x_squared=FromNodeOutput("square", "x_squared")),
)
with tempfile.TemporaryDirectory() as tmp:
x_input = os.path.join(tmp, "x_input")
x_output = os.path.join(tmp, "x_output")
with open(x_input, "wb") as f:
f.write(b"4")
invoke(
dag,
argv=itertools.chain(
*[
["--node-name", "square"],
["--input", "x", x_input],
["--output", "x_squared", x_output],
]
),
)
with open(x_output, "rb") as f:
assert f.read() == b"16"
def test__invoke__selecting_a_node_from_nested_dag():
dag = DAG(
{
"double": Task(
lambda x: 2 * x,
inputs=dict(x=FromParam()),
outputs=dict(x=FromReturnValue()),
),
"nested": DAG(
{
"square": Task(
lambda x: x ** 2,
inputs=dict(x=FromParam()),
outputs=dict(x=FromReturnValue()),
),
},
inputs=dict(x=FromParam()),
),
},
inputs=dict(x=FromParam()),
outputs=dict(x=FromNodeOutput("double", "x")),
)
with tempfile.TemporaryDirectory() as tmp:
x_input = os.path.join(tmp, "x_input")
x_output = os.path.join(tmp, "x_output")
with open(x_input, "wb") as f:
f.write(b"4")
invoke(
dag,
argv=itertools.chain(
*[
["--node-name", "nested.square"],
["--input", "x", x_input],
["--output", "x", x_output],
]
),
)
with open(x_output, "rb") as f:
assert f.read() == b"16"
def test__invoke__selecting_a_nested_node_that_does_not_exist():
dag = DAG(
{
"nested": DAG({"single-node": Task(lambda: 1)}),
}
)
with pytest.raises(ValueError) as e:
invoke(dag, argv=["--node-name", "nested.missing-node"])
assert (
str(e.value)
== "You selected node 'nested.missing-node'. However, DAG 'nested' does not contain any node with such a name. These are the names the DAG contains: ['single-node']"
)
def test__invoke__selecting_a_nested_dag():
dag = DAG(
{
"nested": DAG(
{
"square": Task(
lambda x: x ** 2,
inputs=dict(x=FromParam()),
outputs=dict(x=FromReturnValue()),
),
},
inputs=dict(x=FromParam()),
outputs=dict(x=FromNodeOutput("square", "x")),
),
},
inputs=dict(x=FromParam()),
)
with tempfile.TemporaryDirectory() as tmp:
x_input = os.path.join(tmp, "x_input")
x_output = os.path.join(tmp, "x_output")
with open(x_input, "wb") as f:
f.write(b"4")
invoke(
dag,
argv=itertools.chain(
*[
["--node-name", "nested"],
["--input", "x", x_input],
["--output", "x", x_output],
]
),
)
with open(x_output, "rb") as f:
assert f.read() == b"16"
def test__invoke__nested_node_with_inputs_from_another_node_output():
non_default_serializer = AsPickle()
dag = DAG(
inputs={"x": FromParam()},
outputs={"x": FromNodeOutput("l1-a", "x")},
nodes={
"l1-a": Task(
lambda: 4,
outputs={"x": FromReturnValue(serializer=non_default_serializer)},
),
"l1-b": DAG(
inputs={
"l1-b-y": FromNodeOutput(
"l1-a", "x", serializer=non_default_serializer
)
},
nodes={
"l2-a": Task(lambda: 3, outputs={"x": FromReturnValue()}),
"l2-b": Task(
lambda x, y: x * y,
inputs={
"x": FromNodeOutput("l2-a", "x"),
"y": FromParam("l1-b-y", serializer=non_default_serializer),
},
outputs={"x": FromReturnValue()},
),
},
),
},
)
with tempfile.TemporaryDirectory() as tmp:
x_input = os.path.join(tmp, "x_input")
y_input = os.path.join(tmp, "y_input")
x_output = os.path.join(tmp, "x_output")
with open(x_input, "wb") as f:
f.write(b"5")
with open(y_input, "wb") as f:
f.write(AsPickle().serialize(6))
invoke(
dag,
argv=itertools.chain(
*[
["--node-name", "l1-b.l2-b"],
["--input", "x", x_input],
["--input", "y", y_input],
["--output", "x", x_output],
]
),
)
with open(x_output, "rb") as f:
assert f.read() == b"30"
def test__invoke__with_missing_input_parameter():
dag = DAG(
inputs={"x": FromParam()},
nodes={"l1-a": Task(lambda: 1)},
)
with pytest.raises(ValueError) as e:
invoke(dag, argv=["--input", "y", "f"])
assert (
str(e.value)
== "This node is supposed to receive a pointer to an input named 'x'. However, only the following input pointers were supplied: ['y']"
)
def test__invoke__with_missing_output_parameter():
dag = DAG(
outputs={"x": FromNodeOutput("n", "x")},
nodes={"n": Task(lambda: 1, outputs={"x": FromReturnValue()})},
)
with pytest.raises(ValueError) as e:
invoke(dag, argv=["--output", "y", "f"])
assert (
str(e.value)
== "This node is supposed to receive a pointer to an output named 'x'. However, only the following output pointers were supplied: ['y']"
)
def test__invoke__node_with_partitioned_output():
dag = DAG(
{
"t": Task(
lambda: [1, 2, 3],
outputs={"list": FromReturnValue(is_partitioned=True)},
),
}
)
with tempfile.TemporaryDirectory() as tmp:
list_output = os.path.join(tmp, "list_output")
invoke(
dag,
argv=itertools.chain(
*[
["--node-name", "t"],
["--output", "list", list_output],
]
),
)
assert os.path.isdir(list_output)
partitions = []
with open(os.path.join(list_output, PARTITION_MANIFEST_FILENAME), "rb") as f:
partition_filenames = json.load(f)
for partition_filename in partition_filenames:
with open(os.path.join(list_output, partition_filename), "rb") as p:
partitions.append(p.read())
assert partitions == [b"1", b"2", b"3"]
def test__invoke__node_with_partitioned_input():
dag = DAG(
inputs={"partitioned": FromParam()},
outputs={"together": FromNodeOutput("t", "together")},
nodes={
"t": Task(
lambda partitioned: partitioned,
inputs={"partitioned": FromParam()},
outputs={"together": FromReturnValue()},
),
},
)
with tempfile.TemporaryDirectory() as tmp:
partitioned_input = os.path.join(tmp, "partitioned_input")
together_output = os.path.join(tmp, "together_output")
store_output_in_location(
output_location=partitioned_input,
output_value=PartitionedOutput([b"1", b"2", b"3"]),
)
assert os.path.isdir(partitioned_input)
invoke(
dag,
argv=itertools.chain(
*[
["--input", "partitioned", partitioned_input],
["--output", "together", together_output],
]
),
)
with open(together_output, "rb") as f:
assert f.read() == b"[1, 2, 3]"
|
[
"json.load",
"tempfile.TemporaryDirectory",
"dagger.runtime.cli.cli.invoke",
"os.path.isdir",
"dagger.input.FromNodeOutput",
"pytest.raises",
"dagger.runtime.local.PartitionedOutput",
"dagger.output.FromReturnValue",
"dagger.serializer.AsPickle",
"dagger.task.Task",
"itertools.chain",
"dagger.input.FromParam",
"os.path.join"
] |
[((6002, 6012), 'dagger.serializer.AsPickle', 'AsPickle', ([], {}), '()\n', (6010, 6012), False, 'from dagger.serializer import AsPickle\n'), ((1053, 1082), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1080, 1082), False, 'import tempfile\n'), ((1109, 1137), 'os.path.join', 'os.path.join', (['tmp', '"""x_input"""'], {}), "(tmp, 'x_input')\n", (1121, 1137), False, 'import os\n'), ((1157, 1186), 'os.path.join', 'os.path.join', (['tmp', '"""x_output"""'], {}), "(tmp, 'x_output')\n", (1169, 1186), False, 'import os\n'), ((1826, 1851), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1839, 1851), False, 'import pytest\n'), ((1866, 1915), 'dagger.runtime.cli.cli.invoke', 'invoke', (['dag'], {'argv': "['--node-name', 'missing-node']"}), "(dag, argv=['--node-name', 'missing-node'])\n", (1872, 1915), False, 'from dagger.runtime.cli.cli import invoke\n'), ((2506, 2535), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2533, 2535), False, 'import tempfile\n'), ((2562, 2590), 'os.path.join', 'os.path.join', (['tmp', '"""x_input"""'], {}), "(tmp, 'x_input')\n", (2574, 2590), False, 'import os\n'), ((2610, 2639), 'os.path.join', 'os.path.join', (['tmp', '"""x_output"""'], {}), "(tmp, 'x_output')\n", (2622, 2639), False, 'import os\n'), ((3770, 3799), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3797, 3799), False, 'import tempfile\n'), ((3826, 3854), 'os.path.join', 'os.path.join', (['tmp', '"""x_input"""'], {}), "(tmp, 'x_input')\n", (3838, 3854), False, 'import os\n'), ((3874, 3903), 'os.path.join', 'os.path.join', (['tmp', '"""x_output"""'], {}), "(tmp, 'x_output')\n", (3886, 3903), False, 'import os\n'), ((4506, 4531), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4519, 4531), False, 'import pytest\n'), ((4546, 4602), 'dagger.runtime.cli.cli.invoke', 'invoke', (['dag'], {'argv': "['--node-name', 'nested.missing-node']"}), "(dag, argv=['--node-name', 'nested.missing-node'])\n", (4552, 4602), False, 'from dagger.runtime.cli.cli import invoke\n'), ((5350, 5379), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5377, 5379), False, 'import tempfile\n'), ((5406, 5434), 'os.path.join', 'os.path.join', (['tmp', '"""x_input"""'], {}), "(tmp, 'x_input')\n", (5418, 5434), False, 'import os\n'), ((5454, 5483), 'os.path.join', 'os.path.join', (['tmp', '"""x_output"""'], {}), "(tmp, 'x_output')\n", (5466, 5483), False, 'import os\n'), ((7024, 7053), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7051, 7053), False, 'import tempfile\n'), ((7080, 7108), 'os.path.join', 'os.path.join', (['tmp', '"""x_input"""'], {}), "(tmp, 'x_input')\n", (7092, 7108), False, 'import os\n'), ((7127, 7155), 'os.path.join', 'os.path.join', (['tmp', '"""y_input"""'], {}), "(tmp, 'y_input')\n", (7139, 7155), False, 'import os\n'), ((7175, 7204), 'os.path.join', 'os.path.join', (['tmp', '"""x_output"""'], {}), "(tmp, 'x_output')\n", (7187, 7204), False, 'import os\n'), ((7915, 7940), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7928, 7940), False, 'import pytest\n'), ((7955, 7994), 'dagger.runtime.cli.cli.invoke', 'invoke', (['dag'], {'argv': "['--input', 'y', 'f']"}), "(dag, argv=['--input', 'y', 'f'])\n", (7961, 7994), False, 'from dagger.runtime.cli.cli import invoke\n'), ((8383, 8408), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8396, 8408), False, 
'import pytest\n'), ((8423, 8463), 'dagger.runtime.cli.cli.invoke', 'invoke', (['dag'], {'argv': "['--output', 'y', 'f']"}), "(dag, argv=['--output', 'y', 'f'])\n", (8429, 8463), False, 'from dagger.runtime.cli.cli import invoke\n'), ((8898, 8927), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (8925, 8927), False, 'import tempfile\n'), ((8958, 8990), 'os.path.join', 'os.path.join', (['tmp', '"""list_output"""'], {}), "(tmp, 'list_output')\n", (8970, 8990), False, 'import os\n'), ((9234, 9260), 'os.path.isdir', 'os.path.isdir', (['list_output'], {}), '(list_output)\n', (9247, 9260), False, 'import os\n'), ((10075, 10104), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (10102, 10104), False, 'import tempfile\n'), ((10141, 10179), 'os.path.join', 'os.path.join', (['tmp', '"""partitioned_input"""'], {}), "(tmp, 'partitioned_input')\n", (10153, 10179), False, 'import os\n'), ((10206, 10242), 'os.path.join', 'os.path.join', (['tmp', '"""together_output"""'], {}), "(tmp, 'together_output')\n", (10218, 10242), False, 'import os\n'), ((10414, 10446), 'os.path.isdir', 'os.path.isdir', (['partitioned_input'], {}), '(partitioned_input)\n', (10427, 10446), False, 'import os\n'), ((1784, 1800), 'dagger.task.Task', 'Task', (['(lambda : 1)'], {}), '(lambda : 1)\n', (1788, 1800), False, 'from dagger.task import Task\n'), ((9406, 9418), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9415, 9418), False, 'import json\n'), ((1304, 1402), 'itertools.chain', 'itertools.chain', (["*[['--input', 'x', x_input], ['--output', 'x_doubled_and_squared', x_output]]"], {}), "(*[['--input', 'x', x_input], ['--output',\n 'x_doubled_and_squared', x_output]])\n", (1319, 1402), False, 'import itertools\n'), ((2757, 2871), 'itertools.chain', 'itertools.chain', (["*[['--node-name', 'square'], ['--input', 'x', x_input], ['--output',\n 'x_squared', x_output]]"], {}), "(*[['--node-name', 'square'], ['--input', 'x', x_input], [\n '--output', 'x_squared', x_output]])\n", (2772, 2871), False, 'import itertools\n'), ((4021, 4133), 'itertools.chain', 'itertools.chain', (["*[['--node-name', 'nested.square'], ['--input', 'x', x_input], ['--output',\n 'x', x_output]]"], {}), "(*[['--node-name', 'nested.square'], ['--input', 'x',\n x_input], ['--output', 'x', x_output]])\n", (4036, 4133), False, 'import itertools\n'), ((5601, 5707), 'itertools.chain', 'itertools.chain', (["*[['--node-name', 'nested'], ['--input', 'x', x_input], ['--output', 'x',\n x_output]]"], {}), "(*[['--node-name', 'nested'], ['--input', 'x', x_input], [\n '--output', 'x', x_output]])\n", (5616, 5707), False, 'import itertools\n'), ((6049, 6060), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (6058, 6060), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((6085, 6112), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""l1-a"""', '"""x"""'], {}), "('l1-a', 'x')\n", (6099, 6112), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((7407, 7542), 'itertools.chain', 'itertools.chain', (["*[['--node-name', 'l1-b.l2-b'], ['--input', 'x', x_input], ['--input', 'y',\n y_input], ['--output', 'x', x_output]]"], {}), "(*[['--node-name', 'l1-b.l2-b'], ['--input', 'x', x_input],\n ['--input', 'y', y_input], ['--output', 'x', x_output]])\n", (7422, 7542), False, 'import itertools\n'), ((7845, 7856), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (7854, 7856), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((7882, 7898), 'dagger.task.Task', 'Task', 
(['(lambda : 1)'], {}), '(lambda : 1)\n', (7886, 7898), False, 'from dagger.task import Task\n'), ((8269, 8293), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""n"""', '"""x"""'], {}), "('n', 'x')\n", (8283, 8293), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((9042, 9117), 'itertools.chain', 'itertools.chain', (["*[['--node-name', 't'], ['--output', 'list', list_output]]"], {}), "(*[['--node-name', 't'], ['--output', 'list', list_output]])\n", (9057, 9117), False, 'import itertools\n'), ((9304, 9358), 'os.path.join', 'os.path.join', (['list_output', 'PARTITION_MANIFEST_FILENAME'], {}), '(list_output, PARTITION_MANIFEST_FILENAME)\n', (9316, 9358), False, 'import os\n'), ((9758, 9769), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (9767, 9769), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((9801, 9832), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""t"""', '"""together"""'], {}), "('t', 'together')\n", (9815, 9832), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((10350, 10387), 'dagger.runtime.local.PartitionedOutput', 'PartitionedOutput', (["[b'1', b'2', b'3']"], {}), "([b'1', b'2', b'3'])\n", (10367, 10387), False, 'from dagger.runtime.local import PartitionedOutput\n'), ((10498, 10611), 'itertools.chain', 'itertools.chain', (["*[['--input', 'partitioned', partitioned_input], ['--output', 'together',\n together_output]]"], {}), "(*[['--input', 'partitioned', partitioned_input], [\n '--output', 'together', together_output]])\n", (10513, 10611), False, 'import itertools\n'), ((940, 951), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (949, 951), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((997, 1034), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""square"""', '"""x_squared"""'], {}), "('square', 'x_squared')\n", (1011, 1034), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((2405, 2416), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (2414, 2416), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((2450, 2487), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""square"""', '"""x_squared"""'], {}), "('square', 'x_squared')\n", (2464, 2487), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((3685, 3696), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (3694, 3696), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((3722, 3751), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""double"""', '"""x"""'], {}), "('double', 'x')\n", (3736, 3751), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((4462, 4478), 'dagger.task.Task', 'Task', (['(lambda : 1)'], {}), '(lambda : 1)\n', (4466, 4478), False, 'from dagger.task import Task\n'), ((5320, 5331), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (5329, 5331), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((7331, 7341), 'dagger.serializer.AsPickle', 'AsPickle', ([], {}), '()\n', (7339, 7341), False, 'from dagger.serializer import AsPickle\n'), ((8818, 8854), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {'is_partitioned': '(True)'}), '(is_partitioned=True)\n', (8833, 8854), False, 'from dagger.output import FromReturnValue\n'), ((9505, 9550), 'os.path.join', 'os.path.join', (['list_output', 'partition_filename'], {}), '(list_output, partition_filename)\n', (9517, 9550), False, 'import os\n'), ((3237, 3248), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (3246, 3248), False, 'from 
dagger.input import FromNodeOutput, FromParam\n'), ((3282, 3299), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (3297, 3299), False, 'from dagger.output import FromReturnValue\n'), ((3623, 3634), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (3632, 3634), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((5195, 5206), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (5204, 5206), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((5240, 5269), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""square"""', '"""x"""'], {}), "('square', 'x')\n", (5254, 5269), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((6214, 6264), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {'serializer': 'non_default_serializer'}), '(serializer=non_default_serializer)\n', (6229, 6264), False, 'from dagger.output import FromReturnValue\n'), ((6362, 6424), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""l1-a"""', '"""x"""'], {'serializer': 'non_default_serializer'}), "('l1-a', 'x', serializer=non_default_serializer)\n", (6376, 6424), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((8346, 8363), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (8361, 8363), False, 'from dagger.output import FromReturnValue\n'), ((9962, 9973), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (9971, 9973), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((10013, 10030), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (10028, 10030), False, 'from dagger.output import FromReturnValue\n'), ((616, 627), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (625, 627), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((669, 686), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (684, 686), False, 'from dagger.output import FromReturnValue\n'), ((793, 830), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""double"""', '"""x_doubled"""'], {}), "('double', 'x_doubled')\n", (807, 830), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((872, 889), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (887, 889), False, 'from dagger.output import FromReturnValue\n'), ((2284, 2295), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (2293, 2295), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((2337, 2354), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (2352, 2354), False, 'from dagger.output import FromReturnValue\n'), ((3478, 3489), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (3487, 3489), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((3531, 3548), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (3546, 3548), False, 'from dagger.output import FromReturnValue\n'), ((5050, 5061), 'dagger.input.FromParam', 'FromParam', ([], {}), '()\n', (5059, 5061), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((5103, 5120), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (5118, 5120), False, 'from dagger.output import FromReturnValue\n'), ((6572, 6589), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (6587, 6589), False, 'from dagger.output import FromReturnValue\n'), ((6737, 6764), 'dagger.input.FromNodeOutput', 'FromNodeOutput', (['"""l2-a"""', '"""x"""'], {}), "('l2-a', 'x')\n", (6751, 6764), False, 'from dagger.input import 
FromNodeOutput, FromParam\n'), ((6799, 6853), 'dagger.input.FromParam', 'FromParam', (['"""l1-b-y"""'], {'serializer': 'non_default_serializer'}), "('l1-b-y', serializer=non_default_serializer)\n", (6808, 6853), False, 'from dagger.input import FromNodeOutput, FromParam\n'), ((6920, 6937), 'dagger.output.FromReturnValue', 'FromReturnValue', ([], {}), '()\n', (6935, 6937), False, 'from dagger.output import FromReturnValue\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
from azure.cli.core.commands import CliCommandType
from ._client_factory import (
topics_factory,
domains_factory,
domain_topics_factory,
system_topics_factory,
system_topic_event_subscriptions_factory,
event_subscriptions_factory,
topic_types_factory,
extension_topics_factory,
partner_registrations_factory,
partner_namespaces_factory,
event_channels_factory,
partner_topics_factory,
partner_topic_event_subscriptions_factory
)
def load_command_table(self, _):
topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#TopicsOperations.{}',
client_factory=topics_factory,
client_arg_name='self'
)
extension_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#ExtensionTopicsOperations.{}',
client_factory=extension_topics_factory,
client_arg_name='self'
)
domains_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#DomainsOperations.{}',
client_factory=domains_factory,
client_arg_name='self'
)
domain_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#DomainTopicsOperations.{}',
client_factory=domain_topics_factory,
client_arg_name='self'
)
system_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#SystemTopicsOperations.{}',
client_factory=system_topics_factory,
client_arg_name='self'
)
system_topic_event_subscriptions_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#SystemTopicEventSubscriptionsOperations.{}',
client_factory=system_topic_event_subscriptions_factory,
client_arg_name='self'
)
partner_registrations_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerRegistrationsOperations.{}',
client_factory=partner_registrations_factory,
client_arg_name='self'
)
partner_namespaces_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerNamespacesOperations.{}',
client_factory=partner_namespaces_factory,
client_arg_name='self'
)
event_channels_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#EventChannelsOperations.{}',
client_factory=event_channels_factory,
client_arg_name='self'
)
partner_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerTopicsOperations.{}',
client_factory=partner_topics_factory,
client_arg_name='self'
)
partner_topic_event_subscriptions_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerTopicEventSubscriptionsOperations.{}',
client_factory=partner_topic_event_subscriptions_factory,
client_arg_name='self'
)
topic_type_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#TopicTypesOperations.{}',
client_factory=topic_types_factory,
client_arg_name='self'
)
with self.command_group('eventgrid topic', topics_mgmt_util, client_factory=topics_factory) as g:
g.show_command('show', 'get')
g.command('key list', 'list_shared_access_keys')
g.command('delete', 'begin_delete')
g.custom_command('key regenerate', 'cli_topic_regenerate_key')
g.custom_command('list', 'cli_topic_list')
g.custom_command('create', 'cli_topic_create_or_update')
g.custom_command('update', 'cli_topic_update')
with self.command_group('eventgrid extension-topic', extension_topics_mgmt_util, client_factory=extension_topics_factory) as g:
g.show_command('show', 'get')
with self.command_group('eventgrid domain topic', domain_topics_mgmt_util, client_factory=domain_topics_factory) as g:
g.show_command('show', 'get')
g.custom_command('list', 'cli_domain_topic_list')
g.custom_command('delete', 'cli_domain_topic_delete')
g.custom_command('create', 'cli_domain_topic_create_or_update')
with self.command_group('eventgrid domain', domains_mgmt_util, client_factory=domains_factory) as g:
g.show_command('show', 'get')
g.command('key list', 'list_shared_access_keys')
g.custom_command('key regenerate', 'cli_domain_regenerate_key')
g.custom_command('list', 'cli_domain_list')
g.custom_command('create', 'cli_domain_create_or_update')
g.command('delete', 'begin_delete')
g.custom_command('update', 'cli_domain_update')
with self.command_group('eventgrid system-topic', system_topics_mgmt_util, client_factory=system_topics_factory) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_system_topic_list')
g.custom_command('create', 'cli_system_topic_create_or_update')
g.custom_command('update', 'cli_system_topic_update')
with self.command_group('eventgrid system-topic event-subscription', system_topic_event_subscriptions_mgmt_util, client_factory=system_topic_event_subscriptions_factory) as g:
g.custom_show_command('show', 'cli_system_topic_event_subscription_get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_system_topic_event_subscription_list')
g.custom_command('create', 'cli_system_topic_event_subscription_create_or_update')
g.custom_command('update', 'cli_system_topic_event_subscription_update')
with self.command_group('eventgrid partner registration', partner_registrations_mgmt_util, client_factory=partner_registrations_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'delete', confirmation=True)
g.custom_command('list', 'cli_partner_registration_list')
g.custom_command('create', 'cli_partner_registration_create_or_update')
# g.custom_command('update', 'cli_partner_registration_update')
with self.command_group('eventgrid partner namespace', partner_namespaces_mgmt_util, client_factory=partner_namespaces_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_partner_namespace_list')
g.custom_command('create', 'cli_partner_namespace_create_or_update')
g.command('key list', 'list_shared_access_keys')
g.custom_command('key regenerate', 'cli_partner_namespace_regenerate_key')
# g.custom_command('update', 'cli_partner_namespace_update')
with self.command_group('eventgrid partner namespace event-channel', event_channels_mgmt_util, client_factory=event_channels_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_event_channel_list')
# g.custom_command('update', 'cli_event_channel_update')
g.custom_command('create', 'cli_event_channel_create_or_update')
with self.command_group('eventgrid partner topic', partner_topics_mgmt_util, client_factory=partner_topics_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.command('activate', 'activate')
g.command('deactivate', 'deactivate')
g.custom_command('list', 'cli_partner_topic_list')
# g.custom_command('create', 'cli_partner_topic_create_or_update')
# g.custom_command('update', 'cli_partner_topic_update')
with self.command_group('eventgrid partner topic event-subscription', partner_topic_event_subscriptions_mgmt_util, client_factory=partner_topic_event_subscriptions_factory, is_preview=True) as g:
g.custom_show_command('show', 'cli_partner_topic_event_subscription_get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_partner_topic_event_subscription_list')
g.custom_command('create', 'cli_partner_topic_event_subscription_create_or_update')
g.custom_command('update', 'cli_partner_topic_event_subscription_update')
custom_tmpl = 'azure.cli.command_modules.eventgrid.custom#{}'
eventgrid_custom = CliCommandType(operations_tmpl=custom_tmpl)
with self.command_group('eventgrid event-subscription', client_factory=event_subscriptions_factory) as g:
g.custom_command('create', 'cli_eventgrid_event_subscription_create')
g.custom_show_command('show', 'cli_eventgrid_event_subscription_get')
g.custom_command('delete', 'cli_eventgrid_event_subscription_delete')
g.custom_command('list', 'cli_event_subscription_list')
g.generic_update_command('update',
getter_type=eventgrid_custom,
setter_type=eventgrid_custom,
getter_name='event_subscription_getter',
setter_name='event_subscription_setter',
custom_func_name='update_event_subscription')
with self.command_group('eventgrid topic-type', topic_type_mgmt_util) as g:
g.command('list', 'list')
g.show_command('show', 'get')
g.command('list-event-types', 'list_event_types')
|
[
"azure.cli.core.commands.CliCommandType"
] |
[((957, 1107), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#TopicsOperations.{}"""', 'client_factory': 'topics_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#TopicsOperations.{}', client_factory=\n topics_factory, client_arg_name='self')\n", (971, 1107), False, 'from azure.cli.core.commands import CliCommandType\n'), ((1162, 1330), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#ExtensionTopicsOperations.{}"""', 'client_factory': 'extension_topics_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#ExtensionTopicsOperations.{}',\n client_factory=extension_topics_factory, client_arg_name='self')\n", (1176, 1330), False, 'from azure.cli.core.commands import CliCommandType\n'), ((1377, 1529), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#DomainsOperations.{}"""', 'client_factory': 'domains_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#DomainsOperations.{}', client_factory=\n domains_factory, client_arg_name='self')\n", (1391, 1529), False, 'from azure.cli.core.commands import CliCommandType\n'), ((1581, 1743), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#DomainTopicsOperations.{}"""', 'client_factory': 'domain_topics_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#DomainTopicsOperations.{}',\n client_factory=domain_topics_factory, client_arg_name='self')\n", (1595, 1743), False, 'from azure.cli.core.commands import CliCommandType\n'), ((1796, 1958), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#SystemTopicsOperations.{}"""', 'client_factory': 'system_topics_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#SystemTopicsOperations.{}',\n client_factory=system_topics_factory, client_arg_name='self')\n", (1810, 1958), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2030, 2233), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#SystemTopicEventSubscriptionsOperations.{}"""', 'client_factory': 'system_topic_event_subscriptions_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#SystemTopicEventSubscriptionsOperations.{}'\n , client_factory=system_topic_event_subscriptions_factory,\n client_arg_name='self')\n", (2044, 2233), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2289, 2467), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#PartnerRegistrationsOperations.{}"""', 'client_factory': 'partner_registrations_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#PartnerRegistrationsOperations.{}',\n client_factory=partner_registrations_factory, client_arg_name='self')\n", (2303, 2467), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2525, 2697), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#PartnerNamespacesOperations.{}"""', 
'client_factory': 'partner_namespaces_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#PartnerNamespacesOperations.{}',\n client_factory=partner_namespaces_factory, client_arg_name='self')\n", (2539, 2697), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2751, 2915), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#EventChannelsOperations.{}"""', 'client_factory': 'event_channels_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#EventChannelsOperations.{}',\n client_factory=event_channels_factory, client_arg_name='self')\n", (2765, 2915), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2969, 3133), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#PartnerTopicsOperations.{}"""', 'client_factory': 'partner_topics_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#PartnerTopicsOperations.{}',\n client_factory=partner_topics_factory, client_arg_name='self')\n", (2983, 3133), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3206, 3411), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#PartnerTopicEventSubscriptionsOperations.{}"""', 'client_factory': 'partner_topic_event_subscriptions_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#PartnerTopicEventSubscriptionsOperations.{}'\n , client_factory=partner_topic_event_subscriptions_factory,\n client_arg_name='self')\n", (3220, 3411), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3456, 3614), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azure.mgmt.eventgrid.operations#TopicTypesOperations.{}"""', 'client_factory': 'topic_types_factory', 'client_arg_name': '"""self"""'}), "(operations_tmpl=\n 'azure.mgmt.eventgrid.operations#TopicTypesOperations.{}',\n client_factory=topic_types_factory, client_arg_name='self')\n", (3470, 3614), False, 'from azure.cli.core.commands import CliCommandType\n'), ((8891, 8934), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': 'custom_tmpl'}), '(operations_tmpl=custom_tmpl)\n', (8905, 8934), False, 'from azure.cli.core.commands import CliCommandType\n')]
|
from tensorflow.keras.models import clone_model
from tensorflow.keras.layers import Dropout
def dropout_model(model, dropout):
"""
    Clone a Keras model and set the given dropout rate on every Dropout layer,
    so the clone can be used for Monte-Carlo (dropout-at-prediction) inference.
    Credits to https://github.com/keras-team/keras/issues/8826 and to sfblake:
    https://medium.com/hal24k-techblog/how-to-generate-neural-network-confidence-intervals-with-keras-e4c0b78ebbdf
    A usage sketch follows the function definition below.
    Parameters
    ----------
    model : keras model
    dropout : fraction dropout to apply to all Dropout layers
    Returns
    -------
    model_new : model with updated dropout rate
"""
# 1. Use keras.models.clone_model
model_new = clone_model(model)
# 2. change dropout rate
for layer in model_new.layers:
if isinstance(layer, Dropout):
layer.rate = dropout
    # 3. (Optional) compile the clone, e.g. if it will be trained or evaluated:
    # model_new.compile(optimizer="Adam", loss="mse")
# 4. set_weights of cloned model with get_weights
model_new.set_weights(model.get_weights())
return model_new
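# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes a
# tiny, made-up regression model; the layer sizes, data and number of Monte
# Carlo samples below are arbitrary. Passing training=True keeps dropout
# active at prediction time, so repeated calls give a distribution of outputs
# from which a confidence interval can be estimated.
if __name__ == "__main__":
    import numpy as np
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense
    base = Sequential([
        Dense(16, activation="relu", input_shape=(4,)),
        Dropout(0.1),
        Dense(1),
    ])
    base.compile(optimizer="adam", loss="mse")
    mc_model = dropout_model(base, dropout=0.5)
    x = np.random.rand(8, 4).astype("float32")
    # 100 stochastic forward passes -> per-sample prediction distribution
    samples = np.stack([mc_model(x, training=True).numpy() for _ in range(100)])
    lower, upper = np.percentile(samples, [5, 95], axis=0)
    print(lower.shape, upper.shape)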
|
[
"tensorflow.keras.models.clone_model"
] |
[((603, 621), 'tensorflow.keras.models.clone_model', 'clone_model', (['model'], {}), '(model)\n', (614, 621), False, 'from tensorflow.keras.models import clone_model\n')]
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import importlib
import logging
import operator
import os
import tempfile
import time
from collections import defaultdict
from functools import reduce
from typing import Dict, Iterator, Optional, Set, Type, List
from .... import oscar as mo
from ....config import Config
from ....core import ChunkGraph, TileableGraph
from ....core.operand import Fetch, FetchShuffle
from ....dataframe.core import DATAFRAME_TYPE, SERIES_TYPE
from ....metrics import Metrics
from ....optimization.logical import OptimizationRecords
from ....oscar.profiling import (
ProfilingData,
MARS_ENABLE_PROFILING,
)
from ....tensor.core import TENSOR_TYPE
from ....typing import ChunkType, TileableType
from ....utils import build_fetch, Timer, get_params_fields
from ...meta.api import WorkerMetaAPI
from ...subtask import SubtaskResult, SubtaskStatus, SubtaskGraph, Subtask
from ..core import Task, TaskResult, TaskStatus, new_task_id
from ..execution.api import TaskExecutor, ExecutionChunkResult
from .preprocessor import TaskPreprocessor
logger = logging.getLogger(__name__)
MARS_ENABLE_DUMPING_SUBTASK_GRAPH = int(os.environ.get("MARS_DUMP_SUBTASK_GRAPH", 0))
class TaskProcessor:
_tileable_to_subtasks: Dict[TileableType, List[Subtask]]
_tileable_id_to_tileable: Dict[str, TileableType]
_meta_updated_tileables: Set[TileableType]
def __init__(
self,
task: Task,
preprocessor: TaskPreprocessor,
executor: TaskExecutor,
):
self._task = task
self._preprocessor = preprocessor
self._executor = executor
self._session_id = self._task.session_id
self._tileable_to_subtasks = dict()
self._tileable_id_to_tileable = dict()
self._meta_updated_tileables = set()
if MARS_ENABLE_PROFILING:
ProfilingData.init(task.task_id)
elif task.extra_config and task.extra_config.get("enable_profiling"):
ProfilingData.init(task.task_id, task.extra_config["enable_profiling"])
self._dump_subtask_graph = False
if MARS_ENABLE_DUMPING_SUBTASK_GRAPH or (
task.extra_config and task.extra_config.get("dump_subtask_graph")
):
self._dump_subtask_graph = True
self.result = TaskResult(
task_id=task.task_id,
session_id=task.session_id,
start_time=time.time(),
status=TaskStatus.pending,
)
self.done = asyncio.Event()
# add metrics
self._chunk_graph_gen_time = Metrics.gauge(
"mars.chunk_graph_gen_time_secs",
"Time consuming in seconds to generate a chunk graph",
("session_id", "task_id"),
)
self._subtask_graph_gen_time = Metrics.gauge(
"mars.subtask_graph_gen_time_secs",
"Time consuming in seconds to generate a subtask graph",
("session_id", "task_id", "stage_id"),
)
self._task_execution_time = Metrics.gauge(
"mars.task_execution_time_secs",
"Time consuming in seconds to execute a task",
("session_id", "task_id"),
)
@property
def task_id(self):
return self._task.task_id
@property
def tileable_graph(self):
return self._preprocessor.tileable_graph
@property
def tileable_to_subtasks(self):
return self._tileable_to_subtasks
@property
def tileable_id_to_tileable(self):
return self._tileable_id_to_tileable
@property
def stage_processors(self):
# TODO(fyrestone): Remove it.
return self._executor.get_stage_processors()
def get_tiled(self, tileable: TileableType):
return self._preprocessor.get_tiled(tileable)
@staticmethod
async def _get_next_chunk_graph(
chunk_graph_iter: Iterator[ChunkGraph],
) -> Optional[ChunkGraph]:
def next_chunk_graph():
try:
return next(chunk_graph_iter)
except StopIteration:
return
fut = asyncio.to_thread(next_chunk_graph)
chunk_graph = await fut
return chunk_graph
async def _iter_stage_chunk_graph(self):
tileable_graph = self._preprocessor.tileable_graph
chunk_graph_iter = iter(self._preprocessor.tile(tileable_graph))
while True:
with Timer() as stage_timer:
with Timer() as timer:
chunk_graph = await self._get_next_chunk_graph(chunk_graph_iter)
if chunk_graph is None:
# tile finished
self._preprocessor.done = True
return
stage_id = new_task_id()
stage_profiler = ProfilingData[self._task.task_id, "general"].nest(
f"stage_{stage_id}"
)
stage_profiler.set(f"tile({len(chunk_graph)})", timer.duration)
logger.info(
"Time consuming to gen a chunk graph is %ss with session id %s, task id %s",
timer.duration,
self._task.session_id,
self._task.task_id,
)
self._chunk_graph_gen_time.record(
timer.duration,
{
"session_id": self._task.session_id,
"task_id": self._task.task_id,
},
)
yield stage_id, stage_profiler, chunk_graph
stage_profiler.set("total", stage_timer.duration)
async def _process_stage_chunk_graph(
self,
stage_id: str,
stage_profiler,
chunk_graph: ChunkGraph,
):
available_bands = await self._executor.get_available_band_slots()
meta_api = self._executor._meta_api
get_meta_tasks = []
fetch_op_keys = []
for c in chunk_graph.iter_indep():
if isinstance(c.op, Fetch):
get_meta_tasks.append(
meta_api.get_chunk_meta.delay(c.key, fields=["bands"])
)
fetch_op_keys.append(c.op.key)
key_to_bands = await meta_api.get_chunk_meta.batch(*get_meta_tasks)
fetch_op_to_bands = dict(
(key, meta["bands"][0]) for key, meta in zip(fetch_op_keys, key_to_bands)
)
with Timer() as timer:
subtask_graph = await asyncio.to_thread(
self._preprocessor.analyze,
chunk_graph,
available_bands,
stage_id=stage_id,
op_to_bands=fetch_op_to_bands,
)
stage_profiler.set(f"gen_subtask_graph({len(subtask_graph)})", timer.duration)
logger.info(
"Time consuming to gen a subtask graph is %ss with session id %s, task id %s, stage id %s",
timer.duration,
self._task.session_id,
self._task.task_id,
stage_id,
)
self._subtask_graph_gen_time.record(
timer.duration,
{
"session_id": self._task.session_id,
"task_id": self._task.task_id,
"stage_id": stage_id,
},
)
tileable_to_subtasks = await asyncio.to_thread(
self._get_tileable_to_subtasks,
self._preprocessor.tileable_graph,
self._preprocessor.tile_context,
subtask_graph,
)
self._tileable_to_subtasks.update(tileable_to_subtasks)
with Timer() as timer:
execution_chunk_results = await self._executor.execute_subtask_graph(
stage_id, subtask_graph, chunk_graph
)
stage_profiler.set("run", timer.duration)
self._preprocessor.post_chunk_graph_execution()
if self._preprocessor.chunk_optimization_records_list:
optimization_records = self._preprocessor.chunk_optimization_records_list[
-1
]
else:
optimization_records = None
await self._update_meta(
chunk_graph, execution_chunk_results, optimization_records
)
async def _update_meta(
self,
chunk_graph: ChunkGraph,
execution_chunk_results: List[ExecutionChunkResult],
optimization_records: OptimizationRecords,
):
result_chunks = [c for c in chunk_graph.results if not isinstance(c.op, Fetch)]
chunk_to_band = {
result.chunk: result.meta["bands"][0][0]
for result in execution_chunk_results
}
update_meta_chunks = set(result_chunks)
update_meta_tileables = dict()
updated = self._meta_updated_tileables
for tileable in self.tileable_graph:
if tileable in updated:
continue
tiled_tileable = self._preprocessor.tile_context.get(tileable)
if tiled_tileable is not None:
tileable_chunks = [c.data for c in tiled_tileable.chunks]
if any(c not in chunk_to_band for c in tileable_chunks):
continue
update_meta_tileables[tiled_tileable] = tileable
# we no longer update the chunk meta directly,
# we try to update their meta via tileable,
# e.g. for DataFrame, chunks (0, 0) and (1, 0)
# have the same dtypes_value, thus we only need to update one
update_meta_chunks.difference_update(tileable_chunks)
worker_meta_api_to_chunk_delays = defaultdict(dict)
for c in update_meta_chunks:
meta_api = await WorkerMetaAPI.create(self._session_id, chunk_to_band[c])
call = meta_api.get_chunk_meta.delay(c.key, fields=get_params_fields(c))
worker_meta_api_to_chunk_delays[meta_api][c] = call
for tileable in update_meta_tileables:
chunks = [c.data for c in tileable.chunks]
for c, params_fields in zip(chunks, self._get_params_fields(tileable)):
meta_api = await WorkerMetaAPI.create(
self._session_id, chunk_to_band[c]
)
call = meta_api.get_chunk_meta.delay(c.key, fields=params_fields)
worker_meta_api_to_chunk_delays[meta_api][c] = call
coros = []
for worker_meta_api, chunk_delays in worker_meta_api_to_chunk_delays.items():
coros.append(worker_meta_api.get_chunk_meta.batch(*chunk_delays.values()))
worker_metas = await asyncio.gather(*coros)
chunk_to_meta = dict()
for chunk_delays, metas in zip(
worker_meta_api_to_chunk_delays.values(), worker_metas
):
for c, meta in zip(chunk_delays, metas):
chunk_to_meta[c] = meta
# update meta
for c in update_meta_chunks:
params = c.params = chunk_to_meta[c]
original_chunk = (
optimization_records and optimization_records.get_original_entity(c)
)
if original_chunk is not None:
original_chunk.params = params
# update tileable
for tiled, tileable in update_meta_tileables.items():
self._update_tileable_meta(tiled, chunk_to_meta, optimization_records)
tileable.params = tiled.params
updated.add(tileable)
@classmethod
def _get_params_fields(cls, tileable: TileableType):
params_fields = []
fields = get_params_fields(tileable.chunks[0])
if isinstance(tileable, DATAFRAME_TYPE):
for c in tileable.chunks:
cur_fields = set(fields)
if c.index[1] > 0:
# skip fetch index_value for i >= 1 on column axis
cur_fields.remove("index_value")
if c.index[0] > 0:
# skip fetch dtypes_value for i >= 1 on index axis
cur_fields.remove("dtypes_value")
if c.index[0] > 0 and c.index[1] > 0:
# fetch shape only for i == 0 on index or column axis
cur_fields.remove("shape")
params_fields.append(list(cur_fields))
elif isinstance(tileable, SERIES_TYPE):
for c in tileable.chunks:
cur_fields = set(fields)
if c.index[0] > 0:
# skip fetch name and dtype for i >= 1
cur_fields.remove("name")
cur_fields.remove("dtype")
params_fields.append(list(cur_fields))
elif isinstance(tileable, TENSOR_TYPE):
for i, c in enumerate(tileable.chunks):
cur_fields = set(fields)
if c.ndim > 1 and all(j > 0 for j in c.index):
cur_fields.remove("shape")
if i > 0:
cur_fields.remove("dtype")
cur_fields.remove("order")
params_fields.append(list(cur_fields))
else:
for _ in tileable.chunks:
params_fields.append(fields)
return params_fields
@classmethod
def _update_tileable_meta(
cls,
tileable: TileableType,
chunk_to_meta: Dict[ChunkType, dict],
optimization_records: OptimizationRecords,
):
chunks = [c.data for c in tileable.chunks]
if isinstance(tileable, DATAFRAME_TYPE):
for c in chunks:
i, j = c.index
meta = chunk_to_meta[c]
shape = meta.get("shape")
update_shape = shape is None
shape = shape if not update_shape else [None, None]
if i > 0:
# update dtypes_value
c0j = chunk_to_meta[tileable.cix[0, j].data]
meta["dtypes_value"] = c0j["dtypes_value"]
if update_shape:
shape[1] = c0j["shape"][1]
if j > 0:
# update index_value
ci0 = chunk_to_meta[tileable.cix[i, 0].data]
meta["index_value"] = ci0["index_value"]
if update_shape:
shape[0] = ci0["shape"][0]
if update_shape:
meta["shape"] = tuple(shape)
elif isinstance(tileable, SERIES_TYPE):
first_meta = chunk_to_meta[chunks[0]]
for c in chunks:
i = c.index[0]
meta = chunk_to_meta[c]
if i > 0:
meta["name"] = first_meta["name"]
meta["dtype"] = first_meta["dtype"]
elif isinstance(tileable, TENSOR_TYPE):
ndim = tileable.ndim
for i, c in enumerate(chunks):
meta = chunk_to_meta[c]
if "shape" not in meta:
shape = []
                    # use a distinct loop variable so the chunk index ``i``
                    # used below is not shadowed by the axis index
                    for axis, ind in enumerate(c.index):
                        ind0 = [0] * ndim
                        ind0[axis] = ind
                        c0 = tileable.cix[tuple(ind0)].data
                        shape.append(chunk_to_meta[c0]["shape"][axis])
meta["shape"] = tuple(shape)
if i > 0:
first = chunk_to_meta[chunks[0]]
meta["dtype"] = first["dtype"]
meta["order"] = first["order"]
for c in chunks:
params = c.params = chunk_to_meta[c]
original_chunk = (
optimization_records and optimization_records.get_original_entity(c)
)
if original_chunk is not None:
original_chunk.params = params
tileable.refresh_params()
async def run(self):
profiling = ProfilingData[self.task_id, "general"]
self.result.status = TaskStatus.running
# optimization
with Timer() as timer:
# optimization, run it in executor,
# since optimization may be a CPU intensive operation
await asyncio.to_thread(self._preprocessor.optimize)
profiling.set("optimize", timer.duration)
self._tileable_id_to_tileable = await asyncio.to_thread(
self._get_tileable_id_to_tileable, self._preprocessor.tileable_graph
)
try:
async with self._executor:
async for stage_args in self._iter_stage_chunk_graph():
await self._process_stage_chunk_graph(*stage_args)
except Exception as ex:
self.result.error = ex
self.result.traceback = ex.__traceback__
finally:
self._gen_result()
self._finish()
async def get_progress(self):
# get tileable proportion that is tiled
tileable_graph = self._preprocessor.tileable_graph
tileable_context = self._preprocessor.tile_context
tiled_percentage = len(tileable_context) / len(tileable_graph)
return tiled_percentage * await self._executor.get_progress()
async def cancel(self):
self._preprocessor.cancel()
await self._executor.cancel()
async def set_subtask_result(self, subtask_result: SubtaskResult):
await self._executor.set_subtask_result(subtask_result)
@staticmethod
def _get_tileable_to_subtasks(
tileable_graph: TileableGraph,
tile_context: Dict[TileableType, TileableType],
subtask_graph: SubtaskGraph,
) -> Dict[TileableType, List[Subtask]]:
tileable_to_chunks = defaultdict(set)
chunk_to_subtasks = dict()
for tileable in tileable_graph:
if tileable not in tile_context:
continue
for chunk in tile_context[tileable].chunks:
tileable_to_chunks[tileable].add(chunk.key)
# register chunk mapping for tiled terminals
chunk_to_subtasks[chunk.key] = set()
for subtask in subtask_graph:
for chunk in subtask.chunk_graph:
# for every non-fuse chunks (including fused),
# register subtasks if needed
if (
isinstance(chunk.op, (FetchShuffle, Fetch))
or chunk.key not in chunk_to_subtasks
):
continue
chunk_to_subtasks[chunk.key].add(subtask)
tileable_to_subtasks = dict()
# collect subtasks for tileables
for tileable, chunk_keys in tileable_to_chunks.items():
tileable_to_subtasks[tileable] = list(
reduce(
operator.or_,
[chunk_to_subtasks[chunk_key] for chunk_key in chunk_keys],
)
)
return tileable_to_subtasks
@staticmethod
def _get_tileable_id_to_tileable(
tileable_graph: TileableGraph,
) -> Dict[str, TileableType]:
tileable_id_to_tileable = dict()
for tileable in tileable_graph:
tileable_id_to_tileable[str(tileable.key)] = tileable
return tileable_id_to_tileable
def _gen_result(self):
self.result.status = TaskStatus.terminated
self.result.end_time = time.time()
cost_time_secs = self.result.end_time - self.result.start_time
logger.info(
"Time consuming to execute a task is %ss with session id %s, task id %s",
cost_time_secs,
self._task.session_id,
self._task.task_id,
)
self._task_execution_time.record(
cost_time_secs,
{"session_id": self._task.session_id, "task_id": self._task.task_id},
)
def dump_subtask_graph(self):
from .graph_visualizer import GraphVisualizer
try: # pragma: no cover
import graphviz
except ImportError:
graphviz = None
dot = GraphVisualizer(self).to_dot()
directory = tempfile.gettempdir()
file_name = f"mars-{self.task_id}"
logger.debug(
"subtask graph is stored in %s", os.path.join(directory, file_name)
)
if graphviz is not None: # pragma: no cover
g = graphviz.Source(dot)
g.view(file_name, directory=directory)
else:
with open(os.path.join(directory, file_name), "w") as f:
f.write(dot)
def _finish(self):
self.done.set()
if self._dump_subtask_graph:
self.dump_subtask_graph()
if MARS_ENABLE_PROFILING or (
self._task.extra_config and self._task.extra_config.get("enable_profiling")
):
ProfilingData[self._task.task_id, "general"].set(
"total", time.time() - self.result.start_time
)
serialization = ProfilingData[self._task.task_id, "serialization"]
if not serialization.empty():
serialization.set(
"total",
sum(serialization.values()),
)
data = ProfilingData.pop(self._task.task_id)
self.result.profiling = {
"supervisor": data,
}
def is_done(self) -> bool:
return self.done.is_set()
class TaskProcessorActor(mo.Actor):
_task_id_to_processor: Dict[str, TaskProcessor]
_cur_processor: Optional[TaskProcessor]
def __init__(
self,
session_id: str,
task_id: str,
task_name: str = None,
task_processor_cls: Type[TaskPreprocessor] = None,
):
self.session_id = session_id
self.task_id = task_id
self.task_name = task_name
self._task_processor_cls = self._get_task_processor_cls(task_processor_cls)
self._task_id_to_processor = dict()
self._cur_processor = None
@classmethod
def gen_uid(cls, session_id: str, task_id: str):
return f"task_processor_{session_id}_{task_id}"
async def add_task(
self,
task: Task,
tiled_context: Dict[TileableType, TileableType],
config: Config,
task_executor_config: Dict,
task_preprocessor_cls: Type[TaskPreprocessor],
):
task_preprocessor = task_preprocessor_cls(
task, tiled_context=tiled_context, config=config
)
task_executor = await TaskExecutor.create(
task_executor_config,
task=task,
session_id=self.session_id,
address=self.address,
tileable_graph=task_preprocessor.tileable_graph,
tile_context=task_preprocessor.tile_context,
)
processor = self._task_processor_cls(
task,
task_preprocessor,
task_executor,
)
self._task_id_to_processor[task.task_id] = processor
# tell self to start running
await self.ref().start.tell()
@classmethod
def _get_task_processor_cls(cls, task_processor_cls):
if task_processor_cls is not None:
assert isinstance(task_processor_cls, str)
module, name = task_processor_cls.rsplit(".", 1)
return getattr(importlib.import_module(module), name)
else:
return TaskProcessor
def _get_unprocessed_task_processor(self):
for processor in self._task_id_to_processor.values():
if processor.result.status == TaskStatus.pending:
return processor
async def start(self):
if self._cur_processor is not None: # pragma: no cover
# some processor is running
return
processor = self._get_unprocessed_task_processor()
if processor is None: # pragma: no cover
return
self._cur_processor = processor
try:
yield processor.run()
finally:
self._cur_processor = None
async def wait(self, timeout: int = None):
fs = [
asyncio.ensure_future(processor.done.wait())
for processor in self._task_id_to_processor.values()
]
_, pending = yield asyncio.wait(fs, timeout=timeout)
if not pending:
raise mo.Return(self.result())
else:
[fut.cancel() for fut in pending]
async def cancel(self):
if self._cur_processor:
await self._cur_processor.cancel()
def result(self):
terminated_result = None
for processor in self._task_id_to_processor.values():
if processor.result.status != TaskStatus.terminated:
return processor.result
else:
terminated_result = processor.result
return terminated_result
async def progress(self):
processor_progresses = [
await processor.get_progress()
for processor in self._task_id_to_processor.values()
]
return sum(processor_progresses) / len(processor_progresses)
def get_result_tileables(self):
processor = list(self._task_id_to_processor.values())[-1]
tileable_graph = processor.tileable_graph
result = []
for result_tileable in tileable_graph.result_tileables:
tiled = processor.get_tiled(result_tileable)
result.append(build_fetch(tiled))
return result
def get_subtask_graphs(self, task_id: str) -> List[SubtaskGraph]:
return [
stage_processor.subtask_graph
for stage_processor in self._task_id_to_processor[task_id].stage_processors
]
def get_tileable_graph_as_dict(self):
processor = list(self._task_id_to_processor.values())[-1]
tileable_graph = processor.tileable_graph
node_list = []
edge_list = []
visited = set()
for chunk in tileable_graph:
if chunk.key in visited:
continue
visited.add(chunk.key)
node_name = str(chunk.op)
node_list.append({"tileableId": chunk.key, "tileableName": node_name})
for inp, is_pure_dep in zip(chunk.inputs, chunk.op.pure_depends):
if inp not in tileable_graph: # pragma: no cover
continue
edge_list.append(
{
"fromTileableId": inp.key,
"toTileableId": chunk.key,
"linkType": 1 if is_pure_dep else 0,
}
)
graph_dict = {"tileables": node_list, "dependencies": edge_list}
return graph_dict
def get_tileable_details(self):
tileable_to_subtasks = dict()
subtask_results = dict()
for processor in self._task_id_to_processor.values():
tileable_to_subtasks.update(processor.tileable_to_subtasks)
for stage in processor.stage_processors:
for subtask, result in stage.subtask_results.items():
subtask_results[subtask.subtask_id] = result
for subtask, result in stage.subtask_snapshots.items():
if subtask.subtask_id in subtask_results:
continue
subtask_results[subtask.subtask_id] = result
tileable_infos = dict()
for tileable, subtasks in tileable_to_subtasks.items():
results = [
subtask_results.get(
subtask.subtask_id,
SubtaskResult(
progress=0.0,
status=SubtaskStatus.pending,
stage_id=subtask.stage_id,
),
)
for subtask in subtasks
]
# calc progress
if not results: # pragma: no cover
progress = 1.0
else:
progress = (
1.0 * sum(result.progress for result in results) / len(results)
)
# calc status
statuses = set(result.status for result in results)
if not results or statuses == {SubtaskStatus.succeeded}:
status = SubtaskStatus.succeeded
elif statuses == {SubtaskStatus.cancelled}:
status = SubtaskStatus.cancelled
elif statuses == {SubtaskStatus.pending}:
status = SubtaskStatus.pending
elif SubtaskStatus.errored in statuses:
status = SubtaskStatus.errored
else:
status = SubtaskStatus.running
fields = tileable.op._FIELDS
field_values = tileable.op._FIELD_VALUES
props = {
fields[attr_name].tag: value
for attr_name, value in field_values.items()
if attr_name not in ("_key", "_id")
and isinstance(value, (int, float, str))
}
tileable_infos[tileable.key] = {
"progress": progress,
"subtaskCount": len(results),
"status": status.value,
"properties": props,
}
return tileable_infos
def get_tileable_subtasks(self, tileable_id: str, with_input_output: bool):
returned_subtasks = dict()
subtask_id_to_types = dict()
subtask_details = dict()
subtask_graph = subtask_results = subtask_snapshots = None
for processor in self._task_id_to_processor.values():
tileable_to_subtasks = processor.tileable_to_subtasks
tileable_id_to_tileable = processor.tileable_id_to_tileable
for stage in processor.stage_processors:
if tileable_id in tileable_id_to_tileable:
tileable = tileable_id_to_tileable[tileable_id]
returned_subtasks = {
subtask.subtask_id: subtask
for subtask in tileable_to_subtasks[tileable]
}
subtask_graph = stage.subtask_graph
subtask_results = stage.subtask_results
subtask_snapshots = stage.subtask_snapshots
break
if returned_subtasks:
break
if subtask_graph is None: # pragma: no cover
return {}
if with_input_output:
for subtask in list(returned_subtasks.values()):
for pred in subtask_graph.iter_predecessors(subtask):
if pred.subtask_id in returned_subtasks: # pragma: no cover
continue
returned_subtasks[pred.subtask_id] = pred
subtask_id_to_types[pred.subtask_id] = "Input"
for succ in subtask_graph.iter_successors(subtask):
if succ.subtask_id in returned_subtasks: # pragma: no cover
continue
returned_subtasks[succ.subtask_id] = succ
subtask_id_to_types[succ.subtask_id] = "Output"
for subtask in returned_subtasks.values():
subtask_result = subtask_results.get(
subtask,
subtask_snapshots.get(
subtask,
SubtaskResult(
progress=0.0,
status=SubtaskStatus.pending,
stage_id=subtask.stage_id,
),
),
)
subtask_details[subtask.subtask_id] = {
"name": subtask.subtask_name,
"status": subtask_result.status.value,
"progress": subtask_result.progress,
"nodeType": subtask_id_to_types.get(subtask.subtask_id, "Calculation"),
}
for subtask in returned_subtasks.values():
pred_ids = []
for pred in subtask_graph.iter_predecessors(subtask):
if pred.subtask_id in returned_subtasks:
pred_ids.append(pred.subtask_id)
subtask_details[subtask.subtask_id]["fromSubtaskIds"] = pred_ids
return subtask_details
def get_result_tileable(self, tileable_key: str):
processor = list(self._task_id_to_processor.values())[-1]
tileable_graph = processor.tileable_graph
for result_tileable in tileable_graph.result_tileables:
if result_tileable.key == tileable_key:
tiled = processor.get_tiled(result_tileable)
return build_fetch(tiled)
raise KeyError(f"Tileable {tileable_key} does not exist") # pragma: no cover
async def set_subtask_result(self, subtask_result: SubtaskResult):
logger.debug(
"Set subtask %s with result %s.", subtask_result.subtask_id, subtask_result
)
if self._cur_processor is not None:
await self._cur_processor.set_subtask_result(subtask_result)
def is_done(self) -> bool:
for processor in self._task_id_to_processor.values():
if not processor.is_done():
return False
return True
|
[
"asyncio.gather",
"importlib.import_module",
"asyncio.Event",
"tempfile.gettempdir",
"functools.reduce",
"time.time",
"os.environ.get",
"collections.defaultdict",
"graphviz.Source",
"asyncio.wait",
"asyncio.to_thread",
"os.path.join",
"logging.getLogger"
] |
[((1647, 1674), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1664, 1674), False, 'import logging\n'), ((1716, 1760), 'os.environ.get', 'os.environ.get', (['"""MARS_DUMP_SUBTASK_GRAPH"""', '(0)'], {}), "('MARS_DUMP_SUBTASK_GRAPH', 0)\n", (1730, 1760), False, 'import os\n'), ((3048, 3063), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (3061, 3063), False, 'import asyncio\n'), ((4639, 4674), 'asyncio.to_thread', 'asyncio.to_thread', (['next_chunk_graph'], {}), '(next_chunk_graph)\n', (4656, 4674), False, 'import asyncio\n'), ((10174, 10191), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (10185, 10191), False, 'from collections import defaultdict\n'), ((18142, 18158), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (18153, 18158), False, 'from collections import defaultdict\n'), ((19807, 19818), 'time.time', 'time.time', ([], {}), '()\n', (19816, 19818), False, 'import time\n'), ((20537, 20558), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (20556, 20558), False, 'import tempfile\n'), ((7880, 8017), 'asyncio.to_thread', 'asyncio.to_thread', (['self._get_tileable_to_subtasks', 'self._preprocessor.tileable_graph', 'self._preprocessor.tile_context', 'subtask_graph'], {}), '(self._get_tileable_to_subtasks, self._preprocessor.\n tileable_graph, self._preprocessor.tile_context, subtask_graph)\n', (7897, 8017), False, 'import asyncio\n'), ((11149, 11171), 'asyncio.gather', 'asyncio.gather', (['*coros'], {}), '(*coros)\n', (11163, 11171), False, 'import asyncio\n'), ((16801, 16893), 'asyncio.to_thread', 'asyncio.to_thread', (['self._get_tileable_id_to_tileable', 'self._preprocessor.tileable_graph'], {}), '(self._get_tileable_id_to_tileable, self._preprocessor.\n tileable_graph)\n', (16818, 16893), False, 'import asyncio\n'), ((20669, 20703), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (20681, 20703), False, 'import os\n'), ((20783, 20803), 'graphviz.Source', 'graphviz.Source', (['dot'], {}), '(dot)\n', (20798, 20803), False, 'import graphviz\n'), ((24671, 24704), 'asyncio.wait', 'asyncio.wait', (['fs'], {'timeout': 'timeout'}), '(fs, timeout=timeout)\n', (24683, 24704), False, 'import asyncio\n'), ((2966, 2977), 'time.time', 'time.time', ([], {}), '()\n', (2975, 2977), False, 'import time\n'), ((7032, 7161), 'asyncio.to_thread', 'asyncio.to_thread', (['self._preprocessor.analyze', 'chunk_graph', 'available_bands'], {'stage_id': 'stage_id', 'op_to_bands': 'fetch_op_to_bands'}), '(self._preprocessor.analyze, chunk_graph, available_bands,\n stage_id=stage_id, op_to_bands=fetch_op_to_bands)\n', (7049, 7161), False, 'import asyncio\n'), ((16657, 16703), 'asyncio.to_thread', 'asyncio.to_thread', (['self._preprocessor.optimize'], {}), '(self._preprocessor.optimize)\n', (16674, 16703), False, 'import asyncio\n'), ((19189, 19274), 'functools.reduce', 'reduce', (['operator.or_', '[chunk_to_subtasks[chunk_key] for chunk_key in chunk_keys]'], {}), '(operator.or_, [chunk_to_subtasks[chunk_key] for chunk_key in chunk_keys]\n )\n', (19195, 19274), False, 'from functools import reduce\n'), ((23734, 23765), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (23757, 23765), False, 'import importlib\n'), ((20891, 20925), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (20903, 20925), False, 'import os\n'), ((21314, 21325), 'time.time', 'time.time', ([], {}), '()\n', (21323, 21325), False, 
'import time\n')]
|
#!/usr/bin/env python
"""
<Program Name>
formats.py
<Author>
<NAME>
<NAME> <<EMAIL>>
<Started>
Refactored April 30, 2012. -vladimir.v.diaz
<Copyright>
See LICENSE for licensing information.
<Purpose>
A central location for all format-related checking of TUF objects.
Note: 'formats.py' depends heavily on 'schema.py', so the 'schema.py'
module should be read and understood before tackling this module.
  'formats.py' can be broken down into two sections. (1) Schemas and object
matching. (2) Functions that help produce or verify TUF objects.
The first section deals with schemas and object matching based on format.
There are two ways of checking the format of objects. The first method
raises a 'tuf.FormatError' exception if the match fails and the other
returns a Boolean result.
tuf.formats.<SCHEMA>.check_match(object)
tuf.formats.<SCHEMA>.matches(object)
Example:
    rsa_key = {'keytype': 'rsa',
               'keyid': <KEY>,
               'keyval': {'public': 'public_key',
                          'private': 'private_key'}}
tuf.formats.RSAKEY_SCHEMA.check_match(rsa_key)
tuf.formats.RSAKEY_SCHEMA.matches(rsa_key)
In this example, if a dict key or dict value is missing or incorrect,
the match fails. There are numerous variations of object checking
provided by 'formats.py' and 'schema.py'.
The second section contains miscellaneous functions related to the format of
TUF objects.
Example:
signable_object = make_signable(unsigned_object)
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import binascii
import calendar
import re
import string
import datetime
import time
import tuf
import tuf.schema as SCHEMA
import six
# Note that in the schema definitions below, the 'SCHEMA.Object' types allow
# additional keys which are not defined. Thus, any additions to them will be
# easily backwards compatible with clients that are already deployed.
# A datetime in 'YYYY-MM-DDTHH:MM:SSZ' ISO 8601 format. The "Z" zone designator
# for the zero UTC offset is always used (i.e., a numerical offset is not
# supported.) Example: '2015-10-21T13:20:00Z'. Note: This is a simple format
# check, and an ISO8601 string should be fully verified when it is parsed.
ISO8601_DATETIME_SCHEMA = SCHEMA.RegularExpression(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z')
# A Unix/POSIX time format. An integer representing the number of seconds
# since the epoch (January 1, 1970.) Metadata uses this format for the
# 'expires' field. Set 'hi' to the upper timestamp limit (year 2038), the max
# value of an int.
UNIX_TIMESTAMP_SCHEMA = SCHEMA.Integer(lo=0, hi=2147483647)
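# Example (illustrative): every schema object defined in this module supports
# the two checking styles described in the module docstring, e.g.:
#
#   ISO8601_DATETIME_SCHEMA.matches('2015-10-21T13:20:00Z')  # returns True
#   UNIX_TIMESTAMP_SCHEMA.check_match(-1)                    # raises tuf.FormatError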
# A hexadecimal value in '23432df87ab..' format.
HASH_SCHEMA = SCHEMA.RegularExpression(r'[a-fA-F0-9]+')
# A dict in {'sha256': '23432df87ab..', 'sha512': '34324abc34df..', ...} format.
HASHDICT_SCHEMA = SCHEMA.DictOf(
key_schema = SCHEMA.AnyString(),
value_schema = HASH_SCHEMA)
# A hexadecimal value in '23432df87ab..' format.
HEX_SCHEMA = SCHEMA.RegularExpression(r'[a-fA-F0-9]+')
# A key identifier (e.g., a hexadecimal value identifying an RSA key).
KEYID_SCHEMA = HASH_SCHEMA
# A list of KEYID_SCHEMA.
KEYIDS_SCHEMA = SCHEMA.ListOf(KEYID_SCHEMA)
# The method used for a generated signature (e.g., 'RSASSA-PSS').
SIG_METHOD_SCHEMA = SCHEMA.AnyString()
# A relative file path (e.g., 'metadata/root/').
RELPATH_SCHEMA = SCHEMA.AnyString()
RELPATHS_SCHEMA = SCHEMA.ListOf(RELPATH_SCHEMA)
# An absolute path.
PATH_SCHEMA = SCHEMA.AnyString()
PATHS_SCHEMA = SCHEMA.ListOf(PATH_SCHEMA)
# Uniform Resource Locator identifier (e.g., 'https://www.updateframework.com/').
URL_SCHEMA = SCHEMA.AnyString()
# A dictionary holding version information.
VERSION_SCHEMA = SCHEMA.Object(
object_name = 'VERSION_SCHEMA',
major = SCHEMA.Integer(lo=0),
minor = SCHEMA.Integer(lo=0),
fix = SCHEMA.Integer(lo=0))
# An integer representing the numbered version of a metadata file.
# Must be 1, or greater.
METADATAVERSION_SCHEMA = SCHEMA.Integer(lo=0)
# An integer representing length. Must be 0, or greater.
LENGTH_SCHEMA = SCHEMA.Integer(lo=0)
# An integer representing logger levels, such as logging.CRITICAL (=50).
# Must be between 0 and 50.
LOGLEVEL_SCHEMA = SCHEMA.Integer(lo=0, hi=50)
# A string representing a named object.
NAME_SCHEMA = SCHEMA.AnyString()
NAMES_SCHEMA = SCHEMA.ListOf(NAME_SCHEMA)
# A byte string representing data.
DATA_SCHEMA = SCHEMA.AnyBytes()
# Supported hash algorithms.
HASHALGORITHMS_SCHEMA = SCHEMA.ListOf(SCHEMA.OneOf(
[SCHEMA.String('md5'), SCHEMA.String('sha1'),
SCHEMA.String('sha224'), SCHEMA.String('sha256'),
SCHEMA.String('sha384'), SCHEMA.String('sha512')]))
# The contents of an encrypted TUF key. Encrypted TUF keys are saved to files
# in this format.
ENCRYPTEDKEY_SCHEMA = SCHEMA.AnyBytes()
# A value that is either True or False, on or off, etc.
BOOLEAN_SCHEMA = SCHEMA.Boolean()
# A role's threshold value (i.e., the minimum number
# of signatures required to sign a metadata file).
# Must be 1 and greater.
THRESHOLD_SCHEMA = SCHEMA.Integer(lo=1)
# A string representing a role's name.
ROLENAME_SCHEMA = SCHEMA.AnyString()
# The minimum number of bits for an RSA key. Must be 2048 bits, or greater
# (recommended by TUF). Crypto modules like 'pycrypto_keys.py' may set further
# restrictions on keys (e.g., the number of bits must be a multiple of 256).
# Recommended RSA key sizes:
# http://www.emc.com/emc-plus/rsa-labs/historical/twirl-and-rsa-key-size.htm#table1
RSAKEYBITS_SCHEMA = SCHEMA.Integer(lo=2048)
# The number of hashed bins, or the number of delegated roles. See
# delegate_hashed_bins() in 'repository_tool.py' for an example. Note:
# Tools may require further restrictions on the number of bins, such
# as requiring them to be a power of 2.
NUMBINS_SCHEMA = SCHEMA.Integer(lo=1)
# A PyCrypto signature.
PYCRYPTOSIGNATURE_SCHEMA = SCHEMA.AnyBytes()
# A pyca-cryptography signature.
PYCACRYPTOSIGNATURE_SCHEMA = SCHEMA.AnyBytes()
# An RSA key in PEM format.
PEMRSA_SCHEMA = SCHEMA.AnyString()
# A string representing a password.
PASSWORD_SCHEMA = SCHEMA.AnyString()
# A list of passwords.
PASSWORDS_SCHEMA = SCHEMA.ListOf(PASSWORD_SCHEMA)
# The actual values of a key, as opposed to meta data such as a key type and
# key identifier ('rsa', 233df889cb). For RSA keys, the key value is a pair of
# public and private keys in PEM Format stored as strings.
KEYVAL_SCHEMA = SCHEMA.Object(
object_name = 'KEYVAL_SCHEMA',
public = SCHEMA.AnyString(),
private = SCHEMA.Optional(SCHEMA.AnyString()))
# Supported TUF key types.
KEYTYPE_SCHEMA = SCHEMA.OneOf(
[SCHEMA.String('rsa'), SCHEMA.String('ed25519')])
# A generic TUF key. All TUF keys should be saved to metadata files in this
# format.
KEY_SCHEMA = SCHEMA.Object(
object_name = 'KEY_SCHEMA',
keytype = SCHEMA.AnyString(),
keyval = KEYVAL_SCHEMA,
expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA))
# A TUF key object. This schema simplifies validation of keys that may be one
# of the supported key types. Supported key types: 'rsa', 'ed25519'.
ANYKEY_SCHEMA = SCHEMA.Object(
object_name = 'ANYKEY_SCHEMA',
keytype = KEYTYPE_SCHEMA,
keyid = KEYID_SCHEMA,
keyid_hash_algorithms = SCHEMA.Optional(HASHALGORITHMS_SCHEMA),
keyval = KEYVAL_SCHEMA,
expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA))
# A list of TUF key objects.
ANYKEYLIST_SCHEMA = SCHEMA.ListOf(ANYKEY_SCHEMA)
# An RSA TUF key.
RSAKEY_SCHEMA = SCHEMA.Object(
object_name = 'RSAKEY_SCHEMA',
keytype = SCHEMA.String('rsa'),
keyid = KEYID_SCHEMA,
keyid_hash_algorithms = SCHEMA.Optional(HASHALGORITHMS_SCHEMA),
keyval = KEYVAL_SCHEMA)
# An ED25519 raw public key, which must be 32 bytes.
ED25519PUBLIC_SCHEMA = SCHEMA.LengthBytes(32)
# An ED25519 raw seed key, which must be 32 bytes.
ED25519SEED_SCHEMA = SCHEMA.LengthBytes(32)
# An ED25519 raw signature, which must be 64 bytes.
ED25519SIGNATURE_SCHEMA = SCHEMA.LengthBytes(64)
# Required installation libraries expected by the repository tools and other
# cryptography modules.
REQUIRED_LIBRARIES_SCHEMA = SCHEMA.ListOf(SCHEMA.OneOf(
[SCHEMA.String('general'), SCHEMA.String('ed25519'), SCHEMA.String('rsa')]))
# An ed25519 TUF key.
ED25519KEY_SCHEMA = SCHEMA.Object(
object_name = 'ED25519KEY_SCHEMA',
keytype = SCHEMA.String('ed25519'),
keyid = KEYID_SCHEMA,
keyid_hash_algorithms = SCHEMA.Optional(HASHALGORITHMS_SCHEMA),
keyval = KEYVAL_SCHEMA)
# Information about target files, like file length and file hash(es). This
# schema allows the storage of multiple hashes for the same file (e.g., sha256
# and sha512 may be computed for the same file and stored).
FILEINFO_SCHEMA = SCHEMA.Object(
object_name = 'FILEINFO_SCHEMA',
length = LENGTH_SCHEMA,
hashes = HASHDICT_SCHEMA,
version = SCHEMA.Optional(METADATAVERSION_SCHEMA),
custom = SCHEMA.Optional(SCHEMA.Object()))
# Version information specified in "snapshot.json" for each role available on
# the TUF repository. The 'FILEINFO_SCHEMA' object was previously listed in
# the snapshot role, but was switched to this object format to reduce the
# amount of metadata that needs to be downloaded. Listing version numbers in
# "snapshot.json" also prevents rollback attacks for roles that clients have
# not downloaded.
VERSIONINFO_SCHEMA = SCHEMA.Object(
object_name = 'VERSIONINFO_SCHEMA',
version = METADATAVERSION_SCHEMA)
# A dict holding the version or file information for a particular metadata
# role. The dict keys hold the relative file paths, and the dict values the
# corresponding version numbers and/or file information.
FILEINFODICT_SCHEMA = SCHEMA.DictOf(
key_schema = RELPATH_SCHEMA,
value_schema = SCHEMA.OneOf([VERSIONINFO_SCHEMA, FILEINFO_SCHEMA]))
# A dict holding the information for a particular target / file. The dict keys
# hold the relative file paths, and the dict values the corresponding file
# information.
FILEDICT_SCHEMA = SCHEMA.DictOf(
key_schema = RELPATH_SCHEMA,
value_schema = FILEINFO_SCHEMA)
# A dict holding a target file.
TARGETFILE_SCHEMA = SCHEMA.Object(
object_name = 'TARGETFILE_SCHEMA',
filepath = RELPATH_SCHEMA,
fileinfo = FILEINFO_SCHEMA)
# A list of TARGETFILE_SCHEMA.
TARGETFILES_SCHEMA = SCHEMA.ListOf(TARGETFILE_SCHEMA)
# A single signature of an object. Indicates the signature, the ID of the
# signing key, and the signing method.
# I debated making the signature schema not contain the key ID and instead have
# the signatures of a file be a dictionary with the key being the keyid and the
# value being the signature schema without the keyid. That would be under
# the argument that a key should only be able to sign a file once. However,
# one can imagine that maybe a key wants to sign multiple times with different
# signature methods.
SIGNATURE_SCHEMA = SCHEMA.Object(
object_name = 'SIGNATURE_SCHEMA',
keyid = KEYID_SCHEMA,
method = SIG_METHOD_SCHEMA,
sig = HEX_SCHEMA)
# List of SIGNATURE_SCHEMA.
SIGNATURES_SCHEMA = SCHEMA.ListOf(SIGNATURE_SCHEMA)
# A schema holding the result of checking the signatures of a particular
# 'SIGNABLE_SCHEMA' role.
# For example, how many of the signatures for the 'Target' role are
# valid? This SCHEMA holds this information. See 'sig.py' for
# more information.
SIGNATURESTATUS_SCHEMA = SCHEMA.Object(
object_name = 'SIGNATURESTATUS_SCHEMA',
threshold = SCHEMA.Integer(),
good_sigs = KEYIDS_SCHEMA,
bad_sigs = KEYIDS_SCHEMA,
unknown_sigs = KEYIDS_SCHEMA,
untrusted_sigs = KEYIDS_SCHEMA,
unknown_method_sigs = KEYIDS_SCHEMA)
# A signable object. Holds the signing role and its associated signatures.
SIGNABLE_SCHEMA = SCHEMA.Object(
object_name = 'SIGNABLE_SCHEMA',
signed = SCHEMA.Any(),
signatures = SCHEMA.ListOf(SIGNATURE_SCHEMA))
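# Example (illustrative): make_signable(), defined later in this module, wraps
# an arbitrary role object into this envelope, e.g.
#   make_signable({'_type': 'root'}) == {'signed': {'_type': 'root'}, 'signatures': []}
# and the result satisfies SIGNABLE_SCHEMA.matches().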
# A dictionary where the dict keys hold a keyid and the dict values a key
# object.
KEYDICT_SCHEMA = SCHEMA.DictOf(
key_schema = KEYID_SCHEMA,
value_schema = KEY_SCHEMA)
# The format used by the key database to store keys. The dict keys hold a key
# identifier and the dict values any object. The key database should store
# key objects in the values (e.g., 'RSAKEY_SCHEMA', 'DSAKEY_SCHEMA').
KEYDB_SCHEMA = SCHEMA.DictOf(
key_schema = KEYID_SCHEMA,
value_schema = SCHEMA.Any())
# The format of the resulting "scp config dict" after extraction from the
# push configuration file (i.e., push.cfg). In the case of a config file
# utilizing the scp transfer module, it must contain the 'general' and 'scp'
# sections, where 'general' must contain a 'transfer_module' and
# 'metadata_path' entry, and 'scp' the 'host', 'user', 'identity_file', and
# 'remote_directory' entries. See 'tuf/pushtools/pushtoolslib.py' and
# 'tuf/pushtools/push.py'.
SCPCONFIG_SCHEMA = SCHEMA.Object(
object_name = 'SCPCONFIG_SCHEMA',
general = SCHEMA.Object(
object_name = '[general]',
transfer_module = SCHEMA.String('scp'),
metadata_path = PATH_SCHEMA,
targets_directory = PATH_SCHEMA),
scp=SCHEMA.Object(
object_name = '[scp]',
host = URL_SCHEMA,
user = NAME_SCHEMA,
identity_file = PATH_SCHEMA,
remote_directory = PATH_SCHEMA))
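# Illustrative sketch (not part of the original module): a dict that would
# satisfy SCPCONFIG_SCHEMA above.  The host, user, and path values are
# hypothetical placeholders.
#
#   SCPCONFIG_SCHEMA.check_match({
#       'general': {'transfer_module': 'scp',
#                   'metadata_path': '/path/to/metadata',
#                   'targets_directory': '/path/to/targets'},
#       'scp': {'host': 'localhost',
#               'user': 'repo-admin',
#               'identity_file': '/home/repo-admin/.ssh/id_rsa',
#               'remote_directory': '/var/repo'}})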
# The format of the resulting "receive config dict" after extraction from the
# receive configuration file (i.e., receive.cfg). The receive config file
# must contain a 'general' section, and this section the 'pushroots',
# 'repository_directory', 'metadata_directory', 'targets_directory', and
# 'backup_directory' entries.
# see 'tuf/pushtools/pushtoolslib.py' and 'tuf/pushtools/receive/receive.py'
RECEIVECONFIG_SCHEMA = SCHEMA.Object(
object_name = 'RECEIVECONFIG_SCHEMA', general=SCHEMA.Object(
object_name = '[general]',
pushroots = SCHEMA.ListOf(PATH_SCHEMA),
repository_directory = PATH_SCHEMA,
metadata_directory = PATH_SCHEMA,
targets_directory = PATH_SCHEMA,
backup_directory = PATH_SCHEMA))
# A path hash prefix is a hexadecimal string.
PATH_HASH_PREFIX_SCHEMA = HEX_SCHEMA
# A list of path hash prefixes.
PATH_HASH_PREFIXES_SCHEMA = SCHEMA.ListOf(PATH_HASH_PREFIX_SCHEMA)
# THIS SCHEMA is used for a variety of different things, like delegation
# metadata and the list of keyids and threshold for top-level roles in root
# metadata. That's... not good.
# Role object in {'keyids': [keydids..], 'name': 'ABC', 'threshold': 1,
# 'paths':[filepaths..]} format.
ROLE_SCHEMA = SCHEMA.Object(
object_name = 'ROLE_SCHEMA',
name = SCHEMA.Optional(ROLENAME_SCHEMA),
keyids = KEYIDS_SCHEMA,
threshold = THRESHOLD_SCHEMA,
backtrack = SCHEMA.Optional(BOOLEAN_SCHEMA),
paths = SCHEMA.Optional(RELPATHS_SCHEMA),
path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA))
# A dict of roles where the dict keys are role names and the dict values hold
# the role data/information.
ROLEDICT_SCHEMA = SCHEMA.DictOf(
key_schema = ROLENAME_SCHEMA,
value_schema = ROLE_SCHEMA)
# A dictionary of ROLEDICT, where the dictionary keys are repository names and
# the dictionary values hold the role information for each role available on
# the repository named by the corresponding key.
ROLEDICTDB_SCHEMA = SCHEMA.DictOf(
key_schema = NAME_SCHEMA,
value_schema = ROLEDICT_SCHEMA)
# Like ROLEDICT_SCHEMA, except that ROLE_SCHEMA instances are stored in order.
ROLELIST_SCHEMA = SCHEMA.ListOf(ROLE_SCHEMA)
# Simply a list of role names, to be used in MULTI_ROLE_DELEGATION_SCHEMA.
ROLENAMELIST_SCHEMA = SCHEMA.ListOf(ROLENAME_SCHEMA)
MULTI_ROLE_DELEGATION_SCHEMA = SCHEMA.Object(
# Role object in {'keyids': [keydids..], 'name': 'ABC', 'threshold': 1,
# 'paths':[filepaths..]} format.
object_name = 'MULTI_ROLE_DELEGATION_SCHEMA',
#name = SCHEMA.Optional(ROLENAME_SCHEMA), #MRDs don't have names.
#keyids = KEYIDS_SCHEMA,
required_roles = ROLENAMELIST_SCHEMA,
#threshold = THRESHOLD_SCHEMA,
backtrack = SCHEMA.Optional(BOOLEAN_SCHEMA),
# If target info differs among the required roles, throw error or allow
# backtracking?
abort_on_disagreement = SCHEMA.Optional(BOOLEAN_SCHEMA),
paths = SCHEMA.Optional(RELPATHS_SCHEMA))
#path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA)) # TODO
MULTI_ROLE_DELEGATION_LIST_SCHEMA = SCHEMA.ListOf(MULTI_ROLE_DELEGATION_SCHEMA)
# The delegated roles of a Targets role (a parent).
DELEGATIONS_SCHEMA = SCHEMA.Object(
keys = KEYDICT_SCHEMA,
roles = ROLELIST_SCHEMA,
multiroledelegations = SCHEMA.Optional(MULTI_ROLE_DELEGATION_LIST_SCHEMA))
# Supported compression extension (e.g., 'gz').
COMPRESSION_SCHEMA = SCHEMA.OneOf([SCHEMA.String(''), SCHEMA.String('gz')])
# List of supported compression extensions.
COMPRESSIONS_SCHEMA = SCHEMA.ListOf(
SCHEMA.OneOf([SCHEMA.String(''), SCHEMA.String('gz')]))
# The fileinfo format of targets specified in the repository and
# developer tools. The second element of this list holds custom data about the
# target, such as file permissions, author(s), last modified, etc.
CUSTOM_SCHEMA = SCHEMA.Object()
PATH_FILEINFO_SCHEMA = SCHEMA.DictOf(
key_schema = RELPATH_SCHEMA,
value_schema = CUSTOM_SCHEMA)
# tuf.roledb
ROLEDB_SCHEMA = SCHEMA.Object(
object_name = 'ROLEDB_SCHEMA',
keyids = KEYIDS_SCHEMA,
signing_keyids = SCHEMA.Optional(KEYIDS_SCHEMA),
threshold = THRESHOLD_SCHEMA,
version = SCHEMA.Optional(METADATAVERSION_SCHEMA),
expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA),
signatures = SCHEMA.Optional(SIGNATURES_SCHEMA),
compressions = SCHEMA.Optional(COMPRESSIONS_SCHEMA),
paths = SCHEMA.Optional(SCHEMA.OneOf([RELPATHS_SCHEMA, PATH_FILEINFO_SCHEMA])),
path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA),
delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA),
partial_loaded = SCHEMA.Optional(BOOLEAN_SCHEMA))
# Root role: indicates root keys and top-level roles.
ROOT_SCHEMA = SCHEMA.Object(
object_name = 'ROOT_SCHEMA',
_type = SCHEMA.String('Root'),
version = METADATAVERSION_SCHEMA,
consistent_snapshot = BOOLEAN_SCHEMA,
compression_algorithms = COMPRESSIONS_SCHEMA,
expires = ISO8601_DATETIME_SCHEMA,
keys = KEYDICT_SCHEMA,
roles = ROLEDICT_SCHEMA)
# Targets role: Indicates targets and delegates target paths to other roles.
TARGETS_SCHEMA = SCHEMA.Object(
object_name = 'TARGETS_SCHEMA',
_type = SCHEMA.String('Targets'),
version = METADATAVERSION_SCHEMA,
expires = ISO8601_DATETIME_SCHEMA,
targets = FILEDICT_SCHEMA,
delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA))
# Snapshot role: indicates the latest versions of all metadata (except timestamp).
SNAPSHOT_SCHEMA = SCHEMA.Object(
object_name = 'SNAPSHOT_SCHEMA',
_type = SCHEMA.String('Snapshot'),
version = METADATAVERSION_SCHEMA,
expires = ISO8601_DATETIME_SCHEMA,
meta = FILEINFODICT_SCHEMA)
# Timestamp role: indicates the latest version of the snapshot file.
TIMESTAMP_SCHEMA = SCHEMA.Object(
object_name = 'TIMESTAMP_SCHEMA',
_type = SCHEMA.String('Timestamp'),
version = METADATAVERSION_SCHEMA,
expires = ISO8601_DATETIME_SCHEMA,
meta = FILEDICT_SCHEMA)
# project.cfg file: stores information about the project in a json dictionary
PROJECT_CFG_SCHEMA = SCHEMA.Object(
object_name = 'PROJECT_CFG_SCHEMA',
project_name = SCHEMA.AnyString(),
layout_type = SCHEMA.OneOf([SCHEMA.String('repo-like'), SCHEMA.String('flat')]),
targets_location = PATH_SCHEMA,
metadata_location = PATH_SCHEMA,
prefix = PATH_SCHEMA,
public_keys = KEYDICT_SCHEMA,
threshold = SCHEMA.Integer(lo = 0, hi = 2)
)
# A schema containing information a repository mirror may require,
# such as a url, the path of the directory metadata files, etc.
MIRROR_SCHEMA = SCHEMA.Object(
object_name = 'MIRROR_SCHEMA',
url_prefix = URL_SCHEMA,
metadata_path = RELPATH_SCHEMA,
targets_path = RELPATH_SCHEMA,
confined_target_dirs = SCHEMA.Optional(RELPATHS_SCHEMA), # should now default to ['']
custom = SCHEMA.Optional(SCHEMA.Object()))
# A dictionary of mirrors where the dict keys hold the mirror's name and
# and the dict values the mirror's data (i.e., 'MIRROR_SCHEMA').
# The SingleRepoUpdater class of 'updater.py' accepts dictionaries of this
# type.
MIRRORDICT_SCHEMA = SCHEMA.DictOf(
key_schema = SCHEMA.AnyString(),
value_schema = MIRROR_SCHEMA)
# # A dictionary of mirror dictionaries, one mirror dictionary per repository.
# # The Updater class of 'updater.py' accepts dictionaries of this type.
# MULTIREPO_MIRRORDICT_SCHEMA = SCHEMA.DictOf(
# key_schema = SCHEMA.AnyString(), # Repository name
# value_schema = MIRRORDICT_SCHEMA)
# A Mirrorlist: indicates all the live mirrors, and what documents they
# serve.
MIRRORLIST_SCHEMA = SCHEMA.Object(
object_name = 'MIRRORLIST_SCHEMA',
_type = SCHEMA.String('Mirrors'),
version = METADATAVERSION_SCHEMA,
expires = ISO8601_DATETIME_SCHEMA,
mirrors = SCHEMA.ListOf(MIRROR_SCHEMA))
# Per tentative design for pinned.json. To replace MIRROR_SCHEMA et al. when
# confirmed.
ALT_MIRROR_SCHEMA = URL_SCHEMA
ALT_MIRRORLIST_SCHEMA = SCHEMA.ListOf(ALT_MIRROR_SCHEMA)
REPOSITORY_NAME_SCHEMA = SCHEMA.AnyString()
# A repository listing inside pinned.json.
PINNED_REPOSITORY_SCHEMA = SCHEMA.Object(
#local_metadata_directory = SCHEMA.AnyString(), # path to client's local metadata directory
#root_override_URLs = SCHEMA.Optional(ListOf(SCHEMA.AnyString())), # URLs for root files, optional
mirrors = ALT_MIRRORLIST_SCHEMA)
# PINNED_REPOSITORIES_SCHEMA = SCHEMA.DictOf(
# key_schema = REPOSITORY_NAME_SCHEMA,
# value_schema = PINNED_REPOSITORY_SCHEMA)
# A delegation inside pinned.json.
PINNING_DELEGATION_SCHEMA = SCHEMA.Object(
paths = SCHEMA.ListOf(SCHEMA.AnyString()),
repositories = SCHEMA.ListOf(SCHEMA.AnyString()),
terminating = SCHEMA.Optional(SCHEMA.Boolean()))
# pinned.json: client-only file that determines which repository/repositories
# to use for which targets.
PINNING_FILE_SCHEMA = SCHEMA.Object(
#object_name = 'PINNINGS_FILE_SCHEMA',
repositories = SCHEMA.DictOf(
key_schema = REPOSITORY_NAME_SCHEMA,
value_schema = PINNED_REPOSITORY_SCHEMA),
delegations = SCHEMA.ListOf(PINNING_DELEGATION_SCHEMA))
# Any of the role schemas (e.g., TIMESTAMP_SCHEMA, SNAPSHOT_SCHEMA, etc.)
ANYROLE_SCHEMA = SCHEMA.OneOf([ROOT_SCHEMA, TARGETS_SCHEMA, SNAPSHOT_SCHEMA,
TIMESTAMP_SCHEMA, MIRROR_SCHEMA])
class MetaFile(object):
"""
<Purpose>
Base class for all metadata file classes.
Classes representing metadata files such as RootFile
and SnapshotFile all inherit from MetaFile. The
__eq__, __ne__, perform 'equal' and 'not equal' comparisons
between Metadata File objects.
"""
info = None
def __eq__(self, other):
return isinstance(other, MetaFile) and self.info == other.info
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __getattr__(self, name):
"""
Allow all metafile objects to have their interesting attributes
referred to directly without the info dict. The info dict is just
to be able to do the __eq__ comparison generically.
"""
if name in self.info:
return self.info[name]
else:
raise AttributeError(name)
def build_dict_conforming_to_schema(schema, **kwargs):
"""
Given a schema object (for example, TIMESTAMP_SCHEMA from this module) and
a set of keyword arguments, create a dictionary that conforms to the given
schema, using the keyword arguments to define the elements of the new dict.
Checks the result to make sure that it conforms to the given schema, raising
an error if not.
Returns the new dict conforming to the schema if there are no problems.
"""
# Check that schema supports a check_match call.
# Duck typing version of this check:
if not hasattr(schema, 'check_match'):
raise ValueError(
'The given "schema" does not seem to be a schema. It has no '
'"check_match" method. Given schema: ' + repr(schema))
# # Strict typing version of this check:
# # Check that schema_name is a SCHEMA.Object.
# if not isinstance(schema, schema.Schema):
# raise ValueError(
# 'The first argument must be a schema.Schema object, but is not. '
# 'Given schema: ' + repr(schema))
# The return value.
d = {}
for key, value in kwargs.items():
d[key] = value
schema.check_match(d)
return d
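# Illustrative sketch (not part of the original module): using the helper
# above to build and validate a dict conforming to VERSIONINFO_SCHEMA, which
# is defined earlier in this module.  The version number is a made-up value.
#
#   versioninfo = build_dict_conforming_to_schema(VERSIONINFO_SCHEMA, version=8)
#   # versioninfo == {'version': 8}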
# A dict holding the recognized schemas for the top-level roles.
SCHEMAS_BY_TYPE = {
'Root' : ROOT_SCHEMA,
'Targets' : TARGETS_SCHEMA,
'Snapshot' : SNAPSHOT_SCHEMA,
'Timestamp' : TIMESTAMP_SCHEMA,
'Mirrors' : MIRRORLIST_SCHEMA}
def datetime_to_unix_timestamp(datetime_object):
"""
<Purpose>
    Convert 'datetime_object' (in datetime.datetime() format) to a Unix/POSIX
timestamp. For example, Python's time.time() returns a Unix timestamp, and
includes the number of microseconds. 'datetime_object' is converted to UTC.
>>> datetime_object = datetime.datetime(1985, 10, 26, 1, 22)
>>> timestamp = datetime_to_unix_timestamp(datetime_object)
>>> timestamp
499137720
<Arguments>
datetime_object:
The datetime.datetime() object to convert to a Unix timestamp.
<Exceptions>
tuf.FormatError, if 'datetime_object' is not a datetime.datetime() object.
<Side Effects>
None.
<Returns>
A unix (posix) timestamp (e.g., 499137660).
"""
# Is 'datetime_object' a datetime.datetime() object?
# Raise 'tuf.FormatError' if not.
if not isinstance(datetime_object, datetime.datetime):
message = repr(datetime_object) + ' is not a datetime.datetime() object.'
raise tuf.FormatError(message)
unix_timestamp = calendar.timegm(datetime_object.timetuple())
return unix_timestamp
def unix_timestamp_to_datetime(unix_timestamp):
"""
<Purpose>
Convert 'unix_timestamp' (i.e., POSIX time, in UNIX_TIMESTAMP_SCHEMA format)
to a datetime.datetime() object. 'unix_timestamp' is the number of seconds
since the epoch (January 1, 1970.)
>>> datetime_object = unix_timestamp_to_datetime(1445455680)
>>> datetime_object
datetime.datetime(2015, 10, 21, 19, 28)
<Arguments>
unix_timestamp:
An integer representing the time (e.g., 1445455680). Conformant to
'tuf.formats.UNIX_TIMESTAMP_SCHEMA'.
<Exceptions>
tuf.FormatError, if 'unix_timestamp' is improperly formatted.
<Side Effects>
None.
<Returns>
A datetime.datetime() object corresponding to 'unix_timestamp'.
"""
# Is 'unix_timestamp' properly formatted?
# Raise 'tuf.FormatError' if there is a mismatch.
UNIX_TIMESTAMP_SCHEMA.check_match(unix_timestamp)
# Convert 'unix_timestamp' to a 'time.struct_time', in UTC. The Daylight
# Savings Time (DST) flag is set to zero. datetime.fromtimestamp() is not
# used because it returns a local datetime.
struct_time = time.gmtime(unix_timestamp)
# Extract the (year, month, day, hour, minutes, seconds) arguments for the
# datetime object to be returned.
datetime_object = datetime.datetime(*struct_time[:6])
return datetime_object
def format_base64(data):
"""
<Purpose>
Return the base64 encoding of 'data' with whitespace
and '=' signs omitted.
<Arguments>
data:
Binary or buffer of data to convert.
<Exceptions>
tuf.FormatError, if the base64 encoding fails or the argument
is invalid.
<Side Effects>
None.
<Returns>
A base64-encoded string.
"""
try:
return binascii.b2a_base64(data).decode('utf-8').rstrip('=\n ')
except (TypeError, binascii.Error) as e:
raise tuf.FormatError('Invalid base64 encoding: ' + str(e))
def parse_base64(base64_string):
"""
<Purpose>
Parse a base64 encoding with whitespace and '=' signs omitted.
<Arguments>
base64_string:
A string holding a base64 value.
<Exceptions>
tuf.FormatError, if 'base64_string' cannot be parsed due to
an invalid base64 encoding.
<Side Effects>
None.
<Returns>
    A byte string representing the parsed base64 encoding of
'base64_string'.
"""
if not isinstance(base64_string, six.string_types):
message = 'Invalid argument: '+repr(base64_string)
raise tuf.FormatError(message)
extra = len(base64_string) % 4
if extra:
padding = '=' * (4 - extra)
base64_string = base64_string + padding
try:
return binascii.a2b_base64(base64_string.encode('utf-8'))
except (TypeError, binascii.Error) as e:
raise tuf.FormatError('Invalid base64 encoding: ' + str(e))
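# Illustrative round trip (not part of the original module) for the two
# helpers above: format_base64() strips the trailing '=' padding and newline,
# and parse_base64() restores the padding before decoding.
#
#   encoded = format_base64(b'binary data')   # 'YmluYXJ5IGRhdGE'
#   parse_base64(encoded)                      # b'binary data'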
def make_signable(object):
"""
<Purpose>
Return the role metadata 'object' in 'SIGNABLE_SCHEMA' format.
'object' is added to the 'signed' key, and an empty list
initialized to the 'signatures' key. The caller adds signatures
to this second field.
Note: check_signable_object_format() should be called after
make_signable() and signatures added to ensure the final
signable object has a valid format (i.e., a signable containing
a supported role metadata).
<Arguments>
object:
A role schema dict (e.g., 'ROOT_SCHEMA', 'SNAPSHOT_SCHEMA').
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A dict in 'SIGNABLE_SCHEMA' format.
"""
if not isinstance(object, dict) or 'signed' not in object:
return { 'signed' : object, 'signatures' : [] }
else:
return object
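# Illustrative sketch (not part of the original module): wrapping a role dict
# in the signable envelope.  The dict shown is a minimal placeholder, not a
# fully populated role.
#
#   make_signable({'_type': 'Targets'})
#   # {'signed': {'_type': 'Targets'}, 'signatures': []}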
def make_fileinfo(length, hashes, version=None, custom=None):
"""
<Purpose>
Create a dictionary conformant to 'FILEINFO_SCHEMA'.
This dict describes both metadata and target files.
<Arguments>
length:
An integer representing the size of the file.
hashes:
A dict of hashes in 'HASHDICT_SCHEMA' format, which has the form:
{'sha256': 123df8a9b12, 'sha512': 324324dfc121, ...}
version:
An optional integer representing the version of the file.
custom:
An optional object providing additional information about the file.
<Exceptions>
tuf.FormatError, if the 'FILEINFO_SCHEMA' to be returned
does not have the correct format.
<Side Effects>
If any of the arguments are incorrectly formatted, the dict
returned will be checked for formatting errors, and if found,
will raise a 'tuf.FormatError' exception.
<Returns>
A dictionary conformant to 'FILEINFO_SCHEMA', representing the file
information of a metadata or target file.
"""
fileinfo = {'length' : length, 'hashes' : hashes}
if version is not None:
fileinfo['version'] = version
if custom is not None:
fileinfo['custom'] = custom
# Raise 'tuf.FormatError' if the check fails.
FILEINFO_SCHEMA.check_match(fileinfo)
return fileinfo
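# Illustrative sketch (not part of the original module): the digest below is a
# shortened placeholder, not a real sha256 value.
#
#   make_fileinfo(1024, {'sha256': 'ab12cd34'})
#   # {'length': 1024, 'hashes': {'sha256': 'ab12cd34'}}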
def make_versioninfo(version_number):
"""
<Purpose>
Create a dictionary conformant to 'VERSIONINFO_SCHEMA'. This dict
    describes the version of a particular metadata role.
<Arguments>
version_number:
An integer representing the version of a particular metadata role.
The dictionary returned by this function is expected to be included
in Snapshot metadata.
<Exceptions>
tuf.FormatError, if the dict to be returned does not have the correct
format (i.e., VERSIONINFO_SCHEMA).
<Side Effects>
None.
<Returns>
A dictionary conformant to 'VERSIONINFO_SCHEMA', containing the version
information of a metadata role.
"""
versioninfo = {'version': version_number}
# Raise 'tuf.FormatError' if 'versioninfo' is improperly formatted.
try:
VERSIONINFO_SCHEMA.check_match(versioninfo)
except:
raise
else:
return versioninfo
# TODO: Destroy this function. Use build_dict_conforming_to_schema instead.
def make_role_metadata(keyids, threshold, name=None, paths=None,
path_hash_prefixes=None):
"""
<Purpose>
Create a dictionary conforming to 'tuf.formats.ROLE_SCHEMA',
representing the role with 'keyids', 'threshold', and 'paths'
as field values. 'paths' is optional (i.e., used only by the
'Target' role).
<Arguments>
keyids: a list of key ids.
threshold:
An integer denoting the number of required keys
for the signing role.
name:
A string that is the name of this role.
paths:
The 'Target' role stores the paths of target files
in its metadata file. 'paths' is a list of
file paths.
path_hash_prefixes:
The 'Target' role stores the paths of target files in its metadata file.
    'path_hash_prefixes' is a succinct way to describe a set of paths to
target files.
<Exceptions>
tuf.FormatError, if the returned role meta is
formatted incorrectly.
<Side Effects>
If any of the arguments do not have a proper format, a
tuf.formats exception is raised when the 'ROLE_SCHEMA' dict
is created.
<Returns>
A properly formatted role meta dict, conforming to
'ROLE_SCHEMA'.
"""
role_meta = {}
role_meta['keyids'] = keyids
role_meta['threshold'] = threshold
if name is not None:
role_meta['name'] = name
# According to the specification, the 'paths' and 'path_hash_prefixes' must
# be mutually exclusive. However, at the time of writing we do not always
# ensure that this is the case with the schema checks (see #83). Therefore,
# we must do it for ourselves.
if paths is not None and path_hash_prefixes is not None:
raise \
tuf.FormatError('Both "paths" and "path_hash_prefixes" are specified.')
if path_hash_prefixes is not None:
role_meta['path_hash_prefixes'] = path_hash_prefixes
elif paths is not None:
role_meta['paths'] = paths
# Does 'role_meta' have the correct type?
# This check ensures 'role_meta' conforms to
# tuf.formats.ROLE_SCHEMA.
ROLE_SCHEMA.check_match(role_meta)
return role_meta
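# Illustrative sketch (not part of the original module): the keyid, role name,
# and target path below are hypothetical placeholders.
#
#   make_role_metadata(['1a2b3c'], 1, name='django', paths=['targets/file.txt'])
#   # {'keyids': ['1a2b3c'], 'threshold': 1, 'name': 'django',
#   #  'paths': ['targets/file.txt']}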
def expected_meta_rolename(meta_rolename):
"""
<Purpose>
Ensure 'meta_rolename' is properly formatted.
'targets' is returned as 'Targets'.
'targets role1' is returned as 'Targets Role1'.
The words in the string (i.e., separated by whitespace)
are capitalized.
<Arguments>
meta_rolename:
A string representing the rolename.
E.g., 'root', 'targets'.
<Exceptions>
tuf.FormatError, if 'meta_rolename' is improperly formatted.
<Side Effects>
None.
<Returns>
A string (e.g., 'Root', 'Targets').
"""
# Does 'meta_rolename' have the correct type?
# This check ensures 'meta_rolename' conforms to
# 'tuf.formats.NAME_SCHEMA'.
# Raise 'tuf.FormatError' if there is a mismatch.
NAME_SCHEMA.check_match(meta_rolename)
return string.capwords(meta_rolename)
def check_signable_object_format(object):
"""
<Purpose>
Ensure 'object' is properly formatted, conformant to
'tuf.formats.SIGNABLE_SCHEMA'. Return the signing role on success.
Note: The 'signed' field of a 'SIGNABLE_SCHEMA' is checked against
tuf.schema.Any(). The 'signed' field, however, should actually
hold one of the supported role schemas (e.g., 'ROOT_SCHEMA',
'TARGETS_SCHEMA'). The role schemas all differ in their format, so this
function determines exactly which schema is listed in the 'signed'
field.
<Arguments>
object:
      The object to compare against 'SIGNABLE_SCHEMA'.
<Exceptions>
tuf.FormatError, if 'object' does not have the correct format.
<Side Effects>
None.
<Returns>
A string representing the signing role (e.g., 'root', 'targets').
The role string is returned with characters all lower case.
"""
# Does 'object' have the correct type?
# This check ensures 'object' conforms to
# 'tuf.formats.SIGNABLE_SCHEMA'.
SIGNABLE_SCHEMA.check_match(object)
try:
role_type = object['signed']['_type']
except (KeyError, TypeError):
raise tuf.FormatError('Untyped object')
try:
schema = SCHEMAS_BY_TYPE[role_type]
except KeyError:
raise tuf.FormatError('Unrecognized type ' + repr(role_type))
# 'tuf.FormatError' raised if 'object' does not have a properly
# formatted role schema.
schema.check_match(object['signed'])
return role_type.lower()
def _canonical_string_encoder(string):
"""
<Purpose>
Encode 'string' to canonical string format.
<Arguments>
string:
The string to encode.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A string with the canonical-encoded 'string' embedded.
"""
string = '"%s"' % re.sub(r'(["\\])', r'\\\1', string)
return string
def _encode_canonical(object, output_function):
# Helper for encode_canonical. Older versions of json.encoder don't
# even let us replace the separators.
if isinstance(object, six.string_types):
output_function(_canonical_string_encoder(object))
elif object is True:
output_function("true")
elif object is False:
output_function("false")
elif object is None:
output_function("null")
elif isinstance(object, six.integer_types):
output_function(str(object))
elif isinstance(object, (tuple, list)):
output_function("[")
if len(object):
for item in object[:-1]:
_encode_canonical(item, output_function)
output_function(",")
_encode_canonical(object[-1], output_function)
output_function("]")
elif isinstance(object, dict):
output_function("{")
if len(object):
items = sorted(six.iteritems(object))
for key, value in items[:-1]:
output_function(_canonical_string_encoder(key))
output_function(":")
_encode_canonical(value, output_function)
output_function(",")
key, value = items[-1]
output_function(_canonical_string_encoder(key))
output_function(":")
_encode_canonical(value, output_function)
output_function("}")
else:
raise tuf.FormatError('I cannot encode '+repr(object))
def encode_canonical(object, output_function=None):
"""
<Purpose>
Encode 'object' in canonical JSON form, as specified at
http://wiki.laptop.org/go/Canonical_JSON . It's a restricted
dialect of JSON in which keys are always lexically sorted,
there is no whitespace, floats aren't allowed, and only quote
and backslash get escaped. The result is encoded in UTF-8,
and the resulting bits are passed to output_function (if provided),
or joined into a string and returned.
Note: This function should be called prior to computing the hash or
signature of a JSON object in TUF. For example, generating a signature
of a signing role object such as 'ROOT_SCHEMA' is required to ensure
repeatable hashes are generated across different json module versions
and platforms. Code elsewhere is free to dump JSON objects in any format
they wish (e.g., utilizing indentation and single quotes around object
keys). These objects are only required to be in "canonical JSON" format
when their hashes or signatures are needed.
>>> encode_canonical("")
'""'
>>> encode_canonical([1, 2, 3])
'[1,2,3]'
>>> encode_canonical([])
'[]'
>>> encode_canonical({"A": [99]})
'{"A":[99]}'
>>> encode_canonical({"x" : 3, "y" : 2})
'{"x":3,"y":2}'
<Arguments>
object:
The object to be encoded.
output_function:
The result will be passed as arguments to 'output_function'
(e.g., output_function('result')).
<Exceptions>
tuf.FormatError, if 'object' cannot be encoded or 'output_function'
is not callable.
<Side Effects>
The results are fed to 'output_function()' if 'output_function' is set.
<Returns>
A string representing the 'object' encoded in canonical JSON form.
"""
result = None
# If 'output_function' is unset, treat it as
# appending to a list.
if output_function is None:
result = []
output_function = result.append
try:
_encode_canonical(object, output_function)
except (TypeError, tuf.FormatError) as e:
message = 'Could not encode ' + repr(object) + ': ' + str(e)
raise tuf.FormatError(message)
# Return the encoded 'object' as a string.
# Note: Implies 'output_function' is None,
# otherwise results are sent to 'output_function'.
if result is not None:
return ''.join(result)
if __name__ == '__main__':
# The interactive sessions of the documentation strings can
# be tested by running formats.py as a standalone module.
# python -B formats.py
import doctest
doctest.testmod()
|
[
"tuf.schema.String",
"tuf.schema.Optional",
"tuf.schema.Boolean",
"tuf.schema.DictOf",
"six.iteritems",
"tuf.schema.ListOf",
"doctest.testmod",
"tuf.schema.AnyString",
"tuf.FormatError",
"tuf.schema.Object",
"string.capwords",
"binascii.b2a_base64",
"re.sub",
"tuf.schema.LengthBytes",
"datetime.datetime",
"tuf.schema.AnyBytes",
"tuf.schema.Any",
"tuf.schema.Integer",
"tuf.schema.RegularExpression",
"time.gmtime",
"tuf.schema.OneOf"
] |
[((2602, 2672), 'tuf.schema.RegularExpression', 'SCHEMA.RegularExpression', (['"""\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}Z"""'], {}), "('\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}Z')\n", (2626, 2672), True, 'import tuf.schema as SCHEMA\n'), ((2938, 2973), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)', 'hi': '(2147483647)'}), '(lo=0, hi=2147483647)\n', (2952, 2973), True, 'import tuf.schema as SCHEMA\n'), ((3038, 3078), 'tuf.schema.RegularExpression', 'SCHEMA.RegularExpression', (['"""[a-fA-F0-9]+"""'], {}), "('[a-fA-F0-9]+')\n", (3062, 3078), True, 'import tuf.schema as SCHEMA\n'), ((3323, 3363), 'tuf.schema.RegularExpression', 'SCHEMA.RegularExpression', (['"""[a-fA-F0-9]+"""'], {}), "('[a-fA-F0-9]+')\n", (3347, 3363), True, 'import tuf.schema as SCHEMA\n'), ((3507, 3534), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['KEYID_SCHEMA'], {}), '(KEYID_SCHEMA)\n', (3520, 3534), True, 'import tuf.schema as SCHEMA\n'), ((3622, 3640), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (3638, 3640), True, 'import tuf.schema as SCHEMA\n'), ((3708, 3726), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (3724, 3726), True, 'import tuf.schema as SCHEMA\n'), ((3745, 3774), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['RELPATH_SCHEMA'], {}), '(RELPATH_SCHEMA)\n', (3758, 3774), True, 'import tuf.schema as SCHEMA\n'), ((3810, 3828), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (3826, 3828), True, 'import tuf.schema as SCHEMA\n'), ((3844, 3870), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['PATH_SCHEMA'], {}), '(PATH_SCHEMA)\n', (3857, 3870), True, 'import tuf.schema as SCHEMA\n'), ((3967, 3985), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (3983, 3985), True, 'import tuf.schema as SCHEMA\n'), ((4309, 4329), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)'}), '(lo=0)\n', (4323, 4329), True, 'import tuf.schema as SCHEMA\n'), ((4405, 4425), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)'}), '(lo=0)\n', (4419, 4425), True, 'import tuf.schema as SCHEMA\n'), ((4546, 4573), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)', 'hi': '(50)'}), '(lo=0, hi=50)\n', (4560, 4573), True, 'import tuf.schema as SCHEMA\n'), ((4629, 4647), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (4645, 4647), True, 'import tuf.schema as SCHEMA\n'), ((4663, 4689), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['NAME_SCHEMA'], {}), '(NAME_SCHEMA)\n', (4676, 4689), True, 'import tuf.schema as SCHEMA\n'), ((4740, 4757), 'tuf.schema.AnyBytes', 'SCHEMA.AnyBytes', ([], {}), '()\n', (4755, 4757), True, 'import tuf.schema as SCHEMA\n'), ((5116, 5133), 'tuf.schema.AnyBytes', 'SCHEMA.AnyBytes', ([], {}), '()\n', (5131, 5133), True, 'import tuf.schema as SCHEMA\n'), ((5208, 5224), 'tuf.schema.Boolean', 'SCHEMA.Boolean', ([], {}), '()\n', (5222, 5224), True, 'import tuf.schema as SCHEMA\n'), ((5374, 5394), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(1)'}), '(lo=1)\n', (5388, 5394), True, 'import tuf.schema as SCHEMA\n'), ((5454, 5472), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (5470, 5472), True, 'import tuf.schema as SCHEMA\n'), ((5839, 5862), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(2048)'}), '(lo=2048)\n', (5853, 5862), True, 'import tuf.schema as SCHEMA\n'), ((6131, 6151), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(1)'}), '(lo=1)\n', (6145, 6151), True, 'import tuf.schema as SCHEMA\n'), ((6204, 6221), 'tuf.schema.AnyBytes', 'SCHEMA.AnyBytes', ([], {}), '()\n', (6219, 6221), 
True, 'import tuf.schema as SCHEMA\n'), ((6285, 6302), 'tuf.schema.AnyBytes', 'SCHEMA.AnyBytes', ([], {}), '()\n', (6300, 6302), True, 'import tuf.schema as SCHEMA\n'), ((6348, 6366), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (6364, 6366), True, 'import tuf.schema as SCHEMA\n'), ((6422, 6440), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (6438, 6440), True, 'import tuf.schema as SCHEMA\n'), ((6484, 6514), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['PASSWORD_SCHEMA'], {}), '(PASSWORD_SCHEMA)\n', (6497, 6514), True, 'import tuf.schema as SCHEMA\n'), ((7708, 7736), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['ANYKEY_SCHEMA'], {}), '(ANYKEY_SCHEMA)\n', (7721, 7736), True, 'import tuf.schema as SCHEMA\n'), ((8047, 8069), 'tuf.schema.LengthBytes', 'SCHEMA.LengthBytes', (['(32)'], {}), '(32)\n', (8065, 8069), True, 'import tuf.schema as SCHEMA\n'), ((8145, 8167), 'tuf.schema.LengthBytes', 'SCHEMA.LengthBytes', (['(32)'], {}), '(32)\n', (8163, 8167), True, 'import tuf.schema as SCHEMA\n'), ((8249, 8271), 'tuf.schema.LengthBytes', 'SCHEMA.LengthBytes', (['(64)'], {}), '(64)\n', (8267, 8271), True, 'import tuf.schema as SCHEMA\n'), ((9619, 9698), 'tuf.schema.Object', 'SCHEMA.Object', ([], {'object_name': '"""VERSIONINFO_SCHEMA"""', 'version': 'METADATAVERSION_SCHEMA'}), "(object_name='VERSIONINFO_SCHEMA', version=METADATAVERSION_SCHEMA)\n", (9632, 9698), True, 'import tuf.schema as SCHEMA\n'), ((10245, 10315), 'tuf.schema.DictOf', 'SCHEMA.DictOf', ([], {'key_schema': 'RELPATH_SCHEMA', 'value_schema': 'FILEINFO_SCHEMA'}), '(key_schema=RELPATH_SCHEMA, value_schema=FILEINFO_SCHEMA)\n', (10258, 10315), True, 'import tuf.schema as SCHEMA\n'), ((10378, 10479), 'tuf.schema.Object', 'SCHEMA.Object', ([], {'object_name': '"""TARGETFILE_SCHEMA"""', 'filepath': 'RELPATH_SCHEMA', 'fileinfo': 'FILEINFO_SCHEMA'}), "(object_name='TARGETFILE_SCHEMA', filepath=RELPATH_SCHEMA,\n fileinfo=FILEINFO_SCHEMA)\n", (10391, 10479), True, 'import tuf.schema as SCHEMA\n'), ((10542, 10574), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['TARGETFILE_SCHEMA'], {}), '(TARGETFILE_SCHEMA)\n', (10555, 10574), True, 'import tuf.schema as SCHEMA\n'), ((11119, 11231), 'tuf.schema.Object', 'SCHEMA.Object', ([], {'object_name': '"""SIGNATURE_SCHEMA"""', 'keyid': 'KEYID_SCHEMA', 'method': 'SIG_METHOD_SCHEMA', 'sig': 'HEX_SCHEMA'}), "(object_name='SIGNATURE_SCHEMA', keyid=KEYID_SCHEMA, method=\n SIG_METHOD_SCHEMA, sig=HEX_SCHEMA)\n", (11132, 11231), True, 'import tuf.schema as SCHEMA\n'), ((11293, 11324), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['SIGNATURE_SCHEMA'], {}), '(SIGNATURE_SCHEMA)\n', (11306, 11324), True, 'import tuf.schema as SCHEMA\n'), ((12173, 12236), 'tuf.schema.DictOf', 'SCHEMA.DictOf', ([], {'key_schema': 'KEYID_SCHEMA', 'value_schema': 'KEY_SCHEMA'}), '(key_schema=KEYID_SCHEMA, value_schema=KEY_SCHEMA)\n', (12186, 12236), True, 'import tuf.schema as SCHEMA\n'), ((14313, 14351), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['PATH_HASH_PREFIX_SCHEMA'], {}), '(PATH_HASH_PREFIX_SCHEMA)\n', (14326, 14351), True, 'import tuf.schema as SCHEMA\n'), ((15089, 15156), 'tuf.schema.DictOf', 'SCHEMA.DictOf', ([], {'key_schema': 'ROLENAME_SCHEMA', 'value_schema': 'ROLE_SCHEMA'}), '(key_schema=ROLENAME_SCHEMA, value_schema=ROLE_SCHEMA)\n', (15102, 15156), True, 'import tuf.schema as SCHEMA\n'), ((15441, 15508), 'tuf.schema.DictOf', 'SCHEMA.DictOf', ([], {'key_schema': 'NAME_SCHEMA', 'value_schema': 'ROLEDICT_SCHEMA'}), '(key_schema=NAME_SCHEMA, value_schema=ROLEDICT_SCHEMA)\n', (15454, 15508), True, 'import tuf.schema as 
SCHEMA\n'), ((15616, 15642), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['ROLE_SCHEMA'], {}), '(ROLE_SCHEMA)\n', (15629, 15642), True, 'import tuf.schema as SCHEMA\n'), ((15741, 15771), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['ROLENAME_SCHEMA'], {}), '(ROLENAME_SCHEMA)\n', (15754, 15771), True, 'import tuf.schema as SCHEMA\n'), ((16494, 16537), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['MULTI_ROLE_DELEGATION_SCHEMA'], {}), '(MULTI_ROLE_DELEGATION_SCHEMA)\n', (16507, 16537), True, 'import tuf.schema as SCHEMA\n'), ((17250, 17265), 'tuf.schema.Object', 'SCHEMA.Object', ([], {}), '()\n', (17263, 17265), True, 'import tuf.schema as SCHEMA\n'), ((17290, 17358), 'tuf.schema.DictOf', 'SCHEMA.DictOf', ([], {'key_schema': 'RELPATH_SCHEMA', 'value_schema': 'CUSTOM_SCHEMA'}), '(key_schema=RELPATH_SCHEMA, value_schema=CUSTOM_SCHEMA)\n', (17303, 17358), True, 'import tuf.schema as SCHEMA\n'), ((21244, 21276), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['ALT_MIRROR_SCHEMA'], {}), '(ALT_MIRROR_SCHEMA)\n', (21257, 21276), True, 'import tuf.schema as SCHEMA\n'), ((21303, 21321), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (21319, 21321), True, 'import tuf.schema as SCHEMA\n'), ((21393, 21437), 'tuf.schema.Object', 'SCHEMA.Object', ([], {'mirrors': 'ALT_MIRRORLIST_SCHEMA'}), '(mirrors=ALT_MIRRORLIST_SCHEMA)\n', (21406, 21437), True, 'import tuf.schema as SCHEMA\n'), ((22467, 22564), 'tuf.schema.OneOf', 'SCHEMA.OneOf', (['[ROOT_SCHEMA, TARGETS_SCHEMA, SNAPSHOT_SCHEMA, TIMESTAMP_SCHEMA, MIRROR_SCHEMA]'], {}), '([ROOT_SCHEMA, TARGETS_SCHEMA, SNAPSHOT_SCHEMA,\n TIMESTAMP_SCHEMA, MIRROR_SCHEMA])\n', (22479, 22564), True, 'import tuf.schema as SCHEMA\n'), ((27122, 27149), 'time.gmtime', 'time.gmtime', (['unix_timestamp'], {}), '(unix_timestamp)\n', (27133, 27149), False, 'import time\n'), ((27285, 27320), 'datetime.datetime', 'datetime.datetime', (['*struct_time[:6]'], {}), '(*struct_time[:6])\n', (27302, 27320), False, 'import datetime\n'), ((34857, 34887), 'string.capwords', 'string.capwords', (['meta_rolename'], {}), '(meta_rolename)\n', (34872, 34887), False, 'import string\n'), ((40680, 40697), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (40695, 40697), False, 'import doctest\n'), ((3210, 3228), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (3226, 3228), True, 'import tuf.schema as SCHEMA\n'), ((4107, 4127), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)'}), '(lo=0)\n', (4121, 4127), True, 'import tuf.schema as SCHEMA\n'), ((4139, 4159), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)'}), '(lo=0)\n', (4153, 4159), True, 'import tuf.schema as SCHEMA\n'), ((4169, 4189), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)'}), '(lo=0)\n', (4183, 4189), True, 'import tuf.schema as SCHEMA\n'), ((6807, 6825), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (6823, 6825), True, 'import tuf.schema as SCHEMA\n'), ((6939, 6959), 'tuf.schema.String', 'SCHEMA.String', (['"""rsa"""'], {}), "('rsa')\n", (6952, 6959), True, 'import tuf.schema as SCHEMA\n'), ((6961, 6985), 'tuf.schema.String', 'SCHEMA.String', (['"""ed25519"""'], {}), "('ed25519')\n", (6974, 6985), True, 'import tuf.schema as SCHEMA\n'), ((7146, 7164), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (7162, 7164), True, 'import tuf.schema as SCHEMA\n'), ((7204, 7244), 'tuf.schema.Optional', 'SCHEMA.Optional', (['ISO8601_DATETIME_SCHEMA'], {}), '(ISO8601_DATETIME_SCHEMA)\n', (7219, 7244), True, 'import tuf.schema as SCHEMA\n'), ((7538, 7576), 'tuf.schema.Optional', 
'SCHEMA.Optional', (['HASHALGORITHMS_SCHEMA'], {}), '(HASHALGORITHMS_SCHEMA)\n', (7553, 7576), True, 'import tuf.schema as SCHEMA\n'), ((7616, 7656), 'tuf.schema.Optional', 'SCHEMA.Optional', (['ISO8601_DATETIME_SCHEMA'], {}), '(ISO8601_DATETIME_SCHEMA)\n', (7631, 7656), True, 'import tuf.schema as SCHEMA\n'), ((7832, 7852), 'tuf.schema.String', 'SCHEMA.String', (['"""rsa"""'], {}), "('rsa')\n", (7845, 7852), True, 'import tuf.schema as SCHEMA\n'), ((7904, 7942), 'tuf.schema.Optional', 'SCHEMA.Optional', (['HASHALGORITHMS_SCHEMA'], {}), '(HASHALGORITHMS_SCHEMA)\n', (7919, 7942), True, 'import tuf.schema as SCHEMA\n'), ((8616, 8640), 'tuf.schema.String', 'SCHEMA.String', (['"""ed25519"""'], {}), "('ed25519')\n", (8629, 8640), True, 'import tuf.schema as SCHEMA\n'), ((8692, 8730), 'tuf.schema.Optional', 'SCHEMA.Optional', (['HASHALGORITHMS_SCHEMA'], {}), '(HASHALGORITHMS_SCHEMA)\n', (8707, 8730), True, 'import tuf.schema as SCHEMA\n'), ((9108, 9147), 'tuf.schema.Optional', 'SCHEMA.Optional', (['METADATAVERSION_SCHEMA'], {}), '(METADATAVERSION_SCHEMA)\n', (9123, 9147), True, 'import tuf.schema as SCHEMA\n'), ((10003, 10054), 'tuf.schema.OneOf', 'SCHEMA.OneOf', (['[VERSIONINFO_SCHEMA, FILEINFO_SCHEMA]'], {}), '([VERSIONINFO_SCHEMA, FILEINFO_SCHEMA])\n', (10015, 10054), True, 'import tuf.schema as SCHEMA\n'), ((11673, 11689), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {}), '()\n', (11687, 11689), True, 'import tuf.schema as SCHEMA\n'), ((12009, 12021), 'tuf.schema.Any', 'SCHEMA.Any', ([], {}), '()\n', (12019, 12021), True, 'import tuf.schema as SCHEMA\n'), ((12038, 12069), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['SIGNATURE_SCHEMA'], {}), '(SIGNATURE_SCHEMA)\n', (12051, 12069), True, 'import tuf.schema as SCHEMA\n'), ((12548, 12560), 'tuf.schema.Any', 'SCHEMA.Any', ([], {}), '()\n', (12558, 12560), True, 'import tuf.schema as SCHEMA\n'), ((13276, 13406), 'tuf.schema.Object', 'SCHEMA.Object', ([], {'object_name': '"""[scp]"""', 'host': 'URL_SCHEMA', 'user': 'NAME_SCHEMA', 'identity_file': 'PATH_SCHEMA', 'remote_directory': 'PATH_SCHEMA'}), "(object_name='[scp]', host=URL_SCHEMA, user=NAME_SCHEMA,\n identity_file=PATH_SCHEMA, remote_directory=PATH_SCHEMA)\n", (13289, 13406), True, 'import tuf.schema as SCHEMA\n'), ((14709, 14741), 'tuf.schema.Optional', 'SCHEMA.Optional', (['ROLENAME_SCHEMA'], {}), '(ROLENAME_SCHEMA)\n', (14724, 14741), True, 'import tuf.schema as SCHEMA\n'), ((14815, 14846), 'tuf.schema.Optional', 'SCHEMA.Optional', (['BOOLEAN_SCHEMA'], {}), '(BOOLEAN_SCHEMA)\n', (14830, 14846), True, 'import tuf.schema as SCHEMA\n'), ((14858, 14890), 'tuf.schema.Optional', 'SCHEMA.Optional', (['RELPATHS_SCHEMA'], {}), '(RELPATHS_SCHEMA)\n', (14873, 14890), True, 'import tuf.schema as SCHEMA\n'), ((14915, 14957), 'tuf.schema.Optional', 'SCHEMA.Optional', (['PATH_HASH_PREFIXES_SCHEMA'], {}), '(PATH_HASH_PREFIXES_SCHEMA)\n', (14930, 14957), True, 'import tuf.schema as SCHEMA\n'), ((16154, 16185), 'tuf.schema.Optional', 'SCHEMA.Optional', (['BOOLEAN_SCHEMA'], {}), '(BOOLEAN_SCHEMA)\n', (16169, 16185), True, 'import tuf.schema as SCHEMA\n'), ((16305, 16336), 'tuf.schema.Optional', 'SCHEMA.Optional', (['BOOLEAN_SCHEMA'], {}), '(BOOLEAN_SCHEMA)\n', (16320, 16336), True, 'import tuf.schema as SCHEMA\n'), ((16348, 16380), 'tuf.schema.Optional', 'SCHEMA.Optional', (['RELPATHS_SCHEMA'], {}), '(RELPATHS_SCHEMA)\n', (16363, 16380), True, 'import tuf.schema as SCHEMA\n'), ((16704, 16754), 'tuf.schema.Optional', 'SCHEMA.Optional', (['MULTI_ROLE_DELEGATION_LIST_SCHEMA'], {}), 
'(MULTI_ROLE_DELEGATION_LIST_SCHEMA)\n', (16719, 16754), True, 'import tuf.schema as SCHEMA\n'), ((16840, 16857), 'tuf.schema.String', 'SCHEMA.String', (['""""""'], {}), "('')\n", (16853, 16857), True, 'import tuf.schema as SCHEMA\n'), ((16859, 16878), 'tuf.schema.String', 'SCHEMA.String', (['"""gz"""'], {}), "('gz')\n", (16872, 16878), True, 'import tuf.schema as SCHEMA\n'), ((17491, 17521), 'tuf.schema.Optional', 'SCHEMA.Optional', (['KEYIDS_SCHEMA'], {}), '(KEYIDS_SCHEMA)\n', (17506, 17521), True, 'import tuf.schema as SCHEMA\n'), ((17567, 17606), 'tuf.schema.Optional', 'SCHEMA.Optional', (['METADATAVERSION_SCHEMA'], {}), '(METADATAVERSION_SCHEMA)\n', (17582, 17606), True, 'import tuf.schema as SCHEMA\n'), ((17620, 17660), 'tuf.schema.Optional', 'SCHEMA.Optional', (['ISO8601_DATETIME_SCHEMA'], {}), '(ISO8601_DATETIME_SCHEMA)\n', (17635, 17660), True, 'import tuf.schema as SCHEMA\n'), ((17677, 17711), 'tuf.schema.Optional', 'SCHEMA.Optional', (['SIGNATURES_SCHEMA'], {}), '(SIGNATURES_SCHEMA)\n', (17692, 17711), True, 'import tuf.schema as SCHEMA\n'), ((17730, 17766), 'tuf.schema.Optional', 'SCHEMA.Optional', (['COMPRESSIONS_SCHEMA'], {}), '(COMPRESSIONS_SCHEMA)\n', (17745, 17766), True, 'import tuf.schema as SCHEMA\n'), ((17873, 17915), 'tuf.schema.Optional', 'SCHEMA.Optional', (['PATH_HASH_PREFIXES_SCHEMA'], {}), '(PATH_HASH_PREFIXES_SCHEMA)\n', (17888, 17915), True, 'import tuf.schema as SCHEMA\n'), ((17933, 17968), 'tuf.schema.Optional', 'SCHEMA.Optional', (['DELEGATIONS_SCHEMA'], {}), '(DELEGATIONS_SCHEMA)\n', (17948, 17968), True, 'import tuf.schema as SCHEMA\n'), ((17989, 18020), 'tuf.schema.Optional', 'SCHEMA.Optional', (['BOOLEAN_SCHEMA'], {}), '(BOOLEAN_SCHEMA)\n', (18004, 18020), True, 'import tuf.schema as SCHEMA\n'), ((18147, 18168), 'tuf.schema.String', 'SCHEMA.String', (['"""Root"""'], {}), "('Root')\n", (18160, 18168), True, 'import tuf.schema as SCHEMA\n'), ((18537, 18561), 'tuf.schema.String', 'SCHEMA.String', (['"""Targets"""'], {}), "('Targets')\n", (18550, 18561), True, 'import tuf.schema as SCHEMA\n'), ((18681, 18716), 'tuf.schema.Optional', 'SCHEMA.Optional', (['DELEGATIONS_SCHEMA'], {}), '(DELEGATIONS_SCHEMA)\n', (18696, 18716), True, 'import tuf.schema as SCHEMA\n'), ((18880, 18905), 'tuf.schema.String', 'SCHEMA.String', (['"""Snapshot"""'], {}), "('Snapshot')\n", (18893, 18905), True, 'import tuf.schema as SCHEMA\n'), ((19160, 19186), 'tuf.schema.String', 'SCHEMA.String', (['"""Timestamp"""'], {}), "('Timestamp')\n", (19173, 19186), True, 'import tuf.schema as SCHEMA\n'), ((19461, 19479), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (19477, 19479), True, 'import tuf.schema as SCHEMA\n'), ((19715, 19741), 'tuf.schema.Integer', 'SCHEMA.Integer', ([], {'lo': '(0)', 'hi': '(2)'}), '(lo=0, hi=2)\n', (19729, 19741), True, 'import tuf.schema as SCHEMA\n'), ((20067, 20099), 'tuf.schema.Optional', 'SCHEMA.Optional', (['RELPATHS_SCHEMA'], {}), '(RELPATHS_SCHEMA)\n', (20082, 20099), True, 'import tuf.schema as SCHEMA\n'), ((20447, 20465), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (20463, 20465), True, 'import tuf.schema as SCHEMA\n'), ((20956, 20980), 'tuf.schema.String', 'SCHEMA.String', (['"""Mirrors"""'], {}), "('Mirrors')\n", (20969, 20980), True, 'import tuf.schema as SCHEMA\n'), ((21067, 21095), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['MIRROR_SCHEMA'], {}), '(MIRROR_SCHEMA)\n', (21080, 21095), True, 'import tuf.schema as SCHEMA\n'), ((22204, 22296), 'tuf.schema.DictOf', 'SCHEMA.DictOf', ([], {'key_schema': 'REPOSITORY_NAME_SCHEMA', 
'value_schema': 'PINNED_REPOSITORY_SCHEMA'}), '(key_schema=REPOSITORY_NAME_SCHEMA, value_schema=\n PINNED_REPOSITORY_SCHEMA)\n', (22217, 22296), True, 'import tuf.schema as SCHEMA\n'), ((22332, 22372), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['PINNING_DELEGATION_SCHEMA'], {}), '(PINNING_DELEGATION_SCHEMA)\n', (22345, 22372), True, 'import tuf.schema as SCHEMA\n'), ((25875, 25899), 'tuf.FormatError', 'tuf.FormatError', (['message'], {}), '(message)\n', (25890, 25899), False, 'import tuf\n'), ((28468, 28492), 'tuf.FormatError', 'tuf.FormatError', (['message'], {}), '(message)\n', (28483, 28492), False, 'import tuf\n'), ((33651, 33722), 'tuf.FormatError', 'tuf.FormatError', (['"""Both "paths" and "path_hash_prefixes" are specified."""'], {}), '(\'Both "paths" and "path_hash_prefixes" are specified.\')\n', (33666, 33722), False, 'import tuf\n'), ((36701, 36739), 're.sub', 're.sub', (['"""(["\\\\\\\\])"""', '"""\\\\\\\\\\\\1"""', 'string'], {}), '(\'(["\\\\\\\\])\', \'\\\\\\\\\\\\1\', string)\n', (36707, 36739), False, 'import re\n'), ((4843, 4863), 'tuf.schema.String', 'SCHEMA.String', (['"""md5"""'], {}), "('md5')\n", (4856, 4863), True, 'import tuf.schema as SCHEMA\n'), ((4865, 4886), 'tuf.schema.String', 'SCHEMA.String', (['"""sha1"""'], {}), "('sha1')\n", (4878, 4886), True, 'import tuf.schema as SCHEMA\n'), ((4891, 4914), 'tuf.schema.String', 'SCHEMA.String', (['"""sha224"""'], {}), "('sha224')\n", (4904, 4914), True, 'import tuf.schema as SCHEMA\n'), ((4916, 4939), 'tuf.schema.String', 'SCHEMA.String', (['"""sha256"""'], {}), "('sha256')\n", (4929, 4939), True, 'import tuf.schema as SCHEMA\n'), ((4944, 4967), 'tuf.schema.String', 'SCHEMA.String', (['"""sha384"""'], {}), "('sha384')\n", (4957, 4967), True, 'import tuf.schema as SCHEMA\n'), ((4969, 4992), 'tuf.schema.String', 'SCHEMA.String', (['"""sha512"""'], {}), "('sha512')\n", (4982, 4992), True, 'import tuf.schema as SCHEMA\n'), ((6855, 6873), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (6871, 6873), True, 'import tuf.schema as SCHEMA\n'), ((8433, 8457), 'tuf.schema.String', 'SCHEMA.String', (['"""general"""'], {}), "('general')\n", (8446, 8457), True, 'import tuf.schema as SCHEMA\n'), ((8459, 8483), 'tuf.schema.String', 'SCHEMA.String', (['"""ed25519"""'], {}), "('ed25519')\n", (8472, 8483), True, 'import tuf.schema as SCHEMA\n'), ((8485, 8505), 'tuf.schema.String', 'SCHEMA.String', (['"""rsa"""'], {}), "('rsa')\n", (8498, 8505), True, 'import tuf.schema as SCHEMA\n'), ((9176, 9191), 'tuf.schema.Object', 'SCHEMA.Object', ([], {}), '()\n', (9189, 9191), True, 'import tuf.schema as SCHEMA\n'), ((16979, 16996), 'tuf.schema.String', 'SCHEMA.String', (['""""""'], {}), "('')\n", (16992, 16996), True, 'import tuf.schema as SCHEMA\n'), ((16998, 17017), 'tuf.schema.String', 'SCHEMA.String', (['"""gz"""'], {}), "('gz')\n", (17011, 17017), True, 'import tuf.schema as SCHEMA\n'), ((17794, 17847), 'tuf.schema.OneOf', 'SCHEMA.OneOf', (['[RELPATHS_SCHEMA, PATH_FILEINFO_SCHEMA]'], {}), '([RELPATHS_SCHEMA, PATH_FILEINFO_SCHEMA])\n', (17806, 17847), True, 'import tuf.schema as SCHEMA\n'), ((20157, 20172), 'tuf.schema.Object', 'SCHEMA.Object', ([], {}), '()\n', (20170, 20172), True, 'import tuf.schema as SCHEMA\n'), ((21874, 21892), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (21890, 21892), True, 'import tuf.schema as SCHEMA\n'), ((21926, 21944), 'tuf.schema.AnyString', 'SCHEMA.AnyString', ([], {}), '()\n', (21942, 21944), True, 'import tuf.schema as SCHEMA\n'), ((21979, 21995), 'tuf.schema.Boolean', 
'SCHEMA.Boolean', ([], {}), '()\n', (21993, 21995), True, 'import tuf.schema as SCHEMA\n'), ((36044, 36077), 'tuf.FormatError', 'tuf.FormatError', (['"""Untyped object"""'], {}), "('Untyped object')\n", (36059, 36077), False, 'import tuf\n'), ((40261, 40285), 'tuf.FormatError', 'tuf.FormatError', (['message'], {}), '(message)\n', (40276, 40285), False, 'import tuf\n'), ((13177, 13197), 'tuf.schema.String', 'SCHEMA.String', (['"""scp"""'], {}), "('scp')\n", (13190, 13197), True, 'import tuf.schema as SCHEMA\n'), ((13987, 14013), 'tuf.schema.ListOf', 'SCHEMA.ListOf', (['PATH_SCHEMA'], {}), '(PATH_SCHEMA)\n', (14000, 14013), True, 'import tuf.schema as SCHEMA\n'), ((19513, 19539), 'tuf.schema.String', 'SCHEMA.String', (['"""repo-like"""'], {}), "('repo-like')\n", (19526, 19539), True, 'import tuf.schema as SCHEMA\n'), ((19541, 19562), 'tuf.schema.String', 'SCHEMA.String', (['"""flat"""'], {}), "('flat')\n", (19554, 19562), True, 'import tuf.schema as SCHEMA\n'), ((27741, 27766), 'binascii.b2a_base64', 'binascii.b2a_base64', (['data'], {}), '(data)\n', (27760, 27766), False, 'import binascii\n'), ((37625, 37646), 'six.iteritems', 'six.iteritems', (['object'], {}), '(object)\n', (37638, 37646), False, 'import six\n')]
|
#!/usr/bin/python
# ---------------------------------------------------------------------------
# File: admipex8.py
# Version 12.8.0
# ---------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2009, 2017. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# ---------------------------------------------------------------------------
"""
Solve a facility location problem with cut callbacks or lazy constraints.
Given a set of locations J and a set of clients C, the following model is
solved:
Minimize
sum(j in J) fixedCost[j]*used[j] +
sum(j in J)sum(c in C) cost[c][j]*supply[c][j]
Subject to
sum(j in J) supply[c][j] == 1 for all c in C
sum(c in C) supply[c][j] <= (|C| - 1) * used[j] for all j in J
supply[c][j] in {0, 1} for all c in C, j in J
used[j] in {0, 1} for all j in J
In addition to the constraints stated above, the code also separates
a disaggregated version of the capacity constraints (see comments for the
cut callback) to improve performance.
Optionally, the capacity constraints can be separated from a lazy
constraint callback instead of being stated as part of the initial model.
See the usage message for how to switch between these options.
"""
from __future__ import print_function
import sys
import traceback
import cplex
from inputdata import read_dat_file
# epsilon used for violation of cuts
EPS = 1e-6
def usage(name):
"""Prints a usage statement."""
msg = """Usage: %s [options...]
By default, a user cut callback is used to dynamically
separate constraints.
Supported options are:
-table Instead of the default behavior, use a
static table that holds all cuts and
scan that table for violated cuts.
-no-cuts Do not separate any cuts.
-lazy Do not include capacity constraints in the
model. Instead, separate them from a lazy
constraint callback.
-data=<dir> Specify the directory in which the data
file facility.dat is located.
"""
print(msg % name)
sys.exit(2)
class FacilityCallback(object):
"""Callback function for the facility location problem.
This callback can do three different things:
- Generate disaggregated constraints algorithmically
- Generate disaggregated constraints looking through a table
- Generate capacity constraint as lazy constraints.
    Everything is set up in the invoke function that
is called by CPLEX.
"""
def __init__(self, clients, locations, used, supply):
self.clients = clients
self.locations = locations
self.used = used
self.supply = supply
self.cutlhs = None
self.cutrhs = None
def disaggregate(self, context):
"""Separate the disaggregated capacity constraints.
In the model we have for each location j the constraint
sum(c in clients) supply[c][j] <= (nbClients-1) * used[j]
Clearly, a client can only be serviced from a location that is
used, so we also have a constraint
supply[c][j] <= used[j]
that must be satisfied by every feasible solution. These
constraints tend to be violated in LP relaxation. In this
callback we separate them.
"""
for j in self.locations:
for c in self.clients:
s, o = context.get_relaxation_point(
[self.supply[c][j], self.used[j]])
if s > o + EPS:
print('Adding supply(%d)(%d) <= used(%d) [%f > %f]' %
(c, j, j, s, o))
cutmanagement = cplex.callbacks.UserCutCallback.use_cut.purge
context.add_user_cut(
cut=cplex.SparsePair([self.supply[c][j], self.used[j]],
[1.0, -1.0]),
sense='L', rhs=0.0,
cutmanagement=cutmanagement,
local=False)
def cuts_from_table(self, context):
"""Generate disaggregated constraints looking through a table."""
for lhs, rhs in zip(self.cutlhs, self.cutrhs):
# Compute activity of left-hand side
act = sum(c * x for c, x in zip(lhs.val,
context.get_relaxation_point(lhs.ind)))
if act > rhs + EPS:
print('Adding %s [act = %f]' % (str(lhs), act))
cutmanagement = cplex.callbacks.UserCutCallback.use_cut.purge
context.add_user_cut(cut=lhs, sense="L", rhs=rhs,
cutmanagement=cutmanagement, local=False)
def lazy_capacity(self, context):
"""Generate capacity constraint as lazy constraints."""
# We only work with bounded models
if not context.is_candidate_point():
raise Exception('Unbounded solution')
for j in self.locations:
isused = context.get_candidate_point(self.used[j])
served = sum(context.get_candidate_point(
[self.supply[c][j] for c in self.clients]))
if served > (len(self.clients) - 1.0) * isused + EPS:
print('Adding lazy constraint %s <= %d*used(%d)' %
(' + '.join(['supply(%d)(%d)' % (x, j) for x in self.clients]),
len(self.clients) - 1, j))
context.reject_candidate(
constraints=[cplex.SparsePair(
[self.supply[c][j] for c in self.clients] + [self.used[j]],
[1.0] * len(self.clients) + [-(len(self.clients) - 1)]), ],
senses='L',
rhs=[0.0, ])
def invoke(self, context):
"""Whenever CPLEX needs to invoke the callback it calls this
method with exactly one argument: an instance of
cplex.callbacks.Context.
"""
try:
if context.in_relaxation():
if self.cutlhs:
self.cuts_from_table(context)
else:
self.disaggregate(context)
elif context.in_candidate():
self.lazy_capacity(context)
except:
info = sys.exc_info()
print('#### Exception in callback: ', info[0])
print('#### ', info[1])
print('#### ', info[2])
traceback.print_tb(info[2], file=sys.stdout)
raise
#endif
def admipex8(datadir, from_table, lazy, use_callback):
"""Solve a facility location problem with cut callbacks or lazy
constraints.
"""
# Read in data file. The data we read is
# fixedcost -- a list/array of facility fixed cost
# cost -- a matrix for the costs to serve each client by each
# facility
# pylint: disable=unbalanced-tuple-unpacking
fixedcost, cost, _ = read_dat_file(datadir + '/' + 'facility.dat')
# Create the model
locations = list(range(len(fixedcost)))
clients = list(range(len(cost)))
cpx = cplex.Cplex()
# Create variables.
# - used[j] If location j is used.
# - supply[c][j] Amount shipped from location j to client c. This is a
# number in [0,1] and specifies the percentage of c's
    #                     demand that is served from location j.
# Note that we also create the objective function along with the variables
# by specifying the objective coefficient for each variable in the 'obj'
# argument.
used = cpx.variables.add(obj=fixedcost,
lb=[0] * len(locations), ub=[1] * len(locations),
types=['B'] * len(locations),
names=['used(%d)' % (j) for j in locations])
supply = [cpx.variables.add(obj=[cost[c][j] for j in locations],
lb=[0] * len(locations), ub=[1] * len(locations),
types=['B'] * len(locations),
names=['supply(%d)(%d)' % (c, j) for j in locations])
for c in clients]
# The supply for each client must sum to 1, i.e., the demand of each
# client must be met.
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(supply[c], [1.0] * len(supply[c]))
for c in clients],
senses=['E'] * len(clients),
rhs=[1.0] * len(clients))
# Capacity constraint for each location. We just require that a single
# location cannot serve all clients, that is, the capacity of each
# location is nbClients-1. This makes the model a little harder to
# solve and allows us to separate more cuts.
if not lazy:
cpx.linear_constraints.add(
lin_expr=[cplex.SparsePair(
[supply[c][j] for c in clients] + [used[j]],
[1.0] * len(clients) + [-(len(clients) - 1.0)])
for j in locations],
senses=['L'] * len(locations),
rhs=[0] * len(locations))
# Tweak some CPLEX parameters so that CPLEX has a harder time to
# solve the model and our cut separators can actually kick in.
cpx.parameters.mip.strategy.heuristicfreq.set(-1)
cpx.parameters.mip.cuts.mircut.set(-1)
cpx.parameters.mip.cuts.implied.set(-1)
cpx.parameters.mip.cuts.gomory.set(-1)
cpx.parameters.mip.cuts.flowcovers.set(-1)
cpx.parameters.mip.cuts.pathcut.set(-1)
cpx.parameters.mip.cuts.liftproj.set(-1)
cpx.parameters.mip.cuts.zerohalfcut.set(-1)
cpx.parameters.mip.cuts.cliques.set(-1)
cpx.parameters.mip.cuts.covers.set(-1)
    # Set up the callback.
# We instantiate the callback object and attach the necessary data
# to it.
    # We also set up the contextmask parameter to indicate when the callback
# should be called.
facilitycb = FacilityCallback(clients, locations, used, supply)
contextmask = 0
if use_callback:
contextmask |= cplex.callbacks.Context.id.relaxation
if from_table:
# Generate all disaggregated constraints and put them into a
# table that is scanned by the callback.
facilitycb.cutlhs = [cplex.SparsePair([supply[c][j], used[j]],
[1.0, -1.0])
for j in locations for c in clients]
facilitycb.cutrhs = [0] * len(locations) * len(clients)
if lazy:
contextmask |= cplex.callbacks.Context.id.candidate
    # The callback is set up; attach it to the model.
if contextmask:
cpx.set_callback(facilitycb, contextmask)
cpx.write('model.lp')
cpx.solve()
print('Solution status: %d' % cpx.solution.get_status())
print('Nodes processed: %d' %
cpx.solution.progress.get_num_nodes_processed())
print('Active user cuts/lazy constraints: %d' %
cpx.solution.MIP.get_num_cuts(cpx.solution.MIP.cut_type.user))
tol = cpx.parameters.mip.tolerances.integrality.get()
print('Optimal value: %f' %
cpx.solution.get_objective_value())
values = cpx.solution.get_values()
for j in [x for x in locations if values[used[x]] >= 1 - tol]:
print('Facility %d is used, it serves clients %s' %
(j, ', '.join([str(x) for x in clients
if values[supply[x][j]] >= 1 - tol])))
def main():
"""Set default arguments and parse command line."""
# If a directory is not given on the command line, we use the
# following default.
datadir = '../../../examples/data'
from_table = False
lazy = False
use_callback = True
for arg in sys.argv[1:]:
if arg.startswith('-data='):
datadir = arg[6:]
elif arg == '-table':
from_table = True
elif arg == '-lazy':
lazy = True
elif arg == '-no-cuts':
use_callback = False
else:
print('Unknown argument %s' % arg)
usage(sys.argv[0])
admipex8(datadir, from_table, lazy, use_callback)
if __name__ == "__main__":
main()
|
[
"cplex.Cplex",
"traceback.print_tb",
"cplex.SparsePair",
"inputdata.read_dat_file",
"sys.exc_info",
"sys.exit"
] |
[((2352, 2363), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2360, 2363), False, 'import sys\n'), ((7241, 7286), 'inputdata.read_dat_file', 'read_dat_file', (["(datadir + '/' + 'facility.dat')"], {}), "(datadir + '/' + 'facility.dat')\n", (7254, 7286), False, 'from inputdata import read_dat_file\n'), ((7402, 7415), 'cplex.Cplex', 'cplex.Cplex', ([], {}), '()\n', (7413, 7415), False, 'import cplex\n'), ((6540, 6554), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6552, 6554), False, 'import sys\n'), ((6744, 6788), 'traceback.print_tb', 'traceback.print_tb', (['info[2]'], {'file': 'sys.stdout'}), '(info[2], file=sys.stdout)\n', (6762, 6788), False, 'import traceback\n'), ((10524, 10578), 'cplex.SparsePair', 'cplex.SparsePair', (['[supply[c][j], used[j]]', '[1.0, -1.0]'], {}), '([supply[c][j], used[j]], [1.0, -1.0])\n', (10540, 10578), False, 'import cplex\n'), ((4047, 4111), 'cplex.SparsePair', 'cplex.SparsePair', (['[self.supply[c][j], self.used[j]]', '[1.0, -1.0]'], {}), '([self.supply[c][j], self.used[j]], [1.0, -1.0])\n', (4063, 4111), False, 'import cplex\n')]
|
import random
from os import path
import hangman_words
from hangman_art import logo
from hangman_art import stages
chosen_word=random.choice(hangman_words.word_list)
print(f'the chosen word is : {chosen_word}\n')
display=[]
print(logo)
for i in range(len(chosen_word)):
display += "_"
print(display)
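# display holds one "_" per letter of the chosen word and is filled in as letters are guessed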
lives=6
while True:
guess=input("please guess a letter: ").lower()
if guess in display:
print("you already guessed this word")
for i in range(len(chosen_word)):
if chosen_word[i]==guess:
display[i]=guess
if guess not in chosen_word:
lives -= 1
print(f'remaining lives: {lives}')
if lives==0:
print("You Lost NIGGA")
print(stages[lives])
break
print(f"{' '.join(display)}")
if "_" not in display:
print("You Won")
break
print(stages[lives])
|
[
"random.choice"
] |
[((127, 165), 'random.choice', 'random.choice', (['hangman_words.word_list'], {}), '(hangman_words.word_list)\n', (140, 165), False, 'import random\n')]
|
#!/usr/bin/env python
"""Test TermCounts object used in Resnik and Lin similarity calculations."""
from __future__ import print_function
import os
import sys
import timeit
import datetime
from goatools.base import get_godag
from goatools.semantic import TermCounts
from goatools.semantic import get_info_content
from goatools.test_data.gafs import ASSOCIATIONS
from goatools.associations import dnld_annotation
from goatools.anno.gaf_reader import GafReader
from goatools.godag.consts import NS2NAMESPACE
TIC = timeit.default_timer()
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_semantic_similarity(usr_assc=None):
"""Computing basic semantic similarities between GO terms."""
not_these = {'goa_uniprot_all.gaf', 'goa_uniprot_all_noiea.gaf'}
associations = sorted(ASSOCIATIONS.difference(not_these))
go2obj = get_go2obj()
# goids = go2obj.keys()
# http://current.geneontology.org/annotations/
if usr_assc is not None:
associations = [usr_assc]
not_found = set()
errs = []
for assc_name in associations: # Limit test numbers for speed
tic = timeit.default_timer()
# Get all the annotations from arabidopsis.
fin_gaf = os.path.join(REPO, assc_name)
if not os.path.exists(fin_gaf):
dnld_annotation(fin_gaf)
annoobj = GafReader(fin_gaf)
#### for nspc in ['BP', 'MF', 'CC']:
assc_gene2gos = annoobj.get_id2gos('all')
if not assc_gene2gos:
not_found.add(assc_name)
continue
# Calculate the information content of the single term, GO:0048364
# "Information content (GO:0048364) = 7.75481392334
# Initialize the counts of each GO term.
tcntobj = TermCounts(go2obj, assc_gene2gos)
go_cnt = tcntobj.gocnts.most_common()
#print tcntobj.gocnts.most_common()
if go_cnt:
print("{ASSC}".format(ASSC=assc_name))
print(tcntobj.aspect_counts)
gocnt_max = go_cnt[0][1]
prt_info(tcntobj, go_cnt, None)
prt_info(tcntobj, go_cnt, gocnt_max/2.0)
prt_info(tcntobj, go_cnt, gocnt_max/10.0)
print("{HMS} {hms} {ASSC}\n".format(ASSC=assc_name, HMS=_hms(TIC), hms=_hms(tic)))
print('{HMS} {N} Associations'.format(HMS=_hms(TIC), N=len(associations)))
if not_found:
_prt_not_found(not_found)
if errs:
fout_err = 'namespace_errors.txt'
with open(fout_err, 'w') as prt:
for err in errs:
prt.write(err)
print(' {N} ERRORS WROTE: {TXT}'.format(N=len(errs), TXT=fout_err))
def _prt_not_found(not_found):
print('**WARNING: {N} EMPTY ASSOCIATIONS:'.format(N=len(not_found)))
for idx, assc in enumerate(not_found):
print(' {I}) {ASSC}'.format(I=idx, ASSC=assc))
def _hms(tic):
"""Get Timing."""
return '{HMS}'.format(HMS=str(datetime.timedelta(seconds=(timeit.default_timer()-tic))))
def prt_info(tcntobj, go_cnt, max_val):
"""Print the information content of a frequently used GO ID."""
go_id, cnt = get_goid(go_cnt, max_val)
infocontent = get_info_content(go_id, tcntobj)
msg = 'Information content ({GO} {CNT:7,}) = {INFO:8.6f} {NAME}'
print(msg.format(GO=go_id, CNT=cnt, INFO=infocontent, NAME=tcntobj.go2obj[go_id].name))
def get_goid(go_cnt, max_val):
"""Get frequently used GO ID."""
if max_val is not None:
for goid, cnt in go_cnt:
if cnt < max_val:
return goid, cnt
return go_cnt[-1][0], go_cnt[-1][1]
return go_cnt[0][0], go_cnt[0][1]
def get_go2obj():
"""Read GODag and return go2obj."""
godag = get_godag(os.path.join(REPO, "go-basic.obo"), loading_bar=None)
return {go:o for go, o in godag.items() if not o.is_obsolete}
if __name__ == '__main__':
ASSC_NAME = None if len(sys.argv) == 1 else sys.argv[1]
test_semantic_similarity(ASSC_NAME)
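    # Usage note (file name illustrative): pass a single GAF file name as the first
    # argument, e.g. `python this_script.py goa_dog.gaf`, to test only that association;
    # with no argument every file in ASSOCIATIONS is tested.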
|
[
"os.path.abspath",
"goatools.semantic.get_info_content",
"timeit.default_timer",
"goatools.anno.gaf_reader.GafReader",
"os.path.exists",
"goatools.test_data.gafs.ASSOCIATIONS.difference",
"goatools.semantic.TermCounts",
"os.path.join",
"goatools.associations.dnld_annotation"
] |
[((514, 536), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (534, 536), False, 'import timeit\n'), ((3157, 3189), 'goatools.semantic.get_info_content', 'get_info_content', (['go_id', 'tcntobj'], {}), '(go_id, tcntobj)\n', (3173, 3189), False, 'from goatools.semantic import get_info_content\n'), ((573, 598), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (588, 598), False, 'import os\n'), ((814, 848), 'goatools.test_data.gafs.ASSOCIATIONS.difference', 'ASSOCIATIONS.difference', (['not_these'], {}), '(not_these)\n', (837, 848), False, 'from goatools.test_data.gafs import ASSOCIATIONS\n'), ((1135, 1157), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1155, 1157), False, 'import timeit\n'), ((1228, 1257), 'os.path.join', 'os.path.join', (['REPO', 'assc_name'], {}), '(REPO, assc_name)\n', (1240, 1257), False, 'import os\n'), ((1353, 1371), 'goatools.anno.gaf_reader.GafReader', 'GafReader', (['fin_gaf'], {}), '(fin_gaf)\n', (1362, 1371), False, 'from goatools.anno.gaf_reader import GafReader\n'), ((1765, 1798), 'goatools.semantic.TermCounts', 'TermCounts', (['go2obj', 'assc_gene2gos'], {}), '(go2obj, assc_gene2gos)\n', (1775, 1798), False, 'from goatools.semantic import TermCounts\n'), ((3707, 3741), 'os.path.join', 'os.path.join', (['REPO', '"""go-basic.obo"""'], {}), "(REPO, 'go-basic.obo')\n", (3719, 3741), False, 'import os\n'), ((1273, 1296), 'os.path.exists', 'os.path.exists', (['fin_gaf'], {}), '(fin_gaf)\n', (1287, 1296), False, 'import os\n'), ((1310, 1334), 'goatools.associations.dnld_annotation', 'dnld_annotation', (['fin_gaf'], {}), '(fin_gaf)\n', (1325, 1334), False, 'from goatools.associations import dnld_annotation\n'), ((2956, 2978), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2976, 2978), False, 'import timeit\n')]
|
from __future__ import annotations
import discord
import contextlib
from datetime import datetime, timedelta
from typing import Union, Any, List, Dict, TYPE_CHECKING
from .useful import (
GetEmoji,
GetFormat,
calculate_level_xp,
format_relative,
iso_to_time,
JSON
)
from ..locale_v2 import ValorantTranslator
VLR_locale = ValorantTranslator()
if TYPE_CHECKING:
from bot import ValorantBot
class Embed(discord.Embed): # Custom Embed
def __init__(self, description:str = None, color: Union[discord.Color, int] = 0xfd4554, **kwargs: Any) -> None:
super().__init__(description=description, color=color, **kwargs)
class GetEmbed:
def __giorgio_embed(skin: Dict, bot: ValorantBot) -> discord.Embed:
"""EMBED DESIGN Giorgio"""
uuid, name, price, icon = skin['uuid'], skin['name'], skin['price'], skin['icon']
emoji = GetEmoji.tier_by_bot(uuid, bot)
vp_emoji = GetEmoji.point_by_bot('ValorantPointIcon', bot)
embed = Embed(f"{emoji} **{name}**\n{vp_emoji} {price}", color=0x0F1923)
embed.set_thumbnail(url=icon)
return embed
@classmethod
def store(cls, player: str, offer: Dict, response: Dict, bot: ValorantBot) -> List[discord.Embed]:
"""Embed Store"""
store_esponse = response.get('RESPONSE')
data = GetFormat.offer_format(offer)
duration = data.pop('duration')
description = store_esponse.format(username=player, duration= format_relative(datetime.utcnow() + timedelta(seconds=duration)))
embed = Embed(description)
embeds = [embed]
[embeds.append(cls.__giorgio_embed(data[skin], bot)) for skin in data]
return embeds
# ---------- MISSION EMBED ---------- #
def mission(player:str, mission: Dict, response: Dict) -> discord.Embed:
"""Embed Mission"""
# language
title_mission = response.get('TITLE')
title_daily = response.get('DAILY')
title_weekly = response.get('WEEKLY')
title_newplayer = response.get('NEWPLAYER')
clear_all_mission = response.get('NO_MISSION')
reset_in = response.get('DAILY_RESET')
refill_in = response.get('REFILLS')
# mission format
data = GetFormat.mission_format(mission)
daily_format = data['daily']
daily_end = data['daily_end']
weekly_format = data['weekly']
weekly_end = data['weekly_end']
newplayer_format = data['newplayer']
daily = ''.join(daily_format)
weekly = ''.join(weekly_format)
newplayer = ''.join(newplayer_format)
weekly_end_time = ''
with contextlib.suppress(Exception):
weekly_end_time = f"{refill_in.format(duration=format_relative(iso_to_time(weekly_end)))}"
embed = Embed(title=f"**{title_mission}**")
embed.set_footer(text=player)
if len(daily) != 0:
embed.add_field(
name=f"**{title_daily}**",
value=f"{daily}\n{reset_in.format(duration=format_relative(iso_to_time(daily_end)))}",
inline=False
)
if len(weekly) != 0:
embed.add_field(
name=f"**{title_weekly}**",
value=f"{weekly}\n\n{weekly_end_time}",
inline=False
)
if len(newplayer) != 0:
embed.add_field(
name=f"**{title_newplayer}**",
value=f"{newplayer}",
inline=False
)
if len(embed.fields) == 0:
embed.color = 0x77dd77
embed.description = clear_all_mission
return embed
# ---------- POINT EMBED ---------- #
def point(player:str, wallet: Dict, response: Dict, bot: ValorantBot) -> discord.Embed:
"""Embed Point"""
# language
title_point = response.get('POINT')
cache = JSON.read('cache')
point = cache['currencies']
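        # The UUIDs below are the hard-coded currency ids used by the client API:
        # vp_uuid for Valorant Points and rad_uuid for Radianite Points.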
vp_uuid = '85ad13f7-3d1b-5128-9eb2-7cd8ee0b5741'
rad_uuid = 'e59aa87c-4cbf-517a-5983-6e81511be9b7'
valorant_point = wallet['Balances'][vp_uuid]
radiant_point = wallet['Balances'][rad_uuid]
rad = point[rad_uuid]['names'][str(VLR_locale)]
vp = point[vp_uuid]['names'][str(VLR_locale)]
if vp == 'VP': vp = 'Valorant Points'
embed = Embed(title=f"{title_point}:")
vp_emoji = GetEmoji.point_by_bot('ValorantPointIcon', bot)
rad_emoji = GetEmoji.point_by_bot('RadianitePointIcon', bot)
embed.add_field(name=vp, value=f"{vp_emoji} {valorant_point}")
embed.add_field(name=rad, value=f"{rad_emoji} {radiant_point}")
embed.set_footer(text=player)
return embed
# ---------- NIGHTMARKET EMBED ---------- #
def __nightmarket_embed(skins: Dict, bot: ValorantBot) -> discord.Embed:
"""Generate Embed Nightmarket"""
uuid, name, icon, price, dpice = skins['uuid'], skins['name'], skins['icon'], skins['price'], skins['disprice']
vp_emoji = GetEmoji.point_by_bot('ValorantPointIcon', bot)
embed = Embed(f"{GetEmoji.tier(uuid)} **{name}**\n{vp_emoji} {dpice} ~~{price}~~", color=0x0F1923)
embed.set_thumbnail(url=icon)
return embed
@classmethod
def nightmarket(cls, player:str, offer: Dict, bot: ValorantBot, response: Dict) -> discord.Embed:
"""Embed Nightmarket"""
# language
msg_response = response.get('RESPONSE')
night_mk = GetFormat.nightmarket_format(offer, response)
skins = night_mk['nightmarket']
duration = night_mk['duration']
description = msg_response.format(username=player, duration=format_relative(datetime.utcnow() + timedelta(seconds=duration)))
embed = Embed(description)
embeds = [embed]
[embeds.append(cls.__nightmarket_embed(skins[skin], bot)) for skin in skins]
return embeds
# ---------- BATTLEPASS EMBED ---------- #
def battlepass(player:str, data: Dict, season: Dict, response: Dict) -> discord.Embed:
"""Embed Battlepass"""
# language
MSG_RESPONSE = response.get('RESPONSE')
MSG_TIER = response.get('TIER')
BTP = GetFormat.battlepass_format(data, season, response)
item = BTP['data']
reward = item['reward']
xp = item['xp']
act = item['act']
tier = item['tier']
icon = item['icon']
season_end = item['end']
item_type = item['type']
original_type = item['original_type']
description = MSG_RESPONSE.format(next=f'`{reward}`', type=f'`{item_type}`', xp=f'`{xp:,}/{calculate_level_xp(tier + 1):,}`', end=format_relative(season_end))
embed = Embed(description, title=f"BATTLEPASS")
if icon:
if original_type in ['PlayerCard', 'EquippableSkinLevel']:
embed.set_image(url=icon)
else:
embed.set_thumbnail(url=icon)
if tier >= 50:
embed.color = 0xf1b82d
if tier == 55:
embed.description = str(reward)
embed.set_footer(text=f"{MSG_TIER} {tier} | {act}\n{player}")
return embed
# ---------- NOTIFY EMBED ---------- #
def notify_specified_send(uuid: str) -> discord.Embed:
...
@classmethod
def notify_all_send(cls, player:str, offer: Dict, response: Dict, bot: ValorantBot) -> discord.Embed:
description_format = response.get('RESPONSE_ALL')
data = GetFormat.offer_format(offer)
duration = data.pop('duration')
description = description_format.format(username=player, duration=format_relative(datetime.utcnow() + timedelta(seconds=duration)))
embed = Embed(description)
embeds = [embed]
[embeds.append(cls.__giorgio_embed(data[skin], bot)) for skin in data]
return embeds
|
[
"datetime.datetime.utcnow",
"datetime.timedelta",
"contextlib.suppress"
] |
[((2666, 2696), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (2685, 2696), False, 'import contextlib\n'), ((1497, 1514), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1512, 1514), False, 'from datetime import datetime, timedelta\n'), ((1517, 1544), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration'}), '(seconds=duration)\n', (1526, 1544), False, 'from datetime import datetime, timedelta\n'), ((5736, 5753), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5751, 5753), False, 'from datetime import datetime, timedelta\n'), ((5756, 5783), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration'}), '(seconds=duration)\n', (5765, 5783), False, 'from datetime import datetime, timedelta\n'), ((7730, 7747), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7745, 7747), False, 'from datetime import datetime, timedelta\n'), ((7750, 7777), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration'}), '(seconds=duration)\n', (7759, 7777), False, 'from datetime import datetime, timedelta\n')]
|
from torch import nn, Tensor
class Model(nn.Module):
def __init__(self, input_n: int, output_n: int, hidden_n: int) -> None:
super().__init__()
self.input_shape = (input_n,)
self.output_shape = (output_n,)
self.hidden_n = hidden_n
self.acctivate = nn.Softplus()
self.fc1 = nn.Linear(input_n, self.hidden_n)
self.fc2 = nn.Linear(self.hidden_n, self.hidden_n)
self.fc3 = nn.Linear(self.hidden_n, output_n)
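        # Three fully connected layers form an MLP: input -> hidden -> hidden -> output,
        # with the Softplus activation applied between them in forward().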
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.acctivate(x)
x = self.fc2(x)
x = self.acctivate(x)
x = self.fc3(x)
return x
if __name__ == '__main__':
from torchinfo import summary
model = Model(1, 1, 64)
summary(model)
|
[
"torch.nn.Softplus",
"torchinfo.summary",
"torch.nn.Linear"
] |
[((768, 782), 'torchinfo.summary', 'summary', (['model'], {}), '(model)\n', (775, 782), False, 'from torchinfo import summary\n'), ((297, 310), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (308, 310), False, 'from torch import nn, Tensor\n'), ((331, 364), 'torch.nn.Linear', 'nn.Linear', (['input_n', 'self.hidden_n'], {}), '(input_n, self.hidden_n)\n', (340, 364), False, 'from torch import nn, Tensor\n'), ((384, 423), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_n', 'self.hidden_n'], {}), '(self.hidden_n, self.hidden_n)\n', (393, 423), False, 'from torch import nn, Tensor\n'), ((443, 477), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_n', 'output_n'], {}), '(self.hidden_n, output_n)\n', (452, 477), False, 'from torch import nn, Tensor\n')]
|
from pathlib import Path
from typing import Callable, List, Optional, Union
import torch
from torch import Tensor
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.utils import stochastic_blockmodel_graph
class StochasticBlockModelDataset(InMemoryDataset):
r"""A synthetic graph dataset generated by the stochastic block model.
The node features of each block are sampled from normal distributions where
the centers of clusters are vertices of a hypercube, as computed by the
:meth:`sklearn.datasets.make_classification` method.
Args:
root (string): Root directory where the dataset should be saved.
block_sizes ([int] or LongTensor): The sizes of blocks.
edge_probs ([[float]] or FloatTensor): The density of edges going from
each block to each other block. Must be symmetric if the graph is
undirected.
num_channels (int, optional): The number of node features. If given
as :obj:`None`, node features are not generated.
(default: :obj:`None`)
is_undirected (bool, optional): Whether the graph to generate is
undirected. (default: :obj:`True`)
transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
every access. (default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes
in an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed
before being saved to disk. (default: :obj:`None`)
**kwargs (optional): The keyword arguments that are passed down to the
:meth:`sklearn.datasets.make_classification` method for drawing
node features.
"""
def __init__(
self,
root: str,
block_sizes: Union[List[int], Tensor],
edge_probs: Union[List[List[float]], Tensor],
num_channels: Optional[int] = None,
is_undirected: bool = True,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None,
**kwargs,
):
if not isinstance(block_sizes, torch.Tensor):
block_sizes = torch.tensor(block_sizes, dtype=torch.long)
if not isinstance(edge_probs, torch.Tensor):
edge_probs = torch.tensor(edge_probs, dtype=torch.float)
self.block_sizes = block_sizes
self.edge_probs = edge_probs
self.num_channels = num_channels
self.is_undirected = is_undirected
self.kwargs = {
'n_informative': num_channels,
'n_redundant': 0,
'flip_y': 0.0,
'shuffle': False,
}
self.kwargs.update(kwargs)
super().__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def processed_dir(self) -> str:
return Path.joinpath(Path(self.root), self.__class__.__name__,
'processed')
@property
def processed_file_names(self) -> str:
block_sizes = self.block_sizes.view(-1).tolist()
hash1 = '-'.join([f'{x:.1f}' for x in block_sizes])
edge_probs = self.edge_probs.view(-1).tolist()
hash2 = '-'.join([f'{x:.1f}' for x in edge_probs])
return f'data_{self.num_channels}_{hash1}_{hash2}.pt'
def process(self):
from sklearn.datasets import make_classification
edge_index = stochastic_blockmodel_graph(
self.block_sizes, self.edge_probs, directed=not self.is_undirected)
num_samples = int(self.block_sizes.sum())
num_classes = self.block_sizes.size(0)
x = None
if self.num_channels is not None:
x, _ = make_classification(
n_samples=num_samples,
n_features=self.num_channels,
n_classes=num_classes,
weights=self.block_sizes / num_samples,
**self.kwargs,
)
x = torch.from_numpy(x).to(torch.float)
y = torch.arange(num_classes).repeat_interleave(self.block_sizes)
data = Data(x=x, edge_index=edge_index, y=y)
if self.pre_transform is not None:
data = self.pre_transform(data)
torch.save(self.collate([data]), self.processed_paths[0])
class RandomPartitionGraphDataset(StochasticBlockModelDataset):
r"""The random partition graph dataset from the `"How to Find Your
Friendly Neighborhood: Graph Attention Design with Self-Supervision"
<https://openreview.net/forum?id=Wi5KUNlqWty>`_ paper.
This is a synthetic graph of communities controlled by the node homophily
and the average degree, and each community is considered as a class.
The node features are sampled from normal distributions where the centers
of clusters are vertices of a hypercube, as computed by the
:meth:`sklearn.datasets.make_classification` method.
Args:
root (string): Root directory where the dataset should be saved.
num_classes (int): The number of classes.
num_nodes_per_class (int): The number of nodes per class.
node_homophily_ratio (float): The degree of node homophily.
average_degree (float): The average degree of the graph.
num_channels (int, optional): The number of node features. If given
as :obj:`None`, node features are not generated.
(default: :obj:`None`)
is_undirected (bool, optional): Whether the graph to generate is
undirected. (default: :obj:`True`)
transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
every access. (default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes
in an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
**kwargs (optional): The keyword arguments that are passed down
to :meth:`sklearn.datasets.make_classification` method in
drawing node features.
"""
def __init__(self, root, num_classes: int, num_nodes_per_class: int,
node_homophily_ratio: float, average_degree: float,
num_channels: Optional[int] = None,
is_undirected: bool = True,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None, **kwargs):
self._num_classes = num_classes
self.num_nodes_per_class = num_nodes_per_class
self.node_homophily_ratio = node_homophily_ratio
self.average_degree = average_degree
# (p_in + (C - 1) * p_out) / C = |E|/|V|^2
# i.e., p_in + (C - 1) * p_out = average_degree / num_nodes_per_class
ec_over_v2 = average_degree / num_nodes_per_class
p_in = node_homophily_ratio * ec_over_v2
p_out = (ec_over_v2 - p_in) / (num_classes - 1)
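        # Numeric example (values are illustrative): num_classes=10,
        # num_nodes_per_class=500, average_degree=5, node_homophily_ratio=0.5
        # gives ec_over_v2 = 0.01, p_in = 0.005 and p_out = 0.005 / 9 (about 0.00056).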
block_sizes = [num_nodes_per_class for _ in range(num_classes)]
edge_probs = [[p_out for _ in range(num_classes)]
for _ in range(num_classes)]
for r in range(num_classes):
edge_probs[r][r] = p_in
super().__init__(root, block_sizes, edge_probs, num_channels,
is_undirected, transform, pre_transform, **kwargs)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def processed_file_names(self) -> str:
return (f'data_{self.num_channels}_{self._num_classes}_'
f'{self.num_nodes_per_class}_{self.node_homophily_ratio:.1f}_'
f'{self.average_degree:.1f}.pt')
def process(self):
return super().process()
|
[
"torch_geometric.utils.stochastic_blockmodel_graph",
"torch.load",
"sklearn.datasets.make_classification",
"pathlib.Path",
"torch_geometric.data.Data",
"torch.arange",
"torch.tensor",
"torch.from_numpy"
] |
[((2986, 3021), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (2996, 3021), False, 'import torch\n'), ((3642, 3741), 'torch_geometric.utils.stochastic_blockmodel_graph', 'stochastic_blockmodel_graph', (['self.block_sizes', 'self.edge_probs'], {'directed': '(not self.is_undirected)'}), '(self.block_sizes, self.edge_probs, directed=not\n self.is_undirected)\n', (3669, 3741), False, 'from torch_geometric.utils import stochastic_blockmodel_graph\n'), ((4317, 4354), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'y': 'y'}), '(x=x, edge_index=edge_index, y=y)\n', (4321, 4354), False, 'from torch_geometric.data import Data, InMemoryDataset\n'), ((7779, 7814), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (7789, 7814), False, 'import torch\n'), ((2368, 2411), 'torch.tensor', 'torch.tensor', (['block_sizes'], {'dtype': 'torch.long'}), '(block_sizes, dtype=torch.long)\n', (2380, 2411), False, 'import torch\n'), ((2490, 2533), 'torch.tensor', 'torch.tensor', (['edge_probs'], {'dtype': 'torch.float'}), '(edge_probs, dtype=torch.float)\n', (2502, 2533), False, 'import torch\n'), ((3102, 3117), 'pathlib.Path', 'Path', (['self.root'], {}), '(self.root)\n', (3106, 3117), False, 'from pathlib import Path\n'), ((3928, 4087), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'num_samples', 'n_features': 'self.num_channels', 'n_classes': 'num_classes', 'weights': '(self.block_sizes / num_samples)'}), '(n_samples=num_samples, n_features=self.num_channels,\n n_classes=num_classes, weights=self.block_sizes / num_samples, **self.\n kwargs)\n', (3947, 4087), False, 'from sklearn.datasets import make_classification\n'), ((4239, 4264), 'torch.arange', 'torch.arange', (['num_classes'], {}), '(num_classes)\n', (4251, 4264), False, 'import torch\n'), ((4190, 4209), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (4206, 4209), False, 'import torch\n')]
|
import jwt
from app.repositories.admin_repo import AdminRepo
from app.repositories.student_repo import StudentRepo
from config import get_env
from functools import wraps
from flask import request, jsonify, make_response
class Auth:
""" This class will house Authentication and Authorization Methods """
""" Routes The Location Header Should Not Be Applied To """
location_header_ignore = [
'/locations'
]
""" Routes The Authentication Header Should Not Be Applied To """
authentication_header_ignore = [
'/auth/login',
'/auth/admin/login',
'/accounts/student/signup',
'/accounts/admin/signup',
'/docs',
'/status',
'/',
'loaderio-038d65e72fe3012184259133474caec4.txt'
]
@staticmethod
def check_token():
if request.method != 'OPTIONS':
for endpoint in Auth.authentication_header_ignore:
if request.path.find(endpoint) > -1: # If endpoint in request.path, ignore this check
return None
try:
token = Auth.get_token()
except Exception as e:
print(e)
return make_response(jsonify({'msg': str(e)}), 401)
try:
decoded = Auth.decode_token(token)
admin = AdminRepo().find_first(id=decoded['identity']['id'], auth_key=decoded['identity']['authKey'])
student = StudentRepo().find_first(id=decoded['identity']['id'], auth_key=decoded['identity']['authKey'])
user_exist = False
if admin or student:
user_exist = True
if not user_exist:
return make_response(jsonify({'msg': 'Token Invalid. Please Login Again'}), 401)
except Exception as e:
return make_response(jsonify({'msg': str(e)}), 401)
@staticmethod
def _get_user():
token = None
try:
token = Auth.get_token()
except Exception as e:
raise e
try:
if token:
return Auth.decode_token(token)['identity']
except Exception as e:
raise e
@staticmethod
def user(*keys):
user = Auth._get_user()
if keys:
if len(keys) > 1:
values = list()
for key in keys:
values.append(user[key]) if key in user else values.append(None)
return values
if len(keys) == 1 and keys[0] in user:
return user[keys[0]]
return user
@staticmethod
def get_token(request_obj=None):
if request_obj:
header = request_obj.headers.get('Authorization', None)
else:
header = request.headers.get('Authorization', None)
if not header:
raise Exception('Authorization Header is Expected')
header_parts = header.split()
if header_parts[0].lower() != 'bearer':
raise Exception('Authorization Header Must Start With Bearer')
elif len(header_parts) > 1:
return header_parts[1]
raise Exception('Internal Application Error')
@staticmethod
def decode_token(token):
jwt_secret = get_env('SECRET_KEY')
try:
decoded = jwt.decode(token, jwt_secret, verify=True)
return decoded
except jwt.ExpiredSignature:
raise Exception('Token is Expired')
except jwt.DecodeError:
raise Exception('Invalid Token - Could Not Verify Signature')
@staticmethod
def check_location_header():
if request.method != 'OPTIONS':
for endpoint in Auth.location_header_ignore:
if request.path.find(endpoint) > -1: # If endpoint in request.path, ignore this check
return None
try:
Auth.get_location()
except Exception as e:
return make_response(jsonify({'msg': str(e)}), 400)
@staticmethod
def get_location():
location = request.headers.get('X-Location', None)
if not location:
raise Exception('Location Header is Expected')
if not location.isdigit():
raise Exception('Location Header Value is Invalid')
return int(location)
@staticmethod
def has_permission(user_type):
def permission_checker(f):
@wraps(f)
def decorated(*args, **kwargs):
admin_repo = AdminRepo()
student_repo = StudentRepo()
user_id = Auth.user('id')
user_email = Auth.user('email')
if user_type == 'admin':
user = admin_repo.find_first(**{'id': user_id, 'email': user_email})
if not user_id:
return make_response(jsonify({'msg': 'Missing User ID in token'})), 400
if not user:
return make_response(jsonify({'msg': f'Access Error - {user_type.capitalize()} User Not Found'})), 400
if 'isPremium' in user.serialize():
return make_response(jsonify({'msg': f'Access Error - Permission Denied For Non-{user_type.capitalize()} Users'})), 400
elif user_type == 'student':
user = student_repo.find_first(**{'id': user_id, 'email': user_email})
if not user_id:
return make_response(jsonify({'msg': 'Missing User ID in token'})), 400
if not user:
return make_response(jsonify({'msg': f'Access Error - {user_type.capitalize()} User Not Found'})), 400
if 'isLecturer' in user.serialize():
return make_response(jsonify({'msg': f'Access Error - Permission Denied For Non-{user_type.capitalize()} Users'})), 400
return f(*args, **kwargs)
return decorated
return permission_checker
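# Illustrative use on a Flask view (route and view names are hypothetical):
#   @app.route('/admin/dashboard')
#   @Auth.has_permission('admin')
#   def dashboard():
#       return jsonify({'msg': 'ok'})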
|
[
"flask.request.headers.get",
"flask.request.path.find",
"app.repositories.admin_repo.AdminRepo",
"flask.jsonify",
"functools.wraps",
"app.repositories.student_repo.StudentRepo",
"config.get_env",
"jwt.decode"
] |
[((3288, 3309), 'config.get_env', 'get_env', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (3295, 3309), False, 'from config import get_env\n'), ((4108, 4147), 'flask.request.headers.get', 'request.headers.get', (['"""X-Location"""', 'None'], {}), "('X-Location', None)\n", (4127, 4147), False, 'from flask import request, jsonify, make_response\n'), ((2800, 2842), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""', 'None'], {}), "('Authorization', None)\n", (2819, 2842), False, 'from flask import request, jsonify, make_response\n'), ((3345, 3387), 'jwt.decode', 'jwt.decode', (['token', 'jwt_secret'], {'verify': '(True)'}), '(token, jwt_secret, verify=True)\n', (3355, 3387), False, 'import jwt\n'), ((4464, 4472), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (4469, 4472), False, 'from functools import wraps\n'), ((4546, 4557), 'app.repositories.admin_repo.AdminRepo', 'AdminRepo', ([], {}), '()\n', (4555, 4557), False, 'from app.repositories.admin_repo import AdminRepo\n'), ((4589, 4602), 'app.repositories.student_repo.StudentRepo', 'StudentRepo', ([], {}), '()\n', (4600, 4602), False, 'from app.repositories.student_repo import StudentRepo\n'), ((939, 966), 'flask.request.path.find', 'request.path.find', (['endpoint'], {}), '(endpoint)\n', (956, 966), False, 'from flask import request, jsonify, make_response\n'), ((3774, 3801), 'flask.request.path.find', 'request.path.find', (['endpoint'], {}), '(endpoint)\n', (3791, 3801), False, 'from flask import request, jsonify, make_response\n'), ((1335, 1346), 'app.repositories.admin_repo.AdminRepo', 'AdminRepo', ([], {}), '()\n', (1344, 1346), False, 'from app.repositories.admin_repo import AdminRepo\n'), ((1455, 1468), 'app.repositories.student_repo.StudentRepo', 'StudentRepo', ([], {}), '()\n', (1466, 1468), False, 'from app.repositories.student_repo import StudentRepo\n'), ((1737, 1790), 'flask.jsonify', 'jsonify', (["{'msg': 'Token Invalid. Please Login Again'}"], {}), "({'msg': 'Token Invalid. Please Login Again'})\n", (1744, 1790), False, 'from flask import request, jsonify, make_response\n'), ((4906, 4950), 'flask.jsonify', 'jsonify', (["{'msg': 'Missing User ID in token'}"], {}), "({'msg': 'Missing User ID in token'})\n", (4913, 4950), False, 'from flask import request, jsonify, make_response\n'), ((5537, 5581), 'flask.jsonify', 'jsonify', (["{'msg': 'Missing User ID in token'}"], {}), "({'msg': 'Missing User ID in token'})\n", (5544, 5581), False, 'from flask import request, jsonify, make_response\n')]
|
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
_devece_ddtype_tensor_map = {
'cpu': {
torch.float32: torch.FloatTensor,
torch.float64: torch.DoubleTensor,
torch.float16: torch.HalfTensor,
torch.uint8: torch.ByteTensor,
torch.int8: torch.CharTensor,
torch.int16: torch.ShortTensor,
torch.int32: torch.IntTensor,
torch.int64: torch.LongTensor,
torch.bool: torch.BoolTensor,
},
'cuda': {
torch.float32: torch.cuda.FloatTensor,
torch.float64: torch.cuda.DoubleTensor,
torch.float16: torch.cuda.HalfTensor,
torch.uint8: torch.cuda.ByteTensor,
torch.int8: torch.cuda.CharTensor,
torch.int16: torch.cuda.ShortTensor,
torch.int32: torch.cuda.IntTensor,
torch.int64: torch.cuda.LongTensor,
torch.bool: torch.cuda.BoolTensor,
}
}
def set_default_device(device_name):
"""Set the default device.
Cannot find a native torch function for setting default device. We have to
hack our own.
Args:
device_name (str): one of ("cpu", "cuda")
"""
torch.set_default_tensor_type(
_devece_ddtype_tensor_map[device_name][torch.get_default_dtype()])
def get_default_device():
return torch._C._get_default_device()
class device(object):
"""Specifies the device for tensors created in this context."""
def __init__(self, device_name):
"""Create the context with default device with name `device_name`
Args:
device_name (str): one of ("cpu", "cuda")
"""
self._device_name = device_name
def __enter__(self):
self._prev_device_name = get_default_device()
set_default_device(self._device_name)
def __exit__(self, type, value, traceback):
set_default_device(self._prev_device_name)
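# Illustrative usage (assumes a CUDA device is available):
#   with device("cuda"):
#       x = torch.zeros(3)   # created as a torch.cuda.FloatTensor
#   y = torch.zeros(3)       # back on the previous default device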
|
[
"torch._C._get_default_device",
"torch.get_default_dtype"
] |
[((1841, 1871), 'torch._C._get_default_device', 'torch._C._get_default_device', ([], {}), '()\n', (1869, 1871), False, 'import torch\n'), ((1774, 1799), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (1797, 1799), False, 'import torch\n')]
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import platform as platform_module
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.browser import browser_info as browser_info_module
from telemetry.internal.platform.profiler import profiler_finder
from telemetry.internal.util import file_handle
from telemetry.page import cache_temperature
from telemetry.page import traffic_setting
from telemetry import story as story_module
from telemetry.util import screenshot
from telemetry.util import wpr_modes
from telemetry.web_perf import timeline_based_measurement
def _PrepareFinderOptions(finder_options, test, device_type):
browser_options = finder_options.browser_options
# Set up user agent.
browser_options.browser_user_agent_type = device_type
test.CustomizeBrowserOptions(finder_options.browser_options)
if finder_options.profiler:
profiler_class = profiler_finder.FindProfiler(finder_options.profiler)
profiler_class.CustomizeBrowserOptions(browser_options.browser_type,
finder_options)
class SharedPageState(story_module.SharedState):
"""
This class contains all specific logic necessary to run a Chrome browser
benchmark.
"""
_device_type = None
def __init__(self, test, finder_options, story_set):
super(SharedPageState, self).__init__(test, finder_options, story_set)
if isinstance(test, timeline_based_measurement.TimelineBasedMeasurement):
if finder_options.profiler:
assert not 'trace' in finder_options.profiler, (
'This is a Timeline Based Measurement benchmark. You cannot run it '
'with trace profiler enabled. If you need trace data, tracing is '
'always enabled in Timeline Based Measurement benchmarks and you '
'can get the trace data with the default --output-format=html.')
# This is to avoid the cyclic-import caused by timeline_based_page_test.
from telemetry.web_perf import timeline_based_page_test
self._test = timeline_based_page_test.TimelineBasedPageTest(test)
else:
self._test = test
if (self._device_type == 'desktop' and
platform_module.GetHostPlatform().GetOSName() == 'chromeos'):
self._device_type = 'chromeos'
_PrepareFinderOptions(finder_options, self._test, self._device_type)
self._browser = None
self._finder_options = finder_options
self._possible_browser = self._GetPossibleBrowser(
self._test, finder_options)
self._first_browser = True
self._previous_page = None
self._current_page = None
self._current_tab = None
self._test.SetOptions(self._finder_options)
# TODO(crbug/404771): Move network controller options out of
# browser_options and into finder_options.
browser_options = self._finder_options.browser_options
if self._finder_options.use_live_sites:
wpr_mode = wpr_modes.WPR_OFF
elif browser_options.wpr_mode == wpr_modes.WPR_RECORD:
wpr_mode = wpr_modes.WPR_RECORD
else:
wpr_mode = wpr_modes.WPR_REPLAY
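    # (WPR = Web Page Replay, Chromium's tool for recording and replaying
    # network traffic so page loads are reproducible across runs.)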
self._extra_wpr_args = browser_options.extra_wpr_args
self.platform.SetFullPerformanceModeEnabled(
finder_options.full_performance_mode)
self.platform.network_controller.Open(wpr_mode)
self.platform.Initialize()
@property
def possible_browser(self):
return self._possible_browser
@property
def browser(self):
return self._browser
def _FindBrowser(self, finder_options):
possible_browser = browser_finder.FindBrowser(finder_options)
if not possible_browser:
raise browser_finder_exceptions.BrowserFinderException(
'No browser found.\n\nAvailable browsers:\n%s\n' %
'\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
return possible_browser
def _GetPossibleBrowser(self, test, finder_options):
"""Return a possible_browser with the given options for |test|. """
possible_browser = self._FindBrowser(finder_options)
finder_options.browser_options.browser_type = (
possible_browser.browser_type)
enabled, msg = decorators.IsEnabled(test, possible_browser)
if not enabled and not finder_options.run_disabled_tests:
logging.warning(msg)
logging.warning('You are trying to run a disabled test.')
if possible_browser.IsRemote():
possible_browser.RunRemote()
sys.exit(0)
return possible_browser
def DumpStateUponFailure(self, page, results):
# Dump browser standard output and log.
if self._browser:
self._browser.DumpStateUponFailure()
else:
logging.warning('Cannot dump browser state: No browser.')
# Capture a screenshot
if self._finder_options.browser_options.take_screenshot_for_failed_page:
fh = screenshot.TryCaptureScreenShot(self.platform, self._current_tab)
if fh is not None:
results.AddArtifact(page.name, 'screenshot', fh)
else:
logging.warning('Taking screenshots upon failures disabled.')
def DidRunStory(self, results):
if self._finder_options.profiler:
self._StopProfiling(results)
self._AllowInteractionForStage('after-run-story')
try:
self._previous_page = None
if self.ShouldStopBrowserAfterStoryRun(self._current_page):
self._StopBrowser()
elif self._current_tab:
# We might hang while trying to close the connection, and need to
# guarantee the page will get cleaned up to avoid future tests failing
# in weird ways.
try:
if self._current_tab.IsAlive():
self._current_tab.CloseConnections()
self._previous_page = self._current_page
except Exception as exc: # pylint: disable=broad-except
logging.warning(
'%s raised while closing tab connections; tab will be closed.',
type(exc).__name__)
self._current_tab.Close()
finally:
self._current_page = None
self._current_tab = None
def ShouldStopBrowserAfterStoryRun(self, story):
"""Specify whether the browser should be closed after running a story.
    Defaults to always closing the browser on all platforms to help keep
story runs independent of each other; except on ChromeOS where restarting
the browser is expensive.
Subclasses may override this method to change this behavior.
"""
del story
return self.platform.GetOSName() != 'chromeos'
@property
def platform(self):
return self._possible_browser.platform
def _AllowInteractionForStage(self, stage):
if self._finder_options.pause == stage:
raw_input('Pausing for interaction at %s... Press Enter to continue.' %
stage)
def _StartBrowser(self, page):
assert self._browser is None
self._AllowInteractionForStage('before-start-browser')
self._test.WillStartBrowser(self.platform)
# Create a deep copy of browser_options so that we can add page-level
# arguments and url to it without polluting the run for the next page.
browser_options = self._finder_options.browser_options.Copy()
if page.startup_url:
browser_options.startup_url = page.startup_url
browser_options.AppendExtraBrowserArgs(page.extra_browser_args)
self._possible_browser.SetUpEnvironment(browser_options)
self._browser = self._possible_browser.Create()
self._test.DidStartBrowser(self.browser)
if self._first_browser:
self._first_browser = False
self._AllowInteractionForStage('after-start-browser')
def WillRunStory(self, page):
if not self.platform.tracing_controller.is_tracing_running:
# For TimelineBasedMeasurement benchmarks, tracing has already started.
# For PageTest benchmarks, tracing has not yet started. We need to make
# sure no tracing state is left before starting the browser for PageTest
# benchmarks.
self.platform.tracing_controller.ClearStateIfNeeded()
page_set = page.page_set
self._current_page = page
if self._browser and page.startup_url:
assert not self.platform.tracing_controller.is_tracing_running, (
'Should not restart browser when tracing is already running. For '
'TimelineBasedMeasurement (TBM) benchmarks, you should not use '
'startup_url.')
self._StopBrowser()
started_browser = not self.browser
archive_path = page_set.WprFilePathForStory(page, self.platform.GetOSName())
# TODO(nednguyen, perezju): Ideally we should just let the network
# controller raise an exception when the archive_path is not found.
if archive_path is not None and not os.path.isfile(archive_path):
logging.warning('WPR archive missing: %s', archive_path)
archive_path = None
self.platform.network_controller.StartReplay(
archive_path, page.make_javascript_deterministic, self._extra_wpr_args)
if not self.browser:
self._StartBrowser(page)
if self.browser.supports_tab_control and self._test.close_tabs_before_run:
# Create a tab if there's none.
if len(self.browser.tabs) == 0:
self.browser.tabs.New()
# Ensure only one tab is open.
while len(self.browser.tabs) > 1:
self.browser.tabs[-1].Close()
# Must wait for tab to commit otherwise it can commit after the next
# navigation has begun and RenderFrameHostManager::DidNavigateMainFrame()
# will cancel the next navigation because it's pending. This manifests as
# the first navigation in a PageSet freezing indefinitely because the
# navigation was silently canceled when |self.browser.tabs[0]| was
# committed. Only do this when we just started the browser, otherwise
# there are cases where previous pages in a PageSet never complete
# loading so we'll wait forever.
if started_browser:
self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
# Reset traffic shaping to speed up cache temperature setup.
self.platform.network_controller.UpdateTrafficSettings(0, 0, 0)
cache_temperature.EnsurePageCacheTemperature(
self._current_page, self.browser, self._previous_page)
if self._current_page.traffic_setting != traffic_setting.NONE:
s = traffic_setting.NETWORK_CONFIGS[self._current_page.traffic_setting]
self.platform.network_controller.UpdateTrafficSettings(
round_trip_latency_ms=s.round_trip_latency_ms,
download_bandwidth_kbps=s.download_bandwidth_kbps,
upload_bandwidth_kbps=s.upload_bandwidth_kbps)
self._AllowInteractionForStage('before-run-story')
# Start profiling if needed.
if self._finder_options.profiler:
self._StartProfiling(self._current_page)
def CanRunStory(self, page):
return self.CanRunOnBrowser(browser_info_module.BrowserInfo(self.browser),
page)
def CanRunOnBrowser(self, browser_info,
page): # pylint: disable=unused-argument
"""Override this to return whether the browser brought up by this state
instance is suitable for running the given page.
Args:
browser_info: an instance of telemetry.core.browser_info.BrowserInfo
page: an instance of telemetry.page.Page
"""
del browser_info, page # unused
return True
def _PreparePage(self):
self._current_tab = self._test.TabForPage(self._current_page, self.browser)
if self._current_page.is_file:
self.platform.SetHTTPServerDirectories(
self._current_page.page_set.serving_dirs |
set([self._current_page.serving_dir]))
if self._test.clear_cache_before_each_run:
self._current_tab.ClearCache(force=True)
@property
def current_page(self):
return self._current_page
@property
def current_tab(self):
return self._current_tab
@property
def page_test(self):
return self._test
def RunStory(self, results):
self._PreparePage()
self._current_page.Run(self)
self._test.ValidateAndMeasurePage(
self._current_page, self._current_tab, results)
def TearDownState(self):
self._StopBrowser()
self.platform.StopAllLocalServers()
self.platform.network_controller.Close()
self.platform.SetFullPerformanceModeEnabled(False)
def _StopBrowser(self):
if self._browser:
self._browser.Close()
self._browser = None
if self._possible_browser:
self._possible_browser.CleanUpEnvironment()
def _StartProfiling(self, page):
output_file = os.path.join(self._finder_options.output_dir,
page.file_safe_name)
if self._finder_options.pageset_repeat != 1:
output_file = util.GetSequentialFileName(output_file)
self.browser.profiling_controller.Start(
self._finder_options.profiler, output_file)
def _StopProfiling(self, results):
if self.browser:
profiler_files = self.browser.profiling_controller.Stop()
for f in profiler_files:
if os.path.isfile(f):
results.AddProfilingFile(self._current_page,
file_handle.FromFilePath(f))
class SharedMobilePageState(SharedPageState):
_device_type = 'mobile'
class SharedDesktopPageState(SharedPageState):
_device_type = 'desktop'
class SharedTabletPageState(SharedPageState):
_device_type = 'tablet'
class Shared10InchTabletPageState(SharedPageState):
_device_type = 'tablet_10_inch'
|
[
"telemetry.util.screenshot.TryCaptureScreenShot",
"telemetry.web_perf.timeline_based_page_test.TimelineBasedPageTest",
"logging.warning",
"telemetry.internal.browser.browser_finder.FindBrowser",
"telemetry.core.util.GetSequentialFileName",
"telemetry.decorators.IsEnabled",
"telemetry.page.cache_temperature.EnsurePageCacheTemperature",
"os.path.isfile",
"telemetry.internal.platform.profiler.profiler_finder.FindProfiler",
"telemetry.internal.util.file_handle.FromFilePath",
"telemetry.internal.browser.browser_finder.GetAllAvailableBrowserTypes",
"telemetry.internal.browser.browser_info.BrowserInfo",
"os.path.join",
"sys.exit",
"telemetry.core.platform.GetHostPlatform"
] |
[((1201, 1254), 'telemetry.internal.platform.profiler.profiler_finder.FindProfiler', 'profiler_finder.FindProfiler', (['finder_options.profiler'], {}), '(finder_options.profiler)\n', (1229, 1254), False, 'from telemetry.internal.platform.profiler import profiler_finder\n'), ((3813, 3855), 'telemetry.internal.browser.browser_finder.FindBrowser', 'browser_finder.FindBrowser', (['finder_options'], {}), '(finder_options)\n', (3839, 3855), False, 'from telemetry.internal.browser import browser_finder\n'), ((4413, 4457), 'telemetry.decorators.IsEnabled', 'decorators.IsEnabled', (['test', 'possible_browser'], {}), '(test, possible_browser)\n', (4433, 4457), False, 'from telemetry import decorators\n'), ((10326, 10430), 'telemetry.page.cache_temperature.EnsurePageCacheTemperature', 'cache_temperature.EnsurePageCacheTemperature', (['self._current_page', 'self.browser', 'self._previous_page'], {}), '(self._current_page, self.\n browser, self._previous_page)\n', (10370, 10430), False, 'from telemetry.page import cache_temperature\n'), ((12764, 12830), 'os.path.join', 'os.path.join', (['self._finder_options.output_dir', 'page.file_safe_name'], {}), '(self._finder_options.output_dir, page.file_safe_name)\n', (12776, 12830), False, 'import os\n'), ((2337, 2389), 'telemetry.web_perf.timeline_based_page_test.TimelineBasedPageTest', 'timeline_based_page_test.TimelineBasedPageTest', (['test'], {}), '(test)\n', (2383, 2389), False, 'from telemetry.web_perf import timeline_based_page_test\n'), ((4526, 4546), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (4541, 4546), False, 'import logging\n'), ((4553, 4610), 'logging.warning', 'logging.warning', (['"""You are trying to run a disabled test."""'], {}), "('You are trying to run a disabled test.')\n", (4568, 4610), False, 'import logging\n'), ((4689, 4700), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4697, 4700), False, 'import sys\n'), ((4904, 4961), 'logging.warning', 'logging.warning', (['"""Cannot dump browser state: No browser."""'], {}), "('Cannot dump browser state: No browser.')\n", (4919, 4961), False, 'import logging\n'), ((5078, 5143), 'telemetry.util.screenshot.TryCaptureScreenShot', 'screenshot.TryCaptureScreenShot', (['self.platform', 'self._current_tab'], {}), '(self.platform, self._current_tab)\n', (5109, 5143), False, 'from telemetry.util import screenshot\n'), ((5242, 5303), 'logging.warning', 'logging.warning', (['"""Taking screenshots upon failures disabled."""'], {}), "('Taking screenshots upon failures disabled.')\n", (5257, 5303), False, 'import logging\n'), ((8947, 9003), 'logging.warning', 'logging.warning', (['"""WPR archive missing: %s"""', 'archive_path'], {}), "('WPR archive missing: %s', archive_path)\n", (8962, 9003), False, 'import logging\n'), ((11055, 11100), 'telemetry.internal.browser.browser_info.BrowserInfo', 'browser_info_module.BrowserInfo', (['self.browser'], {}), '(self.browser)\n', (11086, 11100), True, 'from telemetry.internal.browser import browser_info as browser_info_module\n'), ((12931, 12970), 'telemetry.core.util.GetSequentialFileName', 'util.GetSequentialFileName', (['output_file'], {}), '(output_file)\n', (12957, 12970), False, 'from telemetry.core import util\n'), ((8911, 8939), 'os.path.isfile', 'os.path.isfile', (['archive_path'], {}), '(archive_path)\n', (8925, 8939), False, 'import os\n'), ((13233, 13250), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (13247, 13250), False, 'import os\n'), ((2476, 2509), 'telemetry.core.platform.GetHostPlatform', 
'platform_module.GetHostPlatform', ([], {}), '()\n', (2507, 2509), True, 'from telemetry.core import platform as platform_module\n'), ((4028, 4086), 'telemetry.internal.browser.browser_finder.GetAllAvailableBrowserTypes', 'browser_finder.GetAllAvailableBrowserTypes', (['finder_options'], {}), '(finder_options)\n', (4070, 4086), False, 'from telemetry.internal.browser import browser_finder\n'), ((13342, 13369), 'telemetry.internal.util.file_handle.FromFilePath', 'file_handle.FromFilePath', (['f'], {}), '(f)\n', (13366, 13369), False, 'from telemetry.internal.util import file_handle\n')]
|
import math
def say_hi():
print("Hi")
say_hi()
x = 100
another_variable = 1
print(another_variable + x)
print("I have", x, "DKK")
y = x * 2
# Formatted values
first = "Carlotta"
last = "Porcelli"
name = "First Name: {}, Last Name: {}".format(first, last)
name2 = f"First Name: {first}, Last Name: {last}"
print(name)
print(name2)
decimal = 12.56345634534
integer = int(decimal)
print(integer)
a = '12'
float(a)
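# note: float(a) returns a new value and is discarded here; use a = float(a) to keep it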
# get input from user
value1 = input("Please enter the first value:\n")
value1 = int(value1)
value2 = input("Please enter the second value:\n")
value2 = int(value2)
print(f'You entered {value1} and {value2} \n the multiplication is: {value1 * value2}')
print(f'You entered {value1} and {value2} \n the squares of the numbers are : {value1 ** 2} and '
f'{value2 ** 2}')
print(math.sqrt(25))
|
[
"math.sqrt"
] |
[((814, 827), 'math.sqrt', 'math.sqrt', (['(25)'], {}), '(25)\n', (823, 827), False, 'import math\n')]
|
#!/usr/bin/python
import json
import subprocess
import os
os.chdir('../terraform/stage/')
output = subprocess.check_output(['terraform', 'output', '-json'])
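# 'terraform output -json' prints a JSON object keyed by output name, roughly
# {"app_external_ip": {"value": "x.x.x.x", ...}, ...} -- the shape assumed below.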
j = json.loads(output)
app_ip = j['app_external_ip']['value']
db_ip = j['db_external_ip']['value']
#print(app_ip)
#print(db_ip)
out = {
"_meta": {
"hostvars": {
"appserver": {
"ansible_host": app_ip
},
"dbserver": {
"ansible_host": db_ip
}
}
},
"all": {
"children": [
"app",
"db"
]
},
"app": {
"hosts": [ "appserver" ]
},
"db": {
"hosts": [ "dbserver" ]
}
}
print(json.dumps(out))
|
[
"subprocess.check_output",
"os.chdir",
"json.dumps",
"json.loads"
] |
[((58, 89), 'os.chdir', 'os.chdir', (['"""../terraform/stage/"""'], {}), "('../terraform/stage/')\n", (66, 89), False, 'import os\n'), ((99, 156), 'subprocess.check_output', 'subprocess.check_output', (["['terraform', 'output', '-json']"], {}), "(['terraform', 'output', '-json'])\n", (122, 156), False, 'import subprocess\n'), ((160, 178), 'json.loads', 'json.loads', (['output'], {}), '(output)\n', (170, 178), False, 'import json\n'), ((726, 741), 'json.dumps', 'json.dumps', (['out'], {}), '(out)\n', (736, 741), False, 'import json\n')]
|
import requests
import json
print('Requesting...')
url = 'https://platform.antares.id:8443/~/antares-cse/antares-id/{}/{}'.format('weather-station', 'station1')
headers = {
'X-M2M-Origin' : 'b4e89ce2436b9d90:202c7b14b849c084',
'Content-Type' : 'application/json;ty=4',
'Accept' : 'application/json',
}
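# 'ty=4' in the Content-Type is the oneM2M resource type for a contentInstance,
# i.e. this POST creates a new data sample under the device resource.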
data = 'helo'
strData = ''
try:
strData = json.dumps(data)
except:
strData = data
dataTemplate = {
"m2m:cin" : {
"con" : "hullo",
}
}
dataTemplate = json.dumps(dataTemplate)
print(dataTemplate)
print(url)
r = requests.post(url, headers=headers, data=dataTemplate)
response = r.text
print(response)
# return response
|
[
"requests.post",
"json.dumps"
] |
[((494, 518), 'json.dumps', 'json.dumps', (['dataTemplate'], {}), '(dataTemplate)\n', (504, 518), False, 'import json\n'), ((556, 610), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'dataTemplate'}), '(url, headers=headers, data=dataTemplate)\n', (569, 610), False, 'import requests\n'), ((362, 378), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (372, 378), False, 'import json\n')]
|
import urllib3
import json
TOKEN = ''
AUTHORIZATION = ''
http = urllib3.PoolManager()
def informacoes_basicas_aluno(matricula):
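    """Fetch basic data for a student (aluno) from the IFRN SUAP REST API, given the matricula (registration number)."""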
global TOKEN, http
r = http.request(
'GET',
'https://suap.ifrn.edu.br/api/v2/edu/alunos/{}/'.format(matricula),
headers={'Accept': 'application/json',
'X-CSRFToken': TOKEN,
'Authorization': AUTHORIZATION,
}
)
return json.loads(r.data.decode('utf-8'))
def informacoes_basicas_servidor(token_servidor, matricula):
global TOKEN, http
r = http.request(
'GET',
'https://suap.ifrn.edu.br/api/v2/edu/servidores/{}/'.format(matricula),
headers={'Accept': 'application/json',
'X-CSRFToken': token_servidor,
'Authorization': AUTHORIZATION,
}
)
return json.loads(r.data.decode('utf-8'))
def informacoes_curso(id_curso):
global TOKEN, http
r = http.request(
'GET',
'https://suap.ifrn.edu.br/api/v2/edu/cursos/{}/'.format(id_curso),
headers={'Accept': 'application/json',
'X-CSRFToken': TOKEN,
}
)
return json.loads(r.data.decode('utf-8'))
|
[
"urllib3.PoolManager"
] |
[((66, 87), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (85, 87), False, 'import urllib3\n')]
|
import vodka
import vodka.app
# make sure the plugin is available
import graphsrv_example.plugins.test_plot
# we don't do anything with the applet other than make sure it exists
@vodka.app.register("graphsrv_example")
class MyApplication(vodka.app.Application):
pass
|
[
"vodka.app.register"
] |
[((183, 221), 'vodka.app.register', 'vodka.app.register', (['"""graphsrv_example"""'], {}), "('graphsrv_example')\n", (201, 221), False, 'import vodka\n')]
|
import numpy as np
import torch
from torchvision import models
import torch.nn as nn
from nn_ood.data.cifar10 import Cifar10Data
from nn_ood.posteriors import LocalEnsemble, SCOD, Ensemble, Naive, KFAC, Mahalanobis
from nn_ood.distributions import CategoricalLogit
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from densenet import densenet121
# WHERE TO SAVE THE MODEL
FILENAME = "model"
## HYPERPARAMS
N_MODELS = 1
LEARNING_RATE = 0.001
SGD_MOMENTUM = 0.9
LR_DROP_FACTOR = 0.5
EPOCHS_PER_DROP = 2
BATCH_SIZE = 5
N_EPOCHS = 0
## SET UP DATASETS
dataset_class = Cifar10Data
test_dataset_args = ['val', 'ood', 'svhn', 'tIN', 'lsun']
## DEFINE VISUALIZATION FUNCTIONS
def plt_image(ax, inp):
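    """Undo the CIFAR-10 mean/std normalization on an image tensor and display it on the given matplotlib axes."""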
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.4914, 0.4822, 0.4465])
std = np.array([0.2471, 0.2435, 0.2616])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return ax.imshow(inp)
def viz_dataset_sample(ax, dataset, idx=0, model=None, unc_model=None):
input, target = dataset[idx]
plt_image(ax, input)
xlabel = 'Target: %d' % target
if unc_model is not None:
input = input.to(device)
pred, unc = unc_model(input.unsqueeze(0))
pred = pred[0].detach().cpu().numpy()
unc = unc.item()
xlabel += '\nPred: %d\nUnc: %0.3f' % (np.argmax(pred), unc)
elif model is not None:
input = input.to(device)
pred = model(input.unsqueeze(0))[0].detach().cpu().numpy()
xlabel += '\nPred: %d' % np.argmax(pred)
ax.set_xlabel(xlabel)
def viz_datasets(idx=0, unc_model=None, model=None):
num_plots = len(test_dataset_args)
fig, axes = plt.subplots(1,num_plots, figsize=[5*num_plots, 5], dpi=100)
for i, split in enumerate( test_dataset_args ):
dataset = dataset_class(split)
viz_dataset_sample(axes[i], dataset, idx=idx, unc_model=unc_model, model=model)
## USE CUDA IF POSSIBLE
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
classesToKeep = [0,1,2,3,4]
class classSelector(nn.Module):
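    """Keeps only the output entries (logits) whose indices appear in idx_list, i.e. the classes retained for training."""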
def __init__(self, idx_list):
super().__init__()
self.idx_list = idx_list
def forward(self,x):
result = x[..., self.idx_list]
return result
## MODEL SET UP
og_req_grads = []
def make_model():
global og_req_grads
base = densenet121(pretrained=True)
selector = classSelector(classesToKeep)
model = nn.Sequential(base, selector)
og_req_grads = [p.requires_grad for p in model.parameters()]
return model
def freeze_model(model, freeze_frac=None):
    # freeze the first freeze_frac fraction of the parameters
n_params = len(list(model.parameters()))
if freeze_frac is None:
freeze_frac = 1.
print(freeze_frac)
for i, p in enumerate(model.parameters()):
if i < freeze_frac*n_params:
p.requires_grad = False
# make last layer trainable
for i,m in enumerate(model.children()):
if i > 0:
break
for p in m.classifier.parameters():
p.requires_grad = True
def unfreeze_model(model):
global og_req_grads
# unfreeze everything
for p,v in zip( model.parameters(), og_req_grads):
p.requires_grad = v
def disable_batchnorm(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for n,p in m.named_parameters():
p.requires_grad_(False)
def get_features_and_classifier(model):
"""works for Densenet with class selector only"""
features = nn.Sequential(
model[0].features,
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
nn.Flatten()
)
classifier = nn.Sequential(
model[0].classifier,
model[1]
)
return features, classifier
## OPTIMIZATION
dist_fam = CategoricalLogit().to(device) #criterion = nn.MSELoss()
opt_class = torch.optim.SGD
opt_kwargs = {
'lr': LEARNING_RATE,
'momentum': SGD_MOMENTUM
}
sched_class = torch.optim.lr_scheduler.StepLR
sched_kwargs = {
'step_size': EPOCHS_PER_DROP,
'gamma': LR_DROP_FACTOR
}
# recipe for preparing uncertainty models
prep_unc_models = {
# 'local_ensemble': {
# 'class': LocalEnsemble,
# 'kwargs': {
# 'batch_size': 32,
# 'max_samples': 4,
# 'num_eigs': 20,
# 'device': 'gpu'
# }
# },
'scod_SRFT_s184_n30_freeze_0.85': {
'class': SCOD,
'kwargs': {
'num_samples': 184,
'num_eigs': 30,
'device':'gpu',
'sketch_type': 'srft'
},
'freeze': 0.85
},
# 'scod_SRFT_s76_n12': {
# 'class': SCOD,
# 'kwargs': {
# 'num_samples': 76,
# 'num_eigs': 12,
# 'device':'gpu',
# 'sketch_type': 'srft'
# },
# },
# 'scod_SRFT_s76_n12_freeze_0.5': {
# 'class': SCOD,
# 'kwargs': {
# 'num_samples': 76,
# 'num_eigs': 12,
# 'device':'gpu',
# 'sketch_type': 'srft'
# },
# 'freeze': 0.5,
# },
# 'scod_SRFT_s76_n12_freeze_0.25': {
# 'class': SCOD,
# 'kwargs': {
# 'num_samples': 76,
# 'num_eigs': 12,
# 'device':'gpu',
# 'sketch_type': 'srft'
# },
# 'freeze': 0.25,
# },
# 'scod_SRFT_s76_n12_freeze_0.75': {
# 'class': SCOD,
# 'kwargs': {
# 'num_samples': 76,
# 'num_eigs': 12,
# 'device':'gpu',
# 'sketch_type': 'srft'
# },
# 'freeze': 0.75,
# },
# 'scod_SRFT_s76_n12_freeze_0.85': {
# 'class': SCOD,
# 'kwargs': {
# 'num_samples': 76,
# 'num_eigs': 12,
# 'device':'gpu',
# 'sketch_type': 'srft'
# },
# 'freeze': 0.85,
# },
# 'scod_SRFT_s76_n12_freeze_1.0': {
# 'class': SCOD,
# 'kwargs': {
# 'num_samples': 76,
# 'num_eigs': 12,
# 'device':'gpu',
# 'sketch_type': 'srft'
# },
# 'freeze': 1.0,
# },
'kfac': {
'class': KFAC,
'kwargs': {
'batch_size': 16,
'device':'gpu',
'input_shape': [3,224,224]
},
},
# 'maha': {
# 'class': Mahalanobis,
# 'kwargs': {
# 'device': 'gpu',
# "features_and_classifier": get_features_and_classifier,
# 'num_classes': 5,
# }
# }
}
# recipe for testing uncertainty models
test_unc_models = {
# 'local_ensemble': {
# 'class': LocalEnsemble,
# 'kwargs': {
# 'num_eigs': 20,
# 'device': 'gpu',
# 'n_y_samp': 5,
# },
# 'load_name': 'local_ensemble',
# 'forward_kwargs': {
# 'n_samples': 1
# }
# },
# 'SCOD (T=76,k=12)': {
# 'class': SCOD,
# 'kwargs': {
# 'num_eigs': 12,
# 'device':'gpu',
# },
# 'load_name': 'scod_SRFT_s76_n12',
# 'forward_kwargs': {
# 'n_eigs': 12,
# }
# },
# 'SCOD_freeze_0.25 (T=76,k=12)': {
# 'class': SCOD,
# 'kwargs': {
# 'num_eigs': 12,
# 'device':'gpu',
# },
# 'freeze': 0.25,
# 'load_name': 'scod_SRFT_s76_n12_freeze_0.25',
# 'forward_kwargs': {
# 'n_eigs': 12,
# }
# },
# 'SCOD_freeze_0.5 (T=76,k=12)': {
# 'class': SCOD,
# 'kwargs': {
# 'num_eigs': 12,
# 'device':'gpu',
# },
# 'freeze': 0.5,
# 'load_name': 'scod_SRFT_s76_n12_freeze_0.5',
# 'forward_kwargs': {
# 'n_eigs': 12,
# }
# },
# 'SCOD_freeze_0.75 (T=76,k=12)': {
# 'class': SCOD,
# 'kwargs': {
# 'num_eigs': 12,
# 'device':'gpu',
# },
# 'freeze': 0.75,
# 'load_name': 'scod_SRFT_s76_n12_freeze_0.75',
# 'forward_kwargs': {
# 'n_eigs': 12,
# }
# },
# 'SCOD_freeze_0.85 (T=76,k=12)': {
# 'class': SCOD,
# 'kwargs': {
# 'num_eigs': 12,
# 'device':'gpu',
# },
# 'freeze': 0.85,
# 'load_name': 'scod_SRFT_s76_n12_freeze_0.85',
# 'forward_kwargs': {
# 'n_eigs': 12,
# }
# },
# 'SCOD_freeze_1.0 (T=76,k=12)': {
# 'class': SCOD,
# 'kwargs': {
# 'num_eigs': 12,
# 'device':'gpu',
# },
# 'freeze': 1.0,
# 'load_name': 'scod_SRFT_s76_n12_freeze_1.0',
# 'forward_kwargs': {
# 'n_eigs': 12,
# }
# },
'SCOD_freeze_0.85 (T=184,k=30)': {
'class': SCOD,
'kwargs': {
'num_eigs': 30,
'device':'gpu',
},
'freeze': 0.85,
'load_name': 'scod_SRFT_s184_n30_freeze_0.85',
'forward_kwargs': {
'n_eigs': 30,
}
},
'kfac_n1e6_s5000': {
'class': KFAC,
'kwargs': {
'device':'gpu',
'input_shape':[3,224,224]
},
'load_name': 'kfac',
'forward_kwargs': {
'norm': 1e6,
'scale': 5000.
}
},
# 'naive': {
# 'class': Naive,
# 'kwargs': {
# 'device':'gpu'
# },
# 'load_name': None,
# 'forward_kwargs': {}
# },
# 'ensemble': {
# 'class': Ensemble,
# 'kwargs': {
# 'device': 'gpu',
# },
# 'load_name': None,
# 'multi_model': True,
# 'forward_kwargs': {}
# }
# 'maha': {
# 'class': Mahalanobis,
# 'kwargs': {
# 'device': 'gpu',
# "features_and_classifier": get_features_and_classifier,
# 'num_classes': 5,
# },
# 'load_name': 'maha',
# 'forward_kwargs': {}
# }
}
# OOD PERFORMANCE TESTS
from nn_ood.utils.viz import summarize_ood_results, summarize_ood_results_by_error, plot_perf_vs_runtime
splits_to_use = test_dataset_args
err_thresh = 2.
in_dist_splits = test_dataset_args[0:1]
out_dist_splits = test_dataset_args[2:]
keys_to_compare = [
'SCOD (T=76,k=12)',
'SCOD_freeze_0.85 (T=184,k=30)',
'local_ensemble',
'kfac_n1e6_s5000',
'naive',
'maha',
]
colors = [
'xkcd:azure',
'xkcd:electric blue',
'xkcd:mango',
'xkcd:blood orange',
'xkcd:scarlet',
'xkcd:indigo'
]
plots_to_generate = {
'auroc_vs_runtime.pdf': {
'summary_fn': summarize_ood_results,
'summary_fn_args': [
in_dist_splits,
out_dist_splits
],
'summary_fn_kwargs': {
'keys_to_compare': keys_to_compare,
},
'plot_fn': plot_perf_vs_runtime,
'plot_fn_args': [],
'plot_fn_kwargs': {
'colors': colors,
'figsize': [4,2.5],
'dpi': 150,
'normalize_x': True,
},
'legend': {
'labels': [
'SCOD',
'SCOD (LL)',
'Local Ensemble',
'KFAC',
'Naive',
'Maha'
]
},
'title': "CIFAR10",
},
}
freeze_keys = [
'SCOD (T=76,k=12)',
'SCOD_freeze_0.25 (T=76,k=12)',
'SCOD_freeze_0.5 (T=76,k=12)',
'SCOD_freeze_0.75 (T=76,k=12)',
'SCOD_freeze_0.85 (T=76,k=12)',
'SCOD_freeze_1.0 (T=76,k=12)',
'SCOD_freeze_0.85 (T=184,k=30)',
]
freeze_labels = [
"SCOD (T=76,k=12)",
"SCOD (LL 0.75) (T=76,k=12)",
"SCOD (LL 0.50) (T=76,k=12)",
"SCOD (LL 0.25) (T=76,k=12)",
"SCOD (LL 0.15) (T=76,k=12)",
"SCOD (only linear) (T=76,k=12)",
"SCOD (LL 0.15) (T=184,k=30)"
]
import seaborn as sns
cmap = sns.color_palette("crest", as_cmap=True)
freeze_colors = [cmap(k/6) for k in range(6)] + ["xkcd:azure"]
for split, label in zip(['ood', 'svhn', 'tIN', 'lsun'],['CIFAR class >= 5','SVHN','TinyImageNet', 'LSUN']):
plot = {
'summary_fn': summarize_ood_results,
'summary_fn_args': [
in_dist_splits,
[split],
],
'summary_fn_kwargs': {
'keys_to_compare': freeze_keys,
},
'plot_fn': plot_perf_vs_runtime,
'plot_fn_args': [],
'plot_fn_kwargs': {
'colors': freeze_colors,
'figsize': [6,3],
'dpi': 150,
'normalize_x': True,
},
'legend': {
'labels': freeze_labels
},
'title': "In: CIFAR class < 5" + " | Out: " + label,
}
plot_name = 'freeze_test_%s.pdf' % split
plots_to_generate[plot_name] = plot
|
[
"torch.nn.AdaptiveAvgPool2d",
"nn_ood.distributions.CategoricalLogit",
"torch.nn.ReLU",
"torch.nn.Sequential",
"numpy.argmax",
"numpy.clip",
"numpy.array",
"densenet.densenet121",
"seaborn.color_palette",
"torch.cuda.is_available",
"matplotlib.pyplot.subplots",
"torch.nn.Flatten"
] |
[((11970, 12010), 'seaborn.color_palette', 'sns.color_palette', (['"""crest"""'], {'as_cmap': '(True)'}), "('crest', as_cmap=True)\n", (11987, 12010), True, 'import seaborn as sns\n'), ((783, 817), 'numpy.array', 'np.array', (['[0.4914, 0.4822, 0.4465]'], {}), '([0.4914, 0.4822, 0.4465])\n', (791, 817), True, 'import numpy as np\n'), ((828, 862), 'numpy.array', 'np.array', (['[0.2471, 0.2435, 0.2616]'], {}), '([0.2471, 0.2435, 0.2616])\n', (836, 862), True, 'import numpy as np\n'), ((900, 918), 'numpy.clip', 'np.clip', (['inp', '(0)', '(1)'], {}), '(inp, 0, 1)\n', (907, 918), True, 'import numpy as np\n'), ((1689, 1752), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'num_plots'], {'figsize': '[5 * num_plots, 5]', 'dpi': '(100)'}), '(1, num_plots, figsize=[5 * num_plots, 5], dpi=100)\n', (1701, 1752), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2393), 'densenet.densenet121', 'densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2376, 2393), False, 'from densenet import densenet121\n'), ((2450, 2479), 'torch.nn.Sequential', 'nn.Sequential', (['base', 'selector'], {}), '(base, selector)\n', (2463, 2479), True, 'import torch.nn as nn\n'), ((3706, 3750), 'torch.nn.Sequential', 'nn.Sequential', (['model[0].classifier', 'model[1]'], {}), '(model[0].classifier, model[1])\n', (3719, 3750), True, 'import torch.nn as nn\n'), ((1988, 2013), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2011, 2013), False, 'import torch\n'), ((3618, 3627), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3625, 3627), True, 'import torch.nn as nn\n'), ((3637, 3660), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (3657, 3660), True, 'import torch.nn as nn\n'), ((3670, 3682), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3680, 3682), True, 'import torch.nn as nn\n'), ((3846, 3864), 'nn_ood.distributions.CategoricalLogit', 'CategoricalLogit', ([], {}), '()\n', (3862, 3864), False, 'from nn_ood.distributions import CategoricalLogit\n'), ((1345, 1360), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (1354, 1360), True, 'import numpy as np\n'), ((1528, 1543), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (1537, 1543), True, 'import numpy as np\n')]
|
import sys
import shutil
import hashlib
import bz2
from shutil import copyfileobj
import os
class Disk:
srcPath = None
destPath = None
def __init__(self):
return
def set_src_path(self, srcpath):
self.srcPath = srcpath
def set_dst_path(self, dstpath):
self.destPath = dstpath
def disk_copy(self):
try:
shutil.copy2(self.srcPath, self.destPath)
except IOError as e:
print("Unable to copy file: %s" % e)
def remove(self):
dsk = self.destPath
if(dsk):
try:
os.remove(dsk)
print("Uncompressed backup image removed")
except OSError as e:
print("Error: %s - %s." % (e.filename,e.strerror))
else:
print("Sorry, I can not find $s file." % dsk)
def disk_check_sum(self):
if self.checksum(self.srcPath) == self.checksum(self.destPath):
return True
else:
return False
def checksum(self, n):
file_f = open(n, 'rb')
hash_h = hashlib.sha256()
for chunk in self.chunker(file_f, 4096):
hash_h.update(chunk)
print((hash_h.hexdigest()))
file_f.close()
return hash_h.hexdigest()
@staticmethod
def chunker(fileobj, size):
while True:
data = fileobj.read(size)
if not data:
return
yield data
@staticmethod
def compress(source, dest):
if "_snapshot" in dest:
dest = dest.split("_snapshot")[0] + ".img"
else:
dest = dest + ".img"
with open(source, 'rb') as data:
with bz2.BZ2File(dest + ".bz2", 'wb') as outfile:
copyfileobj(data, outfile)
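# Minimal usage sketch (the paths below are placeholders, not part of the module):
if __name__ == "__main__":
    backup = Disk()
    backup.set_src_path("/tmp/source.img")
    backup.set_dst_path("/tmp/backup.img")
    backup.disk_copy()
    # compress and remove the uncompressed copy only if the checksums match
    if backup.disk_check_sum():
        Disk.compress(backup.destPath, backup.destPath)
        backup.remove()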
|
[
"os.remove",
"shutil.copy2",
"bz2.BZ2File",
"hashlib.sha256",
"shutil.copyfileobj"
] |
[((1089, 1105), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (1103, 1105), False, 'import hashlib\n'), ((380, 421), 'shutil.copy2', 'shutil.copy2', (['self.srcPath', 'self.destPath'], {}), '(self.srcPath, self.destPath)\n', (392, 421), False, 'import shutil\n'), ((601, 615), 'os.remove', 'os.remove', (['dsk'], {}), '(dsk)\n', (610, 615), False, 'import os\n'), ((1704, 1736), 'bz2.BZ2File', 'bz2.BZ2File', (["(dest + '.bz2')", '"""wb"""'], {}), "(dest + '.bz2', 'wb')\n", (1715, 1736), False, 'import bz2\n'), ((1765, 1791), 'shutil.copyfileobj', 'copyfileobj', (['data', 'outfile'], {}), '(data, outfile)\n', (1776, 1791), False, 'from shutil import copyfileobj\n')]
|
""" Cisco_IOS_XR_shellutil_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR shellutil package configuration.
This module contains definitions
for the following management objects\:
host\-names\: Container Schema for hostname configuration
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class HostNames(_Entity_):
"""
Container Schema for hostname configuration
.. attribute:: host_name
Configure system's hostname
**type**\: str
"""
_prefix = 'shellutil-cfg'
_revision = '2015-10-12'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(HostNames, self).__init__()
self._top_entity = None
self.yang_name = "host-names"
self.yang_parent_name = "Cisco-IOS-XR-shellutil-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
])
self.host_name = None
self._segment_path = lambda: "Cisco-IOS-XR-shellutil-cfg:host-names"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(HostNames, ['host_name'], name, value)
def clone_ptr(self):
self._top_entity = HostNames()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_shellutil_cfg as meta
return meta._meta_table['HostNames']['meta_info']
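# Minimal usage sketch, assuming the standard ydk-py CRUD workflow; the
# connection details below are placeholders, not part of this module:
if __name__ == "__main__":
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address="10.0.0.1", username="admin", password="admin")
    host_names = HostNames()
    host_names.host_name = "router-1"
    CRUDService().create(provider, host_names)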
|
[
"collections.OrderedDict",
"ydk.types.YLeaf"
] |
[((1517, 1532), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (1528, 1532), False, 'from collections import OrderedDict\n'), ((1596, 1625), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""host-name"""'], {}), "(YType.str, 'host-name')\n", (1601, 1625), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n')]
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username', [validators.InputRequired(),validators.Length(min=2,max=20)])
email = StringField('Email', [validators.InputRequired(), validators.Email()])
password = PasswordField('Password', [validators.InputRequired()])
confirm_password = PasswordField('Confirm Password', [validators.InputRequired(), validators.EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username is already taken. Please choose a different one')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email is already taken. Please choose a different one')
class LoginForm(FlaskForm):
email = StringField('Email', [validators.InputRequired(), validators.Email()])
password = PasswordField('Password', [validators.InputRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username', [validators.InputRequired(),validators.Length(min=2,max=20)])
email = StringField('Email', [validators.InputRequired(), validators.Email()])
    picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'], 'File does not have an approved extension: jpg, png!')])
submit = SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('This Username is already taken. Please choose a different one')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('This Email Id is already taken. Please choose a different one')
class ResetForm(FlaskForm):
email = StringField('Email', [validators.InputRequired(), validators.Email()])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
            raise ValidationError('This Email Id doesn\'t exist. You must register first')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', [validators.InputRequired()])
confirm_password = PasswordField('Confirm Password', [validators.InputRequired(), validators.EqualTo('password')])
submit = SubmitField('Reset Password')
|
[
"wtforms.validators.Email",
"wtforms.validators.Length",
"wtforms.validators.InputRequired",
"wtforms.BooleanField",
"wtforms.SubmitField",
"wtforms.validators.EqualTo",
"flask_wtf.file.FileAllowed",
"flaskblog.models.User.query.filter_by",
"wtforms.validators.ValidationError"
] |
[((769, 791), 'wtforms.SubmitField', 'SubmitField', (['"""Sign Up"""'], {}), "('Sign Up')\n", (780, 791), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1476, 1503), 'wtforms.BooleanField', 'BooleanField', (['"""Remember Me"""'], {}), "('Remember Me')\n", (1488, 1503), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1518, 1538), 'wtforms.SubmitField', 'SubmitField', (['"""Login"""'], {}), "('Login')\n", (1529, 1538), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1931, 1952), 'wtforms.SubmitField', 'SubmitField', (['"""Update"""'], {}), "('Update')\n", (1942, 1952), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((2675, 2712), 'wtforms.SubmitField', 'SubmitField', (['"""Request Password Reset"""'], {}), "('Request Password Reset')\n", (2686, 2712), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((3198, 3227), 'wtforms.SubmitField', 'SubmitField', (['"""Reset Password"""'], {}), "('Reset Password')\n", (3209, 3227), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((414, 440), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (438, 440), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((441, 473), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(2)', 'max': '(20)'}), '(min=2, max=20)\n', (458, 473), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((510, 536), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (534, 536), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((538, 556), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (554, 556), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((604, 630), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (628, 630), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((692, 718), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (716, 718), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((720, 750), 'wtforms.validators.EqualTo', 'validators.EqualTo', (['"""password"""'], {}), "('password')\n", (738, 750), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((956, 1031), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Username is already taken. Please choose a different one"""'], {}), "('Username is already taken. Please choose a different one')\n", (971, 1031), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\n'), ((1198, 1270), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Email is already taken. Please choose a different one"""'], {}), "('Email is already taken. 
Please choose a different one')\n", (1213, 1270), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\n'), ((1339, 1365), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (1363, 1365), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1367, 1385), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (1383, 1385), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1431, 1457), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (1455, 1457), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1621, 1647), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (1645, 1647), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1648, 1680), 'wtforms.validators.Length', 'validators.Length', ([], {'min': '(2)', 'max': '(20)'}), '(min=2, max=20)\n', (1665, 1680), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1717, 1743), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (1741, 1743), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((1745, 1763), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (1761, 1763), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((2612, 2638), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (2636, 2638), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((2640, 2658), 'wtforms.validators.Email', 'validators.Email', ([], {}), '()\n', (2656, 2658), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((2875, 2946), 'wtforms.validators.ValidationError', 'ValidationError', (['"""This Email Id doesn\'t exsit. You must register first"""'], {}), '("This Email Id doesn\'t exsit. 
You must register first")\n', (2890, 2946), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\n'), ((3033, 3059), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (3057, 3059), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((3122, 3148), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {}), '()\n', (3146, 3148), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((3150, 3180), 'wtforms.validators.EqualTo', 'validators.EqualTo', (['"""password"""'], {}), "('password')\n", (3168, 3180), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField, validators\n'), ((858, 902), 'flaskblog.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (878, 902), False, 'from flaskblog.models import User\n'), ((1106, 1144), 'flaskblog.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (1126, 1144), False, 'from flaskblog.models import User\n'), ((1833, 1918), 'flask_wtf.file.FileAllowed', 'FileAllowed', (["['jpg', 'png']", '"""File doesnot have an approved extension: jpg, png!"""'], {}), "(['jpg', 'png'],\n 'File doesnot have an approved extension: jpg, png!')\n", (1844, 1918), False, 'from flask_wtf.file import FileField, FileAllowed\n'), ((2176, 2261), 'wtforms.validators.ValidationError', 'ValidationError', (['"""This Username is already taken. Please choose a different one"""'], {}), "('This Username is already taken. Please choose a different one'\n )\n", (2191, 2261), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\n'), ((2465, 2550), 'wtforms.validators.ValidationError', 'ValidationError', (['"""This Email Id is already taken. Please choose a different one"""'], {}), "('This Email Id is already taken. Please choose a different one'\n )\n", (2480, 2550), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\n'), ((2775, 2813), 'flaskblog.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (2795, 2813), False, 'from flaskblog.models import User\n'), ((2077, 2121), 'flaskblog.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (2097, 2121), False, 'from flaskblog.models import User\n'), ((2373, 2411), 'flaskblog.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (2393, 2411), False, 'from flaskblog.models import User\n')]
|
# Copyright 2013 IBM Corp.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
import webob.dec
from nova.api import openstack as openstack_api
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack import urlmap
from nova import test
from nova.tests.unit.api.openstack import fakes
class TestNoAuthMiddlewareV21(test.NoDBTestCase):
def setUp(self):
super(TestNoAuthMiddlewareV21, self).setUp()
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_networking(self)
self.wsgi_app = fakes.wsgi_app_v21(use_no_auth=True)
self.req_url = '/v2'
self.expected_url = "http://localhost/v2/user1_project"
def test_authorize_user(self):
req = webob.Request.blank(self.req_url)
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
req.headers['X-Auth-Project-Id'] = 'user1_project'
result = req.get_response(self.wsgi_app)
self.assertEqual(result.status, '204 No Content')
self.assertEqual(result.headers['X-Server-Management-Url'],
self.expected_url)
def test_authorize_user_trailing_slash(self):
# make sure it works with trailing slash on the request
self.req_url = self.req_url + '/'
req = webob.Request.blank(self.req_url)
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
req.headers['X-Auth-Project-Id'] = 'user1_project'
result = req.get_response(self.wsgi_app)
self.assertEqual(result.status, '204 No Content')
self.assertEqual(result.headers['X-Server-Management-Url'],
self.expected_url)
def test_auth_token_no_empty_headers(self):
req = webob.Request.blank(self.req_url)
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
req.headers['X-Auth-Project-Id'] = 'user1_project'
result = req.get_response(self.wsgi_app)
self.assertEqual(result.status, '204 No Content')
self.assertNotIn('X-CDN-Management-Url', result.headers)
self.assertNotIn('X-Storage-Url', result.headers)
class TestNoAuthMiddlewareV3(TestNoAuthMiddlewareV21):
def setUp(self):
super(TestNoAuthMiddlewareV3, self).setUp()
api_router = compute.APIRouterV3()
api_v3 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
api_router))
self.wsgi_app = urlmap.URLMap()
self.wsgi_app['/v3'] = api_v3
self.req_url = '/v3'
self.expected_url = "http://localhost/v3"
|
[
"nova.tests.unit.api.openstack.fakes.wsgi_app_v21",
"nova.tests.unit.api.openstack.fakes.stub_out_networking",
"webob.Request.blank",
"nova.tests.unit.api.openstack.fakes.stub_out_rate_limiting",
"nova.api.openstack.auth.NoAuthMiddlewareV3",
"nova.api.openstack.urlmap.URLMap",
"nova.api.openstack.compute.APIRouterV3"
] |
[((1060, 1100), 'nova.tests.unit.api.openstack.fakes.stub_out_rate_limiting', 'fakes.stub_out_rate_limiting', (['self.stubs'], {}), '(self.stubs)\n', (1088, 1100), False, 'from nova.tests.unit.api.openstack import fakes\n'), ((1109, 1140), 'nova.tests.unit.api.openstack.fakes.stub_out_networking', 'fakes.stub_out_networking', (['self'], {}), '(self)\n', (1134, 1140), False, 'from nova.tests.unit.api.openstack import fakes\n'), ((1165, 1201), 'nova.tests.unit.api.openstack.fakes.wsgi_app_v21', 'fakes.wsgi_app_v21', ([], {'use_no_auth': '(True)'}), '(use_no_auth=True)\n', (1183, 1201), False, 'from nova.tests.unit.api.openstack import fakes\n'), ((1345, 1378), 'webob.Request.blank', 'webob.Request.blank', (['self.req_url'], {}), '(self.req_url)\n', (1364, 1378), False, 'import webob\n'), ((1908, 1941), 'webob.Request.blank', 'webob.Request.blank', (['self.req_url'], {}), '(self.req_url)\n', (1927, 1941), False, 'import webob\n'), ((2363, 2396), 'webob.Request.blank', 'webob.Request.blank', (['self.req_url'], {}), '(self.req_url)\n', (2382, 2396), False, 'import webob\n'), ((2931, 2952), 'nova.api.openstack.compute.APIRouterV3', 'compute.APIRouterV3', ([], {}), '()\n', (2950, 2952), False, 'from nova.api.openstack import compute\n'), ((3071, 3086), 'nova.api.openstack.urlmap.URLMap', 'urlmap.URLMap', ([], {}), '()\n', (3084, 3086), False, 'from nova.api.openstack import urlmap\n'), ((2997, 3032), 'nova.api.openstack.auth.NoAuthMiddlewareV3', 'auth.NoAuthMiddlewareV3', (['api_router'], {}), '(api_router)\n', (3020, 3032), False, 'from nova.api.openstack import auth\n')]
|
from bdict import BDict
class Term:
def __init__(self, term=None):
self.term = term
self.times = 0
self.occur = dict()
def jsonfy(self):
d = dict()
d['term'] = self.term
d['times'] = self.times
d['occur'] = self.occur
return d
def unjsonfy(self, d):
self.term = d['term']
self.times = d['times']
self.occur = d['occur']
return self
def get_links(self):
bdict = BDict()
base_uri = 'http://shakespeare.mit.edu/'
return [(base_uri + bdict.search(1, key), len(self.occur[key])) for key in self.occur.keys()]
# define operations for the keys(terms),
# should only compare the word(term) field
def __lt__(self, other):
return self.term < other.term
def __le__(self, other):
return self.term <= other.term
def __gt__(self, other):
return self.term > other.term
def __ge__(self, other):
return self.term >= other.term
def __eq__(self, other):
return self.term == other.term
def insert(self, doc, ith):
if doc in self.occur:
self.occur[doc].append(ith)
else:
self.occur[doc] = [ith]
self.times += 1
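# Minimal usage sketch (the document ids below are placeholders):
if __name__ == "__main__":
    term = Term("love")
    term.insert("hamlet_act1", 10)
    term.insert("hamlet_act1", 42)
    term.insert("macbeth_act2", 7)
    print(term.times)     # 3 occurrences in total
    print(term.jsonfy())  # {'term': 'love', 'times': 3, 'occur': {...}}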
|
[
"bdict.BDict"
] |
[((485, 492), 'bdict.BDict', 'BDict', ([], {}), '()\n', (490, 492), False, 'from bdict import BDict\n')]
|
import gym.spaces
import numpy as np
import pytest
from metarl.envs.wrappers import Resize
from tests.fixtures.envs.dummy import DummyDiscrete2DEnv
class TestResize:
def setup_method(self):
self.width = 16
self.height = 16
self.env = DummyDiscrete2DEnv()
self.env_r = Resize(
DummyDiscrete2DEnv(), width=self.width, height=self.height)
def teardown_method(self):
self.env.close()
self.env_r.close()
def test_resize_invalid_environment_type(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Discrete(64)
Resize(self.env, width=self.width, height=self.height)
def test_resize_invalid_environment_shape(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Box(
low=0, high=255, shape=(4, ), dtype=np.uint8)
Resize(self.env, width=self.width, height=self.height)
def test_resize_output_observation_space(self):
assert self.env_r.observation_space.shape == (self.width, self.height)
def test_resize_output_reset(self):
assert self.env_r.reset().shape == (self.width, self.height)
def test_resize_output_step(self):
self.env_r.reset()
obs_r, _, _, _ = self.env_r.step(1)
assert obs_r.shape == (self.width, self.height)
|
[
"pytest.raises",
"tests.fixtures.envs.dummy.DummyDiscrete2DEnv",
"metarl.envs.wrappers.Resize"
] |
[((265, 285), 'tests.fixtures.envs.dummy.DummyDiscrete2DEnv', 'DummyDiscrete2DEnv', ([], {}), '()\n', (283, 285), False, 'from tests.fixtures.envs.dummy import DummyDiscrete2DEnv\n'), ((327, 347), 'tests.fixtures.envs.dummy.DummyDiscrete2DEnv', 'DummyDiscrete2DEnv', ([], {}), '()\n', (345, 347), False, 'from tests.fixtures.envs.dummy import DummyDiscrete2DEnv\n'), ((537, 562), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (550, 562), False, 'import pytest\n'), ((641, 695), 'metarl.envs.wrappers.Resize', 'Resize', (['self.env'], {'width': 'self.width', 'height': 'self.height'}), '(self.env, width=self.width, height=self.height)\n', (647, 695), False, 'from metarl.envs.wrappers import Resize\n'), ((763, 788), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (776, 788), False, 'import pytest\n'), ((921, 975), 'metarl.envs.wrappers.Resize', 'Resize', (['self.env'], {'width': 'self.width', 'height': 'self.height'}), '(self.env, width=self.width, height=self.height)\n', (927, 975), False, 'from metarl.envs.wrappers import Resize\n')]
|
import sys
import os
import gzip
ntfile = sys.argv[1]
rulesfile = sys.argv[2]
outdir = sys.argv[3]
edbfile = sys.argv[4]
# First process the rule files to understand which binary predicates we need
binaryPredicates = {}
unaryPredicates = {}
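# Illustrative (hypothetical) rule lines in the format this loop expects:
#   P1(X) :- TE(X,rdf:type,<http://example.org/SomeClass>)
#   P2(X,Y) :- TE(X,<http://example.org/someProperty>,Y)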
for line in open(rulesfile, 'rt'):
line = line[:-1]
tkns = line.split(' :- ')
predicateName = tkns[0][0:tkns[0].find('(')]
body = tkns[1]
if 'TE' in body:
tkns = body.split(',')
obj = tkns[2]
if '<' in obj:
# unary
assert(tkns[1] == 'rdf:type')
obj = obj[:-1]
unaryPredicates[predicateName] = obj
else:
binaryPredicates[predicateName] = tkns[1]
invBinaryPredicates = {}
for key, value in binaryPredicates.items():
invBinaryPredicates[value] = key
invUnaryPredicates = {}
for key, value in unaryPredicates.items():
invUnaryPredicates[value] = key
# Now populate the relations
shortunarynames = {}
shortbinarynames = {}
allshortnames = {}
binaryRelations = {}
unaryRelations = {}
old2newpred = {}
count = 0
for line in open(ntfile, 'rt'):
if count % 1000000 == 0:
print(count)
count += 1
try:
line = line[:-1]
tkns = line.split(' ')
subj = tkns[0]
pred = tkns[1]
obj = ''
for i in range(2, len(tkns)):
obj += tkns[i]
obj = obj[:-1]
if pred in invBinaryPredicates:
if pred not in shortbinarynames:
                # Change relname to something meaningful
idx = pred.rfind("#")
if idx == -1:
idx = pred.rfind("/")
relname = pred[idx + 1:-1]
relname = relname.lower()
# Check that relname is unique
idx = 0
while relname in allshortnames:
relname = relname + str(idx)
idx += 1
allshortnames[relname] = 0
shortbinarynames[pred] = relname
oldrelname = invBinaryPredicates[pred] # e.g., RP1
old2newpred[oldrelname] = relname
else:
relname = shortbinarynames[pred]
if relname not in binaryRelations:
binaryRelations[relname] = []
binaryRelations[relname].append((subj, obj))
elif pred == 'rdf::type' or pred == '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>':
if obj in invUnaryPredicates:
if obj not in shortunarynames:
idx = obj.rfind("#")
if idx == -1:
idx = obj.rfind("/")
relname = obj[idx+1:-1]
relname = relname.lower()
# Check that relname is unique
idx = 0
while relname in allshortnames:
relname = relname + str(idx)
idx += 1
if idx > 30:
print(allshortnames)
print(relname)
break
allshortnames[relname] = 0
shortunarynames[obj] = relname
oldrelname = invUnaryPredicates[obj]
old2newpred[oldrelname] = relname
else:
relname = shortunarynames[obj]
if relname not in unaryRelations:
unaryRelations[relname] = []
unaryRelations[relname].append(subj)
except:
print("Ignored line", line)
# Write the relations into files
if not os.path.exists(outdir):
os.makedirs(outdir)
for key, value in binaryRelations.items():
fout = gzip.open(outdir + '/e_' + key + '.csv.gz', 'wt')
for p in value:
fout.write(p[0] + '\t' + p[1] + '\n')
fout.close()
for key, value in unaryRelations.items():
fout = gzip.open(outdir + '/e_' + key + '.csv.gz', 'wt')
for p in value:
fout.write(p + '\n')
fout.close()
# Create the EDB file
fedb = open(edbfile, 'wt')
i = 0
for key in binaryRelations:
fedb.write('EDB' + str(i) + '_predname=e_' + key + '\n')
fedb.write('EDB' + str(i) + '_type=INMEMORY\n')
fedb.write('EDB' + str(i) + '_param0=' + outdir + '\n')
fedb.write('EDB' + str(i) + '_param1=e_' + key + '\n')
fedb.write('EDB' + str(i) + '_param2=t\n')
fedb.write('\n')
i += 1
for key in unaryRelations:
fedb.write('EDB' + str(i) + '_predname=e_' + key + '\n')
fedb.write('EDB' + str(i) + '_type=INMEMORY\n')
fedb.write('EDB' + str(i) + '_param0=' + outdir + '\n')
fedb.write('EDB' + str(i) + '_param1=e_' + key + '\n')
fedb.write('EDB' + str(i) + '_param2=t\n')
fedb.write('\n')
i += 1
fedb.close()
# Create a new rulefile with the replaced predicates
text = ""
for line in open(rulesfile, 'rt'):
if "TE" not in line:
text += line
for key, value in old2newpred.items():
text = text.replace(key + '(', value + '(')
frules = open(rulesfile + ".new", "wt")
invold2newpred = {}
for key, value in old2newpred.items():
invold2newpred[value] = key
# Write the dependencies from edb to idb predicates
for key in unaryRelations:
head = key
frules.write(head + '(X) :- e_' + key + '(X)\n')
for key in binaryRelations:
head = key
frules.write(head + '(X,Y) :- e_' + key + '(X,Y)\n')
frules.write(text)
frules.close()
|
[
"os.path.exists",
"os.makedirs",
"gzip.open"
] |
[((3678, 3700), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (3692, 3700), False, 'import os\n'), ((3706, 3725), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (3717, 3725), False, 'import os\n'), ((3781, 3830), 'gzip.open', 'gzip.open', (["(outdir + '/e_' + key + '.csv.gz')", '"""wt"""'], {}), "(outdir + '/e_' + key + '.csv.gz', 'wt')\n", (3790, 3830), False, 'import gzip\n'), ((3968, 4017), 'gzip.open', 'gzip.open', (["(outdir + '/e_' + key + '.csv.gz')", '"""wt"""'], {}), "(outdir + '/e_' + key + '.csv.gz', 'wt')\n", (3977, 4017), False, 'import gzip\n')]
|
import pandas as pd
import os
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
import util
def gen():
meter_path = "../microphone"
meter_files = [f for f in listdir(meter_path) if isfile(join(meter_path, f))]
meter_files.sort()
meter_files = [f for f in meter_files if f[0:5] == "meter"]
meter_files
phonometer_path = "../phonometer"
phonometer_files = [f for f in listdir(phonometer_path) if isfile(join(phonometer_path, f))]
phonometer_files.sort()
phonometer_files = [f for f in phonometer_files if f[0:10] == "phonometer"]
phonometer_files
len(meter_files) == len(phonometer_files)
dataset = pd.DataFrame()
for m, p in zip(meter_files, phonometer_files):
# Step 1: read data from csv file.
microphone, phonometer = util.read_data(m, p);
# Step 2: drop first and last row in microphone csv.
util.drop_dumb_data(microphone)
# Step 3: merge Date and Time in the same field.
util.create_datetime(microphone, phonometer)
# Step 4: remove duplicates.
util.remove_duplicate(microphone, phonometer)
# Step 5: join between DataFrame on DateTime coloumn.
merged = util.join(microphone, phonometer)
# Step 6: remove and save NaN values.
util.remove_and_save_NaN(merged)
# Step 7: append in the dataset.
dataset = dataset.append(merged)
# Save the dataset in a csv file.
if not os.path.exists("DB_dataset_first_model.csv"):
dataset.to_csv("DB_dataset_first_model.csv", sep=' ')
dataset.shape
dataset.head()
dataset.tail()
################################################################################################
# SECOND DATASET #
################################################################################################
canarin_first_month = pd.read_csv("../canarin-first-month.csv", skiprows=4)
#print(canarin_first_month.shape)
#print(canarin_first_month.head())
#print(canarin_first_month.tail())
#print(canarin_first_month.drop(['GPS_Lat', 'GPS_Lng', 'GPS_Alt', 'Node', "Timestamp"], axis='columns', inplace=True))
#print(canarin_first_month.columns)
# - Renaming of the coloumns that have too long names.
canarin_first_month["Datetime"] = pd.to_datetime(canarin_first_month["Datetime(UTC+2)"])
canarin_first_month["Temperature"] = canarin_first_month["Temperature Ext"]
canarin_first_month["Humidity"] = canarin_first_month["Humidity Ext"]
# - Drop the columns that have been renamed.
canarin_first_month.drop(["Datetime(UTC+2)", "Temperature Ext", "Humidity Ext", "Node"], axis='columns', inplace=True)
canarin_first_month.head()
canarin_first_month.set_index("Datetime", inplace=True)
# - Expand the data of the canarin station per second, in order to have them at the same frequency as those of the dataset containing the decibels of the microphone and the sound level meter.
canarin_upsampled = canarin_first_month.resample('1S')
# - We use interpolation to fill in previously expanded data.
canarin_upsampled_interpolated = canarin_upsampled.interpolate(method="linear")
#print(type(canarin_upsampled_interpolated))
    # - Once expanded, the per-second canarin data are joined with the decibel dataset used above.
dataset_canarin_seconds = dataset.merge(canarin_upsampled_interpolated, left_index=True, right_index=True)
#print(dataset_canarin_seconds.shape)
# - Export in csv file the dataset.
if not os.path.exists("DB_dataset_canarin_second_model.csv"):
dataset_canarin_seconds.to_csv("DB_dataset_canarin_second_model.csv", sep=' ')
################################################################################################
# THIRD DATASET #
################################################################################################
meter_path = "../microphone-second-month"
meter_files = [f for f in listdir(meter_path) if isfile(os.path.join(meter_path, f))]
meter_files.sort()
meter_files = [f for f in meter_files if f[0:5] == "meter"]
#print(meter_files)
phonometer_path = "../phonometer-second-month"
phonometer_files = [f for f in listdir(phonometer_path) if isfile(os.path.join(phonometer_path, f))]
phonometer_files.sort()
phonometer_files = [f for f in phonometer_files if f[0:10] == "phonometer"]
#print(phonometer_files)
len(meter_files) == len(phonometer_files)
    # - Same steps as above to merge the microphone and phonometer files.
dataset_db_second_month = pd.DataFrame()
for m, p in zip(meter_files, phonometer_files):
# Step 1: read data from csv file.
microphone_second_month, phonometer_second_month = util.read_data(m, p, path_mic = "../microphone-second-month", path_phon = "../phonometer-second-month");
# Step 2: drop first and last row in microphone csv.
util.drop_dumb_data(microphone_second_month)
# Step 3: merge Date and Time in the same field.
util.create_datetime(microphone_second_month, phonometer_second_month)
# Step 4: remove duplicates.
util.remove_duplicate(microphone_second_month, phonometer_second_month)
# Step 5: join between DataFrame on DateTime coloumn.
merged_second_month = util.join(microphone_second_month, phonometer_second_month)
# Step 6: remove and save NaN values.
util.remove_and_save_NaN(merged_second_month)
# Step 7: append in the dataset.
dataset_db_second_month = dataset_db_second_month.append(merged_second_month)
#print(dataset_db_second_month.head())
#print(dataset_db_second_month.tail())
db_dataset_2_month = dataset.append(dataset_db_second_month)
#print(db_dataset_2_month.head())
#db_dataset_2_month.tail()
# - read second month canarin data for the third experiment
canarin_second_month = pd.read_csv("../canarin-second-month.csv", skiprows=4)
#print(canarin_second_month.head())
#canarin_second_month.tail()
canarin_second_month.drop(['GPS_Lat', 'GPS_Lng', 'GPS_Alt', 'Node', "Timestamp"], axis='columns', inplace=True)
#print(canarin_second_month.columns)
# - Renaming some coloumns.
canarin_second_month["Datetime"] = pd.to_datetime(canarin_second_month["Datetime(UTC+2)"])
canarin_second_month["Temperature"] = canarin_second_month["Temperature Ext"]
canarin_second_month["Humidity"] = canarin_second_month["Humidity Ext"]
canarin_second_month.drop(["Datetime(UTC+2)", "Temperature Ext", "Humidity Ext"], axis='columns', inplace=True)
canarin_second_month.head()
canarin_second_month.set_index("Datetime", inplace=True)
#print(canarin_second_month.head())
# - Append second month data in first month data.
canarin_2_month = canarin_first_month.append(canarin_second_month)
#canarin_2_month.head()
#canarin_2_month.tail()
# - Unlike the previous case, we are going to delete the data with NaN values.
canarin_2_month.dropna(inplace=True)
    # - Finally, to obtain the complete dataset for the analysis, join the dataset containing two months of decibel data with the one containing two months of canarin data.
dataset_canarin_minutes = db_dataset_2_month.merge(canarin_2_month, left_index=True, right_index=True)
print(dataset_canarin_minutes.head())
print(dataset_canarin_minutes.tail())
if not os.path.exists("DB_dataset_canarin_third_model.csv"):
dataset_canarin_minutes.to_csv("DB_dataset_canarin_third_model.csv", sep=' ')
# return datasets
return dataset, dataset_canarin_seconds, dataset_canarin_minutes
|
[
"pandas.DataFrame",
"util.create_datetime",
"os.listdir",
"util.remove_and_save_NaN",
"pandas.read_csv",
"util.join",
"util.remove_duplicate",
"os.path.exists",
"pandas.to_datetime",
"util.read_data",
"os.path.join",
"util.drop_dumb_data"
] |
[((1134, 1148), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1146, 1148), True, 'import pandas as pd\n'), ((2428, 2481), 'pandas.read_csv', 'pd.read_csv', (['"""../canarin-first-month.csv"""'], {'skiprows': '(4)'}), "('../canarin-first-month.csv', skiprows=4)\n", (2439, 2481), True, 'import pandas as pd\n'), ((2860, 2914), 'pandas.to_datetime', 'pd.to_datetime', (["canarin_first_month['Datetime(UTC+2)']"], {}), "(canarin_first_month['Datetime(UTC+2)'])\n", (2874, 2914), True, 'import pandas as pd\n'), ((5307, 5321), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5319, 5321), True, 'import pandas as pd\n'), ((6644, 6698), 'pandas.read_csv', 'pd.read_csv', (['"""../canarin-second-month.csv"""'], {'skiprows': '(4)'}), "('../canarin-second-month.csv', skiprows=4)\n", (6655, 6698), True, 'import pandas as pd\n'), ((7001, 7056), 'pandas.to_datetime', 'pd.to_datetime', (["canarin_second_month['Datetime(UTC+2)']"], {}), "(canarin_second_month['Datetime(UTC+2)'])\n", (7015, 7056), True, 'import pandas as pd\n'), ((1277, 1297), 'util.read_data', 'util.read_data', (['m', 'p'], {}), '(m, p)\n', (1291, 1297), False, 'import util\n'), ((1368, 1399), 'util.drop_dumb_data', 'util.drop_dumb_data', (['microphone'], {}), '(microphone)\n', (1387, 1399), False, 'import util\n'), ((1465, 1509), 'util.create_datetime', 'util.create_datetime', (['microphone', 'phonometer'], {}), '(microphone, phonometer)\n', (1485, 1509), False, 'import util\n'), ((1555, 1600), 'util.remove_duplicate', 'util.remove_duplicate', (['microphone', 'phonometer'], {}), '(microphone, phonometer)\n', (1576, 1600), False, 'import util\n'), ((1680, 1713), 'util.join', 'util.join', (['microphone', 'phonometer'], {}), '(microphone, phonometer)\n', (1689, 1713), False, 'import util\n'), ((1768, 1800), 'util.remove_and_save_NaN', 'util.remove_and_save_NaN', (['merged'], {}), '(merged)\n', (1792, 1800), False, 'import util\n'), ((1932, 1976), 'os.path.exists', 'os.path.exists', (['"""DB_dataset_first_model.csv"""'], {}), "('DB_dataset_first_model.csv')\n", (1946, 1976), False, 'import os\n'), ((4138, 4191), 'os.path.exists', 'os.path.exists', (['"""DB_dataset_canarin_second_model.csv"""'], {}), "('DB_dataset_canarin_second_model.csv')\n", (4152, 4191), False, 'import os\n'), ((5477, 5581), 'util.read_data', 'util.read_data', (['m', 'p'], {'path_mic': '"""../microphone-second-month"""', 'path_phon': '"""../phonometer-second-month"""'}), "(m, p, path_mic='../microphone-second-month', path_phon=\n '../phonometer-second-month')\n", (5491, 5581), False, 'import util\n'), ((5652, 5696), 'util.drop_dumb_data', 'util.drop_dumb_data', (['microphone_second_month'], {}), '(microphone_second_month)\n', (5671, 5696), False, 'import util\n'), ((5763, 5833), 'util.create_datetime', 'util.create_datetime', (['microphone_second_month', 'phonometer_second_month'], {}), '(microphone_second_month, phonometer_second_month)\n', (5783, 5833), False, 'import util\n'), ((5879, 5950), 'util.remove_duplicate', 'util.remove_duplicate', (['microphone_second_month', 'phonometer_second_month'], {}), '(microphone_second_month, phonometer_second_month)\n', (5900, 5950), False, 'import util\n'), ((6043, 6102), 'util.join', 'util.join', (['microphone_second_month', 'phonometer_second_month'], {}), '(microphone_second_month, phonometer_second_month)\n', (6052, 6102), False, 'import util\n'), ((6157, 6202), 'util.remove_and_save_NaN', 'util.remove_and_save_NaN', (['merged_second_month'], {}), '(merged_second_month)\n', (6181, 6202), False, 'import util\n'), 
((8189, 8241), 'os.path.exists', 'os.path.exists', (['"""DB_dataset_canarin_third_model.csv"""'], {}), "('DB_dataset_canarin_third_model.csv')\n", (8203, 8241), False, 'import os\n'), ((653, 672), 'os.listdir', 'listdir', (['meter_path'], {}), '(meter_path)\n', (660, 672), False, 'from os import listdir\n'), ((882, 906), 'os.listdir', 'listdir', (['phonometer_path'], {}), '(phonometer_path)\n', (889, 906), False, 'from os import listdir\n'), ((4661, 4680), 'os.listdir', 'listdir', (['meter_path'], {}), '(meter_path)\n', (4668, 4680), False, 'from os import listdir\n'), ((4919, 4943), 'os.listdir', 'listdir', (['phonometer_path'], {}), '(phonometer_path)\n', (4926, 4943), False, 'from os import listdir\n'), ((683, 702), 'os.path.join', 'join', (['meter_path', 'f'], {}), '(meter_path, f)\n', (687, 702), False, 'from os.path import isfile, join\n'), ((917, 941), 'os.path.join', 'join', (['phonometer_path', 'f'], {}), '(phonometer_path, f)\n', (921, 941), False, 'from os.path import isfile, join\n'), ((4691, 4718), 'os.path.join', 'os.path.join', (['meter_path', 'f'], {}), '(meter_path, f)\n', (4703, 4718), False, 'import os\n'), ((4954, 4986), 'os.path.join', 'os.path.join', (['phonometer_path', 'f'], {}), '(phonometer_path, f)\n', (4966, 4986), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>
#
import abc
import datetime
from oslo_config import cfg
import six
from cloudkitty import storage_state
storage_opts = [
cfg.IntOpt(
'retention_period',
default=2400,
help='Duration after which data should be cleaned up/aggregated. '
'Duration is given in hours. Defaults to 2400 (100 days)'
),
]
CONF = cfg.CONF
CONF.register_opts(storage_opts, 'storage')
@six.add_metaclass(abc.ABCMeta)
class BaseStorage(object):
"""Abstract class for v2 storage objects."""
def __init__(self, *args, **kwargs):
"""Left empty so that child classes don't need to implement this."""
@abc.abstractmethod
def init(self):
"""Called for storage backend initialization"""
# NOTE(peschk_l): scope_id must not be used by any v2 storage backend. It
# is only present for backward compatibility with the v1 storage. It will
# be removed together with the v1 storage
@abc.abstractmethod
def push(self, dataframes, scope_id=None):
"""Pushes dataframes to the storage backend
A dataframe has the following format::
{
"usage": {
"bananas": [ # metric name
{
"vol": {
"unit": "banana",
"qty": 1
},
"rating": {
"price": 1
},
"groupby": {
"xxx_id": "hello",
"yyy_id": "bye",
},
"metadata": {
"flavor": "chocolate",
"eaten_by": "gorilla",
},
}
],
"metric_name2": [...],
}
"period": {
"begin": "1239781290", # timestamp
"end": "1239793490", # timestamp
}
}
:param dataframes: List of dataframes
:type dataframes: list
"""
@abc.abstractmethod
def retrieve(self, begin=None, end=None,
filters=None, group_filters=None,
metric_types=None,
offset=0, limit=1000, paginate=True):
"""Returns the following dict::
{
'total': int, # total amount of measures found
'dataframes': list of dataframes,
}
:param begin: Start date
:type begin: datetime
:param end: End date
:type end: datetime
:param filters: Metadata to filter on. ex: {'flavor_id': '42'}
:type filters: dict
:param group_filters: Groupby to filter on. ex: {'project_id': '123ab'}
:type group_filters: dict
:param metric_types: Metric type to filter on.
:type metric_types: str or list
:param offset: Offset for pagination
:type offset: int
:param limit: Maximum amount of elements to return
:type limit: int
:param paginate: Defaults to True. If False, all found results
will be returned.
:type paginate: bool
:rtype: dict
"""
@abc.abstractmethod
def total(self, groupby=None,
begin=None, end=None,
metric_types=None,
filters=None, group_filters=None,
offset=0, limit=1000, paginate=True):
"""Returns a grouped total for given groupby.
:param groupby: Attributes on which to group by. These attributes must
be part of the 'groupby' section for the given metric
type in metrics.yml. In order to group by metric type,
add 'type' to the groupby list.
:type groupby: list of strings
:param begin: Start date
:type begin: datetime
:param end: End date
:type end: datetime
:param filters: Metadata to filter on. ex: {'flavor_id': '42'}
:type filters: dict
:param group_filters: Groupby to filter on. ex: {'project_id': '123ab'}
:type group_filters: dict
:param metric_types: Metric type to filter on.
:type metric_types: str or list
:param offset: Offset for pagination
:type offset: int
:param limit: Maximum amount of elements to return
:type limit: int
:param paginate: Defaults to True. If False, all found results
will be returned.
:type paginate: bool
:rtype: dict
Returns a dict with the following format::
{
'total': int, # total amount of results found
'results': list of results,
}
Each result has the following format::
{
'begin': XXX,
'end': XXX,
'rate': XXX,
'groupby1': XXX,
'groupby2': XXX
}
"""
@staticmethod
def get_retention():
"""Returns the retention period defined in the configuration.
:rtype: datetime.timedelta
"""
return datetime.timedelta(hours=CONF.storage.retention_period)
# NOTE(lpeschke): This is only kept for v1 storage backward compatibility
def get_tenants(self, begin=None, end=None):
return storage_state.StateManager().get_tenants(begin, end)
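# Illustrative sketch only: a minimal dataframe in the shape documented in
# push() above, plus a total() result in the shape documented there. All
# identifiers and values below are hypothetical examples, not real data.
EXAMPLE_DATAFRAME = {
    "usage": {
        "bananas": [
            {
                "vol": {"unit": "banana", "qty": 1},
                "rating": {"price": 1},
                "groupby": {"xxx_id": "hello", "yyy_id": "bye"},
                "metadata": {"flavor": "chocolate", "eaten_by": "gorilla"},
            },
        ],
    },
    "period": {"begin": "1239781290", "end": "1239793490"},
}
EXAMPLE_TOTAL_RESULT = {
    "total": 1,
    "results": [
        {"begin": "1239781290", "end": "1239793490", "rate": 1, "xxx_id": "hello"},
    ],
}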
|
[
"cloudkitty.storage_state.StateManager",
"datetime.timedelta",
"oslo_config.cfg.IntOpt",
"six.add_metaclass"
] |
[((1056, 1086), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (1073, 1086), False, 'import six\n'), ((780, 957), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""retention_period"""'], {'default': '(2400)', 'help': '"""Duration after which data should be cleaned up/aggregated. Duration is given in hours. Defaults to 2400 (100 days)"""'}), "('retention_period', default=2400, help=\n 'Duration after which data should be cleaned up/aggregated. Duration is given in hours. Defaults to 2400 (100 days)'\n )\n", (790, 957), False, 'from oslo_config import cfg\n'), ((5981, 6036), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'CONF.storage.retention_period'}), '(hours=CONF.storage.retention_period)\n', (5999, 6036), False, 'import datetime\n'), ((6180, 6208), 'cloudkitty.storage_state.StateManager', 'storage_state.StateManager', ([], {}), '()\n', (6206, 6208), False, 'from cloudkitty import storage_state\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pytest
from aiographite.graphite_encoder import GraphiteEncoder
@pytest.mark.parametrize("name", [
'abc_edf',
'abc @edf#',
'abc.@edf#',
'abc_ @ e_df#',
'a.b.c_ @ e_df#',
'a.b.___c d _feg',
'_ . .fda',
'_.',
'汉 字.汉*字',
'%2D%2Ea bcd',
'_hello world.%2E',
'www.zillow.com.%2Ehello%2D',
'',
('a' * 128) # test for very long string
])
def test_consistency(name):
assert GraphiteEncoder.decode(GraphiteEncoder.encode(name)) == name
@pytest.mark.parametrize("name", [
None,
1
])
def test_encode_invalid_input(name):
with pytest.raises(Exception):
GraphiteEncoder.encode(name)
@pytest.mark.parametrize("name", [
None,
1
])
def test_decode_invalid_input(name):
with pytest.raises(Exception):
GraphiteEncoder.decode(name)
|
[
"pytest.mark.parametrize",
"pytest.raises",
"aiographite.graphite_encoder.GraphiteEncoder.decode",
"aiographite.graphite_encoder.GraphiteEncoder.encode"
] |
[((117, 361), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['abc_edf', 'abc @edf#', 'abc.@edf#', 'abc_ @ e_df#', 'a.b.c_ @ e_df#',\n 'a.b.___c d _feg', '_ . .fda', '_.', '汉 字.汉*字', '%2D%2Ea bcd',\n '_hello world.%2E', 'www.zillow.com.%2Ehello%2D', '', 'a' * 128]"], {}), "('name', ['abc_edf', 'abc @edf#', 'abc.@edf#',\n 'abc_ @ e_df#', 'a.b.c_ @ e_df#', 'a.b.___c d _feg', '_ . .fda', '_.',\n '汉 字.汉*字', '%2D%2Ea bcd', '_hello world.%2E',\n 'www.zillow.com.%2Ehello%2D', '', 'a' * 128])\n", (140, 361), False, 'import pytest\n'), ((545, 587), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', '[None, 1]'], {}), "('name', [None, 1])\n", (568, 587), False, 'import pytest\n'), ((710, 752), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', '[None, 1]'], {}), "('name', [None, 1])\n", (733, 752), False, 'import pytest\n'), ((644, 668), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (657, 668), False, 'import pytest\n'), ((678, 706), 'aiographite.graphite_encoder.GraphiteEncoder.encode', 'GraphiteEncoder.encode', (['name'], {}), '(name)\n', (700, 706), False, 'from aiographite.graphite_encoder import GraphiteEncoder\n'), ((809, 833), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (822, 833), False, 'import pytest\n'), ((843, 871), 'aiographite.graphite_encoder.GraphiteEncoder.decode', 'GraphiteEncoder.decode', (['name'], {}), '(name)\n', (865, 871), False, 'from aiographite.graphite_encoder import GraphiteEncoder\n'), ((504, 532), 'aiographite.graphite_encoder.GraphiteEncoder.encode', 'GraphiteEncoder.encode', (['name'], {}), '(name)\n', (526, 532), False, 'from aiographite.graphite_encoder import GraphiteEncoder\n')]
|
# Py3 compat layer
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import arcpy
import glob
import os
import shutil
import sys
# create a handle to the windows kernel; want to make Win API calls
try:
import ctypes
from ctypes import wintypes
# pass str() to avoid bpo29082 in Python 2.7.13
kdll = ctypes.windll.LoadLibrary(str("kernel32.dll"))
except (ImportError, TypeError):
kdll = None
from .bootstrap_r import execute_r
from .github_release import save_url, release_info
from .rpath import (
r_lib_path,
r_path,
r_pkg_path,
r_pkg_version,
r_user_lib_path,
r_version,
arcmap_exists,
arcmap_path,
fnf_exception,
handle_fnf,
)
from .utils import mkdtemp, set_env_tmpdir
from .fs import getvolumeinfo, hardlinks_supported, junctions_supported
try:
import winreg
except ImportError:
# py 2
import _winreg as winreg
PACKAGE_NAME = 'arcgisbinding'
PACKAGE_VERSION = r_pkg_version()
def bridge_running(product):
""" Check if the R ArcGIS bridge is running. Installation wil fail
if the DLL is currently loaded."""
running = False
# check for the correct DLL
if product == 'Pro':
proxy_name = "rarcproxy_pro.dll"
else:
proxy_name = "rarcproxy.dll"
kdll.GetModuleHandleW.restype = wintypes.HMODULE
kdll.GetModuleHandleW.argtypes = [wintypes.LPCWSTR]
dll_handle = kdll.GetModuleHandleW(proxy_name) # memory address of DLL
if dll_handle is not None:
running = True
return running
def arcgis_platform():
""" ArcGIS platform details used internally."""
info = arcpy.GetInstallInfo()
install_dir = info['InstallDir']
arc_version = info['Version']
if info['ProductName'] == 'ArcGISPro':
product = 'Pro'
else:
# there are other levels, but this is a PYT run from toolbox,
# so unlikely to be a non-ArcMap context
product = 'ArcMap'
return (install_dir, arc_version, product)
def validate_environment(overwrite=None):
"""Make sure we have a version of the product that works, and that
the library isn't already loaded."""
(install_dir, arc_version, product) = arcgis_platform()
# earlier versions excluded by virtue of not having Python toolbox support
no_hook_versions = ('10.1', '10.2', '10.2.1', '10.2.2', '10.3')
valid_env = True
msg = []
    if arc_version in no_hook_versions and product != 'Pro':
msg.append("The ArcGIS R bridge requires ArcGIS 10.3.1 or later.")
valid_env = False
if arc_version in ('1.0', '1.0.2') and product == 'Pro':
msg.append("The ArcGIS R bridge requires ArcGIS Pro 1.1 or later.")
valid_env = False
if not overwrite and PACKAGE_VERSION:
msg.append("The ArcGIS R bridge is already installed, and "
"overwrite is disabled.")
valid_env = False
if kdll is None:
msg.append("Unable to connect to your Windows configuration, "
"this is likely due to an incorrect Python installation. "
"Try repairing your ArcGIS installation.")
valid_env = False
# check the library isn't loaded
if kdll is not None and bridge_running(product):
msg.append("The ArcGIS R bridge is currently in-use, restart the "
"application and try again.")
valid_env = False
if r_version() is None:
msg.append("It doesn't look like R is installed. Install R prior "
"to running this tool.")
valid_env = False
if not valid_env:
arcpy.AddError("\n\n".join(msg))
sys.exit()
def create_registry_entry(product, arc_version):
"""Create a registry link back to the arcgisbinding package."""
root_key = winreg.HKEY_CURRENT_USER
if product == 'Pro':
product_name = "ArcGISPro"
else:
product_name = "Desktop{}".format(arc_version)
reg_path = "SOFTWARE\\Esri\\{}".format(product_name)
package_key = 'RintegrationProPackagePath'
link_key = None
try:
full_access = (winreg.KEY_WOW64_64KEY + winreg.KEY_ALL_ACCESS)
# find the key, 64- or 32-bit we want it all
link_key = winreg.OpenKey(root_key, reg_path, 0, full_access)
except fnf_exception as error:
handle_fnf(error)
if link_key:
try:
arcpy.AddMessage("Using registry key to link install.")
binding_path = "{}\\{}".format(r_lib_path(), "arcgisbinding")
winreg.SetValueEx(link_key, package_key, 0,
winreg.REG_SZ, binding_path)
except fnf_exception as error:
handle_fnf(error)
def install_package(overwrite=False, r_library_path=r_lib_path()):
"""Install ArcGIS R bindings onto this machine."""
if overwrite is True:
overwrite = True
else:
overwrite = False
(install_dir, arc_version, product) = arcgis_platform()
arcmap_needs_link = False
# check that we're in a sane installation environment
validate_environment(overwrite)
# detect if we we have a 10.3.1 install that needs linking
if product == 'Pro' and arcmap_exists("10.3"):
arcmap_needs_link = True
msg_base = "Pro side by side with 10.3 detected,"
if arcmap_path() is not None:
msg = "{} installing bridge for both environments.".format(msg_base)
arcpy.AddMessage(msg)
else:
msg = "{} but unable to find install path.".format(msg_base) + \
"ArcGIS bridge must be manually installed in ArcGIS 10.3."
arcpy.AddWarning(msg)
# if we're going to install the bridge in 10.3.1, create the appropriate
# directory before trying to install.
if arc_version == '10.3.1' and product == 'ArcMap' or arcmap_needs_link:
r_integration_dir = os.path.join(arcmap_path(), "Rintegration")
# TODO escalate privs here? test on non-admin user
if not os.path.exists(r_integration_dir):
try:
write_test = os.path.join(install_dir, 'test.txt')
with open(write_test, 'w') as f:
f.write('test')
os.remove(write_test)
os.makedirs(r_integration_dir)
except IOError:
arcpy.AddError(
"Insufficient privileges to create 10.3.1 bridge directory."
" Please start {} as an administrator, by right clicking"
" the icon, selecting \"Run as Administrator\", then run this"
" script again.".format(product))
return
# set an R-compatible temporary folder, if needed.
orig_tmpdir = os.getenv("TMPDIR")
if not orig_tmpdir:
set_env_tmpdir()
download_url = release_info()[0]
if download_url is None:
arcpy.AddWarning(
"Unable to get current release information."
" Trying offline installation.")
local_install = False
base_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
zip_glob = glob.glob(os.path.join(base_path, "arcgisbinding*.zip"))
# see if we have a local copy of the binding
if zip_glob and os.path.exists(zip_glob[0]):
local_install = True
zip_path = zip_glob[0]
zip_name = os.path.basename(zip_path)
elif not download_url and not local_install:
arcpy.AddError(
"Unable to access online package, and no "
"local copy of package found.")
return
else:
local_install = False
zip_name = os.path.basename(download_url)
# check for a network-based R installation
if r_path() and r_path()[0:2] == r'\\':
arcpy.AddMessage(
"R installed on a network path, using fallback installation method.")
r_local_install = False
else:
r_local_install = True
# we have a release, write it to disk for installation
with mkdtemp() as temp_dir:
# For R 4.0+, check version from GitHub but install via repo
if r_version() and r_version().split(".")[0] == '4':
cmd = "install.packages(\"arcgisbinding\", repos=\"https://r.esri.com\", type=\"win.binary\")"
install_script = os.path.join(temp_dir, 'install.R')
with open(install_script, 'w') as f:
f.write(cmd)
rcmd_return = execute_r("Rscript", install_script)
if rcmd_return != 0:
arcpy.AddError("Failed to install bridge with `install.packages`, try manualy running the command `{cmd}` from an R session or RStudio.")
else:
package_path = os.path.join(temp_dir, zip_name)
if local_install:
arcpy.AddMessage("Found local copy of binding, installing from zip")
shutil.copyfile(zip_path, package_path)
else:
save_url(download_url, package_path)
if os.path.exists(package_path):
# TODO -- need to do UAC escalation here?
# call the R installation script
rcmd_return = 0
if r_local_install:
rcmd_return = execute_r('Rcmd', 'INSTALL', package_path)
if not r_local_install or rcmd_return != 0:
# if we don't have a per-user library, create one
r_user_lib = r_user_lib_path()
if not os.path.exists(r_user_lib):
try:
arcpy.AddMessage("Creating per-user library directory")
os.makedirs(r_user_lib)
except OSError:
arcpy.AddWarning("Failed to create per-user library.")
# Can't execute Rcmd in this context, write out a temporary
# script and run install.packages() from within an R session.
install_script = os.path.join(temp_dir, 'install.R')
with open(install_script, 'w') as f:
f.write("install.packages(\"{}\", repos=NULL)".format(
package_path.replace("\\", "/")))
rcmd_return = execute_r("Rscript", install_script)
if rcmd_return != 0:
arcpy.AddWarning("Fallback installation method failed.")
else:
arcpy.AddError("No package found at {}".format(package_path))
return
# return TMPDIR to its original value; only need it for Rcmd INSTALL
set_env_tmpdir(orig_tmpdir)
# at 10.4 and Pro <=1.2, if the user has installed a version with a non-
# numeric patch level (e.g. 3.2.4revised), and the bridge is installed
# into Program Files, the link will fail. In this case, set the
# appropriate registry key so that the bridge will still work. Note that
# this isn't ideal, because it will persist after updates, but it is
# better than the bridge failing to work at all.
if (arc_version == '10.4' and product == 'Desktop') or \
(arc_version in ('1.1', '1.1.1', '1.2')
and product == 'Pro'):
if r_version():
(r_major, r_minor, r_patchlevel) = r_version().split(".")
# if we have a patchlevel like '4revised' or '3alpha', and
# the global library path is used, then use the registry key.
if len(r_patchlevel) > 1 and 'Program Files' in r_library_path:
# create_registry_entry(product, arc_version)
msg = ("Currently, the bridge doesn't support patched releases"
" (e.g. 3.2.4 Revised) in a global install. Please use"
" another version of R.")
arcpy.AddError(msg)
return
# at 10.3.1, we _must_ have the bridge installed at the correct location.
# create a symlink that connects back to the correct location on disk.
if arc_version == '10.3.1' and product == 'ArcMap' or arcmap_needs_link:
link_dir = os.path.join(r_integration_dir, PACKAGE_NAME)
if os.path.exists(link_dir):
if junctions_supported(link_dir) or hardlinks_supported(link_dir):
# os.rmdir uses RemoveDirectoryW, and can delete a junction
os.rmdir(link_dir)
else:
shutil.rmtree(link_dir)
# set up the link
r_package_path = r_pkg_path()
if r_package_path:
arcpy.AddMessage("R package path: {}.".format(r_package_path))
else:
arcpy.AddError("Unable to locate R package library. Link failed.")
return
detect_msg = "ArcGIS 10.3.1 detected."
if junctions_supported(link_dir) or hardlinks_supported(link_dir):
arcpy.AddMessage("{} Creating link to package.".format(detect_msg))
kdll.CreateSymbolicLinkW(link_dir, r_package_path, 1)
else:
# working on a non-NTFS volume, copy instead
vol_info = getvolumeinfo(link_dir)
arcpy.AddMessage("{} Drive type: {}. Copying package files.".format(
detect_msg, vol_info[0]))
# NOTE: this will need to be resynced when the package is updated,
# if installed from the R side.
shutil.copytree(r_package_path, link_dir)
# execute as standalone script, get parameters from sys.argv
if __name__ == '__main__':
if len(sys.argv) == 2:
overwrite = sys.argv[1]
else:
overwrite = None
print("library path: {}".format(r_lib_path()))
install_package(overwrite=overwrite, r_library_path=r_lib_path())
|
[
"os.remove",
"arcpy.GetInstallInfo",
"os.makedirs",
"shutil.rmtree",
"os.path.basename",
"_winreg.SetValueEx",
"arcpy.AddMessage",
"os.path.exists",
"os.path.dirname",
"os.rmdir",
"arcpy.AddError",
"arcpy.AddWarning",
"shutil.copyfile",
"shutil.copytree",
"os.path.join",
"os.getenv",
"sys.exit",
"_winreg.OpenKey"
] |
[((1677, 1699), 'arcpy.GetInstallInfo', 'arcpy.GetInstallInfo', ([], {}), '()\n', (1697, 1699), False, 'import arcpy\n'), ((6762, 6781), 'os.getenv', 'os.getenv', (['"""TMPDIR"""'], {}), "('TMPDIR')\n", (6771, 6781), False, 'import os\n'), ((3677, 3687), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3685, 3687), False, 'import sys\n'), ((4250, 4300), '_winreg.OpenKey', 'winreg.OpenKey', (['root_key', 'reg_path', '(0)', 'full_access'], {}), '(root_key, reg_path, 0, full_access)\n', (4264, 4300), True, 'import _winreg as winreg\n'), ((6906, 7002), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Unable to get current release information. Trying offline installation."""'], {}), "(\n 'Unable to get current release information. Trying offline installation.')\n", (6922, 7002), False, 'import arcpy\n'), ((7157, 7202), 'os.path.join', 'os.path.join', (['base_path', '"""arcgisbinding*.zip"""'], {}), "(base_path, 'arcgisbinding*.zip')\n", (7169, 7202), False, 'import os\n'), ((7273, 7300), 'os.path.exists', 'os.path.exists', (['zip_glob[0]'], {}), '(zip_glob[0])\n', (7287, 7300), False, 'import os\n'), ((7381, 7407), 'os.path.basename', 'os.path.basename', (['zip_path'], {}), '(zip_path)\n', (7397, 7407), False, 'import os\n'), ((7785, 7876), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""R installed on a network path, using fallback installation method."""'], {}), "(\n 'R installed on a network path, using fallback installation method.')\n", (7801, 7876), False, 'import arcpy\n'), ((12136, 12181), 'os.path.join', 'os.path.join', (['r_integration_dir', 'PACKAGE_NAME'], {}), '(r_integration_dir, PACKAGE_NAME)\n', (12148, 12181), False, 'import os\n'), ((12194, 12218), 'os.path.exists', 'os.path.exists', (['link_dir'], {}), '(link_dir)\n', (12208, 12218), False, 'import os\n'), ((4405, 4460), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Using registry key to link install."""'], {}), "('Using registry key to link install.')\n", (4421, 4460), False, 'import arcpy\n'), ((4547, 4619), '_winreg.SetValueEx', 'winreg.SetValueEx', (['link_key', 'package_key', '(0)', 'winreg.REG_SZ', 'binding_path'], {}), '(link_key, package_key, 0, winreg.REG_SZ, binding_path)\n', (4564, 4619), True, 'import _winreg as winreg\n'), ((5453, 5474), 'arcpy.AddMessage', 'arcpy.AddMessage', (['msg'], {}), '(msg)\n', (5469, 5474), False, 'import arcpy\n'), ((5655, 5676), 'arcpy.AddWarning', 'arcpy.AddWarning', (['msg'], {}), '(msg)\n', (5671, 5676), False, 'import arcpy\n'), ((6020, 6053), 'os.path.exists', 'os.path.exists', (['r_integration_dir'], {}), '(r_integration_dir)\n', (6034, 6053), False, 'import os\n'), ((7098, 7123), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7113, 7123), False, 'import os\n'), ((7465, 7556), 'arcpy.AddError', 'arcpy.AddError', (['"""Unable to access online package, and no local copy of package found."""'], {}), "(\n 'Unable to access online package, and no local copy of package found.')\n", (7479, 7556), False, 'import arcpy\n'), ((7654, 7684), 'os.path.basename', 'os.path.basename', (['download_url'], {}), '(download_url)\n', (7670, 7684), False, 'import os\n'), ((8316, 8351), 'os.path.join', 'os.path.join', (['temp_dir', '"""install.R"""'], {}), "(temp_dir, 'install.R')\n", (8328, 8351), False, 'import os\n'), ((8721, 8753), 'os.path.join', 'os.path.join', (['temp_dir', 'zip_name'], {}), '(temp_dir, zip_name)\n', (8733, 8753), False, 'import os\n'), ((9011, 9039), 'os.path.exists', 'os.path.exists', (['package_path'], {}), '(package_path)\n', (9025, 9039), False, 'import 
os\n'), ((12662, 12728), 'arcpy.AddError', 'arcpy.AddError', (['"""Unable to locate R package library. Link failed."""'], {}), "('Unable to locate R package library. Link failed.')\n", (12676, 12728), False, 'import arcpy\n'), ((13399, 13440), 'shutil.copytree', 'shutil.copytree', (['r_package_path', 'link_dir'], {}), '(r_package_path, link_dir)\n', (13414, 13440), False, 'import shutil\n'), ((6101, 6138), 'os.path.join', 'os.path.join', (['install_dir', '"""test.txt"""'], {}), "(install_dir, 'test.txt')\n", (6113, 6138), False, 'import os\n'), ((6240, 6261), 'os.remove', 'os.remove', (['write_test'], {}), '(write_test)\n', (6249, 6261), False, 'import os\n'), ((6278, 6308), 'os.makedirs', 'os.makedirs', (['r_integration_dir'], {}), '(r_integration_dir)\n', (6289, 6308), False, 'import os\n'), ((8542, 8689), 'arcpy.AddError', 'arcpy.AddError', (['"""Failed to install bridge with `install.packages`, try manualy running the command `{cmd}` from an R session or RStudio."""'], {}), "(\n 'Failed to install bridge with `install.packages`, try manualy running the command `{cmd}` from an R session or RStudio.'\n )\n", (8556, 8689), False, 'import arcpy\n'), ((8800, 8868), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Found local copy of binding, installing from zip"""'], {}), "('Found local copy of binding, installing from zip')\n", (8816, 8868), False, 'import arcpy\n'), ((8885, 8924), 'shutil.copyfile', 'shutil.copyfile', (['zip_path', 'package_path'], {}), '(zip_path, package_path)\n', (8900, 8924), False, 'import shutil\n'), ((11843, 11862), 'arcpy.AddError', 'arcpy.AddError', (['msg'], {}), '(msg)\n', (11857, 11862), False, 'import arcpy\n'), ((12391, 12409), 'os.rmdir', 'os.rmdir', (['link_dir'], {}), '(link_dir)\n', (12399, 12409), False, 'import os\n'), ((12444, 12467), 'shutil.rmtree', 'shutil.rmtree', (['link_dir'], {}), '(link_dir)\n', (12457, 12467), False, 'import shutil\n'), ((10016, 10051), 'os.path.join', 'os.path.join', (['temp_dir', '"""install.R"""'], {}), "(temp_dir, 'install.R')\n", (10028, 10051), False, 'import os\n'), ((9501, 9527), 'os.path.exists', 'os.path.exists', (['r_user_lib'], {}), '(r_user_lib)\n', (9515, 9527), False, 'import os\n'), ((10386, 10442), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Fallback installation method failed."""'], {}), "('Fallback installation method failed.')\n", (10402, 10442), False, 'import arcpy\n'), ((9586, 9641), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Creating per-user library directory"""'], {}), "('Creating per-user library directory')\n", (9602, 9641), False, 'import arcpy\n'), ((9670, 9693), 'os.makedirs', 'os.makedirs', (['r_user_lib'], {}), '(r_user_lib)\n', (9681, 9693), False, 'import os\n'), ((9762, 9816), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Failed to create per-user library."""'], {}), "('Failed to create per-user library.')\n", (9778, 9816), False, 'import arcpy\n')]
|
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r"^metadata/$", views.all_metadata),
re_path(r"^metadata/(?P<abbr>[a-zA-Z-]+)/$", views.state_metadata),
re_path(
r"^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/"
r"(?P<chamber>upper|lower)/(?P<bill_id>.+)/$",
views.bill_detail,
),
re_path(
r"^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/" r"(?P<bill_id>.+)/$",
views.bill_detail,
),
re_path(r"^bills/(?P<billy_bill_id>[A-Z-]+B\d{8})/", views.bill_detail),
re_path(r"^bills/$", views.bill_list),
re_path(r"^legislators/(?P<id>[A-Z-]+L\d{6})/$", views.legislator_detail),
re_path(r"^legislators/$", views.legislator_list),
re_path(r"^legislators/geo/$", views.legislator_list, {"geo": True}),
re_path(r"districts/(?P<abbr>[a-zA-Z-]+)/$", views.district_list),
re_path(
r"districts/(?P<abbr>[a-zA-Z-]+)/(?P<chamber>upper|lower)/$",
views.district_list,
),
# removed views
re_path(r"^committees/(?P<id>[A-Z-]+C\d{6})/$", views.item_404),
re_path(r"^committees/$", views.empty_list),
re_path(r"^events/$", views.empty_list),
re_path(r"^events/(?P<id>[A-Z-]+E\d{8})/$", views.item_404),
]
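# Illustrative sanity check (hypothetical URL values, not part of the routing
# itself): the bill-detail pattern with a chamber segment should match a path
# shaped like "bills/<abbr>/<session>/<chamber>/<bill_id>/".
if __name__ == "__main__":
    import re
    _bill_pattern = (
        r"^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/"
        r"(?P<chamber>upper|lower)/(?P<bill_id>.+)/$"
    )
    _m = re.match(_bill_pattern, "bills/nc/2019/lower/HB-17/")
    assert _m and _m.group("chamber") == "lower"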
|
[
"django.urls.re_path"
] |
[((73, 115), 'django.urls.re_path', 're_path', (['"""^metadata/$"""', 'views.all_metadata'], {}), "('^metadata/$', views.all_metadata)\n", (80, 115), False, 'from django.urls import re_path\n'), ((122, 187), 'django.urls.re_path', 're_path', (['"""^metadata/(?P<abbr>[a-zA-Z-]+)/$"""', 'views.state_metadata'], {}), "('^metadata/(?P<abbr>[a-zA-Z-]+)/$', views.state_metadata)\n", (129, 187), False, 'from django.urls import re_path\n'), ((194, 320), 'django.urls.re_path', 're_path', (['"""^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/(?P<chamber>upper|lower)/(?P<bill_id>.+)/$"""', 'views.bill_detail'], {}), "(\n '^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/(?P<chamber>upper|lower)/(?P<bill_id>.+)/$'\n , views.bill_detail)\n", (201, 320), False, 'from django.urls import re_path\n'), ((352, 447), 'django.urls.re_path', 're_path', (['"""^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/(?P<bill_id>.+)/$"""', 'views.bill_detail'], {}), "('^bills/(?P<abbr>[a-zA-Z-]+)/(?P<session>.+)/(?P<bill_id>.+)/$',\n views.bill_detail)\n", (359, 447), False, 'from django.urls import re_path\n'), ((477, 548), 'django.urls.re_path', 're_path', (['"""^bills/(?P<billy_bill_id>[A-Z-]+B\\\\d{8})/"""', 'views.bill_detail'], {}), "('^bills/(?P<billy_bill_id>[A-Z-]+B\\\\d{8})/', views.bill_detail)\n", (484, 548), False, 'from django.urls import re_path\n'), ((554, 590), 'django.urls.re_path', 're_path', (['"""^bills/$"""', 'views.bill_list'], {}), "('^bills/$', views.bill_list)\n", (561, 590), False, 'from django.urls import re_path\n'), ((597, 670), 'django.urls.re_path', 're_path', (['"""^legislators/(?P<id>[A-Z-]+L\\\\d{6})/$"""', 'views.legislator_detail'], {}), "('^legislators/(?P<id>[A-Z-]+L\\\\d{6})/$', views.legislator_detail)\n", (604, 670), False, 'from django.urls import re_path\n'), ((676, 724), 'django.urls.re_path', 're_path', (['"""^legislators/$"""', 'views.legislator_list'], {}), "('^legislators/$', views.legislator_list)\n", (683, 724), False, 'from django.urls import re_path\n'), ((731, 798), 'django.urls.re_path', 're_path', (['"""^legislators/geo/$"""', 'views.legislator_list', "{'geo': True}"], {}), "('^legislators/geo/$', views.legislator_list, {'geo': True})\n", (738, 798), False, 'from django.urls import re_path\n'), ((805, 869), 'django.urls.re_path', 're_path', (['"""districts/(?P<abbr>[a-zA-Z-]+)/$"""', 'views.district_list'], {}), "('districts/(?P<abbr>[a-zA-Z-]+)/$', views.district_list)\n", (812, 869), False, 'from django.urls import re_path\n'), ((876, 970), 'django.urls.re_path', 're_path', (['"""districts/(?P<abbr>[a-zA-Z-]+)/(?P<chamber>upper|lower)/$"""', 'views.district_list'], {}), "('districts/(?P<abbr>[a-zA-Z-]+)/(?P<chamber>upper|lower)/$', views.\n district_list)\n", (883, 970), False, 'from django.urls import re_path\n'), ((1015, 1078), 'django.urls.re_path', 're_path', (['"""^committees/(?P<id>[A-Z-]+C\\\\d{6})/$"""', 'views.item_404'], {}), "('^committees/(?P<id>[A-Z-]+C\\\\d{6})/$', views.item_404)\n", (1022, 1078), False, 'from django.urls import re_path\n'), ((1084, 1126), 'django.urls.re_path', 're_path', (['"""^committees/$"""', 'views.empty_list'], {}), "('^committees/$', views.empty_list)\n", (1091, 1126), False, 'from django.urls import re_path\n'), ((1133, 1171), 'django.urls.re_path', 're_path', (['"""^events/$"""', 'views.empty_list'], {}), "('^events/$', views.empty_list)\n", (1140, 1171), False, 'from django.urls import re_path\n'), ((1178, 1237), 'django.urls.re_path', 're_path', (['"""^events/(?P<id>[A-Z-]+E\\\\d{8})/$"""', 'views.item_404'], {}), 
"('^events/(?P<id>[A-Z-]+E\\\\d{8})/$', views.item_404)\n", (1185, 1237), False, 'from django.urls import re_path\n')]
|
"""
Tests for the Sellers API class.
"""
import unittest
import mws
from .utils import CommonRequestTestTools
class SellersTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Sellers.
"""
# TODO: Add remaining methods for Sellers
def setUp(self):
self.api = mws.Sellers(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
def test_list_marketplace_participations(self):
"""
ListMarketplaceParticipations operation
"""
params = self.api.list_marketplace_participations()
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListMarketplaceParticipations')
def test_list_marketplace_participations_by_next_token(self):
"""
ListMarketplaceParticipationsByNextToken operation, by way of method decorator.
"""
next_token = 'token_<PASSWORD>'
params = self.api.list_marketplace_participations(next_token=next_token)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListMarketplaceParticipationsByNextToken')
self.assertEqual(params['NextToken'], next_token)
def test_list_marketplace_participations_by_next_token_alias(self):
"""
ListMarketplaceParticipationsByNextToken operation, by way of alias method.
"""
next_token = 'token_<PASSWORD>'
params = self.api.list_marketplace_participations_by_next_token(next_token)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListMarketplaceParticipationsByNextToken')
self.assertEqual(params['NextToken'], next_token)
|
[
"mws.Sellers"
] |
[((308, 431), 'mws.Sellers', 'mws.Sellers', (['self.CREDENTIAL_ACCESS', 'self.CREDENTIAL_SECRET', 'self.CREDENTIAL_ACCOUNT'], {'auth_token': 'self.CREDENTIAL_TOKEN'}), '(self.CREDENTIAL_ACCESS, self.CREDENTIAL_SECRET, self.\n CREDENTIAL_ACCOUNT, auth_token=self.CREDENTIAL_TOKEN)\n', (319, 431), False, 'import mws\n')]
|
import json
import requests
import html
import random
import time
from YorForger import dispatcher
from YorForger.modules.disable import DisableAbleCommandHandler
from telegram.ext import CallbackContext, CommandHandler, Filters, run_async, CallbackQueryHandler
from YorForger.modules.helper_funcs.chat_status import (is_user_admin)
from YorForger.modules.helper_funcs.extraction import extract_user
from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions
from telegram.error import BadRequest
def anime_quote():
url = "https://animechan.vercel.app/api/random"
    # the response's text attribute is a JSON (dictionary-like) string
response = requests.get(url)
    try:
        dic = json.loads(response.text)
    except Exception:
        # fall back to empty fields rather than failing with a NameError below
        dic = {"quote": "", "character": "", "anime": ""}
quote = dic["quote"]
character = dic["character"]
anime = dic["anime"]
return quote, character, anime
def quotes(update: Update, context: CallbackContext):
message = update.effective_message
quote, character, anime = anime_quote()
msg = f"<i>❝{quote}❞</i>\n\n<b>{character} from {anime}</b>"
keyboard = InlineKeyboardMarkup([[
InlineKeyboardButton(
text="Change🔁",
callback_data="change_quote")]])
message.reply_text(
msg,
reply_markup=keyboard,
parse_mode=ParseMode.HTML,
)
def change_quote(update: Update, context: CallbackContext):
query = update.callback_query
chat = update.effective_chat
message = update.effective_message
quote, character, anime = anime_quote()
msg = f"<i>❝{quote}❞</i>\n\n<b>{character} from {anime}</b>"
keyboard = InlineKeyboardMarkup([[
InlineKeyboardButton(
text="Change🔁",
callback_data="quote_change")]])
message.edit_text(msg, reply_markup=keyboard,
parse_mode=ParseMode.HTML)
def animequotes(update: Update, context: CallbackContext):
message = update.effective_message
name = message.reply_to_message.from_user.first_name if message.reply_to_message else message.from_user.first_name
keyboard = [[InlineKeyboardButton(text="Change", callback_data="changek_quote")]]
message.reply_photo(random.choice(QUOTES_IMG),reply_markup=InlineKeyboardMarkup(keyboard))
def changek_quote(update: Update, context: CallbackContext):
query = update.callback_query
chat = update.effective_chat
message = update.effective_message
keyboard = [[InlineKeyboardButton(text="Change", callback_data="quotek_change")]]
message.reply_photo(random.choice(QUOTES_IMG),reply_markup=InlineKeyboardMarkup(keyboard))
QUOTES_IMG = (
"https://i.imgur.com/Iub4RYj.jpg",
"https://i.imgur.com/uvNMdIl.jpg",
"https://i.imgur.com/YOBOntg.jpg",
"https://i.imgur.com/fFpO2ZQ.jpg",
"https://i.imgur.com/f0xZceK.jpg",
"https://i.imgur.com/RlVcCip.jpg",
"https://i.imgur.com/CjpqLRF.jpg",
"https://i.imgur.com/8BHZDk6.jpg",
"https://i.imgur.com/8bHeMgy.jpg",
"https://i.imgur.com/5K3lMvr.jpg",
"https://i.imgur.com/NTzw4RN.jpg",
"https://i.imgur.com/wJxryAn.jpg",
"https://i.imgur.com/9L0DWzC.jpg",
"https://i.imgur.com/sBe8TTs.jpg",
"https://i.imgur.com/1Au8gdf.jpg",
"https://i.imgur.com/28hFQeU.jpg",
"https://i.imgur.com/Qvc03JY.jpg",
"https://i.imgur.com/gSX6Xlf.jpg",
"https://i.imgur.com/iP26Hwa.jpg",
"https://i.imgur.com/uSsJoX8.jpg",
"https://i.imgur.com/OvX3oHB.jpg",
"https://i.imgur.com/JMWuksm.jpg",
"https://i.imgur.com/lhM3fib.jpg",
"https://i.imgur.com/64IYKkw.jpg",
"https://i.imgur.com/nMbyA3J.jpg",
"https://i.imgur.com/7KFQhY3.jpg",
"https://i.imgur.com/mlKb7zt.jpg",
"https://i.imgur.com/JCQGJVw.jpg",
"https://i.imgur.com/hSFYDEz.jpg",
"https://i.imgur.com/PQRjAgl.jpg",
"https://i.imgur.com/ot9624U.jpg",
"https://i.imgur.com/iXmqN9y.jpg",
"https://i.imgur.com/RhNBeGr.jpg",
"https://i.imgur.com/tcMVNa8.jpg",
"https://i.imgur.com/LrVg810.jpg",
"https://i.imgur.com/TcWfQlz.jpg",
"https://i.imgur.com/muAUdvJ.jpg",
"https://i.imgur.com/AtC7ZRV.jpg",
"https://i.imgur.com/sCObQCQ.jpg",
"https://i.imgur.com/AJFDI1r.jpg",
"https://i.imgur.com/TCgmRrH.jpg",
"https://i.imgur.com/LMdmhJU.jpg",
"https://i.imgur.com/eyyax0N.jpg",
"https://i.imgur.com/YtYxV66.jpg",
"https://i.imgur.com/292w4ye.jpg",
"https://i.imgur.com/6Fm1vdw.jpg",
"https://i.imgur.com/2vnBOZd.jpg",
"https://i.imgur.com/j5hI9Eb.jpg",
"https://i.imgur.com/cAv7pJB.jpg",
"https://i.imgur.com/jvI7Vil.jpg",
"https://i.imgur.com/fANpjsg.jpg",
"https://i.imgur.com/5o1SJyo.jpg",
"https://i.imgur.com/dSVxmh8.jpg",
"https://i.imgur.com/02dXlAD.jpg",
"https://i.imgur.com/htvIoGY.jpg",
"https://i.imgur.com/hy6BXOj.jpg",
"https://i.imgur.com/OuwzNYu.jpg",
"https://i.imgur.com/L8vwvc2.jpg",
"https://i.imgur.com/3VMVF9y.jpg",
"https://i.imgur.com/yzjq2n2.jpg",
"https://i.imgur.com/0qK7TAN.jpg",
"https://i.imgur.com/zvcxSOX.jpg",
"https://i.imgur.com/FO7bApW.jpg",
"https://i.imgur.com/KK06gwg.jpg",
"https://i.imgur.com/6lG4tsO.jpg"
)
ANIMEQUOTES_HANDLER = DisableAbleCommandHandler("animequotes", animequotes, run_async = True)
QUOTES_HANDLER = DisableAbleCommandHandler("quote", quotes, run_async = True)
CHANGE_QUOTE = CallbackQueryHandler(
change_quote, pattern=r"change_.*")
QUOTE_CHANGE = CallbackQueryHandler(
change_quote, pattern=r"quote_.*")
CHANGEK_QUOTE = CallbackQueryHandler(
changek_quote, pattern=r"changek_.*")
QUOTEK_CHANGE = CallbackQueryHandler(
changek_quote, pattern=r"quotek_.*")
dispatcher.add_handler(CHANGE_QUOTE)
dispatcher.add_handler(QUOTE_CHANGE)
dispatcher.add_handler(CHANGEK_QUOTE)
dispatcher.add_handler(QUOTEK_CHANGE)
dispatcher.add_handler(ANIMEQUOTES_HANDLER)
dispatcher.add_handler(QUOTES_HANDLER)
__command_list__ = [
"animequotes",
"quote"
]
__handlers__ = [
ANIMEQUOTES_HANDLER,
QUOTES_HANDLER
]
__mod_name__ = "AnimeQuotes"
__help__ = """
*Anime Quotes & Quotes*
/animequotes - gives a random anime quote
/quote - gives a random quote
"""
|
[
"telegram.ext.CallbackQueryHandler",
"json.loads",
"telegram.InlineKeyboardButton",
"YorForger.modules.disable.DisableAbleCommandHandler",
"random.choice",
"telegram.InlineKeyboardMarkup",
"YorForger.dispatcher.add_handler",
"requests.get"
] |
[((5427, 5496), 'YorForger.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (['"""animequotes"""', 'animequotes'], {'run_async': '(True)'}), "('animequotes', animequotes, run_async=True)\n", (5452, 5496), False, 'from YorForger.modules.disable import DisableAbleCommandHandler\n'), ((5516, 5574), 'YorForger.modules.disable.DisableAbleCommandHandler', 'DisableAbleCommandHandler', (['"""quote"""', 'quotes'], {'run_async': '(True)'}), "('quote', quotes, run_async=True)\n", (5541, 5574), False, 'from YorForger.modules.disable import DisableAbleCommandHandler\n'), ((5593, 5648), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['change_quote'], {'pattern': '"""change_.*"""'}), "(change_quote, pattern='change_.*')\n", (5613, 5648), False, 'from telegram.ext import CallbackContext, CommandHandler, Filters, run_async, CallbackQueryHandler\n'), ((5670, 5724), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['change_quote'], {'pattern': '"""quote_.*"""'}), "(change_quote, pattern='quote_.*')\n", (5690, 5724), False, 'from telegram.ext import CallbackContext, CommandHandler, Filters, run_async, CallbackQueryHandler\n'), ((5747, 5804), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['changek_quote'], {'pattern': '"""changek_.*"""'}), "(changek_quote, pattern='changek_.*')\n", (5767, 5804), False, 'from telegram.ext import CallbackContext, CommandHandler, Filters, run_async, CallbackQueryHandler\n'), ((5827, 5883), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['changek_quote'], {'pattern': '"""quotek_.*"""'}), "(changek_quote, pattern='quotek_.*')\n", (5847, 5883), False, 'from telegram.ext import CallbackContext, CommandHandler, Filters, run_async, CallbackQueryHandler\n'), ((5891, 5927), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['CHANGE_QUOTE'], {}), '(CHANGE_QUOTE)\n', (5913, 5927), False, 'from YorForger import dispatcher\n'), ((5928, 5964), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['QUOTE_CHANGE'], {}), '(QUOTE_CHANGE)\n', (5950, 5964), False, 'from YorForger import dispatcher\n'), ((5965, 6002), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['CHANGEK_QUOTE'], {}), '(CHANGEK_QUOTE)\n', (5987, 6002), False, 'from YorForger import dispatcher\n'), ((6003, 6040), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['QUOTEK_CHANGE'], {}), '(QUOTEK_CHANGE)\n', (6025, 6040), False, 'from YorForger import dispatcher\n'), ((6041, 6084), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['ANIMEQUOTES_HANDLER'], {}), '(ANIMEQUOTES_HANDLER)\n', (6063, 6084), False, 'from YorForger import dispatcher\n'), ((6085, 6123), 'YorForger.dispatcher.add_handler', 'dispatcher.add_handler', (['QUOTES_HANDLER'], {}), '(QUOTES_HANDLER)\n', (6107, 6123), False, 'from YorForger import dispatcher\n'), ((699, 716), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (711, 716), False, 'import requests\n'), ((740, 765), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (750, 765), False, 'import json\n'), ((2220, 2245), 'random.choice', 'random.choice', (['QUOTES_IMG'], {}), '(QUOTES_IMG)\n', (2233, 2245), False, 'import random\n'), ((2569, 2594), 'random.choice', 'random.choice', (['QUOTES_IMG'], {}), '(QUOTES_IMG)\n', (2582, 2594), False, 'import random\n'), ((2127, 2193), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Change"""', 'callback_data': '"""changek_quote"""'}), "(text='Change', 
callback_data='changek_quote')\n", (2147, 2193), False, 'from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions\n'), ((2259, 2289), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (2279, 2289), False, 'from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions\n'), ((2476, 2542), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Change"""', 'callback_data': '"""quotek_change"""'}), "(text='Change', callback_data='quotek_change')\n", (2496, 2542), False, 'from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions\n'), ((2608, 2638), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (2628, 2638), False, 'from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions\n'), ((1170, 1236), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Change🔁"""', 'callback_data': '"""change_quote"""'}), "(text='Change🔁', callback_data='change_quote')\n", (1190, 1236), False, 'from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions\n'), ((1697, 1763), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Change🔁"""', 'callback_data': '"""quote_change"""'}), "(text='Change🔁', callback_data='quote_change')\n", (1717, 1763), False, 'from telegram import ParseMode, Update, InlineKeyboardMarkup, InlineKeyboardButton, replymarkup, ChatPermissions\n')]
|
#!/usr/bin/python3
"""Recipe for training speaker embeddings (e.g, xvectors) using the VoxCeleb Dataset.
We employ an encoder followed by a speaker classifier.
To run this recipe, use the following command:
> python train_speaker_embeddings.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hyperparams/train_x_vectors.yaml (for standard xvectors)
hyperparams/train_ecapa_tdnn.yaml (for the ecapa+tdnn system)
Author
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
"""
import os
import sys
import random
import torch
import torchaudio
import speechbrain as sb
from speechbrain.utils.data_utils import download_file
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
class SpeakerBrain(sb.core.Brain):
"""Class for speaker embedding training"
"""
def compute_forward(self, batch, stage):
"""Computation pipeline based on a encoder + speaker classifier.
Data augmentation and environmental corruption are applied to the
input speech.
"""
batch = batch.to(self.device)
wavs, lens = batch.sig
if stage == sb.Stage.TRAIN:
# Applying the augmentation pipeline
wavs_aug_tot = []
wavs_aug_tot.append(wavs)
for count, augment in enumerate(self.hparams.augment_pipeline):
# Apply augment
wavs_aug = augment(wavs, lens)
# Managing speed change
if wavs_aug.shape[1] > wavs.shape[1]:
wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
else:
zero_sig = torch.zeros_like(wavs)
zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
wavs_aug = zero_sig
if self.hparams.concat_augment:
wavs_aug_tot.append(wavs_aug)
else:
wavs = wavs_aug
wavs_aug_tot[0] = wavs
wavs = torch.cat(wavs_aug_tot, dim=0)
self.n_augment = len(wavs_aug_tot)
lens = torch.cat([lens] * self.n_augment)
# Feature extraction and normalization
feats = self.modules.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
# Embeddings + speaker classifier
embeddings = self.modules.embedding_model(feats)
outputs = self.modules.classifier(embeddings)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss using speaker-id as label.
"""
predictions, lens = predictions
uttid = batch.id
spkid, _ = batch.spk_id_encoded
# Concatenate labels (due to data augmentation)
if stage == sb.Stage.TRAIN:
spkid = torch.cat([spkid] * self.n_augment, dim=0)
loss = self.hparams.compute_cost(predictions, spkid, lens)
if stage == sb.Stage.TRAIN and hasattr(
self.hparams.lr_annealing, "on_batch_end"
):
self.hparams.lr_annealing.on_batch_end(self.optimizer)
if stage != sb.Stage.TRAIN:
self.error_metrics.append(uttid, predictions, spkid, lens)
return loss
def on_stage_start(self, stage, epoch=None):
"""Gets called at the beginning of an epoch."""
if stage != sb.Stage.TRAIN:
self.error_metrics = self.hparams.error_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["ErrorRate"] = self.error_metrics.summarize("average")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(epoch)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"ErrorRate": stage_stats["ErrorRate"]},
min_keys=["ErrorRate"],
)
def dataio_prep(hparams):
"Creates the datasets and their data processing pipelines."
data_folder = hparams["data_folder"]
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_annotation"],
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_annotation"],
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
label_encoder = sb.dataio.encoder.CategoricalEncoder()
snt_len_sample = int(hparams["sample_rate"] * hparams["sentence_len"])
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop, duration):
if hparams["random_chunk"]:
duration_sample = int(duration * hparams["sample_rate"])
start = random.randint(0, duration_sample - snt_len_sample)
stop = start + snt_len_sample
else:
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("spk_id")
@sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
def label_pipeline(spk_id):
yield spk_id
spk_id_encoded = label_encoder.encode_sequence_torch([spk_id])
yield spk_id_encoded
sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
# 3. Fit encoder:
# Load or compute the label encoder (with multi-GPU DDP support)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
label_encoder.load_or_create(
path=lab_enc_file, from_didatasets=[train_data], output_key="spk_id",
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id_encoded"])
return train_data, valid_data, label_encoder
if __name__ == "__main__":
# This flag enables the inbuilt cudnn auto-tuner
torch.backends.cudnn.benchmark = True
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# Initialize ddp (useful only for multi-GPU DDP training)
sb.utils.distributed.ddp_init_group(run_opts)
# Load hyperparameters file with command-line overrides
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Download verification list (to exlude verification sentences from train)
veri_file_path = os.path.join(
hparams["save_folder"], os.path.basename(hparams["verification_file"])
)
download_file(hparams["verification_file"], veri_file_path)
# Dataset prep (parsing VoxCeleb and annotation into csv files)
from voxceleb_prepare import prepare_voxceleb # noqa
run_on_main(
prepare_voxceleb,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"verification_pairs_file": veri_file_path,
"splits": ["train", "dev"],
"split_ratio": [90, 10],
"seg_dur": hparams["sentence_len"],
"skip_prep": hparams["skip_prep"],
},
)
# Dataset IO prep: creating Dataset objects and proper encodings for phones
train_data, valid_data, label_encoder = dataio_prep(hparams)
# Create experiment directory
sb.core.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Brain class initialization
speaker_brain = SpeakerBrain(
modules=hparams["modules"],
opt_class=hparams["opt_class"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# Training
speaker_brain.fit(
speaker_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
|
[
"speechbrain.nnet.schedulers.update_learning_rate",
"torch.cat",
"os.path.join",
"speechbrain.dataio.dataset.add_dynamic_item",
"speechbrain.utils.distributed.ddp_init_group",
"speechbrain.dataio.dataset.DynamicItemDataset.from_csv",
"random.randint",
"speechbrain.utils.data_pipeline.takes",
"speechbrain.dataio.dataset.set_output_keys",
"speechbrain.core.create_experiment_directory",
"torchaudio.load",
"speechbrain.parse_arguments",
"speechbrain.utils.distributed.run_on_main",
"torch.zeros_like",
"os.path.basename",
"speechbrain.utils.data_pipeline.provides",
"hyperpyyaml.load_hyperpyyaml",
"speechbrain.utils.data_utils.download_file",
"speechbrain.dataio.encoder.CategoricalEncoder"
] |
[((4625, 4754), 'speechbrain.dataio.dataset.DynamicItemDataset.from_csv', 'sb.dataio.dataset.DynamicItemDataset.from_csv', ([], {'csv_path': "hparams['train_annotation']", 'replacements': "{'data_root': data_folder}"}), "(csv_path=hparams[\n 'train_annotation'], replacements={'data_root': data_folder})\n", (4670, 4754), True, 'import speechbrain as sb\n'), ((4791, 4920), 'speechbrain.dataio.dataset.DynamicItemDataset.from_csv', 'sb.dataio.dataset.DynamicItemDataset.from_csv', ([], {'csv_path': "hparams['valid_annotation']", 'replacements': "{'data_root': data_folder}"}), "(csv_path=hparams[\n 'valid_annotation'], replacements={'data_root': data_folder})\n", (4836, 4920), True, 'import speechbrain as sb\n'), ((5000, 5038), 'speechbrain.dataio.encoder.CategoricalEncoder', 'sb.dataio.encoder.CategoricalEncoder', ([], {}), '()\n', (5036, 5038), True, 'import speechbrain as sb\n'), ((5153, 5217), 'speechbrain.utils.data_pipeline.takes', 'sb.utils.data_pipeline.takes', (['"""wav"""', '"""start"""', '"""stop"""', '"""duration"""'], {}), "('wav', 'start', 'stop', 'duration')\n", (5181, 5217), True, 'import speechbrain as sb\n'), ((5223, 5261), 'speechbrain.utils.data_pipeline.provides', 'sb.utils.data_pipeline.provides', (['"""sig"""'], {}), "('sig')\n", (5254, 5261), True, 'import speechbrain as sb\n'), ((5814, 5874), 'speechbrain.dataio.dataset.add_dynamic_item', 'sb.dataio.dataset.add_dynamic_item', (['datasets', 'audio_pipeline'], {}), '(datasets, audio_pipeline)\n', (5848, 5874), True, 'import speechbrain as sb\n'), ((5912, 5950), 'speechbrain.utils.data_pipeline.takes', 'sb.utils.data_pipeline.takes', (['"""spk_id"""'], {}), "('spk_id')\n", (5940, 5950), True, 'import speechbrain as sb\n'), ((5956, 6015), 'speechbrain.utils.data_pipeline.provides', 'sb.utils.data_pipeline.provides', (['"""spk_id"""', '"""spk_id_encoded"""'], {}), "('spk_id', 'spk_id_encoded')\n", (5987, 6015), True, 'import speechbrain as sb\n'), ((6174, 6234), 'speechbrain.dataio.dataset.add_dynamic_item', 'sb.dataio.dataset.add_dynamic_item', (['datasets', 'label_pipeline'], {}), '(datasets, label_pipeline)\n', (6208, 6234), True, 'import speechbrain as sb\n'), ((6346, 6403), 'os.path.join', 'os.path.join', (["hparams['save_folder']", '"""label_encoder.txt"""'], {}), "(hparams['save_folder'], 'label_encoder.txt')\n", (6358, 6403), False, 'import os\n'), ((6548, 6624), 'speechbrain.dataio.dataset.set_output_keys', 'sb.dataio.dataset.set_output_keys', (['datasets', "['id', 'sig', 'spk_id_encoded']"], {}), "(datasets, ['id', 'sig', 'spk_id_encoded'])\n", (6581, 6624), True, 'import speechbrain as sb\n'), ((6852, 6884), 'speechbrain.parse_arguments', 'sb.parse_arguments', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (6870, 6884), True, 'import speechbrain as sb\n'), ((6952, 6997), 'speechbrain.utils.distributed.ddp_init_group', 'sb.utils.distributed.ddp_init_group', (['run_opts'], {}), '(run_opts)\n', (6987, 6997), True, 'import speechbrain as sb\n'), ((7350, 7409), 'speechbrain.utils.data_utils.download_file', 'download_file', (["hparams['verification_file']", 'veri_file_path'], {}), "(hparams['verification_file'], veri_file_path)\n", (7363, 7409), False, 'from speechbrain.utils.data_utils import download_file\n'), ((7542, 7837), 'speechbrain.utils.distributed.run_on_main', 'run_on_main', (['prepare_voxceleb'], {'kwargs': "{'data_folder': hparams['data_folder'], 'save_folder': hparams[\n 'save_folder'], 'verification_pairs_file': veri_file_path, 'splits': [\n 'train', 'dev'], 'split_ratio': [90, 10], 'seg_dur': hparams[\n 
'sentence_len'], 'skip_prep': hparams['skip_prep']}"}), "(prepare_voxceleb, kwargs={'data_folder': hparams['data_folder'],\n 'save_folder': hparams['save_folder'], 'verification_pairs_file':\n veri_file_path, 'splits': ['train', 'dev'], 'split_ratio': [90, 10],\n 'seg_dur': hparams['sentence_len'], 'skip_prep': hparams['skip_prep']})\n", (7553, 7837), False, 'from speechbrain.utils.distributed import run_on_main\n'), ((8129, 8271), 'speechbrain.core.create_experiment_directory', 'sb.core.create_experiment_directory', ([], {'experiment_directory': "hparams['output_folder']", 'hyperparams_to_save': 'hparams_file', 'overrides': 'overrides'}), "(experiment_directory=hparams[\n 'output_folder'], hyperparams_to_save=hparams_file, overrides=overrides)\n", (8164, 8271), True, 'import speechbrain as sb\n'), ((5659, 5722), 'torchaudio.load', 'torchaudio.load', (['wav'], {'num_frames': 'num_frames', 'frame_offset': 'start'}), '(wav, num_frames=num_frames, frame_offset=start)\n', (5674, 5722), False, 'import torchaudio\n'), ((7113, 7145), 'hyperpyyaml.load_hyperpyyaml', 'load_hyperpyyaml', (['fin', 'overrides'], {}), '(fin, overrides)\n', (7129, 7145), False, 'from hyperpyyaml import load_hyperpyyaml\n'), ((7293, 7339), 'os.path.basename', 'os.path.basename', (["hparams['verification_file']"], {}), "(hparams['verification_file'])\n", (7309, 7339), False, 'import os\n'), ((2018, 2048), 'torch.cat', 'torch.cat', (['wavs_aug_tot'], {'dim': '(0)'}), '(wavs_aug_tot, dim=0)\n', (2027, 2048), False, 'import torch\n'), ((2115, 2149), 'torch.cat', 'torch.cat', (['([lens] * self.n_augment)'], {}), '([lens] * self.n_augment)\n', (2124, 2149), False, 'import torch\n'), ((2838, 2880), 'torch.cat', 'torch.cat', (['([spkid] * self.n_augment)'], {'dim': '(0)'}), '([spkid] * self.n_augment, dim=0)\n', (2847, 2880), False, 'import torch\n'), ((4010, 4073), 'speechbrain.nnet.schedulers.update_learning_rate', 'sb.nnet.schedulers.update_learning_rate', (['self.optimizer', 'new_lr'], {}), '(self.optimizer, new_lr)\n', (4049, 4073), True, 'import speechbrain as sb\n'), ((5439, 5490), 'random.randint', 'random.randint', (['(0)', '(duration_sample - snt_len_sample)'], {}), '(0, duration_sample - snt_len_sample)\n', (5453, 5490), False, 'import random\n'), ((1669, 1691), 'torch.zeros_like', 'torch.zeros_like', (['wavs'], {}), '(wavs)\n', (1685, 1691), False, 'import torch\n')]
|
"""The interface defining class for fit modes."""
from abc import ABC, abstractmethod
import numpy as np
from iminuit import Minuit
class AbstractFitPlugin(ABC):
"""Minuit wrapper to standardize usage with different likelihood function
definitions and parameter transformations.
"""
def __init__(
self,
data_set,
use_expected_counts=True,
rng=None,
has_limits=False,
print_brs_sum_not_1=True,
_precalculated_M=None, # Can be inherited in toy studies,
):
self._data_set = data_set
self._use_expected_counts = use_expected_counts
self.rng = rng
self._precalculated_M = _precalculated_M
self._counts = {}
fcn = self._create_likelihood()
internal_starters = self.transform_to_internal(data_set.fit_start_brs)
self.Minuit = Minuit(fcn, internal_starters)
self.has_limits = has_limits
if not self._enforces_brs_sum_to_1 and print_brs_sum_not_1:
print(
f"INFO: The chosen minimizer plugin {self.__class__.__name__} "
"does not enforce the branching ratios to sum to 1. \n"
" On top of being conceptually problematic, this will break "
"if the signal cross section does not match with the expectation."
)
def __repr__(self):
return self.__class__.__name__
@property
def errors(self):
return np.array(self.covariance).diagonal() ** 0.5
@abstractmethod
def _create_likelihood(self):
pass
@abstractmethod
def transform_to_internal(self, values):
pass
@property
@abstractmethod
def values(self):
pass
@property
@abstractmethod
def parameters(self):
pass
@property
@abstractmethod
def covariance(self):
pass
@property
@abstractmethod
def _default_limits(self):
pass
@property
def has_limits(self):
inf = float("infinity")
return self.Minuit.limits != [(-inf, inf)] * len(self.Minuit.limits)
@has_limits.setter
def has_limits(self, new_has_limits):
if not isinstance(new_has_limits, bool):
raise TypeError(
f"Expected Bool. {type(new_has_limits)=}, {new_has_limits=}."
)
if new_has_limits == self.has_limits:
pass
elif new_has_limits:
self.Minuit.limits = self._default_limits
else:
inf = float("infinity")
self.Minuit.limits = [(-inf, inf)] * len(self.Minuit.limits)
@property
@abstractmethod
def _enforces_brs_sum_to_1(self) -> bool:
"""A True/False hook to allow some relaxing for sub-ideal likelihood descriptions."""
pass
def _prepare_numpy_y_M(self):
"""Prepare the MC counts matrix and box counts as numpy arrays."""
data_set = self._data_set
n_boxes_per_channel = {
k: len(ch.box_names) for k, ch in data_set.get_channels().items()
}
n_boxes = sum(n_boxes_per_channel.values())
n_bkg = 1
y = np.empty(n_boxes)
if self._precalculated_M is None:
n_parameters = len(data_set.decay_names)
M = np.empty((n_boxes, n_parameters + n_bkg))
i_stop = 0
for name, channel in data_set.get_channels().items():
i_start = i_stop
i_stop = i_start + n_boxes_per_channel[name]
# Fill y (in every toy)
if self._use_expected_counts:
self._counts[name] = channel.get_expected_counts()
else:
self._counts[name] = channel.get_toys(rng=self.rng)
y[i_start:i_stop] = self._counts[name]
# Fill M (if necessary)
if self._precalculated_M is None:
signal_factor = channel.signal_cs_default * channel.signal_scaler
M[i_start:i_stop, :-n_bkg] = channel.mc_matrix[channel.decay_names]
M[i_start:i_stop, :-n_bkg] *= signal_factor
bkg_box_probabilities = (
channel.mc_matrix[channel.bkg_names]
* channel.bkg_cs_default
/ channel.bkg_cs_default.sum()
).sum(axis=1)
M[i_start:i_stop, -n_bkg] = bkg_box_probabilities
M[i_start:i_stop, -n_bkg] *= channel.bkg_cs_default.sum()
M[i_start:i_stop, :] *= channel.luminosity_ifb
if self._precalculated_M is None:
self._precalculated_M = M
return y, self._precalculated_M, n_bkg
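# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original interface: a minimal concrete
# plugin assuming a Poisson likelihood on the box counts and no internal
# re-parametrisation.  The class name, the likelihood choice and every detail
# below are illustrative assumptions, not one of the project's real fit modes.
class PoissonExamplePlugin(AbstractFitPlugin):
    _enforces_brs_sum_to_1 = False  # assumption: BRs left unconstrained here
    def _create_likelihood(self):
        y, M, n_bkg = self._prepare_numpy_y_M()
        def neg_log_likelihood(brs):
            # Expected counts: signal columns scaled by the BRs, background
            # column kept at its nominal normalisation of 1.
            expected = M @ np.append(brs, np.ones(n_bkg))
            expected = np.clip(expected, 1e-9, None)
            return float(np.sum(expected - y * np.log(expected)))
        neg_log_likelihood.errordef = Minuit.LIKELIHOOD
        return neg_log_likelihood
    def transform_to_internal(self, values):
        return np.asarray(values, dtype=float)
    @property
    def values(self):
        return np.asarray(self.Minuit.values)
    @property
    def parameters(self):
        return list(self._data_set.decay_names)
    @property
    def covariance(self):
        return np.asarray(self.Minuit.covariance)
    @property
    def _default_limits(self):
        return [(0.0, 1.0)] * len(self.Minuit.parameters)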
|
[
"numpy.empty",
"numpy.array",
"iminuit.Minuit"
] |
[((865, 895), 'iminuit.Minuit', 'Minuit', (['fcn', 'internal_starters'], {}), '(fcn, internal_starters)\n', (871, 895), False, 'from iminuit import Minuit\n'), ((3147, 3164), 'numpy.empty', 'np.empty', (['n_boxes'], {}), '(n_boxes)\n', (3155, 3164), True, 'import numpy as np\n'), ((3277, 3318), 'numpy.empty', 'np.empty', (['(n_boxes, n_parameters + n_bkg)'], {}), '((n_boxes, n_parameters + n_bkg))\n', (3285, 3318), True, 'import numpy as np\n'), ((1469, 1494), 'numpy.array', 'np.array', (['self.covariance'], {}), '(self.covariance)\n', (1477, 1494), True, 'import numpy as np\n')]
|
# Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import contextlib
import enum
import json
import pathlib
import typing
from typing import Optional, List, Dict
import uuid
import pydantic
from substra.sdk import utils
# TODO create a sub-package schemas:
# types
# inputs
# outputs
_SERVER_NAMES = {
'dataset': 'data_manager',
}
class Type(enum.Enum):
Algo = 'algo'
AggregateAlgo = 'aggregate_algo'
CompositeAlgo = 'composite_algo'
DataSample = 'data_sample'
Dataset = 'dataset'
Model = 'model'
Objective = 'objective'
Testtuple = 'testtuple'
Traintuple = 'traintuple'
Aggregatetuple = 'aggregatetuple'
CompositeTraintuple = 'composite_traintuple'
ComputePlan = 'compute_plan'
Node = 'node'
def to_server(self):
"""Returns the name used to identify the asset on the backend."""
name = self.value
return _SERVER_NAMES.get(name, name)
def __str__(self):
return self.name
class _PydanticConfig(pydantic.BaseModel):
"""Shared configuration for all schemas here"""
class Config:
# Raise an error for extra parameters at init
extra = 'forbid'
class _Spec(_PydanticConfig, abc.ABC):
"""Asset creation specification base class."""
class Meta:
file_attributes = None
def is_many(self):
return False
@contextlib.contextmanager
def build_request_kwargs(self):
# TODO should be located in the backends/remote module
# Serialize and deserialize to prevent errors eg with pathlib.Path
data = json.loads(self.json(exclude_unset=True))
if self.Meta.file_attributes:
with utils.extract_files(data, self.Meta.file_attributes) as (data, files):
yield (data, files)
else:
yield data, None
@staticmethod
def compute_key() -> str:
return str(uuid.uuid4())
class Permissions(_PydanticConfig):
"""Specification for permissions. If public is False,
give the list of authorized ids.
"""
public: bool
authorized_ids: typing.List[str] # List of authorized node ids if private
class PrivatePermissions(_PydanticConfig):
"""Specification for private permissions. Only the nodes whose
ids are in authorized_ids can access the asset.
"""
authorized_ids: typing.List[str] # List of authorized node ids
class DataSampleSpec(_Spec):
"""Specification to create one or many data samples
To create one data sample, use the 'path' field, otherwise use
the 'paths' field.
"""
path: Optional[pathlib.Path] # Path to the data sample if only one
paths: Optional[List[pathlib.Path]] # Path to the data samples if several
test_only: bool # If the data sample is for train or test
data_manager_keys: typing.List[str]
type_: typing.ClassVar[Type] = Type.DataSample
def is_many(self):
return self.paths and len(self.paths) > 0
@pydantic.root_validator(pre=True)
def exclusive_paths(cls, values):
"""Check that one and only one path(s) field is defined."""
if 'paths' in values and 'path' in values:
raise ValueError("'path' and 'paths' fields are exclusive.")
if 'paths' not in values and 'path' not in values:
raise ValueError("'path' or 'paths' field must be set.")
return values
@contextlib.contextmanager
def build_request_kwargs(self, local):
# redefine kwargs builder to handle the local paths
# Serialize and deserialize to prevent errors eg with pathlib.Path
data = json.loads(self.json(exclude_unset=True))
if local:
with utils.extract_data_sample_files(data) as (data, files):
yield (data, files)
else:
yield data, None
class ComputePlanTraintupleSpec(_Spec):
"""Specification of a traintuple inside a compute
plan specification"""
algo_key: str
data_manager_key: str
train_data_sample_keys: List[str]
traintuple_id: str
in_models_ids: Optional[List[str]]
tag: Optional[str]
metadata: Optional[Dict[str, str]]
class ComputePlanAggregatetupleSpec(_Spec):
"""Specification of an aggregate tuple inside a compute
plan specification"""
aggregatetuple_id: str
algo_key: str
worker: str
in_models_ids: Optional[List[str]]
tag: Optional[str]
metadata: Optional[Dict[str, str]]
class ComputePlanCompositeTraintupleSpec(_Spec):
"""Specification of a composite traintuple inside a compute
plan specification"""
composite_traintuple_id: str
algo_key: str
data_manager_key: str
train_data_sample_keys: List[str]
in_head_model_id: Optional[str]
in_trunk_model_id: Optional[str]
tag: Optional[str]
out_trunk_model_permissions: Permissions
metadata: Optional[Dict[str, str]]
class ComputePlanTesttupleSpec(_Spec):
"""Specification of a testtuple inside a compute
plan specification"""
objective_key: str
traintuple_id: str
tag: Optional[str]
data_manager_key: Optional[str]
test_data_sample_keys: Optional[List[str]]
metadata: Optional[Dict[str, str]]
class _BaseComputePlanSpec(_Spec, abc.ABC):
traintuples: Optional[List[ComputePlanTraintupleSpec]]
composite_traintuples: Optional[List[ComputePlanCompositeTraintupleSpec]]
aggregatetuples: Optional[List[ComputePlanAggregatetupleSpec]]
testtuples: Optional[List[ComputePlanTesttupleSpec]]
class ComputePlanSpec(_BaseComputePlanSpec):
"""Specification for creating a compute plan"""
tag: Optional[str]
clean_models: Optional[bool]
metadata: Optional[Dict[str, str]]
type_: typing.ClassVar[Type] = Type.ComputePlan
class UpdateComputePlanSpec(_BaseComputePlanSpec):
"""Specification for updating a compute plan"""
pass
class DatasetSpec(_Spec):
"""Specification for creating a dataset"""
name: str
data_opener: pathlib.Path # Path to the data opener
type: str
description: pathlib.Path # Path to the description file
permissions: Permissions
objective_key: Optional[str]
metadata: Optional[Dict[str, str]]
type_: typing.ClassVar[Type] = Type.Dataset
class Meta:
file_attributes = ('data_opener', 'description', )
class ObjectiveSpec(_Spec):
"""Specification for creating an objective"""
name: str
description: pathlib.Path # Path to the description file
metrics_name: str
metrics: pathlib.Path # Path to the metrics file
test_data_sample_keys: Optional[List[str]]
test_data_manager_key: Optional[str]
permissions: Permissions
metadata: Optional[Dict[str, str]]
type_: typing.ClassVar[Type] = Type.Objective
class Meta:
file_attributes = ('metrics', 'description', )
class _AlgoSpec(_Spec):
name: str
description: pathlib.Path
file: pathlib.Path
permissions: Permissions
metadata: Optional[Dict[str, str]]
class Meta:
file_attributes = ('file', 'description', )
class AlgoSpec(_AlgoSpec):
"""Specification for creating an algo"""
type_: typing.ClassVar[Type] = Type.Algo
class AggregateAlgoSpec(_AlgoSpec):
"""Specification for creating an aggregate algo"""
type_: typing.ClassVar[Type] = Type.AggregateAlgo
class CompositeAlgoSpec(_AlgoSpec):
"""Specification for creating a composite algo"""
type_: typing.ClassVar[Type] = Type.CompositeAlgo
class TraintupleSpec(_Spec):
"""Specification for creating a traintuple"""
algo_key: str
data_manager_key: str
train_data_sample_keys: List[str]
in_models_keys: Optional[List[str]]
tag: Optional[str]
compute_plan_key: Optional[str]
rank: Optional[int] # Rank of the traintuple in the compute plan
metadata: Optional[Dict[str, str]]
compute_plan_attr_name: typing.ClassVar[str] = "traintuple_keys"
algo_type: typing.ClassVar[Type] = Type.Algo
type_: typing.ClassVar[Type] = Type.Traintuple
@classmethod
def from_compute_plan(
cls,
compute_plan_key: str,
id_to_key: typing.Dict[str, str],
rank: int,
spec: ComputePlanTraintupleSpec
) -> "TraintupleSpec":
return TraintupleSpec(
algo_key=spec.algo_key,
data_manager_key=spec.data_manager_key,
train_data_sample_keys=spec.train_data_sample_keys,
in_models_keys=[
id_to_key[parent_id] for parent_id in spec.in_models_ids
] if spec.in_models_ids is not None else list(),
tag=spec.tag,
compute_plan_key=compute_plan_key,
rank=rank,
metadata=spec.metadata
)
class AggregatetupleSpec(_Spec):
"""Specification for creating an aggregate tuple"""
algo_key: str
worker: str
in_models_keys: List[str]
tag: Optional[str]
compute_plan_key: Optional[str]
rank: Optional[int]
metadata: Optional[Dict[str, str]]
compute_plan_attr_name: typing.ClassVar[str] = "aggregatetuple_keys"
algo_type: typing.ClassVar[Type] = Type.AggregateAlgo
type_: typing.ClassVar[Type] = Type.Aggregatetuple
@classmethod
def from_compute_plan(
cls,
compute_plan_key: str,
id_to_key: typing.Dict[str, str],
rank: int,
spec: ComputePlanAggregatetupleSpec
) -> "AggregatetupleSpec":
return AggregatetupleSpec(
algo_key=spec.algo_key,
worker=spec.worker,
in_models_keys=[
id_to_key[parent_id]
for parent_id in spec.in_models_ids
],
tag=spec.tag,
compute_plan_key=compute_plan_key,
rank=rank,
metadata=spec.metadata
)
class CompositeTraintupleSpec(_Spec):
"""Specification for creating a composite traintuple"""
algo_key: str
data_manager_key: str
train_data_sample_keys: List[str]
in_head_model_key: Optional[str]
in_trunk_model_key: Optional[str]
tag: Optional[str]
compute_plan_key: Optional[str]
out_trunk_model_permissions: PrivatePermissions
rank: Optional[int]
metadata: Optional[Dict[str, str]]
compute_plan_attr_name: typing.ClassVar[str] = "composite_traintuple_keys"
type_: typing.ClassVar[Type] = Type.CompositeTraintuple
@classmethod
def from_compute_plan(
cls,
compute_plan_key: str,
id_to_key: typing.Dict[str, str],
rank: int,
spec: ComputePlanCompositeTraintupleSpec
) -> "CompositeTraintupleSpec":
return CompositeTraintupleSpec(
algo_key=spec.algo_key,
data_manager_key=spec.data_manager_key,
train_data_sample_keys=spec.train_data_sample_keys,
in_head_model_key=(id_to_key[spec.in_head_model_id]
if spec.in_head_model_id else None),
in_trunk_model_key=(id_to_key[spec.in_trunk_model_id]
if spec.in_trunk_model_id else None),
out_trunk_model_permissions={
"authorized_ids": spec.out_trunk_model_permissions.authorized_ids
},
tag=spec.tag,
compute_plan_key=compute_plan_key,
rank=rank,
metadata=spec.metadata
)
class TesttupleSpec(_Spec):
"""Specification for creating a testtuple"""
objective_key: str
traintuple_key: str
tag: Optional[str]
data_manager_key: Optional[str]
test_data_sample_keys: Optional[List[str]]
metadata: Optional[Dict[str, str]]
type_: typing.ClassVar[Type] = Type.Testtuple
@classmethod
def from_compute_plan(
cls,
id_to_key: typing.Dict[str, str],
spec: ComputePlanTesttupleSpec
) -> "TesttupleSpec":
return TesttupleSpec(
objective_key=spec.objective_key,
traintuple_key=id_to_key[spec.traintuple_id],
tag=spec.tag,
data_manager_key=spec.data_manager_key,
test_data_sample_keys=spec.test_data_sample_keys,
metadata=spec.metadata,
)
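# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: how the exclusive
# 'path'/'paths' validation on DataSampleSpec behaves.  The paths and the
# data manager key below are made-up placeholders.
if __name__ == "__main__":
    single = DataSampleSpec(
        path=pathlib.Path("/tmp/sample"),
        test_only=False,
        data_manager_keys=["dm-key-1"],
    )
    print(bool(single.is_many()))  # False: only one sample described
    many = DataSampleSpec(
        paths=[pathlib.Path("/tmp/a"), pathlib.Path("/tmp/b")],
        test_only=False,
        data_manager_keys=["dm-key-1"],
    )
    print(bool(many.is_many()))  # True: several samples described
    try:
        DataSampleSpec(
            path=pathlib.Path("/tmp/x"),
            paths=[pathlib.Path("/tmp/y")],
            test_only=False,
            data_manager_keys=["dm-key-1"],
        )
    except pydantic.ValidationError as exc:
        print(exc)  # reports "'path' and 'paths' fields are exclusive."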
|
[
"substra.sdk.utils.extract_files",
"pydantic.root_validator",
"uuid.uuid4",
"substra.sdk.utils.extract_data_sample_files"
] |
[((3490, 3523), 'pydantic.root_validator', 'pydantic.root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3513, 3523), False, 'import pydantic\n'), ((2428, 2440), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2438, 2440), False, 'import uuid\n'), ((2210, 2262), 'substra.sdk.utils.extract_files', 'utils.extract_files', (['data', 'self.Meta.file_attributes'], {}), '(data, self.Meta.file_attributes)\n', (2229, 2262), False, 'from substra.sdk import utils\n'), ((4206, 4243), 'substra.sdk.utils.extract_data_sample_files', 'utils.extract_data_sample_files', (['data'], {}), '(data)\n', (4237, 4243), False, 'from substra.sdk import utils\n')]
|
from sklearn.datasets import fetch_20newsgroups
import torchvision
from sklearn.feature_extraction.text import TfidfVectorizer
from os.path import join
import numpy as np
import pickle
#TODO: Update mnist examples!!!
def dataset_loader(dataset_path=None, dataset='mnist', seed=1):
"""
Loads a dataset and creates the appropriate train/test splits
:param dataset_path: path to the datasets
:param dataset: the dataset to be loaded
:param seed: seed for creating train/test splits (when appropriate)
:return:
"""
np.random.seed(seed)
n_train = 5000
if dataset == 'mnist':
train_data, train_labels, test_data, test_labels = load_mnist(dataset_path)
train_data = train_data[:n_train, :]
train_labels = train_labels[:n_train]
elif dataset == '20ng':
train_data, train_labels, test_data, test_labels = load_20ng_dataset_bow()
train_data = train_data[:n_train, :]
train_labels = train_labels[:n_train]
elif dataset == '15scene':
data, labels = load_15_scene_bow_features(dataset_path)
# Get a split with 100 samples per class
train_idx, test_idx = [], []
idx = np.random.permutation(data.shape[0])
data = data[idx]
labels = labels[idx]
for i in range(15):
class_idx = np.where(labels == i)[0]
train_idx.append(class_idx[:100])
test_idx.append(class_idx[100:])
train_idx = np.int64(np.concatenate(train_idx))
test_idx = np.int64(np.concatenate(test_idx))
# Get the actual split
train_data = data[train_idx, :]
train_labels = labels[train_idx]
test_data = data[test_idx, :]
test_labels = labels[test_idx]
elif dataset == 'corel':
data, labels = load_corel_bow_features(dataset_path)
idx = np.random.permutation(data.shape[0])
data = data[idx]
labels = labels[idx]
# Get the actual split
train_data = data[:4800, :]
train_labels = labels[:4800]
test_data = data[4800:, :]
test_labels = labels[4800:]
elif dataset == 'yale':
data, labels = load_yale_dataset(dataset_path)
# Get a split with 30 per sample
train_idx, test_idx = [], []
idx = np.random.permutation(data.shape[0])
data = data[idx]
labels = labels[idx]
for i in range(38):
class_idx = np.where(labels == i)[0]
train_idx.append(class_idx[:30])
test_idx.append(class_idx[30:])
train_idx = np.int64(np.concatenate(train_idx))
test_idx = np.int64(np.concatenate(test_idx))
# Get the actual split
train_data = data[train_idx, :]
train_labels = labels[train_idx]
test_data = data[test_idx, :]
test_labels = labels[test_idx]
elif dataset == 'kth':
train_data, train_labels, test_data, test_labels = load_kth(dataset_path)
idx = np.random.permutation(train_data.shape[0])
train_data, train_labels = train_data[idx], train_labels[idx]
idx = np.random.permutation(test_data.shape[0])
test_data, test_labels = test_data[idx], test_labels[idx]
else:
print("Unknown dataset!")
assert False
return train_data, train_labels, test_data, test_labels
def load_mnist(dataset_path):
"""
Loads the MNIST dataset
:return:
"""
# Get the train split
mnist = torchvision.datasets.MNIST(root=dataset_path, download=True, train=True)
x_train, y_train = mnist.train_data.numpy(), mnist.train_labels.numpy()
# Get the test split
mnist = torchvision.datasets.MNIST(root=dataset_path, download=True, train=False)
x_test, y_test = mnist.test_data.numpy(), mnist.test_labels.numpy()
x_train = x_train.reshape((x_train.shape[0], -1)) / 255.0
x_test = x_test.reshape((x_test.shape[0], -1)) / 255.0
return np.float32(x_train), y_train, np.float32(x_test), y_test
def load_20ng_dataset_bow():
"""
Loads the 20NG dataset
:return:
"""
newsgroups_train = fetch_20newsgroups(subset='train')
newsgroups_test = fetch_20newsgroups(subset='test')
# Convert data to tf-idf
vectorizer = TfidfVectorizer(min_df=0.01, max_df=0.95)
train_data = vectorizer.fit_transform(newsgroups_train.data)
test_data = vectorizer.transform(newsgroups_test.data)
train_data = train_data.todense()
test_data = test_data.todense()
train_labels = newsgroups_train.target
test_labels = newsgroups_test.target
return train_data, train_labels, test_data, test_labels
def load_15_scene_bow_features(datasets_path):
"""
Loads the pre-extracted BoF features for the 15-scene dataset
:return:
"""
datafile = join(datasets_path, 'scenes.pickle')
with open(datafile, 'rb') as f:
features = pickle.load(f, encoding='latin1')
labels = pickle.load(f, encoding='latin1')
labels = np.asarray(np.squeeze(labels), dtype='int')
features = np.asarray(features, dtype='float32')
return features, labels
def load_corel_bow_features(datasets_path):
"""
Loads the pre-extracted BoF features for the Corel dataset
:return:
"""
datafile = join(datasets_path, 'corel.pickle')
with open(datafile, 'rb') as f:
features = pickle.load(f, encoding='latin1')
labels = pickle.load(f, encoding='latin1')
labels = np.asarray(np.squeeze(labels), dtype='int')
features = np.asarray(features, dtype='float32')
return features, labels
def load_kth(datasets_path):
"""
Loads the HoF/HoG features for the KTH dataset
:return:
"""
datafile = join(datasets_path, 'kth.pickle')
with open(datafile, 'rb') as f:
train_data, train_labels = pickle.load(f, encoding='latin1'), pickle.load(f, encoding='latin1')
test_data, test_labels = pickle.load(f, encoding='latin1'), pickle.load(f, encoding='latin1')
return train_data, train_labels, test_data, test_labels
def load_yale_dataset(datasets_path):
"""
    Loads the Yale dataset
"""
datafile = join(datasets_path, 'yale.pickle')
with open(datafile, 'rb') as f:
features = pickle.load(f)
labels = pickle.load(f)
features = [x for x in features]
features = np.asarray(features) / 255.0
labels = np.asarray(np.squeeze(labels), dtype='int')
return features, labels
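# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module.  The '20ng' split is
# fetched through scikit-learn, so no local dataset path is required for it.
if __name__ == "__main__":
    train_data, train_labels, test_data, test_labels = dataset_loader(
        dataset_path=None, dataset='20ng', seed=1)
    print(train_data.shape, test_data.shape)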
|
[
"numpy.random.seed",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.asarray",
"numpy.float32",
"pickle.load",
"numpy.random.permutation",
"numpy.where",
"torchvision.datasets.MNIST",
"sklearn.datasets.fetch_20newsgroups",
"numpy.squeeze",
"os.path.join",
"numpy.concatenate"
] |
[((546, 566), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (560, 566), True, 'import numpy as np\n'), ((3462, 3534), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'dataset_path', 'download': '(True)', 'train': '(True)'}), '(root=dataset_path, download=True, train=True)\n', (3488, 3534), False, 'import torchvision\n'), ((3649, 3722), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'dataset_path', 'download': '(True)', 'train': '(False)'}), '(root=dataset_path, download=True, train=False)\n', (3675, 3722), False, 'import torchvision\n'), ((4096, 4130), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': '"""train"""'}), "(subset='train')\n", (4114, 4130), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((4153, 4186), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': '"""test"""'}), "(subset='test')\n", (4171, 4186), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((4235, 4276), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(0.01)', 'max_df': '(0.95)'}), '(min_df=0.01, max_df=0.95)\n', (4250, 4276), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4779, 4815), 'os.path.join', 'join', (['datasets_path', '"""scenes.pickle"""'], {}), "(datasets_path, 'scenes.pickle')\n", (4783, 4815), False, 'from os.path import join\n'), ((5028, 5065), 'numpy.asarray', 'np.asarray', (['features'], {'dtype': '"""float32"""'}), "(features, dtype='float32')\n", (5038, 5065), True, 'import numpy as np\n'), ((5247, 5282), 'os.path.join', 'join', (['datasets_path', '"""corel.pickle"""'], {}), "(datasets_path, 'corel.pickle')\n", (5251, 5282), False, 'from os.path import join\n'), ((5496, 5533), 'numpy.asarray', 'np.asarray', (['features'], {'dtype': '"""float32"""'}), "(features, dtype='float32')\n", (5506, 5533), True, 'import numpy as np\n'), ((5689, 5722), 'os.path.join', 'join', (['datasets_path', '"""kth.pickle"""'], {}), "(datasets_path, 'kth.pickle')\n", (5693, 5722), False, 'from os.path import join\n'), ((6123, 6157), 'os.path.join', 'join', (['datasets_path', '"""yale.pickle"""'], {}), "(datasets_path, 'yale.pickle')\n", (6127, 6157), False, 'from os.path import join\n'), ((3929, 3948), 'numpy.float32', 'np.float32', (['x_train'], {}), '(x_train)\n', (3939, 3948), True, 'import numpy as np\n'), ((3959, 3977), 'numpy.float32', 'np.float32', (['x_test'], {}), '(x_test)\n', (3969, 3977), True, 'import numpy as np\n'), ((4871, 4904), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (4882, 4904), False, 'import pickle\n'), ((4922, 4955), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (4933, 4955), False, 'import pickle\n'), ((4980, 4998), 'numpy.squeeze', 'np.squeeze', (['labels'], {}), '(labels)\n', (4990, 4998), True, 'import numpy as np\n'), ((5339, 5372), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5350, 5372), False, 'import pickle\n'), ((5390, 5423), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5401, 5423), False, 'import pickle\n'), ((5448, 5466), 'numpy.squeeze', 'np.squeeze', (['labels'], {}), '(labels)\n', (5458, 5466), True, 'import numpy as np\n'), ((6214, 6228), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6225, 6228), False, 'import pickle\n'), ((6246, 6260), 
'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6257, 6260), False, 'import pickle\n'), ((6314, 6334), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (6324, 6334), True, 'import numpy as np\n'), ((6367, 6385), 'numpy.squeeze', 'np.squeeze', (['labels'], {}), '(labels)\n', (6377, 6385), True, 'import numpy as np\n'), ((5794, 5827), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5805, 5827), False, 'import pickle\n'), ((5829, 5862), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5840, 5862), False, 'import pickle\n'), ((5896, 5929), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5907, 5929), False, 'import pickle\n'), ((5931, 5964), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5942, 5964), False, 'import pickle\n'), ((1186, 1222), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (1207, 1222), True, 'import numpy as np\n'), ((1474, 1499), 'numpy.concatenate', 'np.concatenate', (['train_idx'], {}), '(train_idx)\n', (1488, 1499), True, 'import numpy as np\n'), ((1529, 1553), 'numpy.concatenate', 'np.concatenate', (['test_idx'], {}), '(test_idx)\n', (1543, 1553), True, 'import numpy as np\n'), ((1849, 1885), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (1870, 1885), True, 'import numpy as np\n'), ((1329, 1350), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1337, 1350), True, 'import numpy as np\n'), ((2292, 2328), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (2313, 2328), True, 'import numpy as np\n'), ((2578, 2603), 'numpy.concatenate', 'np.concatenate', (['train_idx'], {}), '(train_idx)\n', (2592, 2603), True, 'import numpy as np\n'), ((2633, 2657), 'numpy.concatenate', 'np.concatenate', (['test_idx'], {}), '(test_idx)\n', (2647, 2657), True, 'import numpy as np\n'), ((2973, 3015), 'numpy.random.permutation', 'np.random.permutation', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (2994, 3015), True, 'import numpy as np\n'), ((3100, 3141), 'numpy.random.permutation', 'np.random.permutation', (['test_data.shape[0]'], {}), '(test_data.shape[0])\n', (3121, 3141), True, 'import numpy as np\n'), ((2435, 2456), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (2443, 2456), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
def _to_binary(target):
return (target > target.median()).astype(int)
def generate_test_data(data_size):
df = pd.DataFrame()
np.random.seed(0)
df["A"] = np.random.rand(data_size)
df["B"] = np.random.rand(data_size)
df["C"] = np.random.rand(data_size)
df["D"] = np.random.rand(data_size)
df["target"] = 0.2 * np.random.rand(data_size) + df["A"] * df["D"] + 2 * df["B"]
df["binary_target"] = _to_binary(df["target"])
return df
def generate_unstructured_test_data(data_size):
df = generate_test_data(data_size)
df.loc[np.random.rand(data_size) < 0.3, "A"] = None
df["E"] = np.random.choice(["category1", "category2", "category3"], data_size)
df["target"] = (df["E"] != "category2")*df["target"]
df["binary_target"] = _to_binary(df["target"])
return df
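# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: generate a small
# synthetic frame and inspect the induced class balance.
if __name__ == "__main__":
    df = generate_unstructured_test_data(1000)
    print(df.head())
    print(df["binary_target"].value_counts())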
|
[
"pandas.DataFrame",
"numpy.random.rand",
"numpy.random.seed",
"numpy.random.choice"
] |
[((161, 175), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (173, 175), True, 'import pandas as pd\n'), ((181, 198), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (195, 198), True, 'import numpy as np\n'), ((213, 238), 'numpy.random.rand', 'np.random.rand', (['data_size'], {}), '(data_size)\n', (227, 238), True, 'import numpy as np\n'), ((253, 278), 'numpy.random.rand', 'np.random.rand', (['data_size'], {}), '(data_size)\n', (267, 278), True, 'import numpy as np\n'), ((293, 318), 'numpy.random.rand', 'np.random.rand', (['data_size'], {}), '(data_size)\n', (307, 318), True, 'import numpy as np\n'), ((333, 358), 'numpy.random.rand', 'np.random.rand', (['data_size'], {}), '(data_size)\n', (347, 358), True, 'import numpy as np\n'), ((669, 737), 'numpy.random.choice', 'np.random.choice', (["['category1', 'category2', 'category3']", 'data_size'], {}), "(['category1', 'category2', 'category3'], data_size)\n", (685, 737), True, 'import numpy as np\n'), ((385, 410), 'numpy.random.rand', 'np.random.rand', (['data_size'], {}), '(data_size)\n', (399, 410), True, 'import numpy as np\n'), ((610, 635), 'numpy.random.rand', 'np.random.rand', (['data_size'], {}), '(data_size)\n', (624, 635), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""TODO:
-- This module (and admin.py) needs some cleaning up: refactoring
similar to projects.py with ProjectsDB and style of SQLite
usage. Currently only authenticate() has been separated out.
-- Reset password needs to send user an email.
"""
from __future__ import unicode_literals, division, print_function #Py2
import string
import random
import json
import time
import uuid, base64
import logging
import os
import smtplib
try:
from sqlite3 import dbapi2 as sqlite
except ImportError:
from pysqlite2 import dbapi2 as sqlite #for old Python versions
import bcrypt #Ubuntu/Debian: apt-get install python-bcrypt
from httperrs import NotAuthorizedError, ConflictError
LOG = logging.getLogger("APP.AUTH")
def gen_pw(length=7):
alphabet = string.ascii_letters + string.digits + '!@#$%^&*()'
return "".join(random.choice(alphabet) for i in range(length))
def gen_token():
return base64.urlsafe_b64encode(str(uuid.uuid4()))
def hash_pw(password):
salt = bcrypt.gensalt()
pwhash = bcrypt.hashpw(password, salt)
return salt, pwhash
class UserAuth(object):
def __init__(self, config_file=None):
if config_file is not None:
with open(config_file) as infh:
self._config = json.loads(infh.read())
#DB connection setup:
self.authdb = sqlite.connect(self._config["authdb"], factory=AuthDB)
self.authdb.row_factory = sqlite.Row
def login(self, request):
"""Validate provided username and password and insert new token into
tokens and return if successful. We also use this
opportunity to clear stale tokens.
- The DB/service actually logged into is determined by
the service as setup in the dispatcher
"""
with sqlite.connect(self._config["authdb"]) as db_conn:
#REMOVE STALE TOKENS
db_curs = db_conn.cursor()
db_curs.execute("DELETE FROM tokens WHERE ? > expiry", (time.time(),))
db_conn.commit()
#PROCEED TO AUTHENTICATE USER
db_curs.execute("SELECT * FROM users WHERE username=?", (request["username"],))
entry = db_curs.fetchone()
#User exists?
if entry is None:
raise NotAuthorizedError("Wrong credentials")
else:
username, pwhash, salt, name, surname, email, role, tmppwhash = entry
#Password correct?
templogin = False
inpwhash = bcrypt.hashpw(request["password"], salt)
if pwhash != inpwhash:
templogin = True
if tmppwhash:
if tmppwhash != inpwhash:
raise NotAuthorizedError("Wrong credentials")
else:
raise NotAuthorizedError("Wrong credentials")
roles = role.split(";")
if request["role"] not in roles:
raise ConflictError("User cannot take this role")
#User already logged in?
db_curs.execute("SELECT * FROM tokens WHERE username=?", (username,))
entry = db_curs.fetchone()
            if entry is not None:
raise ConflictError("User already logged in")
#All good, create new token, remove tmppwhash
token = gen_token()
# Assign role based on request URI
db_curs.execute("INSERT INTO tokens (token, username, role, expiry) VALUES(?,?,?,?)", (token,
username, request["role"],
time.time() + self._config["toklife"]))
db_curs.execute("UPDATE users SET tmppwhash=? WHERE username=?", (None, username))
LOG.info("User login: {}".format(request["username"]))
return {"token": token, "templogin": templogin}
def logout(self, request):
"""The DB/service actually logged out of is determined by the service
        as set up in the dispatcher
"""
username = self.authdb.authenticate(request["token"], self._config["role"])
with sqlite.connect(self._config["authdb"]) as db_conn:
db_curs = db_conn.cursor()
db_curs.execute("DELETE FROM tokens WHERE token=?", (request["token"],))
LOG.info("User logout: {}".format(username))
return "User logged out"
def logout2(self, request):
"""Validate provided username and password and remove token associated
with this user if successful. We also use this opportunity
to clear stale tokens.
"""
with sqlite.connect(self._config["authdb"]) as db_conn:
#REMOVE STALE TOKENS
db_curs = db_conn.cursor()
db_curs.execute("DELETE FROM tokens WHERE ? > expiry", (time.time(),))
db_conn.commit()
#PROCEED TO AUTHENTICATE USER
db_curs.execute("SELECT * FROM users WHERE username=?", (request["username"],))
entry = db_curs.fetchone()
#User exists?
if entry is None:
raise NotAuthorizedError("Wrong credentials")
else:
username, pwhash, salt, name, surname, email, role, tmppwhash = entry
#Password correct?
inpwhash = bcrypt.hashpw(request["password"], salt)
if pwhash != inpwhash:
if tmppwhash:
if tmppwhash != inpwhash:
raise NotAuthorizedError("Wrong credentials")
else:
raise NotAuthorizedError("Wrong credentials")
#logout
db_curs.execute("DELETE FROM tokens WHERE username=?", (username,))
LOG.info("User logout: {}".format(username))
return "User logged out"
def change_password(self, request):
"""Allows a logged-in user (token) to change the password.
"""
username = self.authdb.authenticate(request["token"], self._config["role"])
salt, pwhash = hash_pw(request["password"])
with self.authdb as authdb:
authdb.execute("UPDATE users SET pwhash=?, salt=? WHERE username=?", (pwhash, salt, username))
LOG.info("Password updated: {}".format(username))
return "Password updated"
def reset_password(self, request):
"""Generates a random new temporary password for one-time use and
sends this to the registered email address
TODO: May also want to request using email
"""
with sqlite.connect(self._config["authdb"]) as db_conn:
db_curs = db_conn.cursor()
#Get user info
db_curs.execute("SELECT * FROM users WHERE username=?", (request["username"],))
entry = db_curs.fetchone()
#User exists?
if entry is None:
raise NotAuthorizedError("User not registered")
else:
username, pwhash, salt, name, surname, email, role, tmppwhash = entry
#Generate random password and insert
tmppw = gen_pw()
tmppwhash = bcrypt.hashpw(tmppw, salt)
db_curs.execute("UPDATE users SET tmppwhash=? WHERE username=?", (tmppwhash, username))
subject = 'Temporary password created for your account'
body = "The administrator has reset your password.\r\nYour temporary password is: {}\r\nLogin with with this temporary password.\r\n".format(tmppw)
email_text = "From: STP Admin <{}>\r\nTo: {} {} <{}>\r\nSubject: {}\r\n\r\n{}\r\n".format(self._config["gmail_user"], name, surname, email, subject, body)
try:
server = smtplib.SMTP_SSL(self._config["gmail_smtp"], int(self._config["gmail_smtp_port"]))
server.ehlo()
server.login(self._config["gmail_user"], self._config["gmail_password"])
server.sendmail(self._config["gmail_user"], [email], email_text)
server.close()
except Exception as e:
LOG.error(str(e))
db_curs.execute("UPDATE users SET tmppwhash=? WHERE username=?", (tmppwhash, username))
raise RuntimeError("Cannot send email to user!")
LOG.info("Temp password created: {}".format(username))
return tmppw
class AuthDB(sqlite.Connection):
def authenticate(self, token, role):
"""Checks whether token is valid/existing in authdb and returns associated
username or raises NotAuthorizedError
"""
with self:
entry = self.execute("SELECT * FROM tokens WHERE token=?", (token,)).fetchone()
if entry is None:
raise NotAuthorizedError("Token does not exist!")
else:
entry = dict(entry)
roles = entry["role"].split(";")
if time.time() > entry["expiry"]:
self.execute("DELETE FROM tokens WHERE token=?", (token,)) #remove expired token
raise NotAuthorizedError("Token has expired!")
elif role not in roles:
self.execute("DELETE FROM tokens WHERE token=?", (token,)) #remove expired token
raise NotAuthorizedError("Permission denied based on role!")
return entry["username"]
### TODO
def test():
"""Informal tests...
"""
import sys, os
sys.path = [os.path.abspath("../tools")] + sys.path
from authdb import create_new_db
#testuser
salt = bcrypt.gensalt()
pwhash = bcrypt.hashpw("testpass", salt)
#create test DB and add testuser
db_conn = create_new_db("/tmp/test.db")
db_curs = db_conn.cursor()
db_curs.execute("INSERT INTO users ( username, pwhash, salt, name, surname, email, role, tmppwhash ) VALUES (?,?,?,?,?,?,?,?)", ("testuser", pwhash, salt, None, None, None, "root", None))
db_conn.commit()
#test UserAuth
a = UserAuth()
a._config = {}
a._config["authdb"] = "/tmp/test.db"
a._config["toklife"] = 0
a.authdb = sqlite.connect(a._config["authdb"], factory=AuthDB)
a.authdb.row_factory = sqlite.Row
## 1
try:
print(a.login({"username": "testuser", "password": "<PASSWORD>", "role" : "root"}))
except NotAuthorizedError:
print("TEST_1 SUCCESS:", "Wrong password caught...")
## 2
tokenpackage = a.login({"username": "testuser", "password": "<PASSWORD>", "role" : "root"})
print("TEST_2 SUCCESS:", "User authenticated with token:", tokenpackage["token"])
## 3
try:
username = a.authdb.authenticate(tokenpackage["token"], "root")
print("TEST_3 FAILED:", "Authenticated against expired token")
except NotAuthorizedError:
print("TEST_3 SUCCESS:", "Do not authenticate against expired token")
## 4
a._config["toklife"] = 300
tokenpackage = a.login({"username": "testuser", "password": "<PASSWORD>", "role" : "root"}) #should have been removed from tokens in previous test
username = a.authdb.authenticate(tokenpackage["token"], "root")
if username is not None:
print("TEST_4 SUCCESS:", "Authenticated logged in username:", username)
else:
print("TEST_4 FAILED:", "Could not authenticated logged in username")
## 5
try:
print(a.login({"username": "testuser", "password": "<PASSWORD>", "role" : "root"}))
except ConflictError:
print("TEST_5 SUCCESS:", "Already logged in caught...")
## 6
a.logout(tokenpackage)
try:
username = a.authdb.authenticate(tokenpackage["token"], "root")
print("TEST_6 FAILED:", "Authenticated against logged out token")
except NotAuthorizedError:
print("TEST_6 SUCCESS:", "Do not authenticate against logged out token")
if __name__ == "__main__":
test()
|
[
"pysqlite2.dbapi2.connect",
"os.path.abspath",
"uuid.uuid4",
"authdb.create_new_db",
"bcrypt.gensalt",
"random.choice",
"logging.getLogger",
"time.time",
"httperrs.NotAuthorizedError",
"httperrs.ConflictError",
"bcrypt.hashpw"
] |
[((761, 790), 'logging.getLogger', 'logging.getLogger', (['"""APP.AUTH"""'], {}), "('APP.AUTH')\n", (778, 790), False, 'import logging\n'), ((1056, 1072), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (1070, 1072), False, 'import bcrypt\n'), ((1086, 1115), 'bcrypt.hashpw', 'bcrypt.hashpw', (['password', 'salt'], {}), '(password, salt)\n', (1099, 1115), False, 'import bcrypt\n'), ((9776, 9792), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (9790, 9792), False, 'import bcrypt\n'), ((9806, 9837), 'bcrypt.hashpw', 'bcrypt.hashpw', (['"""testpass"""', 'salt'], {}), "('testpass', salt)\n", (9819, 9837), False, 'import bcrypt\n'), ((9889, 9918), 'authdb.create_new_db', 'create_new_db', (['"""/tmp/test.db"""'], {}), "('/tmp/test.db')\n", (9902, 9918), False, 'from authdb import create_new_db\n'), ((10305, 10356), 'pysqlite2.dbapi2.connect', 'sqlite.connect', (["a._config['authdb']"], {'factory': 'AuthDB'}), "(a._config['authdb'], factory=AuthDB)\n", (10319, 10356), True, 'from pysqlite2 import dbapi2 as sqlite\n'), ((900, 923), 'random.choice', 'random.choice', (['alphabet'], {}), '(alphabet)\n', (913, 923), False, 'import random\n'), ((1006, 1018), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1016, 1018), False, 'import uuid, base64\n'), ((1402, 1456), 'pysqlite2.dbapi2.connect', 'sqlite.connect', (["self._config['authdb']"], {'factory': 'AuthDB'}), "(self._config['authdb'], factory=AuthDB)\n", (1416, 1456), True, 'from pysqlite2 import dbapi2 as sqlite\n'), ((1869, 1907), 'pysqlite2.dbapi2.connect', 'sqlite.connect', (["self._config['authdb']"], {}), "(self._config['authdb'])\n", (1883, 1907), True, 'from pysqlite2 import dbapi2 as sqlite\n'), ((4342, 4380), 'pysqlite2.dbapi2.connect', 'sqlite.connect', (["self._config['authdb']"], {}), "(self._config['authdb'])\n", (4356, 4380), True, 'from pysqlite2 import dbapi2 as sqlite\n'), ((4846, 4884), 'pysqlite2.dbapi2.connect', 'sqlite.connect', (["self._config['authdb']"], {}), "(self._config['authdb'])\n", (4860, 4884), True, 'from pysqlite2 import dbapi2 as sqlite\n'), ((6796, 6834), 'pysqlite2.dbapi2.connect', 'sqlite.connect', (["self._config['authdb']"], {}), "(self._config['authdb'])\n", (6810, 6834), True, 'from pysqlite2 import dbapi2 as sqlite\n'), ((7370, 7396), 'bcrypt.hashpw', 'bcrypt.hashpw', (['tmppw', 'salt'], {}), '(tmppw, salt)\n', (7383, 7396), False, 'import bcrypt\n'), ((9674, 9701), 'os.path.abspath', 'os.path.abspath', (['"""../tools"""'], {}), "('../tools')\n", (9689, 9701), False, 'import sys, os\n'), ((2355, 2394), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Wrong credentials"""'], {}), "('Wrong credentials')\n", (2373, 2394), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((2595, 2635), 'bcrypt.hashpw', 'bcrypt.hashpw', (["request['password']", 'salt'], {}), "(request['password'], salt)\n", (2608, 2635), False, 'import bcrypt\n'), ((3339, 3378), 'httperrs.ConflictError', 'ConflictError', (['"""User already logged in"""'], {}), "('User already logged in')\n", (3352, 3378), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((5332, 5371), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Wrong credentials"""'], {}), "('Wrong credentials')\n", (5350, 5371), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((5538, 5578), 'bcrypt.hashpw', 'bcrypt.hashpw', (["request['password']", 'salt'], {}), "(request['password'], salt)\n", (5551, 5578), False, 'import bcrypt\n'), ((7122, 7163), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', 
(['"""User not registered"""'], {}), "('User not registered')\n", (7140, 7163), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((8957, 9000), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Token does not exist!"""'], {}), "('Token does not exist!')\n", (8975, 9000), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((2060, 2071), 'time.time', 'time.time', ([], {}), '()\n', (2069, 2071), False, 'import time\n'), ((3081, 3124), 'httperrs.ConflictError', 'ConflictError', (['"""User cannot take this role"""'], {}), "('User cannot take this role')\n", (3094, 3124), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((5037, 5048), 'time.time', 'time.time', ([], {}), '()\n', (5046, 5048), False, 'import time\n'), ((9123, 9134), 'time.time', 'time.time', ([], {}), '()\n', (9132, 9134), False, 'import time\n'), ((9281, 9321), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Token has expired!"""'], {}), "('Token has expired!')\n", (9299, 9321), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((2926, 2965), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Wrong credentials"""'], {}), "('Wrong credentials')\n", (2944, 2965), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((3831, 3842), 'time.time', 'time.time', ([], {}), '()\n', (3840, 3842), False, 'import time\n'), ((5832, 5871), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Wrong credentials"""'], {}), "('Wrong credentials')\n", (5850, 5871), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((9489, 9543), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Permission denied based on role!"""'], {}), "('Permission denied based on role!')\n", (9507, 9543), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((2830, 2869), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Wrong credentials"""'], {}), "('Wrong credentials')\n", (2848, 2869), False, 'from httperrs import NotAuthorizedError, ConflictError\n'), ((5736, 5775), 'httperrs.NotAuthorizedError', 'NotAuthorizedError', (['"""Wrong credentials"""'], {}), "('Wrong credentials')\n", (5754, 5775), False, 'from httperrs import NotAuthorizedError, ConflictError\n')]
|
from recon.core.module import BaseModule
from censys.ipv4 import CensysIPv4
from censys.base import CensysException
class Module(BaseModule):
meta = {
'name': 'Censys hosts by hostname',
'author': '<NAME>',
'version': '1.1',
'description': 'Finds all IPs for a given hostname. Updates the "hosts" and "ports" tables.',
'query': 'SELECT DISTINCT host FROM hosts WHERE host IS NOT NULL',
'dependencies': ['censys'],
'required_keys': ['censysio_id', 'censysio_secret'],
}
def module_run(self, hosts):
api_id = self.get_key('censysio_id')
api_secret = self.get_key('censysio_secret')
c = CensysIPv4(api_id, api_secret, timeout=self._global_options['timeout'])
IPV4_FIELDS = [
'ip',
'protocols',
'location.country',
'location.latitude',
'location.longitude',
'location.province',
]
for host in hosts:
self.heading(host, level=0)
try:
payload = c.search('a:{0}'.format(host), IPV4_FIELDS)
except CensysException:
continue
for result in payload:
self.insert_hosts(
host=host,
ip_address=result['ip'],
country=result.get('location.country', ''),
region=result.get('location.province', ''),
latitude=result.get('location.latitude', ''),
longitude=result.get('location.longitude', ''),
)
for protocol in result['protocols']:
port, service = protocol.split('/')
self.insert_ports(
ip_address=result['ip'], port=port, protocol=service
)
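# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original module: the equivalent raw
# Censys query this module issues for a single hostname, outside of the
# recon-ng framework.  The credentials and hostname are placeholders.
if __name__ == "__main__":
    client = CensysIPv4("CENSYS_API_ID", "CENSYS_API_SECRET")
    for result in client.search('a:example.com', ['ip', 'protocols']):
        print(result['ip'], result.get('protocols', []))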
|
[
"censys.ipv4.CensysIPv4"
] |
[((680, 751), 'censys.ipv4.CensysIPv4', 'CensysIPv4', (['api_id', 'api_secret'], {'timeout': "self._global_options['timeout']"}), "(api_id, api_secret, timeout=self._global_options['timeout'])\n", (690, 751), False, 'from censys.ipv4 import CensysIPv4\n')]
|
# -*- coding: utf-8 -*-
"""MedleyDB pitch Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
MedleyDB Pitch is a pitch-tracking subset of the MedleyDB dataset
containing only f0-annotated, monophonic stems.
MedleyDB is a dataset of annotated, royalty-free multitrack recordings.
MedleyDB was curated primarily to support research on melody extraction,
addressing important shortcomings of existing collections. For each song
we provide melody f0 annotations as well as instrument activations for
evaluating automatic instrument recognition.
For more details, please visit: https://medleydb.weebly.com
"""
import csv
import json
import librosa
import logging
import numpy as np
import os
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import annotations
BIBTEX = """@inproceedings{bittner2014medleydb,
Author = {<NAME> and Salamon, Justin and Tierney, Mike and Mauch, Matthias and Cannam, Chris and <NAME>},
Booktitle = {International Society of Music Information Retrieval (ISMIR)},
Month = {October},
Title = {Medley{DB}: A Multitrack Dataset for Annotation-Intensive {MIR} Research},
Year = {2014}
}"""
DOWNLOAD_INFO = """
To download this dataset, visit:
https://zenodo.org/record/2620624#.XKZc7hNKh24
and request access.
Once downloaded, unzip the file MedleyDB-Pitch.zip
and copy the result to:
{}
"""
LICENSE_INFO = (
"Creative Commons Attribution Non-Commercial Share-Alike 4.0 (CC BY-NC-SA 4.0)."
)
def _load_metadata(data_home):
metadata_path = os.path.join(data_home, "medleydb_pitch_metadata.json")
if not os.path.exists(metadata_path):
logging.info("Metadata file {} not found.".format(metadata_path))
return None
with open(metadata_path, "r") as fhandle:
metadata = json.load(fhandle)
metadata["data_home"] = data_home
return metadata
DATA = core.LargeData("medleydb_pitch_index.json", _load_metadata)
class Track(core.Track):
"""medleydb_pitch Track class
Args:
track_id (str): track id of the track
Attributes:
artist (str): artist
audio_path (str): path to the audio file
genre (str): genre
instrument (str): instrument of the track
pitch_path (str): path to the pitch annotation file
title (str): title
track_id (str): track id
Cached Properties:
pitch (F0Data): human annotated pitch
"""
def __init__(self, track_id, data_home):
if track_id not in DATA.index["tracks"]:
raise ValueError(
"{} is not a valid track ID in MedleyDB-Pitch".format(track_id)
)
self.track_id = track_id
self._data_home = data_home
self._track_paths = DATA.index["tracks"][track_id]
self.pitch_path = os.path.join(self._data_home, self._track_paths["pitch"][0])
metadata = DATA.metadata(data_home)
if metadata is not None and track_id in metadata:
self._track_metadata = metadata[track_id]
else:
self._track_metadata = {
"instrument": None,
"artist": None,
"title": None,
"genre": None,
}
self.audio_path = os.path.join(self._data_home, self._track_paths["audio"][0])
self.instrument = self._track_metadata["instrument"]
self.artist = self._track_metadata["artist"]
self.title = self._track_metadata["title"]
self.genre = self._track_metadata["genre"]
@core.cached_property
def pitch(self):
return load_pitch(self.pitch_path)
@property
def audio(self):
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
f0_data=[(self.pitch, "annotated pitch")],
metadata=self._track_metadata,
)
def load_audio(audio_path):
"""Load a MedleyDB audio file.
Args:
audio_path (str): path to audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
if not os.path.exists(audio_path):
raise IOError("audio_path {} does not exist".format(audio_path))
return librosa.load(audio_path, sr=None, mono=True)
def load_pitch(pitch_path):
"""load a MedleyDB pitch annotation file
Args:
pitch_path (str): path to pitch annotation file
Raises:
IOError: if pitch_path doesn't exist
Returns:
F0Data: pitch annotation
"""
if not os.path.exists(pitch_path):
raise IOError("pitch_path {} does not exist".format(pitch_path))
times = []
freqs = []
with open(pitch_path, "r") as fhandle:
reader = csv.reader(fhandle, delimiter=",")
for line in reader:
times.append(float(line[0]))
freqs.append(float(line[1]))
times = np.array(times)
freqs = np.array(freqs)
confidence = (freqs > 0).astype(float)
pitch_data = annotations.F0Data(times, freqs, confidence)
return pitch_data
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The medleydb_pitch dataset
"""
def __init__(self, data_home=None):
super().__init__(
data_home,
index=DATA.index,
name="medleydb_pitch",
track_object=Track,
bibtex=BIBTEX,
download_info=DOWNLOAD_INFO,
license_info=LICENSE_INFO,
)
@core.copy_docs(load_audio)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
@core.copy_docs(load_pitch)
def load_pitch(self, *args, **kwargs):
return load_pitch(*args, **kwargs)
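# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module.  The annotation path
# below is a made-up placeholder for one of the dataset's f0 CSV files.
if __name__ == "__main__":
    f0 = load_pitch("/path/to/MedleyDB-Pitch/pitch/EXAMPLE_TRACK.csv")
    print(f0)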
|
[
"json.load",
"mirdata.core.copy_docs",
"csv.reader",
"mirdata.annotations.F0Data",
"mirdata.jams_utils.jams_converter",
"os.path.exists",
"mirdata.core.docstring_inherit",
"mirdata.core.LargeData",
"numpy.array",
"librosa.load",
"os.path.join"
] |
[((1974, 2033), 'mirdata.core.LargeData', 'core.LargeData', (['"""medleydb_pitch_index.json"""', '_load_metadata'], {}), "('medleydb_pitch_index.json', _load_metadata)\n", (1988, 2033), False, 'from mirdata import core\n'), ((5465, 5501), 'mirdata.core.docstring_inherit', 'core.docstring_inherit', (['core.Dataset'], {}), '(core.Dataset)\n', (5487, 5501), False, 'from mirdata import core\n'), ((1628, 1683), 'os.path.join', 'os.path.join', (['data_home', '"""medleydb_pitch_metadata.json"""'], {}), "(data_home, 'medleydb_pitch_metadata.json')\n", (1640, 1683), False, 'import os\n'), ((4629, 4673), 'librosa.load', 'librosa.load', (['audio_path'], {'sr': 'None', 'mono': '(True)'}), '(audio_path, sr=None, mono=True)\n', (4641, 4673), False, 'import librosa\n'), ((5291, 5306), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (5299, 5306), True, 'import numpy as np\n'), ((5319, 5334), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (5327, 5334), True, 'import numpy as np\n'), ((5395, 5439), 'mirdata.annotations.F0Data', 'annotations.F0Data', (['times', 'freqs', 'confidence'], {}), '(times, freqs, confidence)\n', (5413, 5439), False, 'from mirdata import annotations\n'), ((5888, 5914), 'mirdata.core.copy_docs', 'core.copy_docs', (['load_audio'], {}), '(load_audio)\n', (5902, 5914), False, 'from mirdata import core\n'), ((6007, 6033), 'mirdata.core.copy_docs', 'core.copy_docs', (['load_pitch'], {}), '(load_pitch)\n', (6021, 6033), False, 'from mirdata import core\n'), ((1696, 1725), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (1710, 1725), False, 'import os\n'), ((1887, 1905), 'json.load', 'json.load', (['fhandle'], {}), '(fhandle)\n', (1896, 1905), False, 'import json\n'), ((2898, 2958), 'os.path.join', 'os.path.join', (['self._data_home', "self._track_paths['pitch'][0]"], {}), "(self._data_home, self._track_paths['pitch'][0])\n", (2910, 2958), False, 'import os\n'), ((3338, 3398), 'os.path.join', 'os.path.join', (['self._data_home', "self._track_paths['audio'][0]"], {}), "(self._data_home, self._track_paths['audio'][0])\n", (3350, 3398), False, 'import os\n'), ((4089, 4220), 'mirdata.jams_utils.jams_converter', 'jams_utils.jams_converter', ([], {'audio_path': 'self.audio_path', 'f0_data': "[(self.pitch, 'annotated pitch')]", 'metadata': 'self._track_metadata'}), "(audio_path=self.audio_path, f0_data=[(self.pitch,\n 'annotated pitch')], metadata=self._track_metadata)\n", (4114, 4220), False, 'from mirdata import jams_utils\n'), ((4516, 4542), 'os.path.exists', 'os.path.exists', (['audio_path'], {}), '(audio_path)\n', (4530, 4542), False, 'import os\n'), ((4941, 4967), 'os.path.exists', 'os.path.exists', (['pitch_path'], {}), '(pitch_path)\n', (4955, 4967), False, 'import os\n'), ((5133, 5167), 'csv.reader', 'csv.reader', (['fhandle'], {'delimiter': '""","""'}), "(fhandle, delimiter=',')\n", (5143, 5167), False, 'import csv\n')]
|
"""contain methods to maintain GitHub hooks based on automatic deploy."""
import hashlib
import hmac
import json
import subprocess
from datetime import datetime, timedelta
from functools import wraps
from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network
from os import path
from shutil import copyfile
from typing import Callable, List, Union
import requests
from flask import Blueprint, abort, g, request
from git import InvalidGitRepositoryError, Repo
mod_deploy = Blueprint('deploy', __name__)
IPAddress = Union[IPv4Address, IPv6Address]
cached_web_hook_blocks: List[str] = []
cached_load_time: datetime = datetime(1970, 1, 1)
def cache_has_expired() -> bool:
"""
Check if the cache expired.
:return: True if the cache was last updated more than one hour ago.
:rtype: bool
"""
global cached_load_time
return cached_load_time + timedelta(hours=1) < datetime.now()
def get_cached_web_hook_blocks() -> List[str]:
"""
Fetch the cached web hook blocks.
:return: A list of ip blocks.
:rtype: List[str]
"""
global cached_web_hook_blocks
from run import config
if len(cached_web_hook_blocks) == 0 or cache_has_expired():
client_id = config.get('GITHUB_CLIENT_ID', '')
client_secret = config.get('GITHUB_CLIENT_SECRET', '')
meta_json = requests.get(
f'https://api.github.com/meta?client_id={client_id}&client_secret={client_secret}').json()
try:
cached_web_hook_blocks = meta_json['hooks']
except KeyError:
g.log.critical(f"Failed to retrieve hook IP's from GitHub! API returned {meta_json}")
return cached_web_hook_blocks
def is_github_web_hook_ip(request_ip: IPAddress) -> bool:
"""
    Check if the given IP address matches one of the hook IP blocks published by the GitHub API.
:param request_ip: The IP address the request came from.
:type request_ip: IPAddress
:return: True if the IP address is a valid GitHub Web Hook requester.
:rtype: bool
"""
for block in get_cached_web_hook_blocks():
if request_ip in ip_network(block):
return True
return False
def request_from_github(abort_code: int = 418) -> Callable:
"""Provide decorator to handle request from GitHub on the web hook."""
def decorator(f):
"""Decorate the function to check if a request is a GitHub hook request."""
@wraps(f)
def decorated_function(*args, **kwargs):
if request.method != 'POST':
return 'OK'
request_ip = ip_address(f"{request.remote_addr}")
if not is_github_web_hook_ip(request_ip):
g.log.warning(f"Unauthorized attempt to deploy by IP {request_ip}")
abort(abort_code)
for header in ['X-GitHub-Event', 'X-GitHub-Delivery', 'X-Hub-Signature', 'User-Agent']:
if header not in request.headers:
g.log.critical(f"{header} not in headers!")
abort(abort_code)
ua = request.headers.get('User-Agent')
if not ua.startswith('GitHub-Hookshot/'):
g.log.critical("User-Agent does not begin with GitHub-Hookshot/!")
abort(abort_code)
if not request.is_json:
g.log.critical("Request is not JSON!")
abort(abort_code)
return f(*args, **kwargs)
return decorated_function
return decorator
def is_valid_signature(x_hub_signature, data, private_key):
"""
    Check whether the GitHub hook request carries a valid signature.
:param x_hub_signature: Signature to check
:type x_hub_signature: str
:param data: Signature's data
:type data: bytearray
:param private_key: Signature's token
:type private_key: str
"""
hash_algorithm, github_signature = x_hub_signature.split('=', 1)
algorithm = hashlib.__dict__.get(hash_algorithm)
encoded_key = bytes(private_key, 'latin-1')
mac = hmac.new(encoded_key, msg=data, digestmod=algorithm)
return hmac.compare_digest(mac.hexdigest(), github_signature)
@mod_deploy.route('/deploy', methods=['GET', 'POST'])
@request_from_github()
def deploy():
"""Deploy the GitHub request to the test platform."""
from run import app
abort_code = 418
event = request.headers.get('X-GitHub-Event')
if event == "ping":
g.log.info("deploy endpoint pinged!")
return json.dumps({'msg': "Hi!"})
if event != "push":
g.log.info("deploy endpoint received unaccepted push request!")
return json.dumps({'msg': "Wrong event type"})
x_hub_signature = request.headers.get('X-Hub-Signature')
# web hook content type should be application/json for request.data to have the payload
# request.data is empty in case of x-www-form-urlencoded
if not is_valid_signature(x_hub_signature, request.data, g.github['deploy_key']):
g.log.warning(f"Deploy signature failed: {x_hub_signature}")
abort(abort_code)
payload = request.get_json()
if payload is None:
g.log.warning(f"Deploy payload is empty: {payload}")
abort(abort_code)
if payload['ref'] != 'refs/heads/master':
return json.dumps({'msg': "Not master; ignoring"})
try:
repo = Repo(app.config['INSTALL_FOLDER'])
except InvalidGitRepositoryError:
return json.dumps({'msg': "Folder is not a valid git directory"})
try:
origin = repo.remote('origin')
except ValueError:
return json.dumps({'msg': "Remote origin does not exist"})
fetch_info = origin.fetch()
if len(fetch_info) == 0:
return json.dumps({'msg': "Didn't fetch any information from remote!"})
pull_info = origin.pull()
if len(pull_info) == 0:
return json.dumps({'msg': "Didn't pull any information from remote!"})
if pull_info[0].flags > 128:
return json.dumps({'msg': "Didn't pull any information from remote!"})
commit_hash = pull_info[0].commit.hexsha
build_commit = f'build_commit = "{commit_hash}"'
with open('build_commit.py', 'w') as f:
f.write(build_commit)
run_ci_repo = path.join(app.config['INSTALL_FOLDER'], 'install', 'ci-vm', 'ci-linux', 'ci', 'runCI')
run_ci_nfs = path.join(app.config['SAMPLE_REPOSITORY'], 'vm_data', app.config['KVM_LINUX_NAME'], 'runCI')
copyfile(run_ci_repo, run_ci_nfs)
g.log.info(f"Platform upgraded to commit {commit_hash}")
subprocess.Popen(["sudo", "service", "platform", "reload"])
g.log.info("Sample platform synced with GitHub!")
return json.dumps({'msg': f"Platform upgraded to commit {commit_hash}"})
|
[
"json.dumps",
"flask.g.log.info",
"os.path.join",
"flask.request.get_json",
"ipaddress.ip_network",
"flask.request.headers.get",
"flask.abort",
"flask.g.log.warning",
"datetime.timedelta",
"requests.get",
"shutil.copyfile",
"datetime.datetime.now",
"subprocess.Popen",
"flask.Blueprint",
"hmac.new",
"git.Repo",
"ipaddress.ip_address",
"datetime.datetime",
"hashlib.__dict__.get",
"flask.g.log.critical",
"functools.wraps",
"run.config.get"
] |
[((486, 515), 'flask.Blueprint', 'Blueprint', (['"""deploy"""', '__name__'], {}), "('deploy', __name__)\n", (495, 515), False, 'from flask import Blueprint, abort, g, request\n'), ((630, 650), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (638, 650), False, 'from datetime import datetime, timedelta\n'), ((3902, 3938), 'hashlib.__dict__.get', 'hashlib.__dict__.get', (['hash_algorithm'], {}), '(hash_algorithm)\n', (3922, 3938), False, 'import hashlib\n'), ((3997, 4049), 'hmac.new', 'hmac.new', (['encoded_key'], {'msg': 'data', 'digestmod': 'algorithm'}), '(encoded_key, msg=data, digestmod=algorithm)\n', (4005, 4049), False, 'import hmac\n'), ((4325, 4362), 'flask.request.headers.get', 'request.headers.get', (['"""X-GitHub-Event"""'], {}), "('X-GitHub-Event')\n", (4344, 4362), False, 'from flask import Blueprint, abort, g, request\n'), ((4649, 4687), 'flask.request.headers.get', 'request.headers.get', (['"""X-Hub-Signature"""'], {}), "('X-Hub-Signature')\n", (4668, 4687), False, 'from flask import Blueprint, abort, g, request\n'), ((5037, 5055), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (5053, 5055), False, 'from flask import Blueprint, abort, g, request\n'), ((6170, 6260), 'os.path.join', 'path.join', (["app.config['INSTALL_FOLDER']", '"""install"""', '"""ci-vm"""', '"""ci-linux"""', '"""ci"""', '"""runCI"""'], {}), "(app.config['INSTALL_FOLDER'], 'install', 'ci-vm', 'ci-linux',\n 'ci', 'runCI')\n", (6179, 6260), False, 'from os import path\n'), ((6274, 6371), 'os.path.join', 'path.join', (["app.config['SAMPLE_REPOSITORY']", '"""vm_data"""', "app.config['KVM_LINUX_NAME']", '"""runCI"""'], {}), "(app.config['SAMPLE_REPOSITORY'], 'vm_data', app.config[\n 'KVM_LINUX_NAME'], 'runCI')\n", (6283, 6371), False, 'from os import path\n'), ((6371, 6404), 'shutil.copyfile', 'copyfile', (['run_ci_repo', 'run_ci_nfs'], {}), '(run_ci_repo, run_ci_nfs)\n', (6379, 6404), False, 'from shutil import copyfile\n'), ((6410, 6466), 'flask.g.log.info', 'g.log.info', (['f"""Platform upgraded to commit {commit_hash}"""'], {}), "(f'Platform upgraded to commit {commit_hash}')\n", (6420, 6466), False, 'from flask import Blueprint, abort, g, request\n'), ((6471, 6530), 'subprocess.Popen', 'subprocess.Popen', (["['sudo', 'service', 'platform', 'reload']"], {}), "(['sudo', 'service', 'platform', 'reload'])\n", (6487, 6530), False, 'import subprocess\n'), ((6535, 6584), 'flask.g.log.info', 'g.log.info', (['"""Sample platform synced with GitHub!"""'], {}), "('Sample platform synced with GitHub!')\n", (6545, 6584), False, 'from flask import Blueprint, abort, g, request\n'), ((6596, 6661), 'json.dumps', 'json.dumps', (["{'msg': f'Platform upgraded to commit {commit_hash}'}"], {}), "({'msg': f'Platform upgraded to commit {commit_hash}'})\n", (6606, 6661), False, 'import json\n'), ((903, 917), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (915, 917), False, 'from datetime import datetime, timedelta\n'), ((1224, 1258), 'run.config.get', 'config.get', (['"""GITHUB_CLIENT_ID"""', '""""""'], {}), "('GITHUB_CLIENT_ID', '')\n", (1234, 1258), False, 'from run import config\n'), ((1283, 1321), 'run.config.get', 'config.get', (['"""GITHUB_CLIENT_SECRET"""', '""""""'], {}), "('GITHUB_CLIENT_SECRET', '')\n", (1293, 1321), False, 'from run import config\n'), ((2412, 2420), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2417, 2420), False, 'from functools import wraps\n'), ((4395, 4432), 'flask.g.log.info', 'g.log.info', (['"""deploy endpoint pinged!"""'], {}), "('deploy endpoint pinged!')\n", (4405, 4432), False, 'from flask import Blueprint, abort, g, request\n'), ((4448, 4474), 'json.dumps', 'json.dumps', (["{'msg': 'Hi!'}"], {}), "({'msg': 'Hi!'})\n", (4458, 4474), False, 'import json\n'), ((4507, 4570), 'flask.g.log.info', 'g.log.info', (['"""deploy endpoint received unaccepted push request!"""'], {}), "('deploy endpoint received unaccepted push request!')\n", (4517, 4570), False, 'from flask import Blueprint, abort, g, request\n'), ((4586, 4625), 'json.dumps', 'json.dumps', (["{'msg': 'Wrong event type'}"], {}), "({'msg': 'Wrong event type'})\n", (4596, 4625), False, 'import json\n'), ((4935, 4995), 'flask.g.log.warning', 'g.log.warning', (['f"""Deploy signature failed: {x_hub_signature}"""'], {}), "(f'Deploy signature failed: {x_hub_signature}')\n", (4948, 4995), False, 'from flask import Blueprint, abort, g, request\n'), ((5004, 5021), 'flask.abort', 'abort', (['abort_code'], {}), '(abort_code)\n', (5009, 5021), False, 'from flask import Blueprint, abort, g, request\n'), ((5088, 5140), 'flask.g.log.warning', 'g.log.warning', (['f"""Deploy payload is empty: {payload}"""'], {}), "(f'Deploy payload is empty: {payload}')\n", (5101, 5140), False, 'from flask import Blueprint, abort, g, request\n'), ((5149, 5166), 'flask.abort', 'abort', (['abort_code'], {}), '(abort_code)\n', (5154, 5166), False, 'from flask import Blueprint, abort, g, request\n'), ((5229, 5272), 'json.dumps', 'json.dumps', (["{'msg': 'Not master; ignoring'}"], {}), "({'msg': 'Not master; ignoring'})\n", (5239, 5272), False, 'import json\n'), ((5298, 5332), 'git.Repo', 'Repo', (["app.config['INSTALL_FOLDER']"], {}), "(app.config['INSTALL_FOLDER'])\n", (5302, 5332), False, 'from git import InvalidGitRepositoryError, Repo\n'), ((5661, 5725), 'json.dumps', 'json.dumps', (['{\'msg\': "Didn\'t fetch any information from remote!"}'], {}), '({\'msg\': "Didn\'t fetch any information from remote!"})\n', (5671, 5725), False, 'import json\n'), ((5801, 5864), 'json.dumps', 'json.dumps', (['{\'msg\': "Didn\'t pull any information from remote!"}'], {}), '({\'msg\': "Didn\'t pull any information from remote!"})\n', (5811, 5864), False, 'import json\n'), ((5914, 5977), 'json.dumps', 'json.dumps', (['{\'msg\': "Didn\'t pull any information from remote!"}'], {}), '({\'msg\': "Didn\'t pull any information from remote!"})\n', (5924, 5977), False, 'import json\n'), ((882, 900), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (891, 900), False, 'from datetime import datetime, timedelta\n'), ((2100, 2117), 'ipaddress.ip_network', 'ip_network', (['block'], {}), '(block)\n', (2110, 2117), False, 'from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network\n'), ((2565, 2601), 'ipaddress.ip_address', 'ip_address', (['f"""{request.remote_addr}"""'], {}), "(f'{request.remote_addr}')\n", (2575, 2601), False, 'from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network\n'), ((3045, 3078), 'flask.request.headers.get', 'request.headers.get', (['"""User-Agent"""'], {}), "('User-Agent')\n", (3064, 3078), False, 'from flask import Blueprint, abort, g, request\n'), ((5386, 5444), 'json.dumps', 'json.dumps', (["{'msg': 'Folder is not a valid git directory'}"], {}), "({'msg': 'Folder is not a valid git directory'})\n", (5396, 5444), False, 'import json\n'), ((5532, 5583), 'json.dumps', 'json.dumps', (["{'msg': 'Remote origin does not exist'}"], {}), "({'msg': 'Remote origin does not exist'})\n", (5542, 5583), False, 'import json\n'), ((1342, 1448), 'requests.get', 'requests.get', (['f"""https://api.github.com/meta?client_id={client_id}&client_secret={client_secret}"""'], {}), "(\n    f'https://api.github.com/meta?client_id={client_id}&client_secret={client_secret}'\n    )\n", (1354, 1448), False, 'import requests\n'), ((1565, 1655), 'flask.g.log.critical', 'g.log.critical', (['f"""Failed to retrieve hook IP\'s from GitHub! API returned {meta_json}"""'], {}), '(\n    f"Failed to retrieve hook IP\'s from GitHub! API returned {meta_json}")\n', (1579, 1655), False, 'from flask import Blueprint, abort, g, request\n'), ((2672, 2739), 'flask.g.log.warning', 'g.log.warning', (['f"""Unauthorized attempt to deploy by IP {request_ip}"""'], {}), "(f'Unauthorized attempt to deploy by IP {request_ip}')\n", (2685, 2739), False, 'from flask import Blueprint, abort, g, request\n'), ((2756, 2773), 'flask.abort', 'abort', (['abort_code'], {}), '(abort_code)\n', (2761, 2773), False, 'from flask import Blueprint, abort, g, request\n'), ((3149, 3215), 'flask.g.log.critical', 'g.log.critical', (['"""User-Agent does not begin with GitHub-Hookshot/!"""'], {}), "('User-Agent does not begin with GitHub-Hookshot/!')\n", (3163, 3215), False, 'from flask import Blueprint, abort, g, request\n'), ((3232, 3249), 'flask.abort', 'abort', (['abort_code'], {}), '(abort_code)\n', (3237, 3249), False, 'from flask import Blueprint, abort, g, request\n'), ((3303, 3341), 'flask.g.log.critical', 'g.log.critical', (['"""Request is not JSON!"""'], {}), "('Request is not JSON!')\n", (3317, 3341), False, 'from flask import Blueprint, abort, g, request\n'), ((3358, 3375), 'flask.abort', 'abort', (['abort_code'], {}), '(abort_code)\n', (3363, 3375), False, 'from flask import Blueprint, abort, g, request\n'), ((2945, 2988), 'flask.g.log.critical', 'g.log.critical', (['f"""{header} not in headers!"""'], {}), "(f'{header} not in headers!')\n", (2959, 2988), False, 'from flask import Blueprint, abort, g, request\n'), ((3009, 3026), 'flask.abort', 'abort', (['abort_code'], {}), '(abort_code)\n', (3014, 3026), False, 'from flask import Blueprint, abort, g, request\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from tekton import router
from gaecookie.decorator import no_csrf
from aluno_app import facade
from routes.alunos import admin
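# index() renders the empty aluno (student) form; its action URL points at save() below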
@no_csrf
def index():
    return TemplateResponse({'save_path': router.to_path(save)}, 'alunos/admin/form.html')
def save(_handler, aluno_id=None, **aluno_properties):
cmd = facade.save_aluno_cmd(**aluno_properties)
try:
cmd()
except CommandExecutionException:
context = {'errors': cmd.errors,
'aluno': cmd.form}
return TemplateResponse(context, 'alunos/admin/form.html')
_handler.redirect(router.to_path(admin))
|
[
"tekton.router.to_path",
"config.template_middleware.TemplateResponse",
"aluno_app.facade.save_aluno_cmd"
] |
[((504, 545), 'aluno_app.facade.save_aluno_cmd', 'facade.save_aluno_cmd', ([], {}), '(**aluno_properties)\n', (525, 545), False, 'from aluno_app import facade\n'), ((776, 797), 'tekton.router.to_path', 'router.to_path', (['admin'], {}), '(admin)\n', (790, 797), False, 'from tekton import router\n'), ((389, 409), 'tekton.router.to_path', 'router.to_path', (['save'], {}), '(save)\n', (403, 409), False, 'from tekton import router\n'), ((702, 753), 'config.template_middleware.TemplateResponse', 'TemplateResponse', (['context', '"""alunos/admin/form.html"""'], {}), "(context, 'alunos/admin/form.html')\n", (718, 753), False, 'from config.template_middleware import TemplateResponse\n')]
|
from __future__ import division
import torch
import numpy as np
def parse_conv_block(m, weights, offset, initflag):
"""
Initialization of conv layers with batchnorm
Args:
m (Sequential): sequence of layers
weights (numpy.ndarray): pretrained weights data
offset (int): current position in the weights file
initflag (bool): if True, the layers are not covered by the weights file. \
They are initialized using darknet-style initialization.
Returns:
offset (int): current position in the weights file
weights (numpy.ndarray): pretrained weights data
"""
conv_model = m[0]
bn_model = m[1]
    param_length = m[1].bias.numel()  # numel() returns the number of elements in the tensor
# batchnorm
for pname in ['bias', 'weight', 'running_mean', 'running_var']:
layerparam = getattr(bn_model, pname)
if initflag: # yolo initialization - scale to one, bias to zero
if pname == 'weight':
weights = np.append(weights, np.ones(param_length))
else:
weights = np.append(weights, np.zeros(param_length))
param = torch.from_numpy(weights[offset:offset + param_length]).view_as(layerparam)
layerparam.data.copy_(param)
offset += param_length
param_length = conv_model.weight.numel()
# conv
if initflag: # yolo initialization
n, c, k, _ = conv_model.weight.shape
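        # He-style initialization: std = sqrt(2 / fan_in), where fan_in = k * k * c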
scale = np.sqrt(2 / (k * k * c))
weights = np.append(weights, scale * np.random.normal(size=param_length))
param = torch.from_numpy(
weights[offset:offset + param_length]).view_as(conv_model.weight)
conv_model.weight.data.copy_(param)
offset += param_length
return offset, weights
def parse_yolo_block(m, weights, offset, initflag):
"""
YOLO Layer (one conv with bias) Initialization
Args:
m (Sequential): sequence of layers
weights (numpy.ndarray): pretrained weights data
offset (int): current position in the weights file
initflag (bool): if True, the layers are not covered by the weights file. \
They are initialized using darknet-style initialization.
Returns:
offset (int): current position in the weights file
weights (numpy.ndarray): pretrained weights data
"""
conv_model = m._modules['conv']
param_length = conv_model.bias.numel()
if initflag: # yolo initialization - bias to zero
weights = np.append(weights, np.zeros(param_length))
param = torch.from_numpy(
weights[offset:offset + param_length]).view_as(conv_model.bias)
conv_model.bias.data.copy_(param)
offset += param_length
param_length = conv_model.weight.numel()
if initflag: # yolo initialization
n, c, k, _ = conv_model.weight.shape
scale = np.sqrt(2 / (k * k * c))
weights = np.append(weights, scale * np.random.normal(size=param_length))
param = torch.from_numpy(
weights[offset:offset + param_length]).view_as(conv_model.weight)
conv_model.weight.data.copy_(param)
offset += param_length
return offset, weights
def parse_yolo_weights(model, weights_path):
"""
Parse YOLO (darknet) pre-trained weights data onto the pytorch model
Args:
model : pytorch model object
weights_path (str): path to the YOLO (darknet) pre-trained weights file
"""
fp = open(weights_path, "rb")
# skip the header
header = np.fromfile(fp, dtype=np.int32, count=5) # not used
# read weights
weights = np.fromfile(fp, dtype=np.float32)
fp.close()
offset = 0
    initflag = False  # False for a full YOLO weights file; True once past the end of a darknet (backbone-only) file
for m in model.module_list:
if m._get_name() == 'Sequential':
# normal conv block
offset, weights = parse_conv_block(m, weights, offset, initflag)
elif m._get_name() == 'resblock':
# residual block
for modu in m._modules['module_list']:
for blk in modu:
offset, weights = parse_conv_block(blk, weights, offset, initflag)
elif m._get_name() == 'YOLOLayer':
# YOLO Layer (one conv with bias) Initialization
offset, weights = parse_yolo_block(m, weights, offset, initflag)
initflag = (offset >= len(weights)) # the end of the weights file. turn the flag on
|
[
"numpy.fromfile",
"numpy.zeros",
"numpy.ones",
"numpy.random.normal",
"numpy.sqrt",
"torch.from_numpy"
] |
[((3473, 3513), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.int32', 'count': '(5)'}), '(fp, dtype=np.int32, count=5)\n', (3484, 3513), True, 'import numpy as np\n'), ((3559, 3592), 'numpy.fromfile', 'np.fromfile', (['fp'], {'dtype': 'np.float32'}), '(fp, dtype=np.float32)\n', (3570, 3592), True, 'import numpy as np\n'), ((1445, 1469), 'numpy.sqrt', 'np.sqrt', (['(2 / (k * k * c))'], {}), '(2 / (k * k * c))\n', (1452, 1469), True, 'import numpy as np\n'), ((2833, 2857), 'numpy.sqrt', 'np.sqrt', (['(2 / (k * k * c))'], {}), '(2 / (k * k * c))\n', (2840, 2857), True, 'import numpy as np\n'), ((1565, 1620), 'torch.from_numpy', 'torch.from_numpy', (['weights[offset:offset + param_length]'], {}), '(weights[offset:offset + param_length])\n', (1581, 1620), False, 'import torch\n'), ((2494, 2516), 'numpy.zeros', 'np.zeros', (['param_length'], {}), '(param_length)\n', (2502, 2516), True, 'import numpy as np\n'), ((2531, 2586), 'torch.from_numpy', 'torch.from_numpy', (['weights[offset:offset + param_length]'], {}), '(weights[offset:offset + param_length])\n', (2547, 2586), False, 'import torch\n'), ((2954, 3009), 'torch.from_numpy', 'torch.from_numpy', (['weights[offset:offset + param_length]'], {}), '(weights[offset:offset + param_length])\n', (2970, 3009), False, 'import torch\n'), ((1143, 1198), 'torch.from_numpy', 'torch.from_numpy', (['weights[offset:offset + param_length]'], {}), '(weights[offset:offset + param_length])\n', (1159, 1198), False, 'import torch\n'), ((1515, 1550), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'param_length'}), '(size=param_length)\n', (1531, 1550), True, 'import numpy as np\n'), ((2903, 2938), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'param_length'}), '(size=param_length)\n', (2919, 2938), True, 'import numpy as np\n'), ((1016, 1037), 'numpy.ones', 'np.ones', (['param_length'], {}), '(param_length)\n', (1023, 1037), True, 'import numpy as np\n'), ((1102, 1124), 'numpy.zeros', 'np.zeros', (['param_length'], {}), '(param_length)\n', (1110, 1124), True, 'import numpy as np\n')]
|
from flask import Flask, request
from structs import *
import json
#import numpy
from basicFuncs import *
app = Flask(__name__)
def create_action(action_type, target):
actionContent = ActionContent(action_type, target.__dict__)
bleh = json.dumps(actionContent.__dict__)
print(bleh)
return bleh
def create_move_action(target):
print("Target move x = " + str(target.X) + ", y = " + str(target.Y) + "\n")
return create_action("MoveAction", target)
def create_attack_action(target):
return create_action("AttackAction", target)
def create_collect_action(target):
return create_action("CollectAction", target)
def create_steal_action(target):
return create_action("StealAction", target)
def create_heal_action():
return create_action("HealAction", "")
def create_purchase_action(item):
return create_action("PurchaseAction", item)
def create_upgrade_action(upgrade):
actionContent = ActionContent("UpgradeAction", str(upgrade))
bleh = json.dumps(actionContent.__dict__)
print(bleh)
return bleh
def deserialize_map(serialized_map):
"""
    Utility function to parse the serialized map.
"""
serialized_map = serialized_map[1:]
rows = serialized_map.split('[')
column = rows[0].split('{')
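    # The game map is a fixed 20x20 grid of tiles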
deserialized_map = [[Tile() for x in range(20)] for y in range(20)]
for i in range(len(rows) - 1):
column = rows[i + 1].split('{')
for j in range(len(column) - 1):
infos = column[j + 1].split(',')
end_index = infos[2].find('}')
content = int(infos[0])
x = int(infos[1])
y = int(infos[2][:end_index])
deserialized_map[i][j] = Tile(content, x, y)
return deserialized_map
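# Module-level bot state, persisted across successive requests from the game server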
shortestPath = None
pathIndex = int(0)
resourcePos = None
isFirstMove = True
goGetResource = True
grabResource = False
bringBackResource = False
actionCounter = 0
brokeAWall = False
goBreakAWall = False
def bot():
"""
    Main entry point of your bot.
"""
map_json = request.form["map"]
# Player info
encoded_map = map_json.encode()
map_json = json.loads(encoded_map)
p = map_json["Player"]
pos = p["Position"]
x = pos["X"]
y = pos["Y"]
house = p["HouseLocation"]
player = Player(p["Health"], p["MaxHealth"], Point(x,y),
Point(house["X"], house["Y"]), p["Score"],
p["CarriedResources"], p["CarryingCapacity"])
# Map
serialized_map = map_json["CustomSerializedMap"]
deserialized_map = deserialize_map(serialized_map)
otherPlayers = []
for players in map_json["OtherPlayers"]:
player_info = players["Value"]
p_pos = player_info["Position"]
player_info = PlayerInfo(player_info["Health"],
player_info["MaxHealth"],
Point(p_pos["X"], p_pos["Y"]))
otherPlayers.append(player_info)
# return decision
#return create_move_action(Point(0,1))
offset_x = deserialized_map[0][0].X
offset_y = deserialized_map[0][0].Y
offset = Point(offset_x, offset_y)
global shortestPath
global resourcePos
global pathIndex
global isFirstMove
global goGetResource
global grabResource
global bringBackResource
global goBreakAWall
global brokeAWall
global actionCounter
currentPosition = Point(x-offset_x,y-offset_y)
print("position X= " + str(x) + " Y= " + str(y))
if goGetResource:
resourcePos = findClosestResource(currentPosition, deserialized_map) + offset
#print("res = " + str(resourcePos.X) + " " + str(resourcePos.Y))
shortestPath = planMovement(createObstacleMap(deserialized_map), currentPosition, resourcePos - offset)
#for i in shortestPath:
# print("i = " + str(i.X) + " " + str(i.Y) + "\n")
if len(shortestPath) > 2:
return create_move_action(shortestPath[1] + offset)
else:
goGetResource = False
grabResource = True
if grabResource:
if player.CarriedRessources < player.CarryingCapacity:
return create_collect_action(resourcePos)
else:
grabResource = False
bringBackResource = True
if bringBackResource:
shortestPath = planMovement(createObstacleMap(deserialized_map), currentPosition, player.HouseLocation - offset)
print("ppos = " + str(player.Position.X) + " " + str(player.Position.Y) + "\n")
print("ppos = " + str(player.HouseLocation.X) + " " + str(player.HouseLocation.Y) + "\n")
if Point().Distance(player.Position, player.HouseLocation) > 0:
return create_move_action(shortestPath[1] + offset)
else:
print("Carry capacity: " + str(player.CarryingCapacity) + "\n")
create_upgrade_action(UpgradeType.CarryingCapacity)
bringBackResource = False
goGetResource = True
# actionCounter += 1
#
# if actionCounter == 10:
# goBreakAWall = True
# goGetResource = False
#
# if goGetResource:
return create_move_action(currentPosition)
@app.route("/", methods=["POST"])
def reponse():
"""
    Entry point called by the GameServer.
"""
return bot()
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=True)
|
[
"flask.Flask",
"json.loads",
"json.dumps"
] |
[((114, 129), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (119, 129), False, 'from flask import Flask, request\n'), ((246, 280), 'json.dumps', 'json.dumps', (['actionContent.__dict__'], {}), '(actionContent.__dict__)\n', (256, 280), False, 'import json\n'), ((994, 1028), 'json.dumps', 'json.dumps', (['actionContent.__dict__'], {}), '(actionContent.__dict__)\n', (1004, 1028), False, 'import json\n'), ((2106, 2129), 'json.loads', 'json.loads', (['encoded_map'], {}), '(encoded_map)\n', (2116, 2129), False, 'import json\n')]
|
from setuptools import setup, find_packages
setup(
name='yabeda',
version='0.1',
description="Yabeda",
long_description="""
A tool that sends deployment notifications from Gitlab CI to Slack.
""",
url="https://github.com/flix-tech/yabeda",
author="Flixtech",
license='MIT',
python_requires='>=3.6',
packages=find_packages(),
install_requires=[
'flask',
'pytest',
'pytest-cov',
'python-gitlab',
'slackclient',
'flasgger',
'ciso8601',
'python-dateutil',
'uwsgi'
],
)
|
[
"setuptools.find_packages"
] |
[((358, 373), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (371, 373), False, 'from setuptools import setup, find_packages\n')]
|
import os
from azul import config, require
from azul.template import emit
expected_component_path = os.path.join(os.path.abspath(config.project_root), 'terraform', config.terraform_component)
actual_component_path = os.path.dirname(os.path.abspath(__file__))
require(os.path.samefile(expected_component_path, actual_component_path),
f"The current Terraform component is set to '{config.terraform_component}'. "
f"You should therefore be in '{expected_component_path}'")
emit({
"data": [
{
"aws_caller_identity": {
"current": {}
}
},
{
"aws_region": {
"current": {}
}
},
*([{
"google_client_config": {
"current": {}
}
}] if config.enable_gcp() else [])
],
"locals": {
"account_id": "${data.aws_caller_identity.current.account_id}",
"region": "${data.aws_region.current.name}",
"google_project": "${data.google_client_config.current.project}" if config.enable_gcp() else None
}
})
|
[
"os.path.samefile",
"os.path.abspath",
"azul.config.enable_gcp"
] |
[((115, 151), 'os.path.abspath', 'os.path.abspath', (['config.project_root'], {}), '(config.project_root)\n', (130, 151), False, 'import os\n'), ((234, 259), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (249, 259), False, 'import os\n'), ((269, 333), 'os.path.samefile', 'os.path.samefile', (['expected_component_path', 'actual_component_path'], {}), '(expected_component_path, actual_component_path)\n', (285, 333), False, 'import os\n'), ((1067, 1086), 'azul.config.enable_gcp', 'config.enable_gcp', ([], {}), '()\n', (1084, 1086), False, 'from azul import config, require\n'), ((814, 833), 'azul.config.enable_gcp', 'config.enable_gcp', ([], {}), '()\n', (831, 833), False, 'from azul import config, require\n')]
|
# coding: utf8
from __future__ import unicode_literals
import prodigy
from prodigy.components.loaders import JSONL
from prodigy.models.ner import EntityRecognizer
from prodigy.models.matcher import PatternMatcher
from prodigy.components.preprocess import split_sentences
from prodigy.components.sorters import prefer_uncertain
from prodigy.util import combine_models, split_string
import spacy
# Recipe decorator with argument annotations: (description, argument type,
# shortcut, type / converter function called on value before it's passed to
# the function). Descriptions are also shown when typing --help.
@prodigy.recipe('ner.teach',
dataset=("The dataset to use", "positional", None, str),
spacy_model=("The base model", "positional", None, str),
source=("The source data as a JSONL file", "positional", None, str),
label=("One or more comma-separated labels", "option", "l", split_string),
patterns=("Optional match patterns", "option", "p", str),
exclude=("Names of datasets to exclude", "option", "e", split_string),
unsegmented=("Don't split sentences", "flag", "U", bool)
)
def ner_teach(dataset, spacy_model, source=None, label=None, patterns=None,
exclude=None, unsegmented=False):
"""
Collect the best possible training data for a named entity recognition
model with the model in the loop. Based on your annotations, Prodigy will
decide which questions to ask next.
"""
# Load the stream from a JSONL file and return a generator that yields a
# dictionary for each example in the data.
stream = JSONL(source)
# Load the spaCy model
nlp = spacy.load(spacy_model)
# Initialize Prodigy's entity recognizer model, which uses beam search to
# find all possible analyses and outputs (score, example) tuples
model = EntityRecognizer(nlp, label=label)
if patterns is None:
# No patterns are used, so just use the NER model to suggest examples
# and only use the model's update method as the update callback
predict = model
update = model.update
else:
# Initialize the pattern matcher and load in the JSONL patterns
matcher = PatternMatcher(nlp).from_disk(patterns)
# Combine the NER model and the matcher and interleave their
# suggestions and update both at the same time
predict, update = combine_models(model, matcher)
if not unsegmented:
# Use spaCy to split text into sentences
stream = split_sentences(nlp, stream)
# Use the prefer_uncertain sorter to focus on suggestions that the model
# is most uncertain about (i.e. with a score closest to 0.5). The model
# yields (score, example) tuples and the sorter yields just the example
stream = prefer_uncertain(predict(stream))
return {
'view_id': 'ner', # Annotation interface to use
'dataset': dataset, # Name of dataset to save annotations
'stream': stream, # Incoming stream of examples
'update': update, # Update callback, called with batch of answers
'exclude': exclude, # List of dataset names to exclude
'config': { # Additional config settings, mostly for app UI
'lang': nlp.lang,
'label': ', '.join(label) if label is not None else 'all'
}
}
|
[
"prodigy.util.combine_models",
"prodigy.components.loaders.JSONL",
"prodigy.models.matcher.PatternMatcher",
"prodigy.components.preprocess.split_sentences",
"spacy.load",
"prodigy.models.ner.EntityRecognizer",
"prodigy.recipe"
] |
[((614, 1110), 'prodigy.recipe', 'prodigy.recipe', (['"""ner.teach"""'], {'dataset': "('The dataset to use', 'positional', None, str)", 'spacy_model': "('The base model', 'positional', None, str)", 'source': "('The source data as a JSONL file', 'positional', None, str)", 'label': "('One or more comma-separated labels', 'option', 'l', split_string)", 'patterns': "('Optional match patterns', 'option', 'p', str)", 'exclude': "('Names of datasets to exclude', 'option', 'e', split_string)", 'unsegmented': '("Don\'t split sentences", \'flag\', \'U\', bool)'}), '(\'ner.teach\', dataset=(\'The dataset to use\', \'positional\',\n None, str), spacy_model=(\'The base model\', \'positional\', None, str),\n source=(\'The source data as a JSONL file\', \'positional\', None, str),\n label=(\'One or more comma-separated labels\', \'option\', \'l\',\n split_string), patterns=(\'Optional match patterns\', \'option\', \'p\', str),\n exclude=(\'Names of datasets to exclude\', \'option\', \'e\', split_string),\n unsegmented=("Don\'t split sentences", \'flag\', \'U\', bool))\n', (628, 1110), False, 'import prodigy\n'), ((1586, 1599), 'prodigy.components.loaders.JSONL', 'JSONL', (['source'], {}), '(source)\n', (1591, 1599), False, 'from prodigy.components.loaders import JSONL\n'), ((1638, 1661), 'spacy.load', 'spacy.load', (['spacy_model'], {}), '(spacy_model)\n', (1648, 1661), False, 'import spacy\n'), ((1822, 1856), 'prodigy.models.ner.EntityRecognizer', 'EntityRecognizer', (['nlp'], {'label': 'label'}), '(nlp, label=label)\n', (1838, 1856), False, 'from prodigy.models.ner import EntityRecognizer\n'), ((2377, 2407), 'prodigy.util.combine_models', 'combine_models', (['model', 'matcher'], {}), '(model, matcher)\n', (2391, 2407), False, 'from prodigy.util import combine_models, split_string\n'), ((2499, 2527), 'prodigy.components.preprocess.split_sentences', 'split_sentences', (['nlp', 'stream'], {}), '(nlp, stream)\n', (2514, 2527), False, 'from prodigy.components.preprocess import split_sentences\n'), ((2187, 2206), 'prodigy.models.matcher.PatternMatcher', 'PatternMatcher', (['nlp'], {}), '(nlp)\n', (2201, 2206), False, 'from prodigy.models.matcher import PatternMatcher\n')]
|
import json
from typing import Optional
from ..backend import OpenIDConnectBackend
from .models import SignaturgruppenToken
class SignaturgruppenBackend(OpenIDConnectBackend):
"""
    OpenID Connect backend for the Signaturgruppen identity broker.
"""
def __init__(
self,
*args,
authorization_endpoint: str,
token_endpoint: str,
**kwargs,
):
"""
        :param authorization_endpoint: URL of the Identity Provider's
            authorization endpoint
        :param token_endpoint: URL of the Identity Provider's token endpoint
"""
self.authorization_endpoint = authorization_endpoint
self.token_endpoint = token_endpoint
super(SignaturgruppenBackend, self).__init__(*args, **kwargs)
def create_authorization_url(
self,
state: str,
callback_uri: str,
validate_ssn: bool,
language: Optional[str],
) -> str:
"""
Creates and returns an absolute URL to initiate an OpenID Connect
authorization flow at the Identity Provider.
:param state: An arbitrary string passed to the callback endpoint
        :param callback_uri: URL of the callback endpoint the client is
            returned to after completing or interrupting the flow
:param validate_ssn: Whether or not to validate social security
number as part of the flow
:returns: Absolute URL @ Identity Provider
"""
        # amr_values is a space-separated list of NemID login methods.
# nemid.otp enables authentication using nøglekort/app.
# nemid.keyfile enables authentication using nøglefil.
# The first item in the list is the default displayed option.
amr_values = {'nemid': {'amr_values': 'nemid.otp nemid.keyfile'}}
# OpenID Connect scopes
scope = ['openid', 'mitid', 'nemid', 'userinfo_token']
if validate_ssn:
scope.append('ssn')
url, _ = self.session.create_authorization_url(
url=self.authorization_endpoint,
redirect_uri=callback_uri,
state=state,
scope=scope,
language=language,
idp_params=json.dumps(amr_values),
)
return url
def fetch_token(
self,
code: str,
state: str,
redirect_uri: str,
) -> SignaturgruppenToken:
"""
        Exchange an authorization code for a token at the Identity Provider.
"""
raw_token = self.session.fetch_token(
url=self.token_endpoint,
grant_type='authorization_code',
code=code,
state=state,
redirect_uri=redirect_uri,
verify=True,
)
return SignaturgruppenToken.from_raw_token(
raw_token=raw_token,
jwk=self.session.get_jwk(),
)
|
[
"json.dumps"
] |
[((2025, 2047), 'json.dumps', 'json.dumps', (['amr_values'], {}), '(amr_values)\n', (2035, 2047), False, 'import json\n')]
|
#!/usr/bin/env python3
import asyncio
import concurrent.futures
import datetime
import hashlib
import json
import time
import re
import os
import secrets
import urllib.request
import urllib.error
from decimal import Decimal
from typing import Tuple
import blspy
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.spend_bundle import SpendBundle
from chia.types.coin_spend import CoinSpend
from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from chia.wallet.puzzles.p2_conditions import puzzle_for_conditions
from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
calculate_synthetic_offset,
puzzle_for_public_key_and_hidden_puzzle_hash,
solution_for_conditions,
solution_for_hidden_puzzle,
)
from chia.wallet.puzzles.load_clvm import load_clvm
from .bech32m import bech32_decode, convertbits
from .pushtx import push_tx
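# Order of the BLS12-381 scalar field, i.e. the group order used for BLS private keys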
GROUP_ORDER = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001
P2_DELAYED_OR_PREIMAGE = load_clvm("p2_delayed_or_preimage.cl", __name__)
ADDITIONAL_DATA = bytes.fromhex(
"ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
)
OWNERSHIP_MESSAGE = b"I own this key"
def lookup_xch_prices():
prices = json.load(
urllib.request.urlopen(
"https://min-api.cryptocompare.com/data/price?fsym=XCH&tsyms=USD,BTC"
),
parse_float=Decimal,
)
return prices
def fromhex(s):
if s.lower().startswith("0x"):
s = s[2:]
if len(s) & 1 == 1:
s = f"0{s}"
return bytes.fromhex(s)
def wait_for_payments_to_address(address, min_amount):
root_url = "https://api2.spacescan.io"
addr_url = f"{root_url}/1/xch/address/txns"
while 1:
print(f"checking for payments to {address}")
resp = urllib.request.urlopen(f"{addr_url}/{address}", timeout=10)
if resp.status == 200:
coins = json.load(resp, parse_float=Decimal)["data"]["coins"][::-1]
for coin in coins:
amount = int(coin["amount"])
if amount >= min_amount:
return [coin]
time.sleep(5)
# ### ui
def ui_choose_path(possible_paths):
if len(possible_paths) > 0:
while 1:
print(f" 0. NEW SWAP")
for idx, pp in enumerate(possible_paths):
print(f"{(idx+1):2}. {pp}")
r = input("> ")
try:
v = int(r)
if v == 0:
break
return possible_paths[v - 1]
except ValueError:
pass
name = input('(optional) short name for counterparty (example: "ed")> ')
if len(name) > 0:
name = f"-{name}"
now = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
path = f"xchswap-log-{now}{name}.txt"
return path
def ui_get_logfile():
cre = re.compile(r"xchswap-log-(\d{4})-(\d{2})-(\d{2})-(\d{6})(\-.+)?.txt")
possible_paths = [_ for _ in os.listdir(".") if cre.match(_)]
possible_paths.sort()
path = ui_choose_path(possible_paths)
if not os.path.exists(path):
with open(path, "a") as f:
secret_key = secrets.randbits(256)
f.write(f"{secret_key}\n")
file_lines = open(path).readlines()
secret_key = int(file_lines[0])
file_lines = file_lines[1:]
def logfile(msg):
nonlocal file_lines
while len(file_lines) > 0:
line, file_lines = file_lines[0], file_lines[1:]
if line.startswith("#"):
continue
time.sleep(0.125)
print("%s%s" % (msg, line))
return line[:-1]
r = input(f"{msg}")
with open(path, "a") as f:
f.write(f"{r}\n")
return r
return logfile, secret_key
def ui_choose(input):
while 1:
print("1. Have XCH, want BTC")
print("2. Have BTC, want XCH")
choice = input("> ")
if choice in ("1", "2"):
return int(choice)
def ui_get_amounts(input, prices):
print("prices from cryptocompare.com")
BTC_PER_XCH = prices["BTC"]
USD_PER_XCH = prices["USD"]
USD_PER_BTC = USD_PER_XCH / BTC_PER_XCH
print("1 XCH = %0.6f BTC" % BTC_PER_XCH)
print("1 BTC = %0.6f XCH" % (1 / BTC_PER_XCH))
print(
"USD estimates are based on $%0.2f/BTC and $%0.2f/XCH"
% (USD_PER_BTC, USD_PER_XCH)
)
print()
xch_amount = Decimal(input("How much XCH is being traded? > "))
btc_amount = xch_amount * BTC_PER_XCH
print(
"%0.13f XCH worth about %0.8f btc (USD$%0.2f)"
% (xch_amount, btc_amount, USD_PER_XCH * xch_amount)
)
print()
return btc_amount, xch_amount
def ui_get_puzzle_hash(input, msg):
while 1:
address = input(msg)
if not address.startswith("xch"):
continue
puzzle_hash = decode_puzzle_hash(address)
return puzzle_hash
def ui_get_lightning_payment_request(input):
while 1:
r = input("> ")
if validate_lpr(r):
return r
print("invalid")
def ui_get_private_key(input, public_key):
while 1:
r = input("enter counter-party private key\n> ")
try:
p = int(r, 16) % GROUP_ORDER
private_key = private_key_for_secret(p)
if private_key.get_g1() == public_key:
return p
print("this private key doesn't match the public key")
continue
except Exception:
pass
print("private key is a 64 character hex string")
def ui_get_pubkey_with_sig(input, my_pubkey):
while 1:
r = input("> ")
r = r.strip()
try:
puzzle_hash_hex, sig_hex = r.split("_")
b1 = fromhex(puzzle_hash_hex)
g1 = blspy.G1Element.from_bytes(b1)
if g1 == my_pubkey:
print()
print("that's your public key, silly! Try again.")
continue
b2 = fromhex(sig_hex)
g2 = blspy.G2Element.from_bytes(b2)
r = blspy.AugSchemeMPL.verify(g1, OWNERSHIP_MESSAGE, g2)
if not r:
print("bad signature!")
continue
return g1
except Exception:
pass
print(
'wrong format: expecting 64 hex digits followed by a "_" character, then 96 hex digits'
)
def ui_get_sweep_preimage(input, sweep_receipt_hash):
while 1:
r = input("> ").strip().lower()
try:
b = fromhex(r)
if hashlib.sha256(b).digest() == sweep_receipt_hash:
return b
print("the hash of that doesn't match")
except Exception:
pass
print("invalid, try again")
def ui_get_sweep_preimage_or_private_key(
input, sweep_receipt_hash, clawback_public_key
):
while 1:
r = input("> ").strip().lower()
if r == "quit":
return None, None, True
try:
p = int(r, 16) % GROUP_ORDER
private_key = private_key_for_secret(p)
if private_key.get_g1() == clawback_public_key:
return None, p, False
b = fromhex(r)
if hashlib.sha256(b).digest() == sweep_receipt_hash:
return b, None, False
except Exception:
pass
print("this isn't the private key nor the pre-image")
print("each is a 64 character hex string")
def ui_should_send_spend_bundle():
r = input("send this spend bundle? (Y/N) > ")
return r.lower().startswith("y")
# ### end ui
def clawback_or_sweep_solution(
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
sweep_preimage,
):
hidden_puzzle = generate_hidden_puzzle(
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
)
delegated_solution = solution_for_conditions(conditions)
p2_delayed_or_preimage_solution = Program.to([sweep_preimage, delegated_solution])
solution = solution_for_hidden_puzzle(
total_pubkey, hidden_puzzle, p2_delayed_or_preimage_solution
)
return solution
def parse_lpr(lpr):
prefix, data, spec = bech32_decode(lpr, max_length=2048)
OVERRIDE_SIZES = {1: 256, 16: 256}
d = {}
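    # BOLT-11 layout: 7 five-bit words of timestamp, then tagged fields, then a 520-bit signature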
tagged = data[7:]
while len(tagged) * 5 > 520:
type = tagged[0]
size = convertbits(tagged[1:3], 5, 10)[0]
data_blob = tagged[3 : 3 + size]
bit_size = OVERRIDE_SIZES.get(type, 5 * size)
if size > 0:
data = convertbits(data_blob, 5, bit_size)[0]
else:
data = None
tagged = tagged[3 + size :]
if size > 10:
data = data.to_bytes((bit_size + 7) >> 3, byteorder="big")
d[type] = data
signature = convertbits(tagged, 5, 520)[0]
d["signature"] = signature
return d
def validate_lpr(lpr):
r = hash_for_lpr(lpr)
return len(r) == 32
def hash_for_lpr(lpr):
d = parse_lpr(lpr)
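    # Tagged field 1 ('p') holds the 256-bit payment hash of the invoice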
return d[1]
def private_key_for_secret(s):
s %= GROUP_ORDER
return blspy.PrivateKey.from_bytes(s.to_bytes(32, byteorder="big"))
def pubkey_for_secret(s):
return private_key_for_secret(s).get_g1()
def signed_pubkey_for_secret(s):
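    # Sign a fixed message to prove ownership of the key (AugScheme prepends the public key)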
private_key = private_key_for_secret(s)
public_key = private_key.get_g1()
sig = blspy.AugSchemeMPL.sign(private_key, OWNERSHIP_MESSAGE, public_key)
return public_key, f"{bytes(public_key).hex()}_{bytes(sig).hex()}"
def generate_hidden_puzzle(
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
) -> Program:
hidden_puzzle = P2_DELAYED_OR_PREIMAGE.curry(
(clawback_delay_seconds, clawback_public_key),
(sweep_receipt_hash, sweep_public_key),
)
return hidden_puzzle
def solve_p2_delayed_or_preimage(
delegated_puzzle: Program, delegated_solution: Program, sweep_preimage: bytes = b""
) -> Program:
return Program.to([sweep_preimage, [delegated_puzzle, delegated_solution]])
def generate_holding_address(
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
) -> Tuple[Program, int]:
hidden_puzzle = generate_hidden_puzzle(
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
)
hidden_puzzle_hash = hidden_puzzle.get_tree_hash()
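    # Standard chia "hidden puzzle" pattern: the address commits to both the aggregate key and the hidden puzzle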
puzzle = puzzle_for_public_key_and_hidden_puzzle_hash(
total_pubkey, hidden_puzzle_hash
)
synthetic_offset = calculate_synthetic_offset(total_pubkey, hidden_puzzle_hash)
return puzzle, synthetic_offset
def generate_spendbundle(
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
sweep_preimage=0,
) -> SpendBundle:
puzzle_reveal, synthetic_offset = generate_holding_address(
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
)
puzzle_hash = puzzle_reveal.get_tree_hash()
coin = Coin(parent_coin_id, puzzle_hash, xch_amount_mojos)
solution = clawback_or_sweep_solution(
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
sweep_preimage,
)
coin_spend = CoinSpend(coin, puzzle_reveal, solution)
spend_bundle = SpendBundle([coin_spend], blspy.G2Element())
return spend_bundle
def sign_spend_bundle(coin_spend, conditions, secret, additional_data):
message = (
puzzle_for_conditions(conditions).get_tree_hash()
+ coin_spend.coin.name()
+ additional_data
)
total_sig = blspy.AugSchemeMPL.sign(private_key_for_secret(secret), message)
return SpendBundle([coin_spend], total_sig)
def have_xch_want_btc(logfile, secret_key, btc_amount, xch_amount_mojos):
s = secret_key
clawback_public_key, my_pubkey_string = signed_pubkey_for_secret(s)
print("Send the long line below to your counter-party. It contains your")
print("signed public key.")
print(my_pubkey_string)
print()
print("enter your counter-party's public key as pasted by them")
sweep_public_key = ui_get_pubkey_with_sig(logfile, clawback_public_key)
total_pubkey = sweep_public_key + clawback_public_key
print(
f"In your lightning wallet, create a lightning payment request for {btc_amount} BTC"
)
print("The timeout must be at least ten minutes.")
print("Copy and paste the lightning payment request here.")
print()
lpr = ui_get_lightning_payment_request(logfile)
d = parse_lpr(lpr)
sweep_receipt_hash = d[1]
# TODO: fix the next line
clawback_delay_seconds = 86400
puzzle_reveal, synthetic_offset = generate_holding_address(
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
)
puzzle_hash = puzzle_reveal.get_tree_hash()
address = encode_puzzle_hash(puzzle_hash, "xch")
xch_amount = Decimal(xch_amount_mojos) / Decimal(int(1e12))
print(f"go into your XCH wallet and send {xch_amount} XCH to")
print(f"{address}")
print()
print("You need to enter a refund address where your XCH will be returns if")
print("the swap fails. It can be an address from a wallet or an exchange.")
print()
clawback_puzzle_hash = ui_get_puzzle_hash(logfile, "enter XCH refund address > ")
coins = wait_for_payments_to_address(address, xch_amount_mojos)
parent_coin_id = fromhex(coins[0]["coin_parent"])
conditions = [[51, clawback_puzzle_hash, xch_amount_mojos]]
spend_bundle = generate_spendbundle(
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
)
spend_bundle = sign_spend_bundle(
spend_bundle.coin_spends[0], conditions, s, ADDITIONAL_DATA
)
hours = clawback_delay_seconds // 3600
spend_bundle_hex = bytes(spend_bundle).hex()
print()
print("Wait for the lightning invoice payment.")
print()
print("When you get it, you can immediately share the private key below with")
print("your counter-party to allow them to cleanly claim the XCH funds.")
print()
print(f"private key: 0x{s:064x}")
print()
print("If it never happens, use the spendbundle below to claw it back")
print(f"after {hours} hours.")
print()
print(f"clawback spend bundle: {spend_bundle_hex}")
print()
print(f"waiting {clawback_delay_seconds} s then pushing the clawback spend bundle")
print(f"Leave this window open or control-c to exit.")
print()
print(
f"Warning: if you answer before {clawback_delay_seconds} seconds have elapsed,"
)
print(
"the spend bundle will be rejected. No harm done though, you'll just have to try later."
)
if ui_should_send_spend_bundle():
try_to_push_tx(spend_bundle, clawback_puzzle_hash)
def try_to_push_tx(sb, dest_puzzle_hash):
print()
print(f"Check your wallet or an explorer to confirm.")
address = encode_puzzle_hash(dest_puzzle_hash, "xch")
print(f"https://www.spacescan.io/xch/address/{address}")
print()
r = asyncio.run(push_tx(sb))
if r == 0:
print("It seems to have worked.")
else:
print("*** The spend bundle may not have been accepted.")
def have_btc_want_xch(logfile, secret_key, btc_amount, xch_amount_mojos):
s = secret_key + 1
sweep_public_key, my_pubkey_string = signed_pubkey_for_secret(s)
print("Send the long line below to your counter-party. It contains your")
print("signed public key.")
print(my_pubkey_string)
print()
print("enter your counter-party's public key as pasted by them")
clawback_public_key = ui_get_pubkey_with_sig(logfile, sweep_public_key)
total_pubkey = sweep_public_key + clawback_public_key
print("Paste the lightning payment request from your counter-party here.")
lpr = ui_get_lightning_payment_request(logfile)
d = parse_lpr(lpr)
sweep_receipt_hash = d[1]
total_pubkey = sweep_public_key + clawback_public_key
clawback_delay_seconds = 86400
puzzle_reveal, synthetic_offset = generate_holding_address(
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
)
puzzle_hash = puzzle_reveal.get_tree_hash()
address = encode_puzzle_hash(puzzle_hash, "xch")
xch_amount = Decimal(xch_amount_mojos) / Decimal(int(1e12))
print("Enter an address where your XCH will be delivered.")
print("It can be an address from a wallet or an exchange.")
print()
sweep_puzzle_hash = ui_get_puzzle_hash(logfile, "XCH address > ")
print(f"Your counter-party should be sending {xch_amount:.13f} XCH to the address")
print(f"{address}")
print()
print("Go to an explorer and watch for payments")
print()
print(f"https://www.spacescan.io/xch/address/{address}")
print()
coins = wait_for_payments_to_address(address, xch_amount_mojos)
parent_coin_id = fromhex(coins[0]["coin_parent"])
print()
print("Once your XCH has enough confirmations, pay the lightning invoice.")
print()
print("If you DO NOT want to complete this transaction, DO NOT pay the")
print("lightning invoice. Instead, send the following private key to your")
print("counterparty to allow them to cleanly reclaim the XCH funds.")
print()
print(f"private key: 0x{s:064x}")
print()
print("Once you've paid the lightning invoice, ask your counterparty to")
print("share their private key. Meanwhile, look up your lightning invoice")
print("receipt pre-image in case your counterparty doesn't respond.")
print()
# TODO: fix these next two lines
conditions = [[51, sweep_puzzle_hash, xch_amount_mojos]]
coin = Coin(parent_coin_id, puzzle_hash, xch_amount_mojos)
coin_spend = CoinSpend(coin, puzzle_reveal, solution_for_conditions(conditions))
message = (
puzzle_for_conditions(conditions).get_tree_hash()
+ coin.name()
+ ADDITIONAL_DATA
)
while True:
print(
"Enter your counterparty private key OR the lightning invoice receipt pre-image or `quit`"
)
(
sweep_preimage,
remote_secret,
should_quit,
) = ui_get_sweep_preimage_or_private_key(
logfile, sweep_receipt_hash, clawback_public_key
)
if should_quit:
break
if sweep_preimage:
sweep_spend_bundle = handle_sweep_preimage(
s,
puzzle_hash,
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
sweep_preimage,
)
print("You should wait for your counterparty to send their private key")
print("and only use this spend bundle if they seem non-responsive.")
print(
f"Warning: after {clawback_delay_seconds} s they can claw back the XCH"
)
print()
if ui_should_send_spend_bundle():
try_to_push_tx(sweep_spend_bundle, sweep_puzzle_hash)
if remote_secret:
clean_spend_bundle = handle_remote_secret(
coin_spend,
message,
remote_secret,
s,
synthetic_offset,
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
)
print()
if ui_should_send_spend_bundle():
try_to_push_tx(clean_spend_bundle, sweep_puzzle_hash)
def handle_sweep_preimage(
my_secret,
puzzle_hash,
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
sweep_preimage,
):
spend_bundle = generate_spendbundle(
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
sweep_preimage,
)
coin = Coin(parent_coin_id, puzzle_hash, xch_amount_mojos)
message = (
puzzle_for_conditions(conditions).get_tree_hash()
+ coin.name()
+ ADDITIONAL_DATA
)
private_key = private_key_for_secret(my_secret)
total_sig = blspy.AugSchemeMPL.sign(private_key, message)
spend_bundle = SpendBundle(spend_bundle.coin_spends, total_sig)
spend_bundle_hex = bytes(spend_bundle).hex()
print(f"sweep spend bundle: {spend_bundle_hex}")
print()
print("Your counterparty should share their (disposable) private key")
print("with you now. If your counterparty disappears before sending it,")
print("you can use the spend bundle above as a last resort.")
print()
return spend_bundle
def handle_remote_secret(
coin_spend,
message,
remote_secret,
my_secret,
synthetic_offset,
parent_coin_id,
xch_amount_mojos,
total_pubkey,
clawback_delay_seconds,
clawback_public_key,
sweep_receipt_hash,
sweep_public_key,
conditions,
):
total_secret = remote_secret + my_secret + synthetic_offset
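    # The two half-secrets plus the synthetic offset sum to the private key of the synthetic pubkey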
total_private_key = private_key_for_secret(total_secret)
# build local signatures
total_sig = blspy.AugSchemeMPL.sign(total_private_key, message)
clean_spend_bundle = SpendBundle([coin_spend], total_sig)
spend_bundle_hex = bytes(clean_spend_bundle).hex()
print(f"clean spend bundle: {spend_bundle_hex}")
print()
print("Use the spend bundle above because it's smaller and is")
print("indistinguishable from standard spend, so will give")
print("the participants (you) more privacy.")
return clean_spend_bundle
def main():
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
prices_future = executor.submit(lookup_xch_prices)
logfile, secret_key = ui_get_logfile()
prices = prices_future.result()
btc_amount, xch_amount = ui_get_amounts(logfile, prices)
xch_amount_mojos = int(xch_amount * Decimal(1e12))
which_way = ui_choose(logfile)
f = have_xch_want_btc if which_way == 1 else have_btc_want_xch
f(logfile, secret_key, btc_amount, xch_amount_mojos)
if __name__ == "__main__":
main()
|
[
"chia.util.bech32m.decode_puzzle_hash",
"chia.types.spend_bundle.SpendBundle",
"blspy.G2Element.from_bytes",
"os.path.exists",
"hashlib.sha256",
"chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions",
"datetime.datetime.now",
"chia.types.coin_spend.CoinSpend",
"chia.wallet.puzzles.p2_conditions.puzzle_for_conditions",
"time.sleep",
"chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_public_key_and_hidden_puzzle_hash",
"os.listdir",
"blspy.AugSchemeMPL.sign",
"re.compile",
"secrets.randbits",
"json.load",
"chia.types.blockchain_format.coin.Coin",
"chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_offset",
"chia.util.bech32m.encode_puzzle_hash",
"decimal.Decimal",
"blspy.AugSchemeMPL.verify",
"chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.solution_for_hidden_puzzle",
"blspy.G1Element.from_bytes",
"blspy.G2Element",
"chia.wallet.puzzles.load_clvm.load_clvm",
"chia.types.blockchain_format.program.Program.to"
] |
[((1069, 1117), 'chia.wallet.puzzles.load_clvm.load_clvm', 'load_clvm', (['"""p2_delayed_or_preimage.cl"""', '__name__'], {}), "('p2_delayed_or_preimage.cl', __name__)\n", (1078, 1117), False, 'from chia.wallet.puzzles.load_clvm import load_clvm\n'), ((2941, 3014), 're.compile', 're.compile', (['"""xchswap-log-(\\\\d{4})-(\\\\d{2})-(\\\\d{2})-(\\\\d{6})(\\\\-.+)?.txt"""'], {}), "('xchswap-log-(\\\\d{4})-(\\\\d{2})-(\\\\d{2})-(\\\\d{6})(\\\\-.+)?.txt')\n", (2951, 3014), False, 'import re\n'), ((8063, 8098), 'chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions', 'solution_for_conditions', (['conditions'], {}), '(conditions)\n', (8086, 8098), False, 'from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import calculate_synthetic_offset, puzzle_for_public_key_and_hidden_puzzle_hash, solution_for_conditions, solution_for_hidden_puzzle\n'), ((8138, 8186), 'chia.types.blockchain_format.program.Program.to', 'Program.to', (['[sweep_preimage, delegated_solution]'], {}), '([sweep_preimage, delegated_solution])\n', (8148, 8186), False, 'from chia.types.blockchain_format.program import Program\n'), ((8203, 8295), 'chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.solution_for_hidden_puzzle', 'solution_for_hidden_puzzle', (['total_pubkey', 'hidden_puzzle', 'p2_delayed_or_preimage_solution'], {}), '(total_pubkey, hidden_puzzle,\n p2_delayed_or_preimage_solution)\n', (8229, 8295), False, 'from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import calculate_synthetic_offset, puzzle_for_public_key_and_hidden_puzzle_hash, solution_for_conditions, solution_for_hidden_puzzle\n'), ((9513, 9580), 'blspy.AugSchemeMPL.sign', 'blspy.AugSchemeMPL.sign', (['private_key', 'OWNERSHIP_MESSAGE', 'public_key'], {}), '(private_key, OWNERSHIP_MESSAGE, public_key)\n', (9536, 9580), False, 'import blspy\n'), ((10128, 10196), 'chia.types.blockchain_format.program.Program.to', 'Program.to', (['[sweep_preimage, [delegated_puzzle, delegated_solution]]'], {}), '([sweep_preimage, [delegated_puzzle, delegated_solution]])\n', (10138, 10196), False, 'from chia.types.blockchain_format.program import Program\n'), ((10606, 10684), 'chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_public_key_and_hidden_puzzle_hash', 'puzzle_for_public_key_and_hidden_puzzle_hash', (['total_pubkey', 'hidden_puzzle_hash'], {}), '(total_pubkey, hidden_puzzle_hash)\n', (10650, 10684), False, 'from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import calculate_synthetic_offset, puzzle_for_public_key_and_hidden_puzzle_hash, solution_for_conditions, solution_for_hidden_puzzle\n'), ((10722, 10782), 'chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_offset', 'calculate_synthetic_offset', (['total_pubkey', 'hidden_puzzle_hash'], {}), '(total_pubkey, hidden_puzzle_hash)\n', (10748, 10782), False, 'from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import calculate_synthetic_offset, puzzle_for_public_key_and_hidden_puzzle_hash, solution_for_conditions, solution_for_hidden_puzzle\n'), ((11330, 11381), 'chia.types.blockchain_format.coin.Coin', 'Coin', (['parent_coin_id', 'puzzle_hash', 'xch_amount_mojos'], {}), '(parent_coin_id, puzzle_hash, xch_amount_mojos)\n', (11334, 11381), False, 'from chia.types.blockchain_format.coin import Coin\n'), ((11629, 11669), 'chia.types.coin_spend.CoinSpend', 'CoinSpend', (['coin', 'puzzle_reveal', 'solution'], {}), '(coin, puzzle_reveal, solution)\n', (11638, 11669), False, 'from chia.types.coin_spend import CoinSpend\n'), ((12064, 12100), 'chia.types.spend_bundle.SpendBundle', 'SpendBundle', (['[coin_spend]', 'total_sig'], {}), '([coin_spend], total_sig)\n', (12075, 12100), False, 'from chia.types.spend_bundle import SpendBundle\n'), ((13306, 13344), 'chia.util.bech32m.encode_puzzle_hash', 'encode_puzzle_hash', (['puzzle_hash', '"""xch"""'], {}), "(puzzle_hash, 'xch')\n", (13324, 13344), False, 'from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash\n'), ((15518, 15561), 'chia.util.bech32m.encode_puzzle_hash', 'encode_puzzle_hash', (['dest_puzzle_hash', '"""xch"""'], {}), "(dest_puzzle_hash, 'xch')\n", (15536, 15561), False, 'from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash\n'), ((16874, 16912), 'chia.util.bech32m.encode_puzzle_hash', 'encode_puzzle_hash', (['puzzle_hash', '"""xch"""'], {}), "(puzzle_hash, 'xch')\n", (16892, 16912), False, 'from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash\n'), ((18329, 18380), 'chia.types.blockchain_format.coin.Coin', 'Coin', (['parent_coin_id', 'puzzle_hash', 'xch_amount_mojos'], {}), '(parent_coin_id, puzzle_hash, xch_amount_mojos)\n', (18333, 18380), False, 'from chia.types.blockchain_format.coin import Coin\n'), ((21054, 21105), 'chia.types.blockchain_format.coin.Coin', 'Coin', (['parent_coin_id', 'puzzle_hash', 'xch_amount_mojos'], {}), '(parent_coin_id, puzzle_hash, xch_amount_mojos)\n', (21058, 21105), False, 'from chia.types.blockchain_format.coin import Coin\n'), ((21303, 21348), 'blspy.AugSchemeMPL.sign', 'blspy.AugSchemeMPL.sign', (['private_key', 'message'], {}), '(private_key, message)\n', (21326, 21348), False, 'import blspy\n'), ((21368, 21416), 'chia.types.spend_bundle.SpendBundle', 'SpendBundle', (['spend_bundle.coin_spends', 'total_sig'], {}), '(spend_bundle.coin_spends, total_sig)\n', (21379, 21416), False, 'from chia.types.spend_bundle import SpendBundle\n'), ((22251, 22302), 'blspy.AugSchemeMPL.sign', 'blspy.AugSchemeMPL.sign', (['total_private_key', 'message'], {}), '(total_private_key, message)\n', (22274, 22302), False, 'import blspy\n'), ((22328, 22364), 'chia.types.spend_bundle.SpendBundle', 'SpendBundle', (['[coin_spend]', 'total_sig'], {}), '([coin_spend], total_sig)\n', (22339, 22364), False, 'from chia.types.spend_bundle import SpendBundle\n'), ((2195, 2208), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2205, 2208), False, 'import time\n'), ((3157, 3177), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3171, 3177), False, 'import os\n'), ((4935, 4962), 'chia.util.bech32m.decode_puzzle_hash', 'decode_puzzle_hash', (['address'], {}), '(address)\n', (4953, 4962), False, 'from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash\n'), ((11715, 11732), 'blspy.G2Element', 'blspy.G2Element', ([], {}), '()\n', (11730, 11732), False, 'import blspy\n'), ((13362, 13387), 'decimal.Decimal', 'Decimal', (['xch_amount_mojos'], {}), '(xch_amount_mojos)\n', (13369, 13387), False, 'from decimal import Decimal\n'), ((16930, 16955), 'decimal.Decimal', 'Decimal', (['xch_amount_mojos'], {}), '(xch_amount_mojos)\n', (16937, 16955), False, 'from decimal import Decimal\n'), ((18429, 18464), 'chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions', 'solution_for_conditions', (['conditions'], {}), '(conditions)\n', (18452, 18464), False, 'from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import calculate_synthetic_offset, puzzle_for_public_key_and_hidden_puzzle_hash, solution_for_conditions, solution_for_hidden_puzzle\n'), ((2797, 
2820), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2818, 2820), False, 'import datetime\n'), ((3044, 3059), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3054, 3059), False, 'import os\n'), ((3239, 3260), 'secrets.randbits', 'secrets.randbits', (['(256)'], {}), '(256)\n', (3255, 3260), False, 'import secrets\n'), ((3630, 3647), 'time.sleep', 'time.sleep', (['(0.125)'], {}), '(0.125)\n', (3640, 3647), False, 'import time\n'), ((5865, 5895), 'blspy.G1Element.from_bytes', 'blspy.G1Element.from_bytes', (['b1'], {}), '(b1)\n', (5891, 5895), False, 'import blspy\n'), ((6095, 6125), 'blspy.G2Element.from_bytes', 'blspy.G2Element.from_bytes', (['b2'], {}), '(b2)\n', (6121, 6125), False, 'import blspy\n'), ((6142, 6194), 'blspy.AugSchemeMPL.verify', 'blspy.AugSchemeMPL.verify', (['g1', 'OWNERSHIP_MESSAGE', 'g2'], {}), '(g1, OWNERSHIP_MESSAGE, g2)\n', (6167, 6194), False, 'import blspy\n'), ((23031, 23055), 'decimal.Decimal', 'Decimal', (['(1000000000000.0)'], {}), '(1000000000000.0)\n', (23038, 23055), False, 'from decimal import Decimal\n'), ((11856, 11889), 'chia.wallet.puzzles.p2_conditions.puzzle_for_conditions', 'puzzle_for_conditions', (['conditions'], {}), '(conditions)\n', (11877, 11889), False, 'from chia.wallet.puzzles.p2_conditions import puzzle_for_conditions\n'), ((18490, 18523), 'chia.wallet.puzzles.p2_conditions.puzzle_for_conditions', 'puzzle_for_conditions', (['conditions'], {}), '(conditions)\n', (18511, 18523), False, 'from chia.wallet.puzzles.p2_conditions import puzzle_for_conditions\n'), ((21130, 21163), 'chia.wallet.puzzles.p2_conditions.puzzle_for_conditions', 'puzzle_for_conditions', (['conditions'], {}), '(conditions)\n', (21151, 21163), False, 'from chia.wallet.puzzles.p2_conditions import puzzle_for_conditions\n'), ((1976, 2012), 'json.load', 'json.load', (['resp'], {'parse_float': 'Decimal'}), '(resp, parse_float=Decimal)\n', (1985, 2012), False, 'import json\n'), ((6636, 6653), 'hashlib.sha256', 'hashlib.sha256', (['b'], {}), '(b)\n', (6650, 6653), False, 'import hashlib\n'), ((7299, 7316), 'hashlib.sha256', 'hashlib.sha256', (['b'], {}), '(b)\n', (7313, 7316), False, 'import hashlib\n')]
|
# -*- coding: utf-8 -*-
"""
Chat Room Demo for Miniboa.
"""
import logging
from miniboa import TelnetServer
IDLE_TIMEOUT = 300
CLIENT_LIST = []
SERVER_RUN = True
def on_connect(client):
"""
Sample on_connect function.
Handles new connections.
"""
logging.info("Opened connection to {}".format(client.addrport()))
broadcast("{} joins the conversation.\n".format(client.addrport()))
CLIENT_LIST.append(client)
client.send("Welcome to the Chat Server, {}.\n".format(client.addrport()))
def on_disconnect(client):
"""
Sample on_disconnect function.
Handles lost connections.
"""
logging.info("Lost connection to {}".format(client.addrport()))
CLIENT_LIST.remove(client)
broadcast("{} leaves the conversation.\n".format(client.addrport()))
def kick_idle():
"""
Looks for idle clients and disconnects them by setting active to False.
"""
# Who hasn't been typing?
for client in CLIENT_LIST:
if client.idle() > IDLE_TIMEOUT:
logging.info("Kicking idle lobby client from {}".format(client.addrport()))
client.active = False
def process_clients():
"""
Check each client, if client.cmd_ready == True then there is a line of
input available via client.get_command().
"""
for client in CLIENT_LIST:
if client.active and client.cmd_ready:
# If the client sends input echo it to the chat room
chat(client)
def broadcast(msg):
"""
Send msg to every client.
"""
for client in CLIENT_LIST:
client.send(msg)
def chat(client):
"""
Echo whatever client types to everyone.
"""
global SERVER_RUN
msg = client.get_command()
logging.info("{} says '{}'".format(client.addrport(), msg))
for guest in CLIENT_LIST:
if guest != client:
guest.send("{} says '{}'\n".format(client.addrport(), msg))
else:
guest.send("You say '{}'\n".format(msg))
cmd = msg.lower()
# bye = disconnect
if cmd == 'bye':
client.active = False
# shutdown == stop the server
elif cmd == 'shutdown':
SERVER_RUN = False
if __name__ == '__main__':
# Simple chat server to demonstrate connection handling via the
# async and telnet modules.
logging.basicConfig(level=logging.DEBUG)
# Create a telnet server with a port, address,
# a function to call with new connections
# and one to call with lost connections.
telnet_server = TelnetServer(
port=7777,
address='',
on_connect=on_connect,
on_disconnect=on_disconnect,
timeout=.05
)
logging.info("Listening for connections on"
" port {}. CTRL-C to break.".format(telnet_server.port))
# Server Loop
while SERVER_RUN:
telnet_server.poll() # Send, Recv, and look for new connections
kick_idle() # Check for idle clients
process_clients() # Check for client input
logging.info("Server shutdown.")
|
[
"logging.info",
"miniboa.TelnetServer",
"logging.basicConfig"
] |
[((2306, 2346), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2325, 2346), False, 'import logging\n'), ((2511, 2617), 'miniboa.TelnetServer', 'TelnetServer', ([], {'port': '(7777)', 'address': '""""""', 'on_connect': 'on_connect', 'on_disconnect': 'on_disconnect', 'timeout': '(0.05)'}), "(port=7777, address='', on_connect=on_connect, on_disconnect=\n on_disconnect, timeout=0.05)\n", (2523, 2617), False, 'from miniboa import TelnetServer\n'), ((3032, 3064), 'logging.info', 'logging.info', (['"""Server shutdown."""'], {}), "('Server shutdown.')\n", (3044, 3064), False, 'import logging\n')]
|
# vim: et:ts=4:sw=4:fenc=utf-8
import json
import os
import time
import math
def get_machine_dependent_params(settings, isa):
if os.path.isfile(settings.machine_dependent_params_file) and not settings.newSU:
with open(settings.machine_dependent_params_file, "r") as params_file:
params = json.load(params_file)
settings.num_total_dynamic_insns = params["num_total_dynamic_insns"]
settings.num_insns_per_iteration = params["num_insns_per_iteration"]
else:
assert len(isa.instruction_list) > 4, 'At least 5 instructions are required per isa!'
settings.num_total_dynamic_insns = determine_num_total_dynamic_insns(settings, isa)
settings.num_insns_per_iteration = determine_num_insns_per_iteration(settings, isa)
params = {
"num_total_dynamic_insns": settings.num_total_dynamic_insns,
"num_insns_per_iteration": settings.num_insns_per_iteration,
}
if os.path.isfile(settings.machine_dependent_params_file):
# If we have a previously existing param file, create a backup
os.rename(settings.machine_dependent_params_file, settings.machine_dependent_params_file + ".bak")
with open(settings.machine_dependent_params_file, "w") as params_file:
json.dump(params, params_file)
print("Configured to run {} dynamic instructions in a loop with {} instructions per iteration.".format(
settings.num_total_dynamic_insns, settings.num_insns_per_iteration))
def determine_num_total_dynamic_insns(settings, isa):
from PITE.processor_benchmarking import run_experiment_impl
num_insns_per_iteration = 200
rangeX = 11
insns = sorted(isa.insnmap.keys())
testing_num_total_dynamic_insns = 10**9
print("Starting to determine the total number of dynamic instructions necessary to execute for {} seconds.".format(settings.loop_target_time))
min_time = math.inf
for i in insns[0:5]:
tmp_results = []
for j in range(rangeX):
tmp_results.append(run_experiment_impl(settings, isa, [i],
num_insns_per_iteration=num_insns_per_iteration,
num_total_dynamic_insns=testing_num_total_dynamic_insns))
time_taken = sum(float(x['time']) / 1000000 for x in tmp_results) / rangeX
min_time = min(min_time, time_taken)
num_total_dynamic_insns = int((settings.loop_target_time / min_time) * testing_num_total_dynamic_insns)
print("Determine the total number of dynamic instructions to be {}.".format(num_total_dynamic_insns))
return num_total_dynamic_insns
def determine_num_insns_per_iteration(settings, isa):
# This requires a reasonable num_total_dynamic_insns in the settings!
print("Starting to determine the number of instructions per iteration for this machine.")
startLLMeasuring = time.clock()
assert isa.insnmap is not None
if settings.preciseStart:
config = settings.setup_configs["precise"]
else:
config = settings.setup_configs["default"]
inital_looplength = config["start_loop_length"]
final_looplength = config["end_loop_length"]
num_total_dynamic_insns = settings.num_total_dynamic_insns
steps = config["step_width"]
fg_steps = config["fine_grained_step_width"]
num_samples = config["num_samples"]
minLength = __exec_experiments(settings, isa, inital_looplength, final_looplength, steps, num_total_dynamic_insns, num_samples)
# adjustment so that fine-grained searching can start at minLength - steps
minLength = max(minLength, inital_looplength + steps)
print('Determined number of instructions per iteration to be around {}'.format(minLength))
minLength = __exec_experiments(settings, isa, minLength - steps, minLength + steps, fg_steps, num_total_dynamic_insns, num_samples)
print('Number of instructions per iteration fixed at: {}'.format(minLength))
res = minLength
endLLMeasuring = time.clock()
timeLL = endLLMeasuring - startLLMeasuring
print("Done determining the number of iterations after {} seconds.".format(timeLL))
return res
def __exec_experiments(settings, isa, inital_ll, final_ll, steps_width, num_total_dynamic_insns, num_samples):
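    # sweep loop lengths from inital_ll to final_ll in steps of steps_width and return the length with the fewest cycles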
from PITE.processor_benchmarking import run_experiment_impl
assert isa.insnmap is not None
insns = sorted(isa.insnmap.keys())
results = []
for i in range(inital_ll, final_ll, steps_width):
intermed_res = []
for j in range(num_samples):
tmp_res = run_experiment_impl(settings, isa, insns[0:5], num_insns_per_iteration=i, num_total_dynamic_insns=num_total_dynamic_insns)
intermed_res.append(tmp_res)
i_res = min(intermed_res, key=lambda t: t['cycles'])
results.append((i, i_res['cycles']))
minLength = min(results, key=lambda t: t[1])
return minLength[0]
|
[
"json.dump",
"json.load",
"PITE.processor_benchmarking.run_experiment_impl",
"os.rename",
"time.clock",
"os.path.isfile"
] |
[((2873, 2885), 'time.clock', 'time.clock', ([], {}), '()\n', (2883, 2885), False, 'import time\n'), ((3977, 3989), 'time.clock', 'time.clock', ([], {}), '()\n', (3987, 3989), False, 'import time\n'), ((136, 190), 'os.path.isfile', 'os.path.isfile', (['settings.machine_dependent_params_file'], {}), '(settings.machine_dependent_params_file)\n', (150, 190), False, 'import os\n'), ((978, 1032), 'os.path.isfile', 'os.path.isfile', (['settings.machine_dependent_params_file'], {}), '(settings.machine_dependent_params_file)\n', (992, 1032), False, 'import os\n'), ((315, 337), 'json.load', 'json.load', (['params_file'], {}), '(params_file)\n', (324, 337), False, 'import json\n'), ((1121, 1224), 'os.rename', 'os.rename', (['settings.machine_dependent_params_file', "(settings.machine_dependent_params_file + '.bak')"], {}), "(settings.machine_dependent_params_file, settings.\n machine_dependent_params_file + '.bak')\n", (1130, 1224), False, 'import os\n'), ((1312, 1342), 'json.dump', 'json.dump', (['params', 'params_file'], {}), '(params, params_file)\n', (1321, 1342), False, 'import json\n'), ((4546, 4672), 'PITE.processor_benchmarking.run_experiment_impl', 'run_experiment_impl', (['settings', 'isa', 'insns[0:5]'], {'num_insns_per_iteration': 'i', 'num_total_dynamic_insns': 'num_total_dynamic_insns'}), '(settings, isa, insns[0:5], num_insns_per_iteration=i,\n num_total_dynamic_insns=num_total_dynamic_insns)\n', (4565, 4672), False, 'from PITE.processor_benchmarking import run_experiment_impl\n'), ((2068, 2223), 'PITE.processor_benchmarking.run_experiment_impl', 'run_experiment_impl', (['settings', 'isa', '[i]'], {'num_insns_per_iteration': 'num_insns_per_iteration', 'num_total_dynamic_insns': 'testing_num_total_dynamic_insns'}), '(settings, isa, [i], num_insns_per_iteration=\n num_insns_per_iteration, num_total_dynamic_insns=\n testing_num_total_dynamic_insns)\n', (2087, 2223), False, 'from PITE.processor_benchmarking import run_experiment_impl\n')]
|
import pronto
import zipfile
import gzip
import json
import networkx as nx
import time
import re
import MedGenParser
import HpoParser
import HGNCParser
# since the Phenotype API is in a different folder we need to add it to the python path
import sys
sys.path.insert(0, '../PhenotypeAPI/')
import PhenotypeCorrelationParser
def put2dict_of_sets(dict, key, item):
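    # add item to the set stored under key, creating the set if it does not exist yet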
if key in dict:
dict[key].add(item)
else:
dict[key] = {item}
def get_mesh_ids(*args):
mesh_set = set({})
for xref_map in args:
for xref_set in xref_map.values():
for xref in xref_set:
if xref.startswith('MESH'):
mesh_set.add(xref)
return mesh_set
def clean_tag(source_tags_dict, id):
formatted_id = id
for source_tag in source_tags_dict.keys():
if source_tag in id:
formatted_id = id.replace(source_tag, source_tags_dict[source_tag])
break
return formatted_id
def get_xrefs_from_ontology(ontology, source_tags_dict):
print("Getting xrefs for ID for " + str(ontology))
term_xref_dict = dict({})
for term in ontology:
if 'xref' in term.other:
cleaned_id = clean_tag(source_tags_dict, term.id)
xrefs = set({})
for xref in set(term.other['xref']):
xrefs.add(clean_tag(source_tags_dict, xref))
term_xref_dict[cleaned_id] = xrefs
return term_xref_dict
def get_mesh_xref_map(*args):
mesh_dict = dict({})
for xref_map in args:
for xref_key, xref_set in xref_map.items():
for xref in xref_set:
if xref.startswith('MESH'):
if xref in mesh_dict:
mesh_dict[xref].add(xref_key)
mesh_dict[xref].update(xref_set)
else:
mesh_dict[xref] = {xref_key}
mesh_dict[xref].update(xref_set)
return mesh_dict
if __name__ == '__main__':
start = time.time()
# load ontologies used for processing relationships between terms
print('Loading HPO...')
hpo_file_name = './HPO_data_files/hp.obo'
hp_ontology = pronto.Ontology(hpo_file_name)
print('Loading DO...')
do_file_name = './HPO_data_files/doid.obo'
disease_ontology = pronto.Ontology(do_file_name)
print("Loading ORDO...")
ordo_owl_file = './HPO_data_files/ordo_orphanet.owl'
ordo_zip_file_name = './HPO_data_files/ordo_orphanet.owl.zip'
ordo_zip = zipfile.ZipFile(ordo_zip_file_name)
ordo_zip.extract('ordo_orphanet.owl', './HPO_data_files/')
ordo_zip.close()
orphanet_ontology = pronto.Ontology(ordo_owl_file)
# list out the tags that can be used for cross referencing
source_tags = {
"MSH": "MESH",
"MeSH": "MESH",
"ORPHA:": "Orphanet_",
"OMIM": "OMIM",
"MESH": "MESH",
"ORPHANET:": "Orphanet_",
"Orphanet:": "Orphanet_",
"ORDO:": "Orphanet_",
"DOID": "DOID",
"HP:": "HP:"
}
accepted_clean_source_tags = {
"MESH",
"Orphanet_",
"OMIM",
"DOID",
"HP:"
}
# create maps from a given ID in an ontology to it's cross referenced ID in other classification ontologies
print('Creating cross reference maps...')
orpha_xref_map = get_xrefs_from_ontology(orphanet_ontology, source_tags)
hpo_xref_map = get_xrefs_from_ontology(hp_ontology, source_tags)
do_xref_map = get_xrefs_from_ontology(disease_ontology, source_tags)
# create mapping of MESH IDs to other IDs
mesh2other_map = get_mesh_xref_map(orpha_xref_map, hpo_xref_map, do_xref_map)
# load HPO annotations of disease to phenotype mappings
print('Loading HPO disease to phenotype mappings...')
hpo_disease2hpo_map = HpoParser.get_hpo_disease2hpoId_map(source_tags)
# load MedGen OMIM disease to phenotype mappings
print('Loading MedGen data...')
medgen_disease2hpo_map = MedGenParser.get_medgen_disease2hpo()
# create a set of MESH terms that we can use for mapping
print('Getting set of accepted mesh terms...')
accepted_mesh_terms = get_mesh_ids(orpha_xref_map, hpo_xref_map, do_xref_map)
print('Number of distinct accepted mesh terms: ' + str(len(accepted_mesh_terms)))
# create graphs per mesh term to map to phenotype term
print('Building graphs...')
mesh2disease_phenotype_map = dict({})
for mesh_id in accepted_mesh_terms:
# create one graph per MESH ID term
mesh2disease_phenotype_map[mesh_id] = nx.Graph()
        # link the MESH ID to its other accepted cross-referenced IDs
for id in mesh2other_map[mesh_id]:
for accepted_clean_source_tag in accepted_clean_source_tags:
if id.startswith(accepted_clean_source_tag):
mesh2disease_phenotype_map[mesh_id].add_edge(mesh_id, id)
# link other IDs to each other
id_set = set({})
for id in list(mesh2disease_phenotype_map[mesh_id].nodes):
if id in do_xref_map:
id_set = do_xref_map[id]
elif id in hpo_xref_map:
id_set = hpo_xref_map[id]
elif id in orpha_xref_map:
id_set = orpha_xref_map[id]
if len(id_set) > 0:
for link_id in id_set:
for accepted_clean_source_tag in accepted_clean_source_tags:
if link_id.startswith(accepted_clean_source_tag):
mesh2disease_phenotype_map[mesh_id].add_edge(link_id, id)
# associate phenotypes with diseases in the graph
id_set = set({})
for id in list(mesh2disease_phenotype_map[mesh_id].nodes):
if not id.startswith('HP'):
id_set.add(id)
for disease_id in id_set:
if disease_id in medgen_disease2hpo_map:
for phenotype in medgen_disease2hpo_map[disease_id]:
mesh2disease_phenotype_map[mesh_id].add_edge(disease_id, phenotype)
if disease_id in hpo_disease2hpo_map:
for phenotype in hpo_disease2hpo_map[disease_id]:
mesh2disease_phenotype_map[mesh_id].add_edge(disease_id, phenotype)
if mesh_id == 'MESH:D009135' or mesh_id == 'MESH:D018908' or mesh_id == 'MESH:C567499':
print('Graph for ' + mesh_id)
print('graph ' + mesh_id.replace(':', '_') + '{')
for edge in mesh2disease_phenotype_map[mesh_id].edges:
print(' ' + edge[0].replace(':', '_') + ' -- ' + edge[1].replace(':', '_') + ';')
print('}')
print('')
# load gene information from HGNC
accepted_entrez_ids = HGNCParser.get_hgnc_genes_ids()
print("# accepted entrez gene IDs: " + str(len(accepted_entrez_ids)))
# create map of all accepted genes to all accepted terms
# load pubtator data
print('Loading and processing Pubtator data...')
gene_filename = './HPO_data_files/gene2pubtator.gz'
disease_filename = './HPO_data_files/disease2pubtator.gz'
gene2pubmed = {}
pubmed2disease = {}
print('Loading gene-pubmed...')
with gzip.open(gene_filename, 'rt') as f_in:
for line in f_in:
cols = line.rstrip().split('\t')
pmid = cols[0]
pubmed2disease[pmid] = set([])
genes = cols[1]
for gene in re.split(',|;', genes):
if (gene in accepted_entrez_ids):
put2dict_of_sets(gene2pubmed, gene, pmid)
print('Loading pubmed-disease...')
with gzip.open(disease_filename, 'rt') as f_in:
for line in f_in:
cols = line.rstrip().split('\t')
pmid = cols[0]
if (pmid in pubmed2disease):
disease = cols[1]
if (disease in accepted_mesh_terms):
put2dict_of_sets(pubmed2disease, pmid, disease)
print('Loaded', len(gene2pubmed), 'gene-pubmed keys.')
print('Loaded', len(pubmed2disease), 'pubmed-disease keys.')
print('Writing results to file...')
outfile = open("./HPO_data_files/gene2phenotype.json", 'w+')
for gene in sorted(gene2pubmed.keys()):
hpo2pubmed = {}
for pubmed in gene2pubmed[gene]:
for disease in pubmed2disease.get(pubmed, []):
for phen_id in mesh2disease_phenotype_map[disease].nodes:
if str(phen_id).startswith('HP'):
put2dict_of_sets(hpo2pubmed, phen_id, pubmed)
for hpo in sorted(hpo2pubmed.keys()):
outfile.write(json.dumps({'geneId': gene, 'hpoId': hpo, 'pmids': sorted(hpo2pubmed[hpo])}) + '\n')
outfile.close()
print('Building block index for JSON file...')
block_index = PhenotypeCorrelationParser.build_block_index("./HPO_data_files/gene2phenotype.json")
print('Writing block index to file...')
outfile = open("./HPO_data_files/gene2phenotypeIndex.json", 'w+')
json.dump(block_index, outfile)
outfile.close()
print('All PubTator information consumed in ' + str((time.time() - start)) + ' seconds')
|
[
"HGNCParser.get_hgnc_genes_ids",
"json.dump",
"pronto.Ontology",
"zipfile.ZipFile",
"gzip.open",
"re.split",
"PhenotypeCorrelationParser.build_block_index",
"sys.path.insert",
"time.time",
"HpoParser.get_hpo_disease2hpoId_map",
"networkx.Graph",
"MedGenParser.get_medgen_disease2hpo"
] |
[((258, 296), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../PhenotypeAPI/"""'], {}), "(0, '../PhenotypeAPI/')\n", (273, 296), False, 'import sys\n'), ((2018, 2029), 'time.time', 'time.time', ([], {}), '()\n', (2027, 2029), False, 'import time\n'), ((2192, 2222), 'pronto.Ontology', 'pronto.Ontology', (['hpo_file_name'], {}), '(hpo_file_name)\n', (2207, 2222), False, 'import pronto\n'), ((2321, 2350), 'pronto.Ontology', 'pronto.Ontology', (['do_file_name'], {}), '(do_file_name)\n', (2336, 2350), False, 'import pronto\n'), ((2519, 2554), 'zipfile.ZipFile', 'zipfile.ZipFile', (['ordo_zip_file_name'], {}), '(ordo_zip_file_name)\n', (2534, 2554), False, 'import zipfile\n'), ((2664, 2694), 'pronto.Ontology', 'pronto.Ontology', (['ordo_owl_file'], {}), '(ordo_owl_file)\n', (2679, 2694), False, 'import pronto\n'), ((3831, 3879), 'HpoParser.get_hpo_disease2hpoId_map', 'HpoParser.get_hpo_disease2hpoId_map', (['source_tags'], {}), '(source_tags)\n', (3866, 3879), False, 'import HpoParser\n'), ((3999, 4036), 'MedGenParser.get_medgen_disease2hpo', 'MedGenParser.get_medgen_disease2hpo', ([], {}), '()\n', (4034, 4036), False, 'import MedGenParser\n'), ((6722, 6753), 'HGNCParser.get_hgnc_genes_ids', 'HGNCParser.get_hgnc_genes_ids', ([], {}), '()\n', (6751, 6753), False, 'import HGNCParser\n'), ((8783, 8872), 'PhenotypeCorrelationParser.build_block_index', 'PhenotypeCorrelationParser.build_block_index', (['"""./HPO_data_files/gene2phenotype.json"""'], {}), "(\n './HPO_data_files/gene2phenotype.json')\n", (8827, 8872), False, 'import PhenotypeCorrelationParser\n'), ((8987, 9018), 'json.dump', 'json.dump', (['block_index', 'outfile'], {}), '(block_index, outfile)\n', (8996, 9018), False, 'import json\n'), ((4582, 4592), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4590, 4592), True, 'import networkx as nx\n'), ((7178, 7208), 'gzip.open', 'gzip.open', (['gene_filename', '"""rt"""'], {}), "(gene_filename, 'rt')\n", (7187, 7208), False, 'import gzip\n'), ((7598, 7631), 'gzip.open', 'gzip.open', (['disease_filename', '"""rt"""'], {}), "(disease_filename, 'rt')\n", (7607, 7631), False, 'import gzip\n'), ((7413, 7435), 're.split', 're.split', (['""",|;"""', 'genes'], {}), "(',|;', genes)\n", (7421, 7435), False, 'import re\n'), ((9097, 9108), 'time.time', 'time.time', ([], {}), '()\n', (9106, 9108), False, 'import time\n')]
|
import os
import torch
import torch.nn as nn
import logging
import time
from torch.nn.parallel import DistributedDataParallel as DDP
from lib.models.builder import build_model
from lib.models.loss import CrossEntropyLabelSmooth
from lib.models.utils.dbb.dbb_block import DiverseBranchBlock
from lib.dataset.builder import build_dataloader
from lib.utils.args import parse_args
from lib.utils.dist_utils import init_dist, init_logger
from lib.utils.misc import accuracy, AverageMeter, CheckpointManager
from lib.utils.model_ema import ModelEMA
from lib.utils.measure import get_params, get_flops
torch.backends.cudnn.benchmark = True
'''init logger'''
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
datefmt='%H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def main():
args, args_text = parse_args()
assert args.resume != ''
args.exp_dir = f'{os.path.dirname(args.resume)}/convert'
'''distributed'''
init_dist(args)
init_logger(args)
'''build dataloader'''
train_dataset, val_dataset, train_loader, val_loader = \
build_dataloader(args)
'''build model'''
if args.smoothing == 0.:
loss_fn = nn.CrossEntropyLoss().cuda()
else:
loss_fn = CrossEntropyLabelSmooth(num_classes=args.num_classes,
epsilon=args.smoothing).cuda()
model = build_model(args)
logger.info(
f'Model {args.model} created, params: {get_params(model)}, '
f'FLOPs: {get_flops(model, input_shape=args.input_shape)}')
# Diverse Branch Blocks
if args.dbb:
# convert 3x3 convs to dbb blocks
from lib.models.utils.dbb_converter import convert_to_dbb
convert_to_dbb(model)
logger.info(model)
logger.info(
f'Converted to DBB blocks, model params: {get_params(model)}, '
f'FLOPs: {get_flops(model, input_shape=args.input_shape)}')
model.cuda()
model = DDP(model,
device_ids=[args.local_rank],
find_unused_parameters=False)
if args.model_ema:
model_ema = ModelEMA(model, decay=args.model_ema_decay)
else:
model_ema = None
'''dyrep'''
if args.dyrep:
from lib.models.utils.dyrep import DyRep
dyrep = DyRep(
model.module,
None)
logger.info('Init DyRep done.')
else:
dyrep = None
'''resume'''
ckpt_manager = CheckpointManager(model,
ema_model=model_ema,
save_dir=args.exp_dir,
rank=args.rank,
additions={
'dyrep': dyrep
})
if args.resume:
epoch = ckpt_manager.load(args.resume)
if args.dyrep:
model = DDP(model.module,
device_ids=[args.local_rank],
find_unused_parameters=True)
logger.info(
f'Resume ckpt {args.resume} done, '
f'epoch {epoch}'
)
else:
epoch = 0
# validate
test_metrics = validate(args, epoch, model, val_loader, loss_fn)
# convert dyrep / dbb model to inference model
for m in model.module.modules():
if isinstance(m, DiverseBranchBlock):
m.switch_to_deploy()
logger.info(str(model))
logger.info(
f'Converted DBB / DyRep model to inference model, params: {get_params(model)}, '
f'FLOPs: {get_flops(model, input_shape=args.input_shape)}')
test_metrics = validate(args, epoch, model, val_loader, loss_fn)
'''save converted checkpoint'''
if args.rank == 0:
save_path = os.path.join(args.exp_dir, 'model.ckpt')
torch.save(model.module.state_dict(), save_path)
logger.info(f'Saved converted model checkpoint into {save_path} .')
def validate(args, epoch, model, loader, loss_fn, log_suffix=''):
loss_m = AverageMeter(dist=True)
top1_m = AverageMeter(dist=True)
top5_m = AverageMeter(dist=True)
batch_time_m = AverageMeter(dist=True)
start_time = time.time()
model.eval()
for batch_idx, (input, target) in enumerate(loader):
with torch.no_grad():
output = model(input)
loss = loss_fn(output, target)
top1, top5 = accuracy(output, target, topk=(1, 5))
loss_m.update(loss.item(), n=input.size(0))
top1_m.update(top1 * 100, n=input.size(0))
top5_m.update(top5 * 100, n=input.size(0))
batch_time = time.time() - start_time
batch_time_m.update(batch_time)
if batch_idx % args.log_interval == 0 or batch_idx == len(loader) - 1:
logger.info('Test{}: {} [{:>4d}/{}] '
'Loss: {loss.val:.3f} ({loss.avg:.3f}) '
'Top-1: {top1.val:.3f}% ({top1.avg:.3f}%) '
'Top-5: {top5.val:.3f}% ({top5.avg:.3f}%) '
'Time: {batch_time.val:.2f}s'.format(
log_suffix,
epoch,
batch_idx,
len(loader),
loss=loss_m,
top1=top1_m,
top5=top5_m,
batch_time=batch_time_m))
start_time = time.time()
return {'test_loss': loss_m.avg, 'top1': top1_m.avg, 'top5': top5_m.avg}
if __name__ == '__main__':
main()
|
[
"lib.dataset.builder.build_dataloader",
"lib.utils.measure.get_params",
"torch.no_grad",
"lib.utils.args.parse_args",
"os.path.join",
"lib.models.utils.dyrep.DyRep",
"torch.nn.parallel.DistributedDataParallel",
"os.path.dirname",
"lib.utils.measure.get_flops",
"lib.utils.model_ema.ModelEMA",
"lib.models.builder.build_model",
"lib.utils.misc.AverageMeter",
"lib.utils.misc.CheckpointManager",
"lib.utils.dist_utils.init_logger",
"lib.utils.dist_utils.init_dist",
"logging.basicConfig",
"torch.nn.CrossEntropyLoss",
"time.time",
"lib.models.utils.dbb_converter.convert_to_dbb",
"lib.utils.misc.accuracy",
"lib.models.loss.CrossEntropyLabelSmooth",
"logging.getLogger"
] |
[((653, 745), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'datefmt': '"""%H:%M:%S"""'}), "(format='%(asctime)s %(levelname)s %(message)s', datefmt\n ='%H:%M:%S')\n", (672, 745), False, 'import logging\n'), ((770, 789), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (787, 789), False, 'import logging\n'), ((856, 868), 'lib.utils.args.parse_args', 'parse_args', ([], {}), '()\n', (866, 868), False, 'from lib.utils.args import parse_args\n'), ((986, 1001), 'lib.utils.dist_utils.init_dist', 'init_dist', (['args'], {}), '(args)\n', (995, 1001), False, 'from lib.utils.dist_utils import init_dist, init_logger\n'), ((1006, 1023), 'lib.utils.dist_utils.init_logger', 'init_logger', (['args'], {}), '(args)\n', (1017, 1023), False, 'from lib.utils.dist_utils import init_dist, init_logger\n'), ((1121, 1143), 'lib.dataset.builder.build_dataloader', 'build_dataloader', (['args'], {}), '(args)\n', (1137, 1143), False, 'from lib.dataset.builder import build_dataloader\n'), ((1411, 1428), 'lib.models.builder.build_model', 'build_model', (['args'], {}), '(args)\n', (1422, 1428), False, 'from lib.models.builder import build_model\n'), ((1993, 2063), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {'device_ids': '[args.local_rank]', 'find_unused_parameters': '(False)'}), '(model, device_ids=[args.local_rank], find_unused_parameters=False)\n', (1996, 2063), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((2479, 2596), 'lib.utils.misc.CheckpointManager', 'CheckpointManager', (['model'], {'ema_model': 'model_ema', 'save_dir': 'args.exp_dir', 'rank': 'args.rank', 'additions': "{'dyrep': dyrep}"}), "(model, ema_model=model_ema, save_dir=args.exp_dir, rank=\n args.rank, additions={'dyrep': dyrep})\n", (2496, 2596), False, 'from lib.utils.misc import accuracy, AverageMeter, CheckpointManager\n'), ((4050, 4073), 'lib.utils.misc.AverageMeter', 'AverageMeter', ([], {'dist': '(True)'}), '(dist=True)\n', (4062, 4073), False, 'from lib.utils.misc import accuracy, AverageMeter, CheckpointManager\n'), ((4087, 4110), 'lib.utils.misc.AverageMeter', 'AverageMeter', ([], {'dist': '(True)'}), '(dist=True)\n', (4099, 4110), False, 'from lib.utils.misc import accuracy, AverageMeter, CheckpointManager\n'), ((4124, 4147), 'lib.utils.misc.AverageMeter', 'AverageMeter', ([], {'dist': '(True)'}), '(dist=True)\n', (4136, 4147), False, 'from lib.utils.misc import accuracy, AverageMeter, CheckpointManager\n'), ((4167, 4190), 'lib.utils.misc.AverageMeter', 'AverageMeter', ([], {'dist': '(True)'}), '(dist=True)\n', (4179, 4190), False, 'from lib.utils.misc import accuracy, AverageMeter, CheckpointManager\n'), ((4208, 4219), 'time.time', 'time.time', ([], {}), '()\n', (4217, 4219), False, 'import time\n'), ((1745, 1766), 'lib.models.utils.dbb_converter.convert_to_dbb', 'convert_to_dbb', (['model'], {}), '(model)\n', (1759, 1766), False, 'from lib.models.utils.dbb_converter import convert_to_dbb\n'), ((2140, 2183), 'lib.utils.model_ema.ModelEMA', 'ModelEMA', (['model'], {'decay': 'args.model_ema_decay'}), '(model, decay=args.model_ema_decay)\n', (2148, 2183), False, 'from lib.utils.model_ema import ModelEMA\n'), ((2320, 2345), 'lib.models.utils.dyrep.DyRep', 'DyRep', (['model.module', 'None'], {}), '(model.module, None)\n', (2325, 2345), False, 'from lib.models.utils.dyrep import DyRep\n'), ((3795, 3835), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""model.ckpt"""'], {}), "(args.exp_dir, 'model.ckpt')\n", (3807, 3835), 
False, 'import os\n'), ((4424, 4461), 'lib.utils.misc.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (4432, 4461), False, 'from lib.utils.misc import accuracy, AverageMeter, CheckpointManager\n'), ((5448, 5459), 'time.time', 'time.time', ([], {}), '()\n', (5457, 5459), False, 'import time\n'), ((920, 948), 'os.path.dirname', 'os.path.dirname', (['args.resume'], {}), '(args.resume)\n', (935, 948), False, 'import os\n'), ((2931, 3007), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model.module'], {'device_ids': '[args.local_rank]', 'find_unused_parameters': '(True)'}), '(model.module, device_ids=[args.local_rank], find_unused_parameters=True)\n', (2934, 3007), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((4308, 4323), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4321, 4323), False, 'import torch\n'), ((4638, 4649), 'time.time', 'time.time', ([], {}), '()\n', (4647, 4649), False, 'import time\n'), ((1214, 1235), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1233, 1235), True, 'import torch.nn as nn\n'), ((1271, 1348), 'lib.models.loss.CrossEntropyLabelSmooth', 'CrossEntropyLabelSmooth', ([], {'num_classes': 'args.num_classes', 'epsilon': 'args.smoothing'}), '(num_classes=args.num_classes, epsilon=args.smoothing)\n', (1294, 1348), False, 'from lib.models.loss import CrossEntropyLabelSmooth\n'), ((1493, 1510), 'lib.utils.measure.get_params', 'get_params', (['model'], {}), '(model)\n', (1503, 1510), False, 'from lib.utils.measure import get_params, get_flops\n'), ((1533, 1579), 'lib.utils.measure.get_flops', 'get_flops', (['model'], {'input_shape': 'args.input_shape'}), '(model, input_shape=args.input_shape)\n', (1542, 1579), False, 'from lib.utils.measure import get_params, get_flops\n'), ((3556, 3573), 'lib.utils.measure.get_params', 'get_params', (['model'], {}), '(model)\n', (3566, 3573), False, 'from lib.utils.measure import get_params, get_flops\n'), ((3596, 3642), 'lib.utils.measure.get_flops', 'get_flops', (['model'], {'input_shape': 'args.input_shape'}), '(model, input_shape=args.input_shape)\n', (3605, 3642), False, 'from lib.utils.measure import get_params, get_flops\n'), ((1869, 1886), 'lib.utils.measure.get_params', 'get_params', (['model'], {}), '(model)\n', (1879, 1886), False, 'from lib.utils.measure import get_params, get_flops\n'), ((1913, 1959), 'lib.utils.measure.get_flops', 'get_flops', (['model'], {'input_shape': 'args.input_shape'}), '(model, input_shape=args.input_shape)\n', (1922, 1959), False, 'from lib.utils.measure import get_params, get_flops\n')]
|
import code
from pprint import pprint
from grouper import models
from grouper.ctl.util import make_session
from grouper.graph import GroupGraph
def shell_command(args):
session = make_session()
graph = GroupGraph.from_db(session)
m = models
pp = pprint
try:
from IPython import embed
except ImportError:
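        # IPython is unavailable; fall back to the stdlib interactive console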
code.interact(local={
"session": session,
"graph": graph,
"m": m,
"models": models,
"pp": pp,
})
else:
embed()
def add_parser(subparsers):
shell_parser = subparsers.add_parser(
"shell", help="Launch a shell with models imported.")
shell_parser.set_defaults(func=shell_command)
|
[
"IPython.embed",
"grouper.graph.GroupGraph.from_db",
"code.interact",
"grouper.ctl.util.make_session"
] |
[((186, 200), 'grouper.ctl.util.make_session', 'make_session', ([], {}), '()\n', (198, 200), False, 'from grouper.ctl.util import make_session\n'), ((213, 240), 'grouper.graph.GroupGraph.from_db', 'GroupGraph.from_db', (['session'], {}), '(session)\n', (231, 240), False, 'from grouper.graph import GroupGraph\n'), ((531, 538), 'IPython.embed', 'embed', ([], {}), '()\n', (536, 538), False, 'from IPython import embed\n'), ((348, 445), 'code.interact', 'code.interact', ([], {'local': "{'session': session, 'graph': graph, 'm': m, 'models': models, 'pp': pp}"}), "(local={'session': session, 'graph': graph, 'm': m, 'models':\n models, 'pp': pp})\n", (361, 445), False, 'import code\n')]
|
import sklearn.datasets as dt
import matplotlib.pyplot as plt
import numpy as np
seed = 1
# Create dataset
"""
x_data,y_data = dt.make_classification(n_samples=1000,
n_features=2,
n_repeated=0,
class_sep=2,
n_redundant=0,
random_state=seed)
"""
x_data,y_data = dt.make_circles(n_samples=700,
noise=0.2,
factor=0.3)
# Plot dataset
my_scatter_plot = plt.scatter(x_data[:,0],
x_data[:,1],
c=y_data,
vmin=min(y_data),
vmax=max(y_data),
s=35)
plt.savefig("data.png")
plt.show()
# Format y_data
y_data = np.array([[1,0] if y==0 else [0,1] for y in y_data])
# Save data into csv files
np.savetxt("x_data.csv", x_data,delimiter=',',fmt='%f')
np.savetxt("y_data.csv", y_data,delimiter=',',fmt='%f')
|
[
"sklearn.datasets.make_circles",
"matplotlib.pyplot.show",
"numpy.savetxt",
"numpy.array",
"matplotlib.pyplot.savefig"
] |
[((458, 511), 'sklearn.datasets.make_circles', 'dt.make_circles', ([], {'n_samples': '(700)', 'noise': '(0.2)', 'factor': '(0.3)'}), '(n_samples=700, noise=0.2, factor=0.3)\n', (473, 511), True, 'import sklearn.datasets as dt\n'), ((866, 889), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data.png"""'], {}), "('data.png')\n", (877, 889), True, 'import matplotlib.pyplot as plt\n'), ((890, 900), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (898, 900), True, 'import matplotlib.pyplot as plt\n'), ((929, 987), 'numpy.array', 'np.array', (['[([1, 0] if y == 0 else [0, 1]) for y in y_data]'], {}), '([([1, 0] if y == 0 else [0, 1]) for y in y_data])\n', (937, 987), True, 'import numpy as np\n'), ((1010, 1067), 'numpy.savetxt', 'np.savetxt', (['"""x_data.csv"""', 'x_data'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "('x_data.csv', x_data, delimiter=',', fmt='%f')\n", (1020, 1067), True, 'import numpy as np\n'), ((1066, 1123), 'numpy.savetxt', 'np.savetxt', (['"""y_data.csv"""', 'y_data'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "('y_data.csv', y_data, delimiter=',', fmt='%f')\n", (1076, 1123), True, 'import numpy as np\n')]
|
import win32service
import win32serviceutil
import win32api
import win32event
from ssh_cmd_manager import CmdManager
class aservice(win32serviceutil.ServiceFramework):
_svc_name_ = "ssh-shepherd-svc"
_svc_display_name_ = "SSH Shepherd"
_svc_description_ = "SSH tunnel manager for db-shepherd"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
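        # report that a stop is pending, then signal the event the run loop waits on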
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
import servicemanager
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,servicemanager.PYS_SERVICE_STARTED,(self._svc_name_, ''))
self.timeout = 3000
CmdManager().start()
while 1:
# Wait for service stop signal, if I timeout, loop again
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
# Check to see if self.hWaitStop happened
if rc == win32event.WAIT_OBJECT_0:
# Stop signal encountered
servicemanager.LogInfoMsg("STOPPED")
break
else:
servicemanager.LogInfoMsg("ALIVE")
def ctrlHandler(ctrlType):
return True
if __name__ == '__main__':
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
win32serviceutil.HandleCommandLine(aservice)
|
[
"win32serviceutil.HandleCommandLine",
"servicemanager.LogInfoMsg",
"win32api.SetConsoleCtrlHandler",
"ssh_cmd_manager.CmdManager",
"win32event.SetEvent",
"win32event.WaitForSingleObject",
"win32event.CreateEvent",
"servicemanager.LogMsg",
"win32serviceutil.ServiceFramework.__init__"
] |
[((1446, 1495), 'win32api.SetConsoleCtrlHandler', 'win32api.SetConsoleCtrlHandler', (['ctrlHandler', '(True)'], {}), '(ctrlHandler, True)\n', (1476, 1495), False, 'import win32api\n'), ((1502, 1546), 'win32serviceutil.HandleCommandLine', 'win32serviceutil.HandleCommandLine', (['aservice'], {}), '(aservice)\n', (1536, 1546), False, 'import win32serviceutil\n'), ((358, 412), 'win32serviceutil.ServiceFramework.__init__', 'win32serviceutil.ServiceFramework.__init__', (['self', 'args'], {}), '(self, args)\n', (400, 412), False, 'import win32serviceutil\n'), ((441, 481), 'win32event.CreateEvent', 'win32event.CreateEvent', (['None', '(0)', '(0)', 'None'], {}), '(None, 0, 0, None)\n', (463, 481), False, 'import win32event\n'), ((598, 633), 'win32event.SetEvent', 'win32event.SetEvent', (['self.hWaitStop'], {}), '(self.hWaitStop)\n', (617, 633), False, 'import win32event\n'), ((727, 853), 'servicemanager.LogMsg', 'servicemanager.LogMsg', (['servicemanager.EVENTLOG_INFORMATION_TYPE', 'servicemanager.PYS_SERVICE_STARTED', "(self._svc_name_, '')"], {}), "(servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''))\n", (748, 853), False, 'import servicemanager\n'), ((1006, 1066), 'win32event.WaitForSingleObject', 'win32event.WaitForSingleObject', (['self.hWaitStop', 'self.timeout'], {}), '(self.hWaitStop, self.timeout)\n', (1036, 1066), False, 'import win32event\n'), ((889, 901), 'ssh_cmd_manager.CmdManager', 'CmdManager', ([], {}), '()\n', (899, 901), False, 'from ssh_cmd_manager import CmdManager\n'), ((1212, 1248), 'servicemanager.LogInfoMsg', 'servicemanager.LogInfoMsg', (['"""STOPPED"""'], {}), "('STOPPED')\n", (1237, 1248), False, 'import servicemanager\n'), ((1294, 1328), 'servicemanager.LogInfoMsg', 'servicemanager.LogInfoMsg', (['"""ALIVE"""'], {}), "('ALIVE')\n", (1319, 1328), False, 'import servicemanager\n')]
|
import os
import time
from multiprocessing import Pool  # note: the class name is capitalized
def test(name):
    print("[child process-%s] PID=%d, PPID=%d" % (name, os.getpid(), os.getppid()))
    time.sleep(1)
def main():
    print("[parent process] PID=%d, PPID=%d" % (os.getpid(), os.getppid()))
    p = Pool(5)  # allow at most 5 worker processes (defaults to the CPU core count if omitted)
    for i in range(10):
        # submit the task asynchronously
        p.apply_async(test, args=(i, ))  # use apply() instead if synchronous execution is really needed
    p.close()  # stop accepting new tasks
    p.join()   # wait for all child processes to finish and release their resources
print("over")
if __name__ == '__main__':
main()
|
[
"os.getppid",
"multiprocessing.Pool",
"os.getpid",
"time.sleep"
] |
[((158, 171), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (168, 171), False, 'import time\n'), ((257, 264), 'multiprocessing.Pool', 'Pool', (['(5)'], {}), '(5)\n', (261, 264), False, 'from multiprocessing import Pool\n'), ((134, 145), 'os.getpid', 'os.getpid', ([], {}), '()\n', (143, 145), False, 'import os\n'), ((147, 159), 'os.getppid', 'os.getppid', ([], {}), '()\n', (157, 159), False, 'import os\n'), ((229, 240), 'os.getpid', 'os.getpid', ([], {}), '()\n', (238, 240), False, 'import os\n'), ((242, 254), 'os.getppid', 'os.getppid', ([], {}), '()\n', (252, 254), False, 'import os\n')]
|
#! /usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
# Copyright 2014 Altera Corporation. All Rights Reserved.
# Copyright 2014-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
from pyfakefs.fake_filesystem import __version__
NAME = 'pyfakefs'
REQUIRES = []
DESCRIPTION = ('pyfakefs implements a fake file system that mocks '
'the Python file system modules.')
URL = "http://pyfakefs.org"
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(BASE_PATH, 'README.md')) as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: System :: Filesystems',
]
AUTHOR = 'Google'
AUTHOR_EMAIL = '<EMAIL>'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
KEYWORDS = ("testing test file os shutil glob mocking unittest "
"fakes filesystem unit").split(' ')
params = dict(
name=NAME,
entry_points={
'pytest11': ['pytest_fakefs = pyfakefs.pytest_plugin'],
},
version=__version__,
install_requires=REQUIRES,
# metadata for upload to PyPI
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
python_requires='>=3.5',
test_suite='pyfakefs.tests',
packages=find_packages(exclude=['docs'])
)
setup(**params)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.setup",
"setuptools.find_packages"
] |
[((2875, 2890), 'setuptools.setup', 'setup', ([], {}), '(**params)\n', (2880, 2890), False, 'from setuptools import setup, find_packages\n'), ((1051, 1076), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1066, 1076), False, 'import os\n'), ((1089, 1125), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""README.md"""'], {}), "(BASE_PATH, 'README.md')\n", (1101, 1125), False, 'import os\n'), ((2837, 2868), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['docs']"}), "(exclude=['docs'])\n", (2850, 2868), False, 'from setuptools import setup, find_packages\n')]
|
# A simple script that plots the time and the speedup
# of the parallel OpenMP program as the number of available
# cores increases.
import matplotlib.pyplot as plt
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
t_64 = []
t_1024 = []
t_4096 = []
s_64 = []
s_1024 = []
s_4096 = []
fp = open(sys.argv[1])
line = fp.readline()
while line:
tokens = line.split()
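    # tokens[2] holds the board size and tokens[6] the measured time for this run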
if tokens[2] == '64':
t_64.append(float(tokens[6]) * 100)
if tokens[2] == '1024':
t_1024.append(float(tokens[6]))
if tokens[2] == '4096':
t_4096.append(float(tokens[6]))
line = fp.readline()
fp.close()
print(t_64)
print(t_1024)
print(t_4096)
for i in range(0, len(t_64)):
s_64.append(t_64[0] / t_64[i])
s_1024.append(t_1024[0] / t_1024[i])
s_4096.append(t_4096[0] / t_4096[i])
print(s_64)
print(s_1024)
print(s_4096)
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, 5, 1))
ax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)
ax.set_xlim(-0.5, 4.5)
ax.set_ylabel("time (ms)")
plt.plot(t_64, label="Time", color="blue", marker='x')
plt.title("Game of Life in 64×64 table")
plt.savefig("time_64.png", bbox_inches="tight")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, 5, 1))
ax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)
ax.set_xlim(-0.5, 4.5)
ax.set_ylabel("time (s)")
plt.plot(t_1024, label="Time", color="blue", marker='x')
plt.title("Game of Life 1024×1024 table")
plt.savefig("time_1024.png", bbox_inches="tight")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, 5, 1))
ax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)
ax.set_xlim(-0.5, 4.5)
ax.set_ylabel("time (s)")
plt.plot(t_4096, label="Time", color="blue", marker='x')
plt.title("Game of Life 4096×4096 table")
plt.savefig("time_4096.png", bbox_inches="tight")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, 5, 1))
ax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)
ax.set_xlim(-0.5, 4.5)
ax.set_ylabel("speedup")
plt.plot(s_64, label="Speedup", color="blue", marker='x')
plt.title("Game of Life in 64×64 table")
plt.savefig("speedup_64.png", bbox_inches="tight")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, 5, 1))
ax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)
ax.set_xlim(-0.5, 4.5)
ax.set_ylabel("speedup")
plt.plot(s_1024, label="Speedup", color="blue", marker='x')
plt.title("Game of Life 1024×1024 table")
plt.savefig("speedup_1024.png", bbox_inches="tight")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, 5, 1))
ax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)
ax.set_xlim(-0.5, 4.5)
ax.set_ylabel("speedup")
plt.plot(s_4096, label="Speedup", color="blue", marker='x')
plt.title("Game of Life 4096×4096 table")
plt.savefig("speedup_4096.png", bbox_inches="tight")
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((214, 235), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (228, 235), False, 'import matplotlib\n'), ((873, 887), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (885, 887), True, 'import matplotlib.pyplot as plt\n'), ((1083, 1137), 'matplotlib.pyplot.plot', 'plt.plot', (['t_64'], {'label': '"""Time"""', 'color': '"""blue"""', 'marker': '"""x"""'}), "(t_64, label='Time', color='blue', marker='x')\n", (1091, 1137), True, 'import matplotlib.pyplot as plt\n'), ((1138, 1178), 'matplotlib.pyplot.title', 'plt.title', (['"""Game of Life in 64×64 table"""'], {}), "('Game of Life in 64×64 table')\n", (1147, 1178), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1226), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""time_64.png"""'], {'bbox_inches': '"""tight"""'}), "('time_64.png', bbox_inches='tight')\n", (1190, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1238, 1252), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1250, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1503), 'matplotlib.pyplot.plot', 'plt.plot', (['t_1024'], {'label': '"""Time"""', 'color': '"""blue"""', 'marker': '"""x"""'}), "(t_1024, label='Time', color='blue', marker='x')\n", (1455, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1545), 'matplotlib.pyplot.title', 'plt.title', (['"""Game of Life 1024×1024 table"""'], {}), "('Game of Life 1024×1024 table')\n", (1513, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1595), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""time_1024.png"""'], {'bbox_inches': '"""tight"""'}), "('time_1024.png', bbox_inches='tight')\n", (1557, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1607, 1621), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1619, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1816, 1872), 'matplotlib.pyplot.plot', 'plt.plot', (['t_4096'], {'label': '"""Time"""', 'color': '"""blue"""', 'marker': '"""x"""'}), "(t_4096, label='Time', color='blue', marker='x')\n", (1824, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1873, 1914), 'matplotlib.pyplot.title', 'plt.title', (['"""Game of Life 4096×4096 table"""'], {}), "('Game of Life 4096×4096 table')\n", (1882, 1914), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""time_4096.png"""'], {'bbox_inches': '"""tight"""'}), "('time_4096.png', bbox_inches='tight')\n", (1926, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1976, 1990), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1988, 1990), True, 'import matplotlib.pyplot as plt\n'), ((2184, 2241), 'matplotlib.pyplot.plot', 'plt.plot', (['s_64'], {'label': '"""Speedup"""', 'color': '"""blue"""', 'marker': '"""x"""'}), "(s_64, label='Speedup', color='blue', marker='x')\n", (2192, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2282), 'matplotlib.pyplot.title', 'plt.title', (['"""Game of Life in 64×64 table"""'], {}), "('Game of Life in 64×64 table')\n", (2251, 2282), True, 'import matplotlib.pyplot as plt\n'), ((2283, 2333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""speedup_64.png"""'], {'bbox_inches': '"""tight"""'}), "('speedup_64.png', bbox_inches='tight')\n", (2294, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2359), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2357, 2359), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2612), 'matplotlib.pyplot.plot', 'plt.plot', (['s_1024'], {'label': '"""Speedup"""', 
'color': '"""blue"""', 'marker': '"""x"""'}), "(s_1024, label='Speedup', color='blue', marker='x')\n", (2561, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2613, 2654), 'matplotlib.pyplot.title', 'plt.title', (['"""Game of Life 1024×1024 table"""'], {}), "('Game of Life 1024×1024 table')\n", (2622, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2707), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""speedup_1024.png"""'], {'bbox_inches': '"""tight"""'}), "('speedup_1024.png', bbox_inches='tight')\n", (2666, 2707), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2733), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2731, 2733), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2986), 'matplotlib.pyplot.plot', 'plt.plot', (['s_4096'], {'label': '"""Speedup"""', 'color': '"""blue"""', 'marker': '"""x"""'}), "(s_4096, label='Speedup', color='blue', marker='x')\n", (2935, 2986), True, 'import matplotlib.pyplot as plt\n'), ((2987, 3028), 'matplotlib.pyplot.title', 'plt.title', (['"""Game of Life 4096×4096 table"""'], {}), "('Game of Life 4096×4096 table')\n", (2996, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3081), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""speedup_4096.png"""'], {'bbox_inches': '"""tight"""'}), "('speedup_4096.png', bbox_inches='tight')\n", (3040, 3081), True, 'import matplotlib.pyplot as plt\n'), ((954, 972), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (963, 972), True, 'import numpy as np\n'), ((1319, 1337), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (1328, 1337), True, 'import numpy as np\n'), ((1688, 1706), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (1697, 1706), True, 'import numpy as np\n'), ((2057, 2075), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (2066, 2075), True, 'import numpy as np\n'), ((2426, 2444), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (2435, 2444), True, 'import numpy as np\n'), ((2800, 2818), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (2809, 2818), True, 'import numpy as np\n')]
|
from django.conf.urls import url, include
from stores import views
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from rest_framework.authtoken import views as views_rest
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'versions', views.VersionViewSet)
router.register(r'units', views.UnitViewSet)
router.register(r'products', views.ProductViewSet)
router.register(r'categories', views.CategoryViewSet)
router.register(r'stores', views.StoreViewSet)
router.register(r'products-in-stores', views.ProductInStoreViewSet)
schema_view = get_schema_view(title='Pastebin API')
urlpatterns = [
url(r'^schema/$', schema_view),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', views_rest.obtain_auth_token),
url(r'^api-token-generate/', views.get_all_tokens),
url(r'^admin/password_reset/$', auth_views.password_reset,
name='admin_password_reset'),
url(r'^admin/password_reset/done/$',
auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete,
name='password_reset_complete'),
url(r'^media/(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT, 'show_indexes': settings.DEBUG}),
]
|
[
"django.conf.urls.include",
"django.conf.urls.url",
"rest_framework.routers.DefaultRouter",
"rest_framework.schemas.get_schema_view"
] |
[((455, 470), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (468, 470), False, 'from rest_framework.routers import DefaultRouter\n'), ((802, 839), 'rest_framework.schemas.get_schema_view', 'get_schema_view', ([], {'title': '"""Pastebin API"""'}), "(title='Pastebin API')\n", (817, 839), False, 'from rest_framework.schemas import get_schema_view\n'), ((861, 890), 'django.conf.urls.url', 'url', (['"""^schema/$"""', 'schema_view'], {}), "('^schema/$', schema_view)\n", (864, 890), False, 'from django.conf.urls import url, include\n'), ((1018, 1071), 'django.conf.urls.url', 'url', (['"""^api-token-auth/"""', 'views_rest.obtain_auth_token'], {}), "('^api-token-auth/', views_rest.obtain_auth_token)\n", (1021, 1071), False, 'from django.conf.urls import url, include\n'), ((1078, 1127), 'django.conf.urls.url', 'url', (['"""^api-token-generate/"""', 'views.get_all_tokens'], {}), "('^api-token-generate/', views.get_all_tokens)\n", (1081, 1127), False, 'from django.conf.urls import url, include\n'), ((1134, 1225), 'django.conf.urls.url', 'url', (['"""^admin/password_reset/$"""', 'auth_views.password_reset'], {'name': '"""admin_password_reset"""'}), "('^admin/password_reset/$', auth_views.password_reset, name=\n 'admin_password_reset')\n", (1137, 1225), False, 'from django.conf.urls import url, include\n'), ((1235, 1335), 'django.conf.urls.url', 'url', (['"""^admin/password_reset/done/$"""', 'auth_views.password_reset_done'], {'name': '"""password_reset_done"""'}), "('^admin/password_reset/done/$', auth_views.password_reset_done, name=\n 'password_reset_done')\n", (1238, 1335), False, 'from django.conf.urls import url, include\n'), ((1345, 1474), 'django.conf.urls.url', 'url', (['"""^reset/(?P<uidb64>[0-9A-Za-z_\\\\-]+)/(?P<token>.+)/$"""', 'auth_views.password_reset_confirm'], {'name': '"""password_reset_confirm"""'}), "('^reset/(?P<uidb64>[0-9A-Za-z_\\\\-]+)/(?P<token>.+)/$', auth_views.\n password_reset_confirm, name='password_reset_confirm')\n", (1348, 1474), False, 'from django.conf.urls import url, include\n'), ((1483, 1576), 'django.conf.urls.url', 'url', (['"""^reset/done/$"""', 'auth_views.password_reset_complete'], {'name': '"""password_reset_complete"""'}), "('^reset/done/$', auth_views.password_reset_complete, name=\n 'password_reset_complete')\n", (1486, 1576), False, 'from django.conf.urls import url, include\n'), ((1586, 1696), 'django.conf.urls.url', 'url', (['"""^media/(?P<path>.*)$"""', 'serve', "{'document_root': settings.MEDIA_ROOT, 'show_indexes': settings.DEBUG}"], {}), "('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT,\n 'show_indexes': settings.DEBUG})\n", (1589, 1696), False, 'from django.conf.urls import url, include\n'), ((907, 927), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (914, 927), False, 'from django.conf.urls import url, include\n'), ((953, 1011), 'django.conf.urls.include', 'include', (['"""rest_framework.urls"""'], {'namespace': '"""rest_framework"""'}), "('rest_framework.urls', namespace='rest_framework')\n", (960, 1011), False, 'from django.conf.urls import url, include\n')]
|
from copy import deepcopy
class NodeGroupDelta:
def __init__(self, node_group : 'NodeGroup', sign : int = 1, virtual : bool = False):
self.node_group = node_group.produce_virtual_copy() if (virtual and not node_group.virtual) else node_group
self._sign = sign
def enforce(self):
return self.__class__(self.node_group.enforce(), self._sign, False)
def to_virtual(self):
return self.__class__(self.node_group, self._sign, True)
def copy(self):
return self.__class__(self.node_group, self._sign, self.virtual)
def __deepcopy__(self, memo):
result = self.__class__(deepcopy(self.node_group, memo), self._sign, self.virtual)
memo[id(result)] = result
return result
@property
def in_change(self):
return not self.node_group.enforced
@property
def virtual(self):
return self.node_group.virtual
@property
def provider(self):
return self.node_group.node_info.provider
@property
def node_type(self):
return self.node_group.node_info.node_type
@property
def is_scale_down(self):
return self._sign == -1
@property
def is_scale_up(self):
return self._sign == 1
@property
def id(self):
return self.node_group.id
@property
def is_empty(self):
return self.node_group.nodes_count == 0
@property
def nodes_change(self):
return self.node_group.nodes_count * self._sign
def __repr__(self):
return f'{self.__class__.__name__}( node_group = {self.node_group}, \
sign = {self._sign}, \
virtual = {self.virtual})'
|
[
"copy.deepcopy"
] |
[((640, 671), 'copy.deepcopy', 'deepcopy', (['self.node_group', 'memo'], {}), '(self.node_group, memo)\n', (648, 671), False, 'from copy import deepcopy\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
def kalman_transit_covariance(S, A, R):
"""
:param S: Current covariance matrix
:param A: Either transition matrix or jacobian matrix
:param R: Current noise covariance matrix
"""
state_size = S.shape[0]
assert S.shape == (state_size, state_size)
assert A.shape == (state_size, state_size)
assert R.shape == (state_size, state_size)
new_S = np.dot(np.dot(A, S), A.T) + R
return new_S
def kalman_process_observation(mu, S, observation, C, Q):
"""
Performs processing of an observation coming from the model: z = C * x + noise
:param mu: Current mean
:param S: Current covariance matrix
:param observation: Vector z
:param C: Observation matrix
:param Q: Noise covariance matrix (with zero mean)
"""
state_size = mu.shape[0]
observation_size = observation.shape[0]
assert S.shape == (state_size, state_size)
assert observation_size == C.shape[0]
assert observation_size == Q.shape[0]
H = np.linalg.inv(np.dot(np.dot(C, S), C.T) + Q)
K = np.dot(np.dot(S, C.T), H)
new_mu = mu + np.dot(K, observation - np.dot(C, mu))
new_S = np.dot(np.eye(state_size) - np.dot(K, C), S)
    # Get rid of very small numbers; they can produce imaginary components in the eigenvalues
new_S[np.abs(new_S) < 1e-16] = 0
return new_mu, new_S
|
[
"numpy.dot",
"numpy.abs",
"numpy.eye"
] |
[((1093, 1107), 'numpy.dot', 'np.dot', (['S', 'C.T'], {}), '(S, C.T)\n', (1099, 1107), True, 'import numpy as np\n'), ((433, 445), 'numpy.dot', 'np.dot', (['A', 'S'], {}), '(A, S)\n', (439, 445), True, 'import numpy as np\n'), ((1188, 1206), 'numpy.eye', 'np.eye', (['state_size'], {}), '(state_size)\n', (1194, 1206), True, 'import numpy as np\n'), ((1209, 1221), 'numpy.dot', 'np.dot', (['K', 'C'], {}), '(K, C)\n', (1215, 1221), True, 'import numpy as np\n'), ((1332, 1345), 'numpy.abs', 'np.abs', (['new_S'], {}), '(new_S)\n', (1338, 1345), True, 'import numpy as np\n'), ((1054, 1066), 'numpy.dot', 'np.dot', (['C', 'S'], {}), '(C, S)\n', (1060, 1066), True, 'import numpy as np\n'), ((1154, 1167), 'numpy.dot', 'np.dot', (['C', 'mu'], {}), '(C, mu)\n', (1160, 1167), True, 'import numpy as np\n')]
|
# Author: <NAME>
import h5py
import json
import librosa
import numpy as np
import os
import scipy
import time
from pathlib import Path
from PIL import Image
from torchvision.transforms import transforms
from dataloaders.utils import WINDOWS, compute_spectrogram
def run(json_path, hdf5_json_path, audio_path, image_path, audio_conf={}):
with open(json_path, 'r') as f:
data_and_dirs = json.load(f)
data = data_and_dirs['data']
audio_base = data_and_dirs['audio_base_path']
image_base = data_and_dirs['image_base_path']
print('Loaded %d data from %s' % (len(data), json_path))
run_audio(data, audio_base, audio_path, audio_conf)
run_image(data, image_base, image_path)
Path(os.path.dirname(hdf5_json_path)).mkdir(parents=True, exist_ok=True)
with open(hdf5_json_path, 'w') as f:
d = {'audio_hdf5_path': audio_path, 'image_hdf5_path': image_path}
json.dump(d, f)
# Solution borrows from https://github.com/h5py/h5py/issues/745
def run_image(data, image_base, image_path):
if os.path.exists(image_path):
print('%s already exists. skip' % image_path)
return
print('Dumping image to HDF5 : %s' % image_path)
n = len(data)
Path(os.path.dirname(image_path)).mkdir(parents=True, exist_ok=True)
f = h5py.File(image_path, 'w')
dt = h5py.special_dtype(vlen=np.dtype('uint8'))
dset_img = f.create_dataset('image', (n,), dtype=dt)
start = time.time()
for i, d in enumerate(data):
with open('%s/%s' % (image_base, d['image']), 'rb') as f_img:
binary_img = f_img.read()
dset_img[i] = np.frombuffer(binary_img, dtype='uint8')
if i % 100 == 0:
t = time.time() - start
print('processed %d / %d images (%.fs)' % (i, n, t))
def run_audio(data, audio_base, audio_path, audio_conf):
if os.path.exists(audio_path):
print('%s already exists. skip' % audio_path)
return
print('Dumping audio to HDF5 : %s' % audio_path)
print(' audio_conf : %s' % audio_conf)
audio_conf['num_mel_bins'] = audio_conf.get('num_mel_bins', 40)
audio_conf['target_length'] = audio_conf.get('target_length', 2048)
audio_conf['use_raw_length'] = audio_conf.get('use_raw_length', False)
assert(not audio_conf['use_raw_length'])
# dump audio
n = len(data)
Path(os.path.dirname(audio_path)).mkdir(parents=True, exist_ok=True)
f = h5py.File(audio_path, 'w')
dset_mel_shape = (n, audio_conf['num_mel_bins'],
audio_conf['target_length'])
dset_mel = f.create_dataset('melspec', dset_mel_shape, dtype='f')
dset_len = f.create_dataset('melspec_len', (n,), dtype='i8')
start = time.time()
for i, d in enumerate(data):
y, sr = librosa.load('%s/%s' % (audio_base, d['wav']), None)
logspec, n_frames = compute_spectrogram(y, sr, audio_conf)
dset_mel[i, :, :] = logspec
dset_len[i] = n_frames
if i % 100 == 0:
t = time.time() - start
print('processed %d / %d audios (%.fs)' % (i, n, t))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('inp_json_path', type=str, help='input JSON file')
parser.add_argument('out_json_path', type=str, help='path to save output json')
parser.add_argument('audio_h5_path', type=str, help='path to save audio HDF5')
parser.add_argument('image_h5_path', type=str, help='path to save image HDF5')
args = parser.parse_args()
print(args)
run(args.inp_json_path, args.out_json_path,
args.audio_h5_path, args.image_h5_path)
|
[
"json.dump",
"h5py.File",
"json.load",
"argparse.ArgumentParser",
"numpy.frombuffer",
"os.path.dirname",
"numpy.dtype",
"os.path.exists",
"time.time",
"dataloaders.utils.compute_spectrogram",
"librosa.load"
] |
[((1060, 1086), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (1074, 1086), False, 'import os\n'), ((1310, 1336), 'h5py.File', 'h5py.File', (['image_path', '"""w"""'], {}), "(image_path, 'w')\n", (1319, 1336), False, 'import h5py\n'), ((1463, 1474), 'time.time', 'time.time', ([], {}), '()\n', (1472, 1474), False, 'import time\n'), ((1872, 1898), 'os.path.exists', 'os.path.exists', (['audio_path'], {}), '(audio_path)\n', (1886, 1898), False, 'import os\n'), ((2448, 2474), 'h5py.File', 'h5py.File', (['audio_path', '"""w"""'], {}), "(audio_path, 'w')\n", (2457, 2474), False, 'import h5py\n'), ((2727, 2738), 'time.time', 'time.time', ([], {}), '()\n', (2736, 2738), False, 'import time\n'), ((3163, 3188), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3186, 3188), False, 'import argparse\n'), ((400, 412), 'json.load', 'json.load', (['f'], {}), '(f)\n', (409, 412), False, 'import json\n'), ((926, 941), 'json.dump', 'json.dump', (['d', 'f'], {}), '(d, f)\n', (935, 941), False, 'import json\n'), ((1638, 1678), 'numpy.frombuffer', 'np.frombuffer', (['binary_img'], {'dtype': '"""uint8"""'}), "(binary_img, dtype='uint8')\n", (1651, 1678), True, 'import numpy as np\n'), ((2788, 2840), 'librosa.load', 'librosa.load', (["('%s/%s' % (audio_base, d['wav']))", 'None'], {}), "('%s/%s' % (audio_base, d['wav']), None)\n", (2800, 2840), False, 'import librosa\n'), ((2869, 2907), 'dataloaders.utils.compute_spectrogram', 'compute_spectrogram', (['y', 'sr', 'audio_conf'], {}), '(y, sr, audio_conf)\n', (2888, 2907), False, 'from dataloaders.utils import WINDOWS, compute_spectrogram\n'), ((1370, 1387), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1378, 1387), True, 'import numpy as np\n'), ((734, 765), 'os.path.dirname', 'os.path.dirname', (['hdf5_json_path'], {}), '(hdf5_json_path)\n', (749, 765), False, 'import os\n'), ((1238, 1265), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (1253, 1265), False, 'import os\n'), ((1721, 1732), 'time.time', 'time.time', ([], {}), '()\n', (1730, 1732), False, 'import time\n'), ((2376, 2403), 'os.path.dirname', 'os.path.dirname', (['audio_path'], {}), '(audio_path)\n', (2391, 2403), False, 'import os\n'), ((3017, 3028), 'time.time', 'time.time', ([], {}), '()\n', (3026, 3028), False, 'import time\n')]
|
from copy import deepcopy
DEVICE0_MAC = "00-11-22-33-44-55"
DEVICE1_MAC = "22-33-44-55-66-77"
BLOCKED_DEVICE1_MAC = "BB-BB-BB-BB-BB-B1"
BLOCKED_DEVICE2_MAC = "BB-BB-BB-BB-BB-B2"
LIMIT_DEVICE1_MAC = "33-33-33-33-33-33"
LIMIT_DEVICE2_MAC = "44-44-44-44-44-44"
ADDED_DEVICE_MAC = "55-55-55-55-55-55"
restructured_info_dicts1 = {
'forbid_domain': {
'forbid_domain_1': {
'.anonymous': False,
'.index': 4,
'.name': 'forbid_domain_1',
'.type': 'forbid_domain',
'domain': 'www.example1.com'},
'forbid_domain_2': {
'.anonymous': False,
'.index': 23,
'.name': 'forbid_domain_2',
'.type': 'forbid_domain',
'domain': 'www.foo.com'},
'forbid_domain_3': {
'.anonymous': False,
'.index': 29,
'.name': 'forbid_domain_3',
'.type': 'forbid_domain',
'domain': 'www.foobar.com'},
'forbid_domain_4': {
'.anonymous': False,
'.index': 30,
'.name': 'forbid_domain_4',
'.type': 'forbid_domain',
'domain': 'www.bar.com'},
'forbid_domain_5': {
'.anonymous': False,
'.index': 31,
'.name': 'forbid_domain_5',
'.type': 'forbid_domain',
'domain': 'www.foo_bar.com'},
},
'host_info': {
DEVICE0_MAC: {
'blocked': '0',
'down_limit': '0',
'down_speed': '0',
'forbid_domain': '',
'hostname': 'CONTROL_HOST',
'ip': '192.168.0.119',
'is_cur_host': '1',
'limit_time': '',
'mac': DEVICE0_MAC,
'plan_rule': [],
'type': '0',
'up_limit': '0',
'up_speed': '0'},
DEVICE1_MAC: {
'acs_time': '2022-02-10 12:35:20',
'ap_radio': 2,
'blocked': '0',
'down_limit': '0',
'down_speed': '0',
'encode': 1,
'forbid_domain': '',
'freq_unit': 1,
'hostname': 'DEVICE1',
'id': '44',
'ip': '192.168.0.116',
'is_cur_host': '0',
'limit_time': '',
'mac': DEVICE1_MAC,
'plan_rule': [],
'rssi': '-63',
'ssid': 'SSID',
'type': '1',
'up_limit': '0',
'up_speed': '0',
'vlan': '0',
'wifi_mode': 0,
'wserv_id': 0},
BLOCKED_DEVICE1_MAC: {
'blocked': '1',
'down_limit': '0',
'down_speed': '0',
'forbid_domain': '',
'hostname': 'BLOCKED_DEVICE1',
'ip': '0.0.0.0',
'is_cur_host': '0',
'limit_time': 'limit_time_1',
'mac': BLOCKED_DEVICE1_MAC,
'plan_rule': [],
'type': '0',
'up_limit': '0',
'up_speed': '0'},
BLOCKED_DEVICE2_MAC: {
'blocked': '1',
'down_limit': '0',
'down_speed': '0',
'forbid_domain': '',
'hostname': 'BLOCKED_DEVICE2',
'ip': '0.0.0.0',
'is_cur_host': '0',
'limit_time': '',
'mac': BLOCKED_DEVICE2_MAC,
'plan_rule': [],
'type': '0',
'up_limit': '0',
'up_speed': '0'},
LIMIT_DEVICE1_MAC: {
'acs_time': '2022-02-10 02:46:37',
'ap_radio': 2,
'blocked': '0',
'down_limit': '0',
'down_speed': '0',
'encode': 1,
'forbid_domain': 'forbid_domain_3,forbid_domain_4',
'freq_unit': 1,
'hostname': 'LIMITED_HOST1',
'id': '3',
'ip': '192.168.0.115',
'is_cur_host': '0',
'limit_time': '',
'mac': LIMIT_DEVICE1_MAC,
'plan_rule': [],
'rssi': '-42',
'ssid': 'SSID',
'type': '1',
'up_limit': '0',
'up_speed': '10',
'vlan': '0',
'wifi_mode': 0,
'wserv_id': 0},
LIMIT_DEVICE2_MAC: {
'acs_time': '2022-02-10 02:46:40',
'ap_radio': 2,
'blocked': '0',
'down_limit': '0',
'down_speed': '0',
'encode': 1,
'forbid_domain': 'forbid_domain_1',
'freq_unit': 1,
'hostname': 'LIMITED_HOST2',
'id': '5',
'ip': '192.168.0.111',
'is_cur_host': '0',
'limit_time': 'limit_time_1,limit_time_3',
'mac': LIMIT_DEVICE2_MAC,
'plan_rule': [],
'rssi': '-66',
'ssid': 'SSID',
'type': '1',
'up_limit': '5',
'up_speed': '0',
'vlan': '0',
'wifi_mode': 0,
'wserv_id': 0}
},
'limit_time': {
'limit_time_1': {
'.anonymous': False,
'.index': 21,
'.name': 'limit_time_1',
'.type': 'limit_time',
'end_time': '18:10',
'fri': '0',
'mon': '0',
'name': 'PRE_SUPPER',
'sat': '1',
'start_time': '17:30',
'sun': '1',
'thu': '0',
'tue': '0',
'wed': '0'},
'limit_time_3': {
'.anonymous': False,
'.index': 22,
'.name': 'limit_time_3',
'.type': 'limit_time',
'end_time': '22:00',
'fri': '1',
'mon': '1',
'name': 'EVE',
'sat': '1',
'start_time': '21:10',
'sun': '1',
'thu': '1',
'tue': '1',
'wed': '1'},
'limit_time_4': {
'.anonymous': False,
'.index': 25,
'.name': 'limit_time_4',
'.type': 'limit_time',
'end_time': '23:59',
'fri': '1',
'mon': '1',
'name': '<NAME>',
'sat': '1',
'start_time': '00:00',
'sun': '1',
'thu': '1',
'tue': '1',
'wed': '1'},
'limit_time_5': {
'.anonymous': False,
'.index': 26,
'.name': 'limit_time_5',
'.type': 'limit_time',
'end_time': '08:00',
'fri': '1',
'mon': '1',
'name': 'MORNING',
'sat': '1',
'start_time': '11:00',
'sun': '1',
'thu': '1',
'tue': '1',
'wed': '1'}
}}
restructured_info_dicts2 = deepcopy(restructured_info_dicts1)
_hosts = restructured_info_dicts2["host_info"]
_hosts[DEVICE0_MAC]["ip"] = "192.168.0.129"
_hosts[DEVICE1_MAC]["hostname"] = "DEVICE1_RENAMED"
_hosts.pop(LIMIT_DEVICE2_MAC)
_hosts[ADDED_DEVICE_MAC] = {
'acs_time': '2022-02-10 02:46:37',
'ap_radio': 2,
'blocked': '0',
'down_limit': '0',
'down_speed': '0',
'encode': 1,
'forbid_domain': 'forbid_domain_3,forbid_domain_2',
'freq_unit': 1,
'hostname': 'ANOTHER_HOST',
'id': '3',
'ip': '192.168.0.188',
'is_cur_host': '0',
'limit_time': 'limit_time_4',
'mac': ADDED_DEVICE_MAC,
'plan_rule': [],
'rssi': '-41',
'ssid': 'SSID',
'type': '1',
'up_limit': '0',
'up_speed': '10',
'vlan': '0',
'wifi_mode': 0,
'wserv_id': 0}
_limit_time = restructured_info_dicts2["limit_time"]
_limit_time.pop("limit_time_1")
_forbid_domain = restructured_info_dicts2["forbid_domain"]
_forbid_domain.pop("forbid_domain_1")
restructured_info_dicts2["host_info"] = _hosts
|
[
"copy.deepcopy"
] |
[((6722, 6756), 'copy.deepcopy', 'deepcopy', (['restructured_info_dicts1'], {}), '(restructured_info_dicts1)\n', (6730, 6756), False, 'from copy import deepcopy\n')]
|
import os
import unittest
import ansiblelint
from ansiblelint import RulesCollection
class TestTaskIncludes(unittest.TestCase):
def setUp(self):
rulesdir = os.path.join('lib', 'ansiblelint', 'rules')
self.rules = RulesCollection.create_from_directory(rulesdir)
def test_included_tasks(self):
filename = 'test/taskincludes.txt'
runner = ansiblelint.Runner(self.rules, {filename}, [], [])
runner.run()
self.assertEqual(len(runner.playbooks), 3)
|
[
"os.path.join",
"ansiblelint.Runner",
"ansiblelint.RulesCollection.create_from_directory"
] |
[((171, 214), 'os.path.join', 'os.path.join', (['"""lib"""', '"""ansiblelint"""', '"""rules"""'], {}), "('lib', 'ansiblelint', 'rules')\n", (183, 214), False, 'import os\n'), ((236, 283), 'ansiblelint.RulesCollection.create_from_directory', 'RulesCollection.create_from_directory', (['rulesdir'], {}), '(rulesdir)\n', (273, 283), False, 'from ansiblelint import RulesCollection\n'), ((380, 430), 'ansiblelint.Runner', 'ansiblelint.Runner', (['self.rules', '{filename}', '[]', '[]'], {}), '(self.rules, {filename}, [], [])\n', (398, 430), False, 'import ansiblelint\n')]
|
import argparse
import glob
import os
from utils import *
def main(args):
desired_width = args.desired_width
desired_height = args.desired_height
min_percentage = args.min_percentage
max_percentage = args.max_percentage
img_fn_array = []
if args.image:
img_obj = {}
img_obj["img"] = args.image
img_obj["out"] = args.out
img_fn_array.append(img_obj)
if args.dir:
normpath = os.path.normpath("/".join([args.dir, '*', '']))
for img_fn in glob.iglob(normpath, recursive=True):
if os.path.isfile(img_fn) and True in [ext in img_fn for ext in [".nrrd", ".nrrd.gz", ".nii", ".nii.gz", ".gipl", ".gipl.gz"]]:
img_obj = {}
img_obj["img"] = img_fn
img_obj["out"] = os.path.normpath("/".join([args.out]))
img_fn_array.append(img_obj)
for img_obj in img_fn_array:
image = img_obj["img"]
out = img_obj["out"]
if not os.path.exists(out):
os.makedirs(out)
img, header = ReadFile(image)
print("Normalization and contrast adjustment...")
img = Normalize(img,in_min=0,in_max=img.max(),out_min=0,out_max=255)
img = Adjust_Contrast(img,pmin=min_percentage,pmax=max_percentage)
Deconstruction(img, image, out, desired_width, desired_height)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Pre-processing', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
input_group = parser.add_argument_group('Input files')
input_params = input_group.add_mutually_exclusive_group(required=True)
input_params.add_argument('--image', type=str, help='Input 3D image')
input_params.add_argument('--dir', type=str, help='Input directory with 3D images')
size_group = parser.add_argument_group('Resizing parameters')
size_group.add_argument('--desired_width', type=int, help="desired width of the images", default=512)
    size_group.add_argument('--desired_height', type=int, help="desired height of the images", default=512)
contrast_group = parser.add_argument_group('Contrast parameters')
contrast_group.add_argument('--min_percentage', type=int, help="min percentage to adjust contrast of the images", default=45)
contrast_group.add_argument('--max_percentage', type=int, help="max percentage to adjust contrast of the images", default=90)
output_params = parser.add_argument_group('Output parameters')
output_params.add_argument('--out', type=str, help='Output directory', required=True)
args = parser.parse_args()
main(args)
|
[
"os.makedirs",
"argparse.ArgumentParser",
"os.path.exists",
"os.path.isfile",
"glob.iglob"
] |
[((1237, 1351), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pre-processing"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Pre-processing', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (1260, 1351), False, 'import argparse\n'), ((460, 496), 'glob.iglob', 'glob.iglob', (['normpath'], {'recursive': '(True)'}), '(normpath, recursive=True)\n', (470, 496), False, 'import glob\n'), ((856, 875), 'os.path.exists', 'os.path.exists', (['out'], {}), '(out)\n', (870, 875), False, 'import os\n'), ((880, 896), 'os.makedirs', 'os.makedirs', (['out'], {}), '(out)\n', (891, 896), False, 'import os\n'), ((504, 526), 'os.path.isfile', 'os.path.isfile', (['img_fn'], {}), '(img_fn)\n', (518, 526), False, 'import os\n')]
|
import graphene
from ....checkout.error_codes import CheckoutErrorCode
from ....checkout.fetch import (
fetch_checkout_info,
fetch_checkout_lines,
update_delivery_method_lists_for_checkout_info,
)
from ....checkout.utils import add_variants_to_checkout, recalculate_checkout_discount
from ....warehouse.reservations import get_reservation_length, is_reservation_enabled
from ...core.descriptions import ADDED_IN_34, DEPRECATED_IN_3X_INPUT
from ...core.mutations import BaseMutation
from ...core.scalars import UUID
from ...core.types import CheckoutError, NonNullList
from ...core.validators import validate_variants_available_in_channel
from ...product.types import ProductVariant
from ..types import Checkout
from .checkout_create import CheckoutLineInput
from .utils import (
check_lines_quantity,
check_permissions_for_custom_prices,
get_checkout,
group_quantity_and_custom_prices_by_variants,
update_checkout_shipping_method_if_invalid,
validate_variants_are_published,
validate_variants_available_for_purchase,
)
class CheckoutLinesAdd(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
id = graphene.ID(
description="The checkout's ID." + ADDED_IN_34,
required=False,
)
token = UUID(
description=f"Checkout token.{DEPRECATED_IN_3X_INPUT} Use `id` instead.",
required=False,
)
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead."
),
)
lines = NonNullList(
CheckoutLineInput,
required=True,
description=(
"A list of checkout lines, each containing information about "
"an item in the checkout."
),
)
class Meta:
description = (
"Adds a checkout line to the existing checkout."
"If line was already in checkout, its quantity will be increased."
)
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def validate_checkout_lines(
cls,
info,
variants,
checkout_lines_data,
country,
channel_slug,
lines=None,
):
quantities = [line_data.quantity for line_data in checkout_lines_data]
check_lines_quantity(
variants,
quantities,
country,
channel_slug,
info.context.site.settings.limit_quantity_per_checkout,
existing_lines=lines,
check_reservations=is_reservation_enabled(info.context.site.settings),
)
@classmethod
def clean_input(
cls,
info,
checkout,
variants,
checkout_lines_data,
checkout_info,
lines,
manager,
discounts,
replace,
):
channel_slug = checkout_info.channel.slug
cls.validate_checkout_lines(
info,
variants,
checkout_lines_data,
checkout.get_country(),
channel_slug,
lines=lines,
)
variants_ids_to_validate = {
variant.id
for variant, line_data in zip(variants, checkout_lines_data)
if line_data.quantity_to_update and line_data.quantity != 0
}
# validate variant only when line quantity is bigger than 0
if variants_ids_to_validate:
validate_variants_available_for_purchase(
variants_ids_to_validate, checkout.channel_id
)
validate_variants_available_in_channel(
variants_ids_to_validate,
checkout.channel_id,
CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL,
)
validate_variants_are_published(
variants_ids_to_validate, checkout.channel_id
)
if variants and checkout_lines_data:
checkout = add_variants_to_checkout(
checkout,
variants,
checkout_lines_data,
channel_slug,
replace=replace,
replace_reservations=True,
reservation_length=get_reservation_length(info.context),
)
lines, _ = fetch_checkout_lines(checkout)
shipping_channel_listings = checkout.channel.shipping_method_listings.all()
update_delivery_method_lists_for_checkout_info(
checkout_info,
checkout_info.checkout.shipping_method,
checkout_info.checkout.collection_point,
checkout_info.shipping_address,
lines,
discounts,
manager,
shipping_channel_listings,
)
return lines
@classmethod
def perform_mutation(
cls, _root, info, lines, checkout_id=None, token=None, id=None, replace=False
):
check_permissions_for_custom_prices(info.context.app, lines)
checkout = get_checkout(
cls,
info,
checkout_id=checkout_id,
token=token,
id=id,
error_class=CheckoutErrorCode,
)
discounts = info.context.discounts
manager = info.context.plugins
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant)
checkout_lines_data = group_quantity_and_custom_prices_by_variants(lines)
shipping_channel_listings = checkout.channel.shipping_method_listings.all()
checkout_info = fetch_checkout_info(
checkout, [], discounts, manager, shipping_channel_listings
)
lines, _ = fetch_checkout_lines(checkout)
lines = cls.clean_input(
info,
checkout,
variants,
checkout_lines_data,
checkout_info,
lines,
manager,
discounts,
replace,
)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
recalculate_checkout_discount(
manager, checkout_info, lines, info.context.discounts
)
manager.checkout_updated(checkout)
return CheckoutLinesAdd(checkout=checkout)
|
[
"graphene.ID",
"graphene.Field"
] |
[((1115, 1175), 'graphene.Field', 'graphene.Field', (['Checkout'], {'description': '"""An updated checkout."""'}), "(Checkout, description='An updated checkout.')\n", (1129, 1175), False, 'import graphene\n'), ((1211, 1286), 'graphene.ID', 'graphene.ID', ([], {'description': '("The checkout\'s ID." + ADDED_IN_34)', 'required': '(False)'}), '(description="The checkout\'s ID." + ADDED_IN_34, required=False)\n', (1222, 1286), False, 'import graphene\n'), ((1490, 1605), 'graphene.ID', 'graphene.ID', ([], {'required': '(False)', 'description': 'f"""The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead."""'}), "(required=False, description=\n f'The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead.')\n", (1501, 1605), False, 'import graphene\n')]
|
#!/usr/bin/env python
"""
_LoadFromFilesetWorkflow_
MySQL implementation of Subscription.LoadFromFilesetWorkflow
"""
__all__ = []
from WMCore.Database.DBFormatter import DBFormatter
class LoadFromFilesetWorkflow(DBFormatter):
sql = """SELECT wmbs_subscription.id, fileset, workflow, split_algo,
wmbs_sub_types.name, last_update FROM wmbs_subscription
INNER JOIN wmbs_sub_types ON
wmbs_subscription.subtype = wmbs_sub_types.id
WHERE fileset = :fileset AND workflow = :workflow"""
def formatDict(self, result):
"""
_formatDict_
Cast the id, fileset, workflow and last_update columns to integers
since formatDict() turns everything into strings.
"""
formattedResult = DBFormatter.formatDict(self, result)[0]
formattedResult["id"] = int(formattedResult["id"])
formattedResult["fileset"] = int(formattedResult["fileset"])
formattedResult["workflow"] = int(formattedResult["workflow"])
formattedResult["last_update"] = int(formattedResult["last_update"])
formattedResult["type"] = formattedResult["name"]
del formattedResult["name"]
return formattedResult
def execute(self, fileset = None, workflow = None, conn = None,
transaction = False):
result = self.dbi.processData(self.sql, {"fileset": fileset,
"workflow": workflow},
conn = conn, transaction = transaction)
return self.formatDict(result)
|
[
"WMCore.Database.DBFormatter.DBFormatter.formatDict"
] |
[((794, 830), 'WMCore.Database.DBFormatter.DBFormatter.formatDict', 'DBFormatter.formatDict', (['self', 'result'], {}), '(self, result)\n', (816, 830), False, 'from WMCore.Database.DBFormatter import DBFormatter\n')]
|
from django.contrib.gis.db import models
class Property(models.Model):
account = models.ForeignKey(
"accounts.Account",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
cadastre = models.ForeignKey(
"cadastres.Cadastre",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
external_id = models.TextField()
buyed_price = models.FloatField("Precio de compra")
estimated_price = models.FloatField("Precio estimado", null=True, blank=True)
class Meta:
unique_together = (("account", "external_id"),)
verbose_name_plural = "Properties"
def __str__(self):
return str(self.account) + " - " + self.external_id
|
[
"django.contrib.gis.db.models.FloatField",
"django.contrib.gis.db.models.TextField",
"django.contrib.gis.db.models.ForeignKey"
] |
[((87, 179), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.Account"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL'}), "('accounts.Account', null=True, blank=True, on_delete=\n models.SET_NULL)\n", (104, 179), False, 'from django.contrib.gis.db import models\n'), ((229, 323), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""cadastres.Cadastre"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL'}), "('cadastres.Cadastre', null=True, blank=True, on_delete=\n models.SET_NULL)\n", (246, 323), False, 'from django.contrib.gis.db import models\n'), ((376, 394), 'django.contrib.gis.db.models.TextField', 'models.TextField', ([], {}), '()\n', (392, 394), False, 'from django.contrib.gis.db import models\n'), ((413, 450), 'django.contrib.gis.db.models.FloatField', 'models.FloatField', (['"""Precio de compra"""'], {}), "('Precio de compra')\n", (430, 450), False, 'from django.contrib.gis.db import models\n'), ((473, 532), 'django.contrib.gis.db.models.FloatField', 'models.FloatField', (['"""Precio estimado"""'], {'null': '(True)', 'blank': '(True)'}), "('Precio estimado', null=True, blank=True)\n", (490, 532), False, 'from django.contrib.gis.db import models\n')]
|
from __future__ import print_function
import pandas as pd
import numpy as np
import os
from collections import OrderedDict
from pria_lifechem.function import *
from prospective_screening_model_names import *
from prospective_screening_metric_names import *
def clean_excel():
dataframe = pd.read_excel('../../output/stage_2_predictions/Keck_LC4_backup.xlsx')
dataframe = dataframe.drop(dataframe.index[[8779]])
dataframe.to_excel('../../output/stage_2_predictions/Keck_LC4_export.xlsx', index=None)
def merge_prediction():
dataframe = pd.read_csv('../../dataset/fixed_dataset/pria_prospective.csv.gz')
molecule_ids = dataframe['Molecule'].tolist()
actual_labels = dataframe['Keck_Pria_AS_Retest'].tolist()
inhibits = dataframe['Keck_Pria_Continuous'].tolist()
complete_df = pd.DataFrame({'molecule': molecule_ids, 'label': actual_labels, 'inhibition': inhibits})
column_names = ['molecule', 'label', 'inhibition']
complete_df = complete_df[column_names]
dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'
model_names = []
for model_name in model_name_mapping.keys():
file_path = '{}/{}.npz'.format(dir_, model_name)
if not os.path.exists(file_path):
continue
print('model: {} exists'.format(model_name))
data = np.load(file_path)
        print(file_path, '\t', data.keys())
y_pred = data['y_pred_on_test'][:, 0]
if y_pred.ndim == 2:
y_pred = y_pred[:, 0]
model_name = model_name_mapping[model_name]
model_names.append(model_name)
complete_df[model_name] = y_pred
print()
model_names = sorted(model_names)
column_names.extend(model_names)
complete_df = complete_df[column_names]
print(complete_df.shape)
complete_df.to_csv('{}/complete_prediction.csv'.format(dir_), index=None)
return
def merge_prediction_old():
dataframe = pd.read_excel('../../output/stage_2_predictions/Keck_LC4_export.xlsx')
molecule_name_list = dataframe['Molecule Name'].tolist()
supplier_id = dataframe['Supplier ID'].tolist()
failed_id = ['F0401-0050', 'F2964-1411', 'F2964-1523']
inhibits = dataframe[
'PriA-SSB AS, normalized for plate and edge effects, correct plate map: % inhibition Alpha, normalized (%)'].tolist()
neo_dataframe = pd.read_csv('../../output/stage_2_predictions/pria_lc4_retest_may18.csv')
failed_molecule_names = neo_dataframe[neo_dataframe['Active'] == 0]['Row Labels'].tolist()
failed_molecule_names += ['SMSSF-0044356', 'SMSSF-0030688']
positive_enumerate = filter(lambda x: x[1] >= 35 and supplier_id[x[0]] not in failed_id and molecule_name_list[x[0]] not in failed_molecule_names, enumerate(inhibits))
positive_idx = map(lambda x: x[0], positive_enumerate)
actual_label = map(lambda x: 1 if x in positive_idx else 0, range(len(supplier_id)))
actual_label = np.array(actual_label)
complete_df = pd.DataFrame({'molecule name': molecule_name_list, 'molecule id': supplier_id, 'label': actual_label, 'inhibition': inhibits})
column_names = ['molecule name', 'molecule id', 'label', 'inhibition']
complete_df = complete_df[column_names]
test_data_df = pd.read_csv('../../dataset/keck_lc4.csv.gz')
test_data_df = test_data_df[['Molecule', 'SMILES', 'Fingerprints']]
complete_df = complete_df.merge(test_data_df, how='left', left_on='molecule id', right_on='Molecule', sort=False)
complete_df.to_csv('LC4_complete.csv', index=None)
dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'
file_path = '{}/{}.npz'.format(dir_, 'vanilla_lstm_19')
data = np.load(file_path)
molecule_id = data['molecule_id']
model_names = []
special_models = ['irv', 'random_forest', 'dockscore', 'consensus', 'baseline']
for model_name in model_name_mapping.keys():
file_path = '{}/{}.npz'.format(dir_, model_name)
if not os.path.exists(file_path):
continue
print('model: {} exists'.format(model_name))
data = np.load(file_path)
if any(x in model_name for x in special_models):
y_pred = data['y_pred_on_test']
else:
y_pred = data['y_pred']
if y_pred.ndim == 2:
y_pred = y_pred[:, 0]
temp_df = pd.DataFrame({'molecule id': molecule_id,
model_name_mapping[model_name]: y_pred})
model_names.append(model_name_mapping[model_name])
complete_df = complete_df.join(temp_df.set_index('molecule id'), on='molecule id')
print()
model_names = sorted(model_names)
column_names.extend(model_names)
complete_df = complete_df[column_names]
print(complete_df.shape)
complete_df.to_csv('{}/complete_prediction.csv'.format(dir_), index=None)
def merge_rank():
dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'
complete_df = pd.read_csv('{}/complete_prediction.csv'.format(dir_))
model_names = complete_df.columns[3:]
rank_df = complete_df[['molecule', 'label', 'inhibition']]
for (idx, model_name) in enumerate(model_names):
order = complete_df[model_name].rank(ascending=False, method='max').tolist()
order = np.array(order)
order = order.astype(np.int)
rank_df[model_name] = order
ensemble_model_names_pairs = OrderedDict()
for ensemble_name, ensemble_model_names in ensemble_model_names_pairs.items():
ensemble_orders = []
for (idx, model_name) in enumerate(model_names):
order = complete_df[model_name].rank(ascending=False, method='max').tolist()
order = np.array(order)
order = order.astype(np.int)
if model_name in ensemble_model_names:
ensemble_orders.append(order)
ensemble_orders = np.vstack(ensemble_orders)
ensemble_order = np.zeros((ensemble_orders.shape[1]))
for i in range(ensemble_orders.shape[1]):
ensemble_order[i] = np.min(ensemble_orders[:, i])
ensemble_order = ensemble_order.astype(int)
temp_df = pd.DataFrame()
temp_df[ensemble_name] = ensemble_order
# Rank the simple ensemble
order = temp_df[ensemble_name].rank(method='max').as_matrix()
order = np.array(order)
order = order.astype(int)
rank_df[ensemble_name] = order
rank_df.to_csv('{}/complete_rank.csv'.format(dir_), index=None)
def merge_evaluation():
dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'
complete_df = pd.read_csv('{}/complete_prediction.csv'.format(dir_))
model_names = complete_df.columns[3:]
metric_df = pd.DataFrame({'Model': model_names})
actual_oracle = complete_df['label'].as_matrix()
actual_oracle = reshape_data_into_2_dim(actual_oracle)
for (metric_name, metric_) in metric_name_mapping.iteritems():
metric_values = []
for model_name in model_names:
pred = complete_df[model_name].as_matrix()
pred = reshape_data_into_2_dim(pred)
actual, pred = collectively_drop_nan(actual_oracle, pred)
value = metric_['function'](actual, pred, **metric_['argument'])
metric_values.append(value)
print(metric_name, '\t', model_name, '\t', value)
metric_df[metric_name] = metric_values
print()
print('saving to {}/complete_evaluation.csv'.format(dir_))
metric_df.to_csv('{}/complete_evaluation.csv'.format(dir_), index=None)
def filter_model_name(model_name):
model_name = model_name.replace('SingleClassification', 'STC')
model_name = model_name.replace('SingleRegression', 'STR')
model_name = model_name.replace('MultiClassification', 'MTC')
model_name = model_name.replace('RandomForest', 'RF')
model_name = model_name.replace('ConsensusDocking', 'ConDock')
model_name = model_name.replace('Docking', 'Dock')
return model_name
if __name__ == '__main__':
# clean_excel()
merge_prediction()
merge_rank()
merge_evaluation()
|
[
"pandas.DataFrame",
"numpy.load",
"pandas.read_csv",
"numpy.zeros",
"os.path.exists",
"pandas.read_excel",
"numpy.min",
"numpy.array",
"collections.OrderedDict",
"numpy.vstack"
] |
[((295, 365), 'pandas.read_excel', 'pd.read_excel', (['"""../../output/stage_2_predictions/Keck_LC4_backup.xlsx"""'], {}), "('../../output/stage_2_predictions/Keck_LC4_backup.xlsx')\n", (308, 365), True, 'import pandas as pd\n'), ((556, 622), 'pandas.read_csv', 'pd.read_csv', (['"""../../dataset/fixed_dataset/pria_prospective.csv.gz"""'], {}), "('../../dataset/fixed_dataset/pria_prospective.csv.gz')\n", (567, 622), True, 'import pandas as pd\n'), ((812, 904), 'pandas.DataFrame', 'pd.DataFrame', (["{'molecule': molecule_ids, 'label': actual_labels, 'inhibition': inhibits}"], {}), "({'molecule': molecule_ids, 'label': actual_labels,\n 'inhibition': inhibits})\n", (824, 904), True, 'import pandas as pd\n'), ((1929, 1999), 'pandas.read_excel', 'pd.read_excel', (['"""../../output/stage_2_predictions/Keck_LC4_export.xlsx"""'], {}), "('../../output/stage_2_predictions/Keck_LC4_export.xlsx')\n", (1942, 1999), True, 'import pandas as pd\n'), ((2345, 2418), 'pandas.read_csv', 'pd.read_csv', (['"""../../output/stage_2_predictions/pria_lc4_retest_may18.csv"""'], {}), "('../../output/stage_2_predictions/pria_lc4_retest_may18.csv')\n", (2356, 2418), True, 'import pandas as pd\n'), ((2918, 2940), 'numpy.array', 'np.array', (['actual_label'], {}), '(actual_label)\n', (2926, 2940), True, 'import numpy as np\n'), ((2960, 3090), 'pandas.DataFrame', 'pd.DataFrame', (["{'molecule name': molecule_name_list, 'molecule id': supplier_id, 'label':\n actual_label, 'inhibition': inhibits}"], {}), "({'molecule name': molecule_name_list, 'molecule id':\n supplier_id, 'label': actual_label, 'inhibition': inhibits})\n", (2972, 3090), True, 'import pandas as pd\n'), ((3227, 3271), 'pandas.read_csv', 'pd.read_csv', (['"""../../dataset/keck_lc4.csv.gz"""'], {}), "('../../dataset/keck_lc4.csv.gz')\n", (3238, 3271), True, 'import pandas as pd\n'), ((3655, 3673), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (3662, 3673), True, 'import numpy as np\n'), ((5361, 5374), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5372, 5374), False, 'from collections import OrderedDict\n'), ((6673, 6709), 'pandas.DataFrame', 'pd.DataFrame', (["{'Model': model_names}"], {}), "({'Model': model_names})\n", (6685, 6709), True, 'import pandas as pd\n'), ((1327, 1345), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (1334, 1345), True, 'import numpy as np\n'), ((4056, 4074), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (4063, 4074), True, 'import numpy as np\n'), ((4309, 4395), 'pandas.DataFrame', 'pd.DataFrame', (["{'molecule id': molecule_id, model_name_mapping[model_name]: y_pred}"], {}), "({'molecule id': molecule_id, model_name_mapping[model_name]:\n y_pred})\n", (4321, 4395), True, 'import pandas as pd\n'), ((5238, 5253), 'numpy.array', 'np.array', (['order'], {}), '(order)\n', (5246, 5253), True, 'import numpy as np\n'), ((5834, 5860), 'numpy.vstack', 'np.vstack', (['ensemble_orders'], {}), '(ensemble_orders)\n', (5843, 5860), True, 'import numpy as np\n'), ((5886, 5920), 'numpy.zeros', 'np.zeros', (['ensemble_orders.shape[1]'], {}), '(ensemble_orders.shape[1])\n', (5894, 5920), True, 'import numpy as np\n'), ((6106, 6120), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6118, 6120), True, 'import pandas as pd\n'), ((6291, 6306), 'numpy.array', 'np.array', (['order'], {}), '(order)\n', (6299, 6306), True, 'import numpy as np\n'), ((1211, 1236), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1225, 1236), False, 'import os\n'), ((3940, 3965), 
'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (3954, 3965), False, 'import os\n'), ((5654, 5669), 'numpy.array', 'np.array', (['order'], {}), '(order)\n', (5662, 5669), True, 'import numpy as np\n'), ((6005, 6034), 'numpy.min', 'np.min', (['ensemble_orders[:, i]'], {}), '(ensemble_orders[:, i])\n', (6011, 6034), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import unittest
from pybeardy.zapper import ZapState
class ZapStateTest(unittest.TestCase):
def test_state(self):
s = ZapState(True, True)
self.assertTrue(s.detect)
self.assertTrue(s.trigger)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pybeardy.zapper.ZapState"
] |
[((278, 293), 'unittest.main', 'unittest.main', ([], {}), '()\n', (291, 293), False, 'import unittest\n'), ((155, 175), 'pybeardy.zapper.ZapState', 'ZapState', (['(True)', '(True)'], {}), '(True, True)\n', (163, 175), False, 'from pybeardy.zapper import ZapState\n')]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys
version = '0.3.0'
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='rmageddon',
version=version,
description='Small linting and building tool for R containers at QBiC',
long_description=readme,
keywords=['R', 'linting', 'lint', 'Docker', 'container'],
author='<NAME>',
author_email='<EMAIL>',
license=license,
scripts=['scripts/rmageddon'],
install_requires=required,
setup_requires=[
'twine>=1.11.0',
'setuptools>=38.6.',
] + ([] if sys.version_info.minor == 4 else ['wheel>=0.31.0']),
packages=find_packages(exclude='docs'),
include_package_data=True
)
|
[
"setuptools.find_packages"
] |
[((830, 859), 'setuptools.find_packages', 'find_packages', ([], {'exclude': '"""docs"""'}), "(exclude='docs')\n", (843, 859), False, 'from setuptools import setup, find_packages\n')]
|
from flask_apispec import MethodResource
from flask_apispec import use_kwargs, doc
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from webargs import fields
from decorator.catch_exception import catch_exception
from decorator.log_request import log_request
from decorator.verify_admin_access import verify_admin_access
from exception.object_not_found import ObjectNotFound
class UpdateUserGroupAssignment(MethodResource, Resource):
db = None
def __init__(self, db):
self.db = db
@log_request
@doc(tags=['user'],
description='Update user group assignment',
responses={
"200": {},
"422.a": {"description": "Object not found: group"},
"422.b": {"description": "Object not found: user"}
})
@use_kwargs({
'user': fields.Int(),
'group': fields.Int(),
})
@jwt_required
@verify_admin_access
@catch_exception
def post(self, **kwargs):
users = self.db.get(self.db.tables["User"], {"id": kwargs["user"]})
if len(users) == 0:
raise ObjectNotFound("user")
groups = self.db.get(self.db.tables["UserGroup"], {"id": kwargs["group"]})
if len(groups) == 0:
raise ObjectNotFound("group")
self.db.delete(self.db.tables["UserGroupAssignment"], {"user_id": kwargs["user"]})
self.db.insert({
"user_id": kwargs["user"],
"group_id": kwargs["group"]
}, self.db.tables["UserGroupAssignment"])
return "", "200 "
|
[
"webargs.fields.Int",
"flask_apispec.doc",
"exception.object_not_found.ObjectNotFound"
] |
[((555, 754), 'flask_apispec.doc', 'doc', ([], {'tags': "['user']", 'description': '"""Update user group assignment"""', 'responses': "{'200': {}, '422.a': {'description': 'Object not found: group'}, '422.b': {\n 'description': 'Object not found: user'}}"}), "(tags=['user'], description='Update user group assignment', responses={\n '200': {}, '422.a': {'description': 'Object not found: group'}, '422.b':\n {'description': 'Object not found: user'}})\n", (558, 754), False, 'from flask_apispec import use_kwargs, doc\n'), ((1118, 1140), 'exception.object_not_found.ObjectNotFound', 'ObjectNotFound', (['"""user"""'], {}), "('user')\n", (1132, 1140), False, 'from exception.object_not_found import ObjectNotFound\n'), ((1273, 1296), 'exception.object_not_found.ObjectNotFound', 'ObjectNotFound', (['"""group"""'], {}), "('group')\n", (1287, 1296), False, 'from exception.object_not_found import ObjectNotFound\n'), ((848, 860), 'webargs.fields.Int', 'fields.Int', ([], {}), '()\n', (858, 860), False, 'from webargs import fields\n'), ((879, 891), 'webargs.fields.Int', 'fields.Int', ([], {}), '()\n', (889, 891), False, 'from webargs import fields\n')]
|