id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3276990 | from .utils import iter_flatten, set_seed, get_config_from_args, \
default_argument_parser, log_args, generate_kfold, cross_validation
from .logging import setup_logger
from .get_dataset_api import get_dataset_api
| StarcoderdataPython |
1734628 | import math
import sys
import itertools
import itertools
import collections
def sa(Type=int):
    """Read one whitespace-separated line from stdin; convert each token with *Type*."""
    return list(map(Type, input().split()))
def solve(t):
    """Solve one test case: print the most frequent two-character substring of s.

    Reads n (the declared length, consumed but otherwise unused) and the
    string s from stdin. Ties resolve to the pair that first reached the
    maximum in first-occurrence order (dict insertion order).
    """
    _n = int(input())  # declared length of s; validated as int, not used further
    s = input()
    pair_counts = collections.Counter(s[i:i + 2] for i in range(len(s) - 1))
    best_pair = max(pair_counts.items(), key=lambda item: item[1])[0]
    print(best_pair)
if __name__ == '__main__':
    # Uncomment the lines below to read from a file and run multiple test cases:
    # sys.stdin = open('input.txt', 'r')
    # t = int(input())
    # for i in range(t):
    # solve(i+1)
    solve(1)  # single test case read directly from stdin
| StarcoderdataPython |
129333 | <reponame>msgoff/transitions<filename>tests/test_markup.py
try:
from builtins import object
except ImportError:
pass
from transitions.core import Enum
from transitions.extensions.markup import MarkupMachine, rep
from transitions.extensions import MachineFactory
from transitions.extensions.factory import HierarchicalMarkupMachine
from .utils import Stuff
from functools import partial
from unittest import TestCase, skipIf
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
try:
import enum
except ImportError:
enum = None
class SimpleModel:
    """Bare-bones model used to verify that models survive a markup round trip."""

    def after_func(self):
        """Callback referenced by transition definitions; intentionally a no-op."""
        return None
class TestRep(TestCase):
    """Unit tests for markup.rep, which renders callbacks as readable strings."""

    def test_rep_string(self):
        """A plain string is returned unchanged."""
        self.assertEqual(rep("string"), "string")

    def test_rep_function(self):
        """A bare function is rendered as its __name__."""
        def check():
            return True
        self.assertTrue(check())
        self.assertEqual(rep(check), "check")

    def test_rep_partial_no_args_no_kwargs(self):
        """An argument-less partial is rendered with empty parentheses."""
        def check():
            return True
        pcheck = partial(check)
        self.assertTrue(pcheck())
        self.assertEqual(rep(pcheck), "check()")

    def test_rep_partial_with_args(self):
        """Positional args bound by partial appear inside the parentheses."""
        def check(result):
            return result
        pcheck = partial(check, True)
        self.assertTrue(pcheck())
        self.assertEqual(rep(pcheck), "check(True)")

    def test_rep_partial_with_kwargs(self):
        """Keyword args bound by partial are rendered as key=value."""
        def check(result=True):
            return result
        pcheck = partial(check, result=True)
        self.assertTrue(pcheck())
        self.assertEqual(rep(pcheck), "check(result=True)")

    def test_rep_partial_with_args_and_kwargs(self):
        """Positional args precede keyword args in the rendered call."""
        def check(result, doublecheck=True):
            return result == doublecheck
        pcheck = partial(check, True, doublecheck=True)
        self.assertTrue(pcheck())
        self.assertEqual(rep(pcheck), "check(True, doublecheck=True)")

    def test_rep_callable_class(self):
        """A callable instance is rendered via its own __repr__."""
        class Check:
            def __init__(self, result):
                self.result = result

            def __call__(self):
                return self.result

            def __repr__(self):
                return "%s(%r)" % (type(self).__name__, self.result)

        ccheck = Check(True)
        self.assertTrue(ccheck())
        self.assertEqual(rep(ccheck), "Check(True)")
class TestMarkupMachine(TestCase):
    """Tests that a MarkupMachine can be rebuilt from its own markup."""

    def setUp(self):
        self.machine_cls = MarkupMachine
        self.states = ['A', 'B', 'C', 'D']
        self.transitions = [
            {'trigger': 'walk', 'source': 'A', 'dest': 'B'},
            {'trigger': 'run', 'source': 'B', 'dest': 'C'},
            {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
        ]
        self.num_trans = len(self.transitions)
        # One auto ("to_<state>") markup entry per (source, dest) state pair.
        self.num_auto = len(self.states) ** 2

    def test_markup_self(self):
        """A machine rebuilt from markup preserves state, models and events."""
        m1 = self.machine_cls(states=self.states, transitions=self.transitions, initial='A')
        m1.walk()
        m2 = self.machine_cls(markup=m1.markup)
        # The 'or' accommodates Enum-state subclasses, where one side holds
        # the Enum member and the other its name.
        self.assertTrue(m1.state == m2.state or m1.state.name == m2.state)
        self.assertEqual(len(m1.models), len(m2.models))
        self.assertEqual(sorted(m1.states.keys()), sorted(m2.states.keys()))
        self.assertEqual(sorted(m1.events.keys()), sorted(m2.events.keys()))
        # The rebuilt machine must be independently operable.
        m2.run()
        m2.sprint()
        self.assertNotEqual(m1.state, m2.state)

    def test_markup_model(self):
        """Models attached to the machine are reconstructed from markup."""
        model1 = SimpleModel()
        m1 = self.machine_cls(model1, states=self.states, transitions=self.transitions, initial='A')
        model1.walk()
        m2 = self.machine_cls(markup=m1.markup)
        model2 = m2.models[0]
        self.assertIsInstance(model2, SimpleModel)
        self.assertEqual(len(m1.models), len(m2.models))
        self.assertTrue(model1.state == model2.state or model1.state.name == model2.state)
        self.assertEqual(sorted(m1.states.keys()), sorted(m2.states.keys()))
        self.assertEqual(sorted(m1.events.keys()), sorted(m2.events.keys()))

    def test_conditions_unless(self):
        """'conditions' and 'unless' callbacks appear in the transition markup."""
        s = Stuff(machine_cls=self.machine_cls)
        s.machine.add_transition('go', 'A', 'B', conditions='this_passes',
                                 unless=['this_fails', 'this_fails_by_default'])
        t = s.machine.markup['transitions']
        self.assertEqual(len(t), 1)
        self.assertEqual(t[0]['trigger'], 'go')
        self.assertEqual(len(t[0]['conditions']), 1)
        self.assertEqual(len(t[0]['unless']), 2)

    def test_auto_transitions(self):
        """auto_transitions_markup toggles auto transitions in the markup, even after init."""
        m1 = self.machine_cls(states=self.states, transitions=self.transitions, initial='A')
        m2 = self.machine_cls(states=self.states, transitions=self.transitions, initial='A',
                              auto_transitions_markup=True)
        self.assertEqual(len(m1.markup.get('transitions')), self.num_trans)
        self.assertEqual(len(m2.markup.get('transitions')), self.num_trans + self.num_auto)
        m1.add_transition('go', 'A', 'B')
        m2.add_transition('go', 'A', 'B')
        self.num_trans += 1
        self.assertEqual(len(m1.markup.get('transitions')), self.num_trans)
        self.assertEqual(len(m2.markup.get('transitions')), self.num_trans + self.num_auto)
        # Flipping the flag after construction must be reflected immediately.
        m1.auto_transitions_markup = True
        m2.auto_transitions_markup = False
        self.assertEqual(len(m1.markup.get('transitions')), self.num_trans + self.num_auto)
        self.assertEqual(len(m2.markup.get('transitions')), self.num_trans)
class TestMarkupHierarchicalMachine(TestMarkupMachine):
    """Re-runs the markup round-trip tests against a hierarchical (nested-state) machine."""

    def setUp(self):
        self.states = ['A', 'B', {'name': 'C',
                                  'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]}]
        self.transitions = [
            {'trigger': 'walk', 'source': 'A', 'dest': 'C_1'},
            {'trigger': 'run', 'source': 'C_1', 'dest': 'C_3_a'},
            {'trigger': 'sprint', 'source': 'C', 'dest': 'B'}
        ]
        # MarkupMachine cannot be imported via get_predefined as of now
        # We want to be able to run these tests without (py)graphviz
        self.machine_cls = HierarchicalMarkupMachine
        self.num_trans = len(self.transitions)
        # NOTE(review): 9 appears to be the number of auto-transition targets
        # in this particular nested tree (A, B, C, C_1, C_2, C_3, C_3_a/b/c)
        # -- confirm against the HSM markup output if the tree changes.
        self.num_auto = len(self.states) * 9

    def test_nested_definitions(self):
        """Nested states with local transitions/initial survive the markup round trip."""
        states = [{'name': 'A'},
                  {'name': 'B'},
                  {'name': 'C',
                   'children': [
                       {'name': '1'},
                       {'name': '2'}],
                   'transitions': [
                       {'trigger': 'go',
                        'source': '1',
                        'dest': '2'}],
                   'initial': '2'}]
        machine = self.machine_cls(states=states, initial='A', auto_transitions=False, name='TestMachine')
        # Drop empty entries and the 'models' key so the comparison is exact.
        markup = {k: v for k, v in machine.markup.items() if v and k != 'models'}
        self.assertEqual(dict(initial='A', states=states, name='TestMachine'), markup)
@skipIf(enum is None, "enum is not available")
class TestMarkupMachineEnum(TestMarkupMachine):
    """Re-runs the markup round-trip tests with Enum-valued states."""

    class States(Enum):
        A = 1
        B = 2
        C = 3
        D = 4

    def setUp(self):
        self.machine_cls = MarkupMachine
        self.states = TestMarkupMachineEnum.States
        self.transitions = [
            {'trigger': 'walk', 'source': self.states.A, 'dest': self.states.B},
            {'trigger': 'run', 'source': self.states.B, 'dest': self.states.C},
            {'trigger': 'sprint', 'source': self.states.C, 'dest': self.states.D}
        ]
        self.num_trans = len(self.transitions)
        # One auto transition per (source, dest) pair, as in the base class.
        self.num_auto = len(self.states)**2
| StarcoderdataPython |
class Solution(object):
    """LeetCode 453 -- Minimum Moves to Equal Array Elements.

    Incrementing n-1 elements by 1 is equivalent to decrementing a single
    element by 1, so the answer is the total distance of all elements down
    to the minimum.
    """

    def minMoves(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        floor = min(nums)
        return sum(num - floor for num in nums)
# Ad-hoc smoke check: [1, 2, 3] requires 3 moves.
s = Solution()
print(s.minMoves([1, 2, 3]))
# https://discuss.leetcode.com/topic/66737/it-is-a-math-question | StarcoderdataPython |
4810610 | <filename>tests/test_tides_api.py
from .context import showtime
from showtime.tidesapi import TidesApiClient
from datetime import datetime
from showtime.tides_util import get_tides
def test_tides_client_returns_result():
    """Integration test: queries the live tides API for today's date.

    NOTE(review): depends on network access and the external service;
    consider mocking TidesApiClient for deterministic CI runs.
    """
    dt = datetime.utcnow()
    start_date = '{0}-{1}-{2}'.format(dt.year, dt.month, dt.day)
    actual = TidesApiClient(start_date).get_tides()
    # Shape/sanity checks rather than exact values, since the data is live.
    assert len(actual.get('location', None)) > 3
    assert len(actual.get('tides')) > 1
    assert actual.get('moon').get('pct', None) >= 0
def test_response_array_filter():
    """Only entries strictly older than the timezone-adjusted timestamp are kept."""
    time_zone_offset = 36000  # seconds (UTC+10)
    threshold = 1595982031.162842 + time_zone_offset
    responses = [
        {'dateTime': 1595980800},
        {'dateTime': 1596067200},
        {'dateTime': 1596153600},
    ]
    older = list(filter(lambda entry: entry['dateTime'] < threshold, responses))
    assert len(older) == 1
def test_tides_util_get_tides_returns_result():
    """Integration test for the get_tides convenience wrapper (hits the live API)."""
    actual = get_tides()
    # Fixed: the dataset-dump artifact "| StarcoderdataPython |" fused into
    # this line made it a syntax/name error; also dropped the redundant
    # "is True" -- a bare truthiness assert is equivalent and idiomatic here.
    assert len(actual.get('location', None)) > 3
"""Retina dataset: retinal OCT images labelled NORMAL, DRUSEN, DME or CNV."""
# Fixed: a dataset-dump id ("1622051 |") was fused onto the module docstring
# line, making the module unparseable; removed.
import re

import tensorflow_datasets as tfds

_DESCRIPTION = """\
Retinal OCT image dataset reflecting Drusen, DME, CNV and Normal
"""

_CITATION = """\
title = {Retinal OCT image data}
author = {paultimothymooney}
publisher = {Kaggle}
url = {https://www.kaggle.com/paultimothymooney/kermany2018 }
"""

_TRAIN_URL = "https://storage.googleapis.com/retinal_oct_archive/retinal_oct_train.zip"
_TEST_URL = "https://storage.googleapis.com/retinal_oct_archive/retinal_oct_test.zip"

# Class labels, in the order used for the tfds ClassLabel feature.
_LABELS = ["NORMAL", "DRUSEN", "DME", "CNV"]

# Matches archive members like "train/NORMAL/img-001.jpeg" (either slash style).
_NAME_RE = re.compile(r"^([\w]*[\\/])(NORMAL|DRUSEN|DME|CNV)(?:/|\\)[\w-]*\.jpeg$")
class RetinaDataset(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for retinaoct dataset."""

    VERSION = tfds.core.Version('1.0.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        # TODO(my_dataset): Specifies the tfds.core.DatasetInfo object
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                "image": tfds.features.Image(),
                "label": tfds.features.ClassLabel(names=_LABELS)
            }),
            # (input, target) pair used by tfds.as_supervised.
            supervised_keys=("image", "label"),
            homepage='https://www.kaggle.com/paultimothymooney/kermany2018',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the train/test archives and map them to tfds splits."""
        train_path, test_path = dl_manager.download([_TRAIN_URL, _TEST_URL])
        return [
            tfds.core.SplitGenerator(
                name=tfds.Split.TRAIN,
                gen_kwargs={
                    "archive": dl_manager.iter_archive(train_path)
                }),
            tfds.core.SplitGenerator(
                name=tfds.Split.TEST,
                gen_kwargs={
                    "archive": dl_manager.iter_archive(test_path)
                }),
        ]

    def _generate_examples(self, archive):
        """Generate images and labels given the directory path.

        Args:
          archive: object that iterates over the zip, yielding (name, fileobj).

        Yields:
          (key, example) pairs; the archive member path serves as the unique
          key, and the label is parsed from the parent directory name.
        """
        for fname, fobj in archive:
            res = _NAME_RE.match(fname)
            if not res:
                # Skip non-image members and unexpected paths.
                continue
            label = res.group(2)
            record = {
                "image": fobj,
                "label": label,
            }
            yield fname, record
| StarcoderdataPython |
146916 | import os
import time
import logger
import random
import tensorflow as tf
import gym
import numpy as np
from collections import deque
from config import args
from utils import set_global_seeds, sf01, explained_variance
from agent import PPO
from env_wrapper import make_env
def main():
    """Train a PPO agent: alternate rollout collection and minibatch SGD epochs."""
    env = make_env()
    set_global_seeds(env, args.seed)
    agent = PPO(env=env)
    batch_steps = args.n_envs * args.batch_steps  # number of steps per update
    if args.save_interval and logger.get_dir():
        # some saving jobs
        pass
    ep_info_buffer = deque(maxlen=100)  # rolling window of recent episode stats
    t_train_start = time.time()
    n_updates = args.n_steps // batch_steps
    runner = Runner(env, agent)
    for update in range(1, n_updates + 1):
        t_start = time.time()
        frac = 1.0 - (update - 1.0) / n_updates  # anneal fraction: 1 -> 0 over training
        lr_now = args.lr  # maybe dynamic change
        clip_range_now = args.clip_range  # maybe dynamic change
        obs, returns, masks, acts, vals, neglogps, advs, rewards, ep_infos = \
            runner.run(args.batch_steps, frac)
        ep_info_buffer.extend(ep_infos)
        loss_infos = []
        # Several epochs of shuffled minibatch updates over the same batch.
        idxs = np.arange(batch_steps)
        for _ in range(args.n_epochs):
            np.random.shuffle(idxs)
            for start in range(0, batch_steps, args.minibatch):
                end = start + args.minibatch
                mb_idxs = idxs[start: end]
                minibatch = [arr[mb_idxs] for arr in [obs, returns, masks, acts, vals, neglogps, advs]]
                loss_infos.append(agent.train(lr_now, clip_range_now, *minibatch))
        t_now = time.time()
        time_this_batch = t_now - t_start
        if update % args.log_interval == 0:
            # How well the value function predicts the empirical returns.
            ev = float(explained_variance(vals, returns))
            logger.logkv('updates', str(update) + '/' + str(n_updates))
            logger.logkv('serial_steps', update * args.batch_steps)
            logger.logkv('total_steps', update * batch_steps)
            logger.logkv('time', time_this_batch)
            logger.logkv('fps', int(batch_steps / (t_now - t_start)))
            logger.logkv('total_time', t_now - t_train_start)
            logger.logkv("explained_variance", ev)
            logger.logkv('avg_reward', np.mean([e['r'] for e in ep_info_buffer]))
            logger.logkv('avg_ep_len', np.mean([e['l'] for e in ep_info_buffer]))
            logger.logkv('adv_mean', np.mean(returns - vals))
            logger.logkv('adv_variance', np.std(returns - vals)**2)
            loss_infos = np.mean(loss_infos, axis=0)
            for loss_name, loss_info in zip(agent.loss_names, loss_infos):
                logger.logkv(loss_name, loss_info)
            logger.dumpkvs()
        if args.save_interval and update % args.save_interval == 0 and logger.get_dir():
            pass
    env.close()
class Runner:
    """Collects rollouts from the (vectorized) environment for PPO updates."""

    def __init__(self, env, agent):
        self.env = env
        self.agent = agent
        # Persistent observation buffer, shape (n_envs, *obs_shape).
        self.obs = np.zeros((args.n_envs,) + env.observation_space.shape, dtype=np.float32)
        self.obs[:] = env.reset()
        self.dones = [False for _ in range(args.n_envs)]

    def run(self, batch_steps, frac):
        """Roll out `batch_steps` steps and compute GAE advantages and returns.

        Returns (obs, returns, dones, actions, values, neglogps, advs, rewards),
        each flattened across the env axis via sf01, plus the episode infos
        collected during the rollout. `frac` is accepted for interface
        compatibility (annealing hook) but is not used here.
        """
        b_obs, b_rewards, b_actions, b_values, b_dones, b_neglogps = [], [], [], [], [], []
        ep_infos = []
        for s in range(batch_steps):
            actions, values, neglogps = self.agent.step(self.obs, self.dones)
            b_obs.append(self.obs.copy())
            b_actions.append(actions)
            b_values.append(values)
            b_neglogps.append(neglogps)
            b_dones.append(self.dones)
            self.obs[:], rewards, self.dones, infos = self.env.step(actions)
            for info in infos:
                maybeinfo = info.get('episode')
                if maybeinfo:
                    ep_infos.append(maybeinfo)
            b_rewards.append(rewards)
        # batch of steps to batch of rollouts
        b_obs = np.asarray(b_obs, dtype=self.obs.dtype)
        b_rewards = np.asarray(b_rewards, dtype=np.float32)
        b_actions = np.asarray(b_actions)
        b_values = np.asarray(b_values, dtype=np.float32)
        b_neglogps = np.asarray(b_neglogps, dtype=np.float32)
        # Fixed: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented, behavior-identical replacement.
        b_dones = np.asarray(b_dones, dtype=bool)
        last_values = self.agent.get_value(self.obs, self.dones)
        # Generalized Advantage Estimation: backward recursion over the batch.
        b_returns = np.zeros_like(b_rewards)
        b_advs = np.zeros_like(b_rewards)
        lastgaelam = 0
        for t in reversed(range(batch_steps)):
            if t == batch_steps - 1:
                # NOTE(review): assumes env.step returned an array-like for
                # self.dones so this arithmetic broadcasts -- confirm the
                # vectorized-env contract.
                mask = 1.0 - self.dones
                nextvalues = last_values
            else:
                mask = 1.0 - b_dones[t + 1]
                nextvalues = b_values[t + 1]
            delta = b_rewards[t] + args.gamma * nextvalues * mask - b_values[t]
            b_advs[t] = lastgaelam = delta + args.gamma * args.lam * mask * lastgaelam
        b_returns = b_advs + b_values
        return (*map(sf01, (b_obs, b_returns, b_dones, b_actions, b_values, b_neglogps, b_advs, b_rewards)), ep_infos)
if __name__ == '__main__':
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ info/warning logs
    logger.configure()
    main()
| StarcoderdataPython |
3267352 | <reponame>JohnnyHao/Py-Spider<filename>SpiderDouban.py
#!/usr/bin/python
#-*- coding: utf-8 -*-
#encoding=utf-8
import urllib2
import urllib
from BeautifulSoup import BeautifulSoup
def getAllImageLink():
    # Fetch the dbmeizi.com front page and download every image found in the
    # "span3" list items, naming each saved file after its data-id attribute.
    # NOTE(review): Python 2 code (urllib2, print statement) using the
    # BeautifulSoup 3 API; the save path is hard-coded to one machine.
    html = urllib2.urlopen('http://www.dbmeizi.com').read()
    soup = BeautifulSoup(html)
    liResult = soup.findAll('li', attrs={"class": "span3"})
    for li in liResult:
        imageEntityArray = li.findAll('img')
        for image in imageEntityArray:
            # The real image URL is lazy-loaded via data-src, not src.
            link = image.get('data-src')
            imageName = image.get('data-id')
            filesavepath = '/Users/mll-001/Documents/Study_Space/Python-Git/Py-Spider/DoubanImage/%s.jpg' % imageName
            urllib.urlretrieve(link, filesavepath)
            print filesavepath
# Fixed: the dataset-dump separator "| StarcoderdataPython |" was fused onto
# this line, which would be evaluated as an invalid expression; removed.
getAllImageLink()
3399546 | """
Scripts to manage Mailman mailing lists.
"""
from .utils import confirm, DocOptArgs, entrypoint
from ..plumbing.common import Owner
from ..tasks import mailman
@entrypoint
def create(opts: DocOptArgs, owner: Owner):
    """
    Create a Mailman mailing list.
    If SUFFIX is omitted, the list will be named after its owner, without a suffix.
    Usage: {script} OWNER [SUFFIX]
    """
    name, admin = mailman._list_name_owner(owner, opts["SUFFIX"])
    # NOTE(review): "<EMAIL>" below is a data-redaction artifact; the original
    # format string presumably contained the list's mail domain (the second
    # .format() argument -- the "(!)" suffix-less warning -- is currently
    # unused because only one placeholder survived). Confirm against the
    # project history before relying on this output.
    print("List address: <EMAIL>{}".format(name, "" if opts["SUFFIX"] else " (!)"))
    print("Owner: {}".format(admin))
    confirm("Create this list?")
    result = mailman.create_list(owner, opts["SUFFIX"])
    if result:
        _, password = result.value
        if password:
            print("Created Mailman list {!r} owned by {}".format(name, admin))
        else:
            # No password returned means the list already existed and only
            # its owner was (re)set.
            print("Reset owner of list {!r} to {}".format(name, admin))
@entrypoint
def delete(opts: DocOptArgs, owner: Owner):
    """
    Delete a Mailman mailing list, and optionally its archives.
    Usage: {script} OWNER [SUFFIX] [--archives]
    """
    name, _ = mailman._list_name_owner(owner, opts["SUFFIX"])
    archives = opts["--archives"]
    # Fixed: this line was mangled by data redaction ('"...".<EMAIL>(name)'
    # is invalid syntax -- the "<EMAIL>" token replaced part of the original
    # .format() call). Restored to a minimal valid form; NOTE(review):
    # confirm the intended address format (including mail domain) against
    # the sibling `create` command and project history.
    print("List address: {}".format(name))
    confirm("Delete this list{}?".format(" and its archives" if archives else ""))
    if mailman.remove_list(owner, opts["SUFFIX"], archives):
        print("Deleted Mailman list {!r}".format(name))
| StarcoderdataPython |
3360180 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This capacity type describes transmission lines that can be built by the
optimization at a cost. These investment decisions are linearized, i.e.
the decision is not whether to build a specific transmission line, but how
much capacity to build at a particular transmission corridor. Once built, the
capacity remains available for the duration of the line's pre-specified
lifetime. The line flow limits are assumed to be the same in each direction,
e.g. a 500 MW line from Zone 1 to Zone 2 will allow flows of 500 MW from
Zone 1 to Zone 2 and vice versa.
The cost input to the model is an annualized cost per unit capacity.
If the optimization makes the decision to build new capacity, the total
annualized cost is incurred in each period of the study (and multiplied by
the number of years the period represents) for the duration of the
transmission line's lifetime.
"""
import csv
import os.path
from pyomo.environ import Set, Param, Var, Expression, NonNegativeReals, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.auxiliary import cursor_to_df
from gridpath.auxiliary.db_interface import setup_results_import
from gridpath.auxiliary.dynamic_components import \
tx_capacity_type_operational_period_sets
from gridpath.auxiliary.validations import write_validation_to_database, \
get_expected_dtypes, get_tx_lines, validate_dtypes, validate_values, \
validate_idxs
# TODO: can we have different capacities depending on the direction
# TODO: add fixed O&M costs similar to gen_new_lin
def add_model_components(
    m, d, scenario_directory, subproblem, stage
):
    """
    Add the tx_new_lin sets, params, variables and expressions to model *m*,
    and register this capacity type with the dynamic components in *d*.

    The following Pyomo model components are defined in this module:

    +-------------------------------------------------------------------------+
    | Sets |
    +=========================================================================+
    | | :code:`TX_NEW_LIN_VNTS` |
    | |
    | A two-dimensional set of line-vintage combinations to help describe |
    | the periods in time when transmission line capacity can be built in the |
    | optimization. |
    +-------------------------------------------------------------------------+

    +-------------------------------------------------------------------------+
    | Required Input Params |
    +=========================================================================+
    | | :code:`tx_new_lin_lifetime_yrs` |
    | | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
    | | *Within*: :code:`NonNegativeReals` |
    | |
    | The transmission line's lifetime, i.e. how long line capacity of a |
    | particular vintage remains operational. |
    +-------------------------------------------------------------------------+
    | | :code:`tx_new_lin_annualized_real_cost_per_mw_yr` |
    | | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
    | | *Within*: :code:`NonNegativeReals` |
    | |
    | The transmission line's cost to build new capacity in annualized |
    | real dollars per MW. |
    +-------------------------------------------------------------------------+

    .. note:: The cost input to the model is a levelized cost per unit
        capacity. This annualized cost is incurred in each period of the study
        (and multiplied by the number of years the period represents) for
        the duration of the project's lifetime. It is up to the user to
        ensure that the :code:`tx_new_lin_lifetime_yrs` and
        :code:`tx_new_lin_annualized_real_cost_per_mw_yr` parameters are
        consistent.

    +-------------------------------------------------------------------------+
    | Derived Sets |
    +=========================================================================+
    | | :code:`OPR_PRDS_BY_TX_NEW_LIN_VINTAGE` |
    | | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
    | |
    | Indexed set that describes the operational periods for each possible |
    | transmission line-vintage combination, based on the |
    | :code:`tx_new_lin_lifetime_yrs`. For instance, transmission capacity |
    | of the 2020 vintage with lifetime of 30 years will be assumed |
    | operational starting Jan 1, 2020 and through Dec 31, 2049, but will |
    | *not* be operational in 2050. |
    +-------------------------------------------------------------------------+
    | | :code:`TX_NEW_LIN_OPR_PRDS` |
    | |
    | Two-dimensional set that includes the periods when transmission |
    | capacity of any vintage *could* be operational if built. This set is |
    | added to the list of sets to join to get the final |
    | :code:`TRANMISSION_OPERATIONAL_PERIODS` set defined in |
    | **gridpath.transmission.capacity.capacity**. |
    +-------------------------------------------------------------------------+
    | | :code:`TX_NEW_LIN_VNTS_OPR_IN_PRD` |
    | | *Defined over*: :code:`PERIODS` |
    | |
    | Indexed set that describes the transmission line-vintages that could |
    | be operational in each period based on the |
    | :code:`tx_new_lin_lifetime_yrs`. |
    +-------------------------------------------------------------------------+

    +-------------------------------------------------------------------------+
    | Variables |
    +=========================================================================+
    | | :code:`TxNewLin_Build_MW` |
    | | *Defined over*: :code:`TX_NEW_LIN_VNTS` |
    | | *Within*: :code:`NonNegativeReals` |
    | |
    | Determines how much transmission capacity of each possible vintage is |
    | built at each :code:`tx_new_lin transmission line`. |
    +-------------------------------------------------------------------------+

    +-------------------------------------------------------------------------+
    | Expressions |
    +=========================================================================+
    | | :code:`TxNewLin_Capacity_MW` |
    | | *Defined over*: :code:`TX_NEW_LIN_OPR_PRDS` |
    | | *Within*: :code:`NonNegativeReals` |
    | |
    | The transmission capacity of a line in a given operational period is |
    | equal to the sum of all capacity-build of vintages operational in that |
    | period. |
    +-------------------------------------------------------------------------+
    """

    # Sets
    ###########################################################################

    m.TX_NEW_LIN_VNTS = Set(dimen=2)

    # Required Params
    ###########################################################################

    m.tx_new_lin_lifetime_yrs = Param(
        m.TX_NEW_LIN_VNTS,
        within=NonNegativeReals
    )

    m.tx_new_lin_annualized_real_cost_per_mw_yr = Param(
        m.TX_NEW_LIN_VNTS,
        within=NonNegativeReals
    )

    # Derived Sets
    ###########################################################################

    m.OPR_PRDS_BY_TX_NEW_LIN_VINTAGE = Set(
        m.TX_NEW_LIN_VNTS,
        initialize=operational_periods_by_new_build_transmission_vintage
    )

    m.TX_NEW_LIN_OPR_PRDS = Set(
        dimen=2,
        initialize=new_build_transmission_operational_periods
    )

    m.TX_NEW_LIN_VNTS_OPR_IN_PRD = Set(
        m.PERIODS, dimen=2,
        initialize=new_build_transmission_vintages_operational_in_period
    )

    # Variables
    ###########################################################################

    m.TxNewLin_Build_MW = Var(
        m.TX_NEW_LIN_VNTS,
        within=NonNegativeReals
    )

    # Expressions
    ###########################################################################

    m.TxNewLin_Capacity_MW = Expression(
        m.TX_NEW_LIN_OPR_PRDS,
        rule=tx_new_lin_capacity_rule
    )

    # Dynamic Components
    ###########################################################################

    # Register this capacity type's operational-periods set so the generic
    # transmission capacity module can union it into the final set.
    getattr(d, tx_capacity_type_operational_period_sets).append(
        "TX_NEW_LIN_OPR_PRDS",
    )
# Set Rules
###############################################################################
def operational_periods_by_new_build_transmission_vintage(mod, g, v):
    """Periods in [v, v + lifetime) during which capacity of vintage (g, v) operates."""
    lifetime = mod.tx_new_lin_lifetime_yrs[g, v]
    return [prd for prd in mod.PERIODS if v <= prd < v + lifetime]
def new_build_transmission_operational_periods(mod):
    """Unique (line, period) pairs in which any vintage of the line could operate."""
    unique_pairs = {
        (g, p)
        for (g, v) in mod.TX_NEW_LIN_VNTS
        for p in mod.OPR_PRDS_BY_TX_NEW_LIN_VINTAGE[g, v]
    }
    return list(unique_pairs)
def new_build_transmission_vintages_operational_in_period(mod, p):
    """All (line, vintage) pairs whose operational window covers period *p*."""
    return [
        (g, v)
        for (g, v) in mod.TX_NEW_LIN_VNTS
        if p in mod.OPR_PRDS_BY_TX_NEW_LIN_VINTAGE[g, v]
    ]
# Expression Rules
###############################################################################
def tx_new_lin_capacity_rule(mod, g, p):
    """TxNewLin_Capacity_MW expression: capacity of line *g* available in period *p*.

    Adds up the build variables of every vintage of *g* whose lifetime spans
    *p*. In periods where no vintage of the line is operational the sum is
    empty, i.e. the capacity is 0 for the purposes of the objective function.
    """
    operational_vintages = [
        v for (line, v) in mod.TX_NEW_LIN_VNTS_OPR_IN_PRD[p] if line == g
    ]
    return sum(mod.TxNewLin_Build_MW[g, v] for v in operational_vintages)
# Tx Capacity Type Methods
###############################################################################
def min_transmission_capacity_rule(mod, g, p):
    """Lower flow bound for line *g* in period *p*: limits are symmetric, so
    this is the negative of the built capacity."""
    capacity = mod.TxNewLin_Capacity_MW[g, p]
    return -capacity
def max_transmission_capacity_rule(mod, g, p):
    """Upper flow bound for line *g* in period *p*: the capacity built so far."""
    capacity = mod.TxNewLin_Capacity_MW[g, p]
    return capacity
def tx_capacity_cost_rule(mod, g, p):
    """Annualized capacity cost of line *g* in period *p*.

    Sums build-MW times annualized cost over every vintage of the line that
    is operational in the current period.
    """
    operational_vintages = [
        v for (line, v) in mod.TX_NEW_LIN_VNTS_OPR_IN_PRD[p] if line == g
    ]
    return sum(
        mod.TxNewLin_Build_MW[g, v]
        * mod.tx_new_lin_annualized_real_cost_per_mw_yr[g, v]
        for v in operational_vintages
    )
# Input-Output
###############################################################################
def load_module_specific_data(
    m, data_portal, scenario_directory, subproblem, stage
):
    """Load tx_new_lin lifetime and cost params from the scenario's tab file."""
    # TODO: throw an error when a line of the 'tx_new_lin' capacity
    # type is not found in new_build_transmission_vintage_costs.tab
    data_portal.load(
        filename=os.path.join(scenario_directory, str(subproblem), str(stage), "inputs",
                              "new_build_transmission_vintage_costs.tab"),
        # (transmission_line, vintage) indexes both loaded params.
        index=m.TX_NEW_LIN_VNTS,
        select=("transmission_line", "vintage",
                "tx_lifetime_yrs",
                "tx_annualized_real_cost_per_mw_yr"),
        param=(m.tx_new_lin_lifetime_yrs,
               m.tx_new_lin_annualized_real_cost_per_mw_yr)
    )
# TODO: untested
# TODO: untested
def export_module_specific_results(
    m, d, scenario_directory, subproblem, stage
):
    """Write the new-build transmission capacity decisions to a results CSV.

    :param m: the solved Pyomo model
    :param d: the dynamic components object
    :param scenario_directory:
    :param subproblem:
    :param stage:
    :return:
    """
    # Export transmission capacity
    with open(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
                           "transmission_new_capacity.csv"),
              "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["transmission_line", "period",
                         "load_zone_from", "load_zone_to",
                         "new_build_transmission_capacity_mw"])
        # NOTE(review): this iterates ALL transmission operational periods but
        # indexes TxNewLin_Build_MW, which is declared only over
        # TX_NEW_LIN_VNTS -- confirm behavior when other capacity types are
        # present in the scenario.
        for (transmission_line, p) in m.TX_OPR_PRDS:
            writer.writerow([
                transmission_line,
                p,
                m.load_zone_from[transmission_line],
                m.load_zone_to[transmission_line],
                value(m.TxNewLin_Build_MW[transmission_line, p])
            ])
# Database
###############################################################################
def get_module_specific_inputs_from_database(
    scenario_id, subscenarios, subproblem, stage, conn
):
    """Query (line, vintage, lifetime, cost) rows for this scenario's portfolio.

    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem:
    :param stage:
    :param conn: database connection
    :return: a cursor over the cost rows
    """
    c = conn.cursor()
    # Cross-join the portfolio's lines with the scenario's periods, then keep
    # only (line, vintage) pairs that have cost data.
    # NOTE(review): str.format interpolation is safe here only because the
    # subscenario IDs are internally generated integers -- confirm; prefer
    # parameterized queries if these ever come from user input.
    tx_cost = c.execute(
        """SELECT transmission_line, vintage, tx_lifetime_yrs,
        tx_annualized_real_cost_per_mw_yr
        FROM inputs_transmission_portfolios
        CROSS JOIN
        (SELECT period as vintage
        FROM inputs_temporal_periods
        WHERE temporal_scenario_id = {}) as relevant_periods
        INNER JOIN
        (SELECT transmission_line, vintage, tx_lifetime_yrs,
        tx_annualized_real_cost_per_mw_yr
        FROM inputs_transmission_new_cost
        WHERE transmission_new_cost_scenario_id = {} ) as cost
        USING (transmission_line, vintage )
        WHERE transmission_portfolio_scenario_id = {};""".format(
            subscenarios.TEMPORAL_SCENARIO_ID,
            subscenarios.TRANSMISSION_NEW_COST_SCENARIO_ID,
            subscenarios.TRANSMISSION_PORTFOLIO_SCENARIO_ID
        )
    )

    return tx_cost
def write_module_specific_model_inputs(
        scenario_directory, scenario_id, subscenarios, subproblem, stage, conn):
    """
    Get inputs from database and write out the model input .tab file
    (new_build_transmission_vintage_costs.tab, tab-delimited).

    :param scenario_directory: string, the scenario directory
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem:
    :param stage:
    :param conn: database connection
    :return:
    """

    tx_cost = get_module_specific_inputs_from_database(
        scenario_id, subscenarios, subproblem, stage, conn)

    with open(os.path.join(scenario_directory, str(subproblem), str(stage), "inputs",
                           "new_build_transmission_vintage_costs.tab"),
              "w", newline="") as existing_tx_capacity_tab_file:
        writer = csv.writer(existing_tx_capacity_tab_file,
                            delimiter="\t", lineterminator="\n")

        # Write header
        writer.writerow(
            ["transmission_line", "vintage",
             "tx_lifetime_yrs", "tx_annualized_real_cost_per_mw_yr"]
        )

        # One row per (line, vintage), straight from the database cursor.
        for row in tx_cost:
            writer.writerow(row)
def import_module_specific_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """Import the new-build capacity results CSV into the results table.

    Results are first loaded into a temporary table, then inserted into the
    permanent table in sorted order (the spin_on_database_lock helper retries
    on SQLite lock contention).

    :param scenario_id:
    :param subproblem:
    :param stage:
    :param c: database cursor
    :param db: database connection
    :param results_directory:
    :param quiet: suppress progress output when True
    :return:
    """
    # New build capacity results
    if not quiet:
        print("transmission new build")
    # Delete prior results and create temporary import table for ordering
    setup_results_import(
        conn=db, cursor=c,
        table="results_transmission_capacity_new_build",
        scenario_id=scenario_id, subproblem=subproblem, stage=stage
    )

    # Load results into the temporary table
    results = []
    with open(os.path.join(results_directory,
                           "transmission_new_capacity.csv"),
              "r") as capacity_file:
        reader = csv.reader(capacity_file)

        next(reader)  # skip header
        for row in reader:
            transmission_line = row[0]
            period = row[1]
            load_zone_from = row[2]
            load_zone_to = row[3]
            new_build_transmission_capacity_mw = row[4]

            results.append(
                (scenario_id, transmission_line, period, subproblem, stage,
                 load_zone_from, load_zone_to,
                 new_build_transmission_capacity_mw)
            )
    insert_temp_sql = """
        INSERT INTO
        temp_results_transmission_capacity_new_build{}
        (scenario_id, transmission_line, period, subproblem_id, stage_id,
        load_zone_from, load_zone_to,
        new_build_transmission_capacity_mw)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?);
        """.format(scenario_id)
    spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql, data=results)

    # Insert sorted results into permanent results table
    insert_sql = """
        INSERT INTO results_transmission_capacity_new_build
        (scenario_id, transmission_line, period, subproblem_id, stage_id,
        load_zone_from, load_zone_to, new_build_transmission_capacity_mw)
        SELECT
        scenario_id, transmission_line, period, subproblem_id, stage_id,
        load_zone_from, load_zone_to, new_build_transmission_capacity_mw
        FROM temp_results_transmission_capacity_new_build{}
        ORDER BY scenario_id, transmission_line, period, subproblem_id, 
        stage_id;
        """.format(scenario_id)
    spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(),
                          many=False)
# Validation
###############################################################################
def validate_module_specific_inputs(scenario_id, subscenarios, subproblem, stage, conn):
    """
    Get inputs from database and validate the inputs
    :param scenario_id: ID of the scenario being validated
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem: subproblem ID
    :param stage: stage ID
    :param conn: database connection
    :return: None; findings are written to the validation results table
    """
    # Fetch the new-build transmission cost inputs and the tx lines that use
    # the "tx_new_lin" capacity type for this scenario.
    tx_cost = get_module_specific_inputs_from_database(
        scenario_id, subscenarios, subproblem, stage, conn
    )
    tx_lines = get_tx_lines(conn, scenario_id, subscenarios, "capacity_type", "tx_new_lin")

    # Convert input data into pandas DataFrame
    df = cursor_to_df(tx_cost)

    # get the tx lines lists
    tx_lines_w_cost = df["transmission_line"].unique()

    # Get expected dtypes
    expected_dtypes = get_expected_dtypes(
        conn=conn,
        tables=["inputs_transmission_new_cost"]
    )

    # Check dtypes
    dtype_errors, error_columns = validate_dtypes(df, expected_dtypes)
    write_validation_to_database(
        conn=conn,
        scenario_id=scenario_id,
        subproblem_id=subproblem,
        stage_id=stage,
        gridpath_module=__name__,
        db_table="inputs_transmission_new_cost",
        severity="High",
        errors=dtype_errors
    )

    # Check valid numeric columns are non-negative.  Columns that already
    # failed the dtype check are excluded so errors are not double-reported.
    numeric_columns = [c for c in df.columns
                       if expected_dtypes[c] == "numeric"]
    valid_numeric_columns = set(numeric_columns) - set(error_columns)
    write_validation_to_database(
        conn=conn,
        scenario_id=scenario_id,
        subproblem_id=subproblem,
        stage_id=stage,
        gridpath_module=__name__,
        db_table="inputs_transmission_new_cost",
        severity="High",
        errors=validate_values(df, valid_numeric_columns,
                               "transmission_line", min=0)
    )

    # Check that all binary new build tx lines are available in >=1 vintage
    msg = "Expected cost data for at least one vintage."
    write_validation_to_database(
        conn=conn,
        scenario_id=scenario_id,
        subproblem_id=subproblem,
        stage_id=stage,
        gridpath_module=__name__,
        db_table="inputs_transmission_new_cost",
        severity="Mid",
        errors=validate_idxs(actual_idxs=tx_lines_w_cost,
                             req_idxs=tx_lines,
                             idx_label="transmission_line",
                             msg=msg)
    )
| StarcoderdataPython |
4829847 | <reponame>jisazaTappsi/mastermind
#!/usr/bin/env python
"""Test for code.py"""
import unittest
from shatter.code import Code
from tests.generated_code import code_functions as f
from tests.testing_helpers import common_testing_code
from shatter.custom_operator import CustomOperator
__author__ = '<NAME>'
class CodeTest(unittest.TestCase):
    """Checks the string representation semantics of ``Code`` expressions."""

    @classmethod
    def setUpClass(cls):
        # Start from a clean generated-functions file for this test module.
        common_testing_code.reset_functions_file(f.__file__, hard_reset=True)

    def test_inequality_different_operator(self):
        """Always false if there is an operator mismatch"""
        lhs = Code()
        rhs = Code()
        eq_expr = (lhs == rhs)
        lt_expr = (lhs < rhs)
        # The comparison must fail in both directions.
        self.assertNotEqual(str(eq_expr), str(lt_expr))
        self.assertNotEqual(str(lt_expr), str(eq_expr))

    def test_equality(self):
        """Two expressions with the same operator and operands stringify identically."""
        lhs = Code()
        rhs = Code()
        for symbol in CustomOperator.OPERATORS.values():
            expr_a = eval(f'lhs {symbol} rhs')
            expr_b = eval(f'lhs {symbol} rhs')
            self.assertEqual(str(expr_a), str(expr_b))

    def test_no_commutation(self):
        """Swapping operand order always changes the representation."""
        lhs = Code()
        rhs = Code()
        for symbol in CustomOperator.OPERATORS.values():
            forward_expr = eval(f'lhs {symbol} rhs')
            swapped_expr = eval(f'rhs {symbol} lhs')
            self.assertNotEqual(str(forward_expr), str(swapped_expr))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1608990 | <gh_stars>1-10
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import List, Optional
import numpy as np
from pymorphy2 import MorphAnalyzer
from russian_tagsets import converters
from deeppavlov.core.common.registry import register
from deeppavlov.core.models.serializable import Serializable
from deeppavlov.models.morpho_tagger.common_tagger import get_tag_distance
class BasicLemmatizer(Serializable):
    """
    Base class for lemmatizers.

    Subclasses implement :meth:`_lemmatize` (single-word lemmatization);
    :meth:`__call__` applies it across a batch of sentences.
    """

    def __init__(self, save_path: Optional[str] = None,
                 load_path: Optional[str] = None, **kwargs) -> None:
        super().__init__(save_path, load_path, **kwargs)

    @abstractmethod
    def _lemmatize(self, word: str, tag: Optional[str] = None) -> str:
        """
        Lemmatize a single word given its (optional) morphological tag.

        Args:
            word: the input word.
            tag: optional morphological tag.

        Returns:
            the lemmatized word.
        """
        raise NotImplementedError("Your lemmatizer must implement the abstract method _lemmatize.")

    def __call__(self, data: List[List[str]], tags: Optional[List[List[str]]] = None) -> List[List[str]]:
        """
        Lemmatize every word in a batch of sentences.

        Args:
            data: the batch of sentences (lists of words).
            tags: the batch of morphological tags (if available).

        Returns:
            a batch of lemmatized sentences.
        """
        if tags is None:
            # No tags supplied: lemmatize each word without tag information.
            tags = [[None] * len(sentence) for sentence in data]
        if len(tags) != len(data):
            raise ValueError("There must be the same number of tag sentences as the number of word sentences.")
        for sentence, sentence_tags in zip(data, tags):
            if len(sentence) != len(sentence_tags):
                raise ValueError("Tag sentence must be of the same length as the word sentence.")
        return [[self._lemmatize(word, tag) for word, tag in zip(sentence, sentence_tags)]
                for sentence, sentence_tags in zip(data, tags)]
@register("UD_pymorphy_lemmatizer")
class UDPymorphyLemmatizer(BasicLemmatizer):
"""
A class that returns a normal form of a Russian word given its morphological tag in UD format.
Lemma is selected from one of PyMorphy parses,
the parse whose tag resembles the most a known UD tag is chosen.
"""
def __init__(self, save_path: Optional[str] = None, load_path: Optional[str] = None,
transform_lemmas=False, **kwargs) -> None:
self.transform_lemmas = transform_lemmas
self._reset()
self.analyzer = MorphAnalyzer()
self.converter = converters.converter("opencorpora-int", "ud20")
super().__init__(save_path, load_path, **kwargs)
def save(self, *args, **kwargs):
pass
def load(self, *args, **kwargs):
pass
def _reset(self):
self.memo = dict()
def _lemmatize(self, word: str, tag: Optional[str] = None) -> str:
lemma = self.memo.get((word, tag))
if lemma is not None:
return lemma
parses = self.analyzer.parse(word)
best_lemma, best_distance = word, np.inf
for i, parse in enumerate(parses):
curr_tag, curr_lemma = self.converter(str(parse.tag)), parse.normal_form
distance = get_tag_distance(tag, curr_tag)
if distance < best_distance:
best_lemma, best_distance = curr_lemma, distance
if distance == 0:
break
self.memo[(word, tag)] = best_lemma
return best_lemma
| StarcoderdataPython |
1760025 | <filename>Spark/DL_H2O.py
# Spark + H2O deep-learning demo: trains a multi-class product recommender on
# telco usage features, evaluates it on a holdout split and saves the model.
from pysparkling import *

# `sc` is the SparkContext injected by pyspark / sparkling-water at startup.
hc = H2OContext.getOrCreate(sc)

import time
import h2o
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator, H2ODeepLearningEstimator

input1 = h2o.import_file("/home/admin/RECO/cluster8.csv", sep=",")
input_df = input1.as_data_frame(use_pandas=True)

# Keep only the identifier, the target and the month-1 usage features.
# (Renamed from `input` so the Python builtin is no longer shadowed.)
data = input_df[['R_MSISDN', 'R_PRODUCT_ID', 'M1_VOICE_REVENUE', 'M1_TOTAL_VOICE_DURATION',
                 'M1_DATA_REVENUE', 'M1_TOTAL_DATA_VOLUME', 'M1_SMS_REVENUE', 'M1_SMS_COUNT',
                 'M1_ACTIVE_DAYS', 'M1_ENGAGEMENT_INDEX', 'RATING']].copy()
data.head(5)

# Re-import into H2O; the target column R_PRODUCT_ID is a factor so the
# deep-learning estimator trains a classifier.
data = h2o.H2OFrame(
    data,
    column_names=["R_MSISDN", "R_PRODUCT_ID", "M1_VOICE_REVENUE", "M1_TOTAL_VOICE_DURATION",
                  "M1_DATA_REVENUE", "M1_TOTAL_DATA_VOLUME", "M1_SMS_REVENUE", "M1_SMS_COUNT",
                  "M1_ACTIVE_DAYS", "M1_ENGAGEMENT_INDEX", "RATING"],
    column_types=["numeric", "factor", "numeric", "numeric", "numeric", "numeric",
                  "numeric", "numeric", "numeric", "numeric", "numeric"])

# 65% train / 35% holdout split.
train, test = data.split_frame(ratios=[.65])

y = "R_PRODUCT_ID"      # target: product id (factor)
x = train.names[2:11]   # features: the M1_* usage columns

dl_1 = H2ODeepLearningEstimator(nfolds=3,
                                fold_assignment="Modulo",
                                keep_cross_validation_predictions=True,
                                overwrite_with_best_model=False,
                                epochs=10, hidden=[5, 5],
                                score_validation_samples=10000,
                                score_duty_cycle=0.025,
                                adaptive_rate=False,
                                rate=0.01,
                                activation="Tanh",
                                rate_annealing=2e-6,
                                momentum_start=0.2,
                                momentum_stable=0.4,
                                momentum_ramp=1e7,
                                l1=1e-5,
                                l2=1e-5,
                                seed=1)

start = time.time()
dl_1.train(x, y, train)
done = time.time()
elapsed = done - start
print(elapsed)

pred = dl_1.predict(test)
pred.head()

# BUG FIX: was `dl_1.model_performance(prostate)` — `prostate` is undefined in
# this script; evaluate on the holdout frame instead.
performance = dl_1.model_performance(test)
performance.show()

model_path = h2o.save_model(model=dl_1, path="/tmp/mymodel", force=True)
# BUG FIX: was a Python 2 `print model_path` statement, a SyntaxError under
# Python 3 which the rest of the script targets.
print(model_path)

h2o.shutdown()
76841 | <filename>bayesapi/fips.py
import csv
import os
from snaql.factory import Snaql
def read_fips(fn):
    """Parse a census FIPS csv into a list of dicts keyed by column name."""
    field_names = ('state-name', 'state-code', 'county-code', 'county-name', 'class-code')
    with open(fn) as handle:
        return [dict(zip(field_names, record))
                for record in csv.reader(handle, delimiter=',')]
def fips_sets(rows):
    """Collect the distinct state and state+county FIPS codes from parsed rows."""
    states = {row['state-code'] for row in rows}
    counties = {"{}{}".format(row['state-code'], row['county-code']) for row in rows}
    return {'states': states, 'counties': counties}
# In order to be considered a "fips column", a column must contain at
# least 10% of either state or state-county fips values, and 90%
# column values must be either valid state or state-county values
def fips_cols(sets, col_names, cursor):
    """Identify which columns of a table hold state / county FIPS codes.

    :param sets: dict with 'states' and 'counties' sets (see fips_sets)
    :param col_names: column names, in the same order as each cursor row
    :param cursor: iterable of rows (tuples aligned with col_names)
    :return: {'state': col_name_or_None, 'county': col_name_or_None}
    """
    state_count = len(sets['states'])
    county_count = len(sets['counties'])
    col_threshold = 0.10
    miss_threshold = 0.90

    # Tally, per column, how many cells look like a state code, a
    # state+county code, or neither.
    # BUG FIX: the original folded with the bare builtin `reduce`, which is a
    # NameError on Python 3; a plain loop needs no import.
    scoreboard = [{'name': c, 'county': 0, 'state': 0, 'miss': 0} for c in col_names]
    for row in cursor:
        for cell, entry in zip(row, scoreboard):
            cs = str(cell)
            # zfill restores leading zeros lost by numeric storage.
            if cs.zfill(2) in sets['states']:
                entry['state'] += 1
            elif cs.zfill(5) in sets['counties']:
                entry['county'] += 1
            else:
                entry['miss'] += 1

    # Pick the best-scoring column for each kind, applying both thresholds.
    best = {'state': {'name': None, 'val': 0},
            'county': {'name': None, 'val': 0}}
    for entry in scoreboard:
        state_val = entry['state'] / max(1, entry['state'] + entry['miss'])
        county_val = entry['county'] / max(1, entry['county'] + entry['miss'])
        if (state_val > miss_threshold
                and state_val > best['state']['val']
                and entry['state'] / max(1, state_count) > col_threshold):
            best['state'] = {'name': entry['name'], 'val': state_val}
        if (county_val > miss_threshold
                and county_val > best['county']['val']
                # BUG FIX: the best county candidate was previously tracked
                # with `state_val`, so ties between county columns were
                # resolved by the wrong ratio.
                and entry['county'] / max(1, county_count) > col_threshold):
            best['county'] = {'name': entry['name'], 'val': county_val}

    return {'state': best['state']['name'],
            'county': best['county']['name']}
def find_fips_cols(cfg, bdb, fips_file):
    """Scan every column of the configured table for FIPS codes.

    :param cfg: config object providing `table_name`
    :param bdb: database handle whose `execute` returns a cursor
    :param fips_file: path to the census FIPS csv
    :return: dict {'state': col_name_or_None, 'county': col_name_or_None}
    """
    # Load the SELECT-all query from the packaged snaql template.
    root_location = os.path.abspath(os.path.dirname(__file__))
    snaql_factory = Snaql(root_location, 'resources/queries')
    queries = snaql_factory.load_queries('queries.sql')
    # Table name is double-quoted since it may contain spaces/special chars.
    query = queries.get_full_table(table_name = '"{}"'.format(cfg.table_name))
    cursor = bdb.execute(query)
    sets = fips_sets(read_fips(fips_file))
    # NOTE(review): `tuple` shadows the builtin here; left unchanged.
    col_name_list = [tuple[0] for tuple in cursor.description]
    return fips_cols(sets, col_name_list, cursor)
| StarcoderdataPython |
34493 | <filename>cell.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class HiddenGate(nn.Module):
    """One word-level gate of an S-LSTM: a linear map over the concatenated
    context vectors followed by a pointwise nonlinearity."""

    def __init__(self, hidden_size, input_size, bias, nonlinearity="sigmoid"):
        super(HiddenGate, self).__init__()
        # Input: a 3*hidden window of neighboring hidden states, the token
        # embedding, and the previous sentence state, concatenated.
        in_features = 4 * hidden_size + input_size
        self.linear = nn.Linear(in_features, hidden_size, bias=bias)
        if nonlinearity == "sigmoid":
            self.nonlinearity = F.sigmoid
        else:
            self.nonlinearity = F.tanh

    def forward(self, Xis, x_i, prev_g):
        gate_input = torch.cat([Xis, x_i, prev_g])
        return self.nonlinearity(self.linear(gate_input))
class SentenceStateGate(nn.Module):
    """Sentence-level gate: sigmoid(linear([prev_g ; h]))."""

    def __init__(self, hidden_size, input_size, bias):
        super(SentenceStateGate, self).__init__()
        # NOTE: input_size is accepted but unused — the gate only ever sees
        # hidden-sized vectors.
        self.linear = nn.Linear(2 * hidden_size, hidden_size, bias=bias)

    def forward(self, prev_g, h):
        """h is either the averaged hidden state (h_av) or one h_i."""
        combined = torch.cat([prev_g, h])
        return torch.sigmoid(self.linear(combined))
class SLSTMCell(nn.Module):
    """
    One step of a sentence-state LSTM (S-LSTM): every word keeps its own
    hidden/cell state and exchanges information with a global sentence
    state g on each step.

    NOTE(review): `get_Xis` and `get_prev_cs` are unimplemented stubs, so
    `forward` cannot run end-to-end yet; the gate wiring below is complete.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(SLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # hidden state gates
        self.i_i_op = HiddenGate(hidden_size, input_size, bias)
        self.l_i_op = HiddenGate(hidden_size, input_size, bias)
        self.r_i_op = HiddenGate(hidden_size, input_size, bias)
        self.f_i_op = HiddenGate(hidden_size, input_size, bias)
        self.s_i_op = HiddenGate(hidden_size, input_size, bias)
        self.o_i_op = HiddenGate(hidden_size, input_size, bias)
        # BUG FIX: `bias` was omitted here, which raised a TypeError at
        # construction time (HiddenGate requires it positionally).
        self.u_i_op = HiddenGate(hidden_size, input_size, bias, nonlinearity="tanh")
        # sentence state gates
        self.g_f_g_op = SentenceStateGate(hidden_size, input_size, bias)
        self.g_f_i_op = SentenceStateGate(hidden_size, input_size, bias)
        self.g_o_op = SentenceStateGate(hidden_size, input_size, bias)

    def reset_params(self):
        # TODO: parameter (re)initialization is not implemented yet.
        pass

    def get_Xis(self, prev_h_states):
        """Apply proper index selection mask to get xis"""
        # TODO: unimplemented — must build each word's hidden-state window.
        # How do you handle it getting shorter eh??
        pass

    def forward(self, prev_h_states, prev_c_states, prev_g_state,
                x_i, prev_c_g):
        # BUG FIX: was `self.get_Xi_i(...)`, a method that does not exist;
        # the defined helper is `get_Xis`.
        Xi_i = self.get_Xis(prev_h_states)
        i_i = self.i_i_op(Xi_i, x_i, prev_g_state)
        # BUG FIX: l/r/f/s/o were all computed with `self.l_i_op`
        # (copy/paste); each gate must use its own parameters.
        l_i = self.l_i_op(Xi_i, x_i, prev_g_state)
        r_i = self.r_i_op(Xi_i, x_i, prev_g_state)
        f_i = self.f_i_op(Xi_i, x_i, prev_g_state)
        s_i = self.s_i_op(Xi_i, x_i, prev_g_state)
        o_i = self.o_i_op(Xi_i, x_i, prev_g_state)
        u_i = self.u_i_op(Xi_i, x_i, prev_g_state)
        # Normalize the five write gates against each other.
        i_i, l_i, r_i, f_i, s_i = self.softmaxed_gates(
            [i_i, l_i, r_i, f_i, s_i])
        # what happens to the the last cell here?????? which has no i+1?
        # what happens when first one has no i-1??
        # NOTE(review): get_prev_cs is not defined anywhere in this module —
        # forward will fail here until it is implemented.
        prev_c_left, prev_c_right, prev_c = self.get_prev_cs(prev_c_states)
        c_i = l_i * prev_c_left + f_i * prev_c + r_i * prev_c_right + \
            s_i * prev_c_g + i_i * u_i
        h_i = o_i * F.tanh(c_i)
        # Now for the sentence level calculations
        h_avg = prev_h_states.mean(dim=0)
        g_f_g = self.g_f_g_op(prev_g_state, h_avg)
        g_f_i = self.g_f_i_op(prev_g_state, prev_h_states)
        g_o = self.g_o_op(prev_g_state, h_avg)
        # Jointly softmax the per-word forget gates with the global one.
        temp = self.softmaxed_gates(list(torch.unbind(g_f_i)) + [g_f_g])
        g_f_i = torch.stack(temp[:-1], dim=0)
        g_f_g = temp[-1]
        c_g = g_f_g * prev_c_g + torch.sum(g_f_i * prev_c_states, dim=0)
        g = g_o * F.tanh(c_g)
        return h_i, c_i, g, c_g

    def softmaxed_gates(self, gates_list):
        """Softmax-normalize a list of same-shaped gate tensors jointly."""
        softmaxed = F.softmax(torch.stack(gates_list), dim=0)
        return torch.unbind(softmaxed)
| StarcoderdataPython |
3286956 | import numpy as np
import cv2
class Net(object):
    """Frozen-graph TF1 CNN wrapper: loads a .pb model, opens a session and
    runs `test_probs` inference over padded images."""

    def __init__(self, model_path, use_cpu=False, prefix='prefix',
                 pad=52, max_mp=5, gpu_fraction=None):
        # Local import keeps TF out of module import time.
        import tensorflow.compat.v1 as tf
        tf.disable_v2_behavior()
        self.pad = pad
        self.max_megapixels = max_mp if max_mp is not None else 5
        self.gpu_fraction = gpu_fraction
        self.prefix = prefix
        if model_path is not None:
            # Load the frozen GraphDef and import it under `prefix`.
            with tf.gfile.GFile(model_path, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
            with tf.Graph().as_default() as graph:
                tf.import_graph_def(graph_def, name=prefix)
            self.graph = graph
            print(f"{model_path} loaded")
        if use_cpu:
            tf_config = tf.ConfigProto(intra_op_parallelism_threads=6, inter_op_parallelism_threads=1, device_count={'GPU': 0})
        else:
            tf_config = tf.ConfigProto(device_count={'GPU': 1})
            # Either grow GPU memory on demand or cap it to a fixed fraction.
            if self.gpu_fraction is None:
                tf_config.gpu_options.allow_growth = True
            else:
                tf_config.gpu_options.per_process_gpu_memory_fraction = self.gpu_fraction
        self.session = tf.Session(graph=self.graph, config=tf_config)
        # Warm-up run on a dummy image so later calls are not slowed by
        # first-run graph initialization.
        if self.prefix is not None:
            out_map = self.session.run(
                '{}/test_probs:0'.format(self.prefix),
                feed_dict={'{}/test_dataset:0'.format(self.prefix): np.zeros([1, 128, 128, 3], dtype=np.uint8)}
            )
        else:
            out_map = self.session.run(
                'test_probs:0',
                feed_dict={'test_dataset:0': np.zeros([1, 128, 128, 3], dtype=np.uint8)}
            )
        print('graph initialized')

    def get_maps(self, img, downsample):
        '''
        Parsenet CNN inference

        :param img: HxWx3 uint8 image
        :param downsample: scale divisor applied before inference
        :return: probability maps cropped back to the (downsampled) image area
        '''
        img = cv2.resize(img, (0,0), fx=1/downsample, fy=1/downsample, interpolation=cv2.INTER_AREA)
        # Pad so border content still gets full receptive-field context.
        img = np.pad(img, [(self.pad, self.pad), (self.pad, self.pad), (0, 0)], 'constant')
        # Round canvas dims up to a multiple of 64 — presumably the network's
        # total downsampling stride; TODO confirm.
        new_shape_x = int(np.ceil(img.shape[0] / 64) * 64)
        new_shape_y = int(np.ceil(img.shape[1] / 64) * 64)
        test_img_canvas = np.zeros((1, new_shape_x, new_shape_y, 3), dtype=np.float32)
        test_img_canvas[0, :img.shape[0], :img.shape[1], :] = img
        #print("LAYOUT_CNN_DOWNSAMPLE", downsample, 'INPUT_SHAPE', test_img_canvas.shape)
        # Inputs are normalized to [0, 1) by dividing by 256.
        if self.prefix is not None:
            out_map = self.session.run(
                '{}/test_probs:0'.format(self.prefix),
                feed_dict={'{}/test_dataset:0'.format(self.prefix): test_img_canvas[:, :, :] / np.float32(256.)})
        else:
            out_map = self.session.run(
                'test_probs:0',
                feed_dict={'test_dataset:0': test_img_canvas[:, :, :] / np.float32(256.)})
        # Strip the batch dim and the padding margin.
        out_map = out_map[0, self.pad:img.shape[0] - self.pad, self.pad:img.shape[1] - self.pad, :]
        return out_map
class ParseNet(Net):
    """Layout-parsing network with adaptive input downsampling so detected
    text lines land near an optimal pixel height."""

    def __init__(self, model_path, downsample=4, use_cpu=False, prefix='parsenet',
                 pad=52, max_mp=5, gpu_fraction=None, detection_threshold=0.2, adaptive_downsample=True):
        super().__init__(
            model_path, use_cpu=use_cpu, prefix=prefix,
            pad=pad, max_mp=max_mp, gpu_fraction=gpu_fraction)
        self.detection_threshold = detection_threshold
        self.adaptive_downsample = adaptive_downsample
        self.init_downsample = downsample
        # Remembered between calls so consecutive pages reuse a good scale.
        self.last_downsample = downsample
        # Minimum number of detected line pixels before adapting the scale.
        self.downsample_line_pixel_adapt_threshold = 100
        # Acceptable / target median line heights (in downsampled pixels).
        self.min_line_processing_height = 8
        self.max_line_processing_height = 13
        self.optimal_line_processing_height = 11
        self.min_downsample = 1
        self.max_downsample = 8

    def get_maps_with_optimal_resolution(self, img):
        '''
        Memory-safe Parsenet CNN inference with optimal downsampling

        :param img: HxWx3 input image
        :return: (out_map, net_downsample) actually used for inference
        '''
        # check that big images are rescaled before first CNN run
        first_downsample = max(
            self.last_downsample,
            np.sqrt((img.shape[0] * img.shape[1]) / (self.max_megapixels * 10e5)))
        # first run with default downsample
        net_downsample = first_downsample
        out_map = self.get_maps(img, net_downsample)
        if not self.adaptive_downsample:
            return out_map, net_downsample
        second_downsample = first_downsample
        # Only adapt when enough line pixels were detected to trust the
        # median height estimate.
        if (out_map[:, :, 2] > self.detection_threshold).sum() > self.downsample_line_pixel_adapt_threshold:
            med_height = self.get_med_height(out_map)
            #print('MEDIAN HEIGHT', med_height, med_height * first_downsample)
            if med_height > self.max_line_processing_height or med_height < self.min_line_processing_height:
                # Rescale so the median line height approaches the optimum,
                # clamped to the allowed downsample range.
                second_downsample = first_downsample * (med_height / self.optimal_line_processing_height)
                second_downsample = min(second_downsample, self.max_downsample)
                second_downsample = max(second_downsample, self.min_downsample)
                self.last_downsample = second_downsample
        # Re-apply the megapixel safety cap to the adapted scale.
        second_downsample = max(
            self.last_downsample,
            np.sqrt((img.shape[0] * img.shape[1]) / (self.max_megapixels * 10e5)))
        # Re-run only if the scale changed by more than 20% either way.
        if second_downsample / first_downsample < 0.8 or second_downsample / first_downsample > 1.2:
            net_downsample = second_downsample
            out_map = self.get_maps(img, net_downsample)
        return out_map, net_downsample

    def get_med_height(self, out_map):
        '''
        Compute median line height from CNN output
        '''
        # Channel 2 is the line-detection probability, channel 0 the height
        # regression — presumably; TODO confirm against the trained model.
        heights = (out_map[:, :, 2] > self.detection_threshold).astype(np.float) * out_map[:, :, 0]
        med_height = np.median(heights[heights > 0])
        return med_height
class TiltNet(Net):
    """Tilt-estimation network; same frozen-graph pipeline as Net, loaded
    under the 'tiltnet' graph prefix."""

    def __init__(self, model_path, use_cpu=False, prefix='tiltnet',
                 pad=52, max_mp=5, gpu_fraction=None):
        super().__init__(
            model_path, use_cpu=use_cpu, prefix=prefix,
            pad=pad, max_mp=max_mp, gpu_fraction=gpu_fraction)
| StarcoderdataPython |
3217356 | # coding=UTF-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import difflib
import os.path
import shutil
import sys
import tempfile
import unittest
import defcon
from fontTools.misc.py23 import open
from glyphsLib.builder import GLYPHS_PREFIX
from glyphsLib.interpolation import build_designspace
def makeFamily(familyName):
    """Build two master fonts (Regular 400 / Black 900) plus an instances
    dict covering four weights, for designspace-generation tests."""
    masters = []
    for style_name, weight in (("Regular", 400.0), ("Black", 900.0)):
        font = defcon.Font()
        font.info.familyName = familyName
        font.info.styleName = style_name
        font.lib[GLYPHS_PREFIX + "weightValue"] = weight
        masters.append(font)
    instances = {
        "defaultFamilyName": familyName,
        "data": [
            {"name": "Regular", "interpolationWeight": 400.0},
            {"name": "Semibold", "interpolationWeight": 600.0},
            {"name": "Bold", "interpolationWeight": 700.0},
            {"name": "Black", "interpolationWeight": 900.0},
        ],
    }
    return masters, instances
class DesignspaceTest(unittest.TestCase):
    """Compares generated *.designspace files against checked-in expected
    files, printing a unified diff on mismatch."""

    def expect_designspace(self, masters, instances, expectedFile):
        # Build the designspace in a temp dir, then diff it line-by-line
        # against tests/data/<expectedFile>.
        master_dir = tempfile.mkdtemp()
        designspace, _ = build_designspace(
            masters, master_dir, os.path.join(master_dir, "out"), instances)
        with open(designspace, mode="r", encoding="utf-8") as f:
            actual = f.readlines()
        path, _ = os.path.split(__file__)
        expectedPath = os.path.join(path, "data", expectedFile)
        with open(expectedPath, mode="r", encoding="utf-8") as f:
            expected = f.readlines()
        if actual != expected:
            for line in difflib.unified_diff(
                    expected, actual,
                    fromfile=expectedPath, tofile=designspace):
                sys.stderr.write(line)
            self.fail("*.designspace file is different from expected")
        # Only reached on success, so failing runs keep the file for debugging.
        shutil.rmtree(master_dir)

    def test_basic(self):
        masters, instances = makeFamily("DesignspaceTest Basic")
        self.expect_designspace(masters, instances,
                                "DesignspaceTestBasic.designspace")

    def test_inactive_from_active(self):
        # Glyphs.app recognizes active=0 as a flag for inactive instances.
        # https://github.com/googlei18n/glyphsLib/issues/129
        masters, instances = makeFamily("DesignspaceTest Inactive")
        for inst in instances["data"]:
            inst["active"] = (1 if inst["name"] == "Semibold" else 0)
        self.expect_designspace(masters, instances,
                                "DesignspaceTestInactive.designspace")

    def test_inactive_from_exports(self):
        # Glyphs.app recognizes exports=0 as a flag for inactive instances.
        # https://github.com/googlei18n/glyphsLib/issues/129
        masters, instances = makeFamily("DesignspaceTest Inactive")
        for inst in instances["data"]:
            inst["exports"] = (1 if inst["name"] == "Semibold" else 0)
        self.expect_designspace(masters, instances,
                                "DesignspaceTestInactive.designspace")

    def test_familyName(self):
        # An instance-level familyName custom parameter overrides the default.
        masters, instances = makeFamily("DesignspaceTest FamilyName")
        instances["data"] = [
            {"name": "Regular", "interpolationWeight": 400.0},
            {
                "name": "Regular",
                "interpolationWeight": 600.0,
                "customParameters": [
                    {"name": "familyName", "value": "Custom Family"},
                ],
            },
        ]
        self.expect_designspace(masters, instances,
                                "DesignspaceTestFamilyName.designspace")

    def test_instanceOrder(self):
        # The generated *.designspace file should place instances
        # in the same order as they appear in the original source.
        # https://github.com/googlei18n/glyphsLib/issues/113
        masters, instances = makeFamily("DesignspaceTest InstanceOrder")
        instances["data"] = [
            {"name": "Black", "interpolationWeight": 900.0},
            {"name": "Regular", "interpolationWeight": 400.0},
            {"name": "Bold", "interpolationWeight": 700.0},
        ]
        self.expect_designspace(masters, instances,
                                "DesignspaceTestInstanceOrder.designspace")
if __name__ == "__main__":
sys.exit(unittest.main())
| StarcoderdataPython |
3321957 | <reponame>itemmanager/bungieapi
# generated by update to not change manually
import dataclasses as dt
import typing as t
from bungieapi.json import to_json
@dt.dataclass(frozen=True)
class EntityActionResult:
    """Outcome of an action applied to a single entity (generated schema)."""
    # Identifier of the entity the action was applied to.
    entity_id: int
    # Bungie platform error code describing the outcome.
    result: "PlatformErrorCodes"

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize to the camelCase wire format expected by the Bungie API."""
        return {
            "entityId": to_json(self.entity_id),
            "result": to_json(self.result),
        }
# imported at the end to do not case circular imports for type annotations
from bungieapi.generated.components.schemas.exceptions import ( # noqa: E402
PlatformErrorCodes,
)
| StarcoderdataPython |
4838735 | # Generated by Django 2.1.3 on 2018-11-05 14:02
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames the `professerr_name` (sic) field of the
    # `semester_1` model to `section`, preserving the column data.

    dependencies = [
        ('home', '0024_auto_20181105_1131'),
    ]

    operations = [
        migrations.RenameField(
            model_name='semester_1',
            old_name='professerr_name',
            new_name='section',
        ),
    ]
| StarcoderdataPython |
192838 | from conans import ConanFile, CMake, tools
import os
class Conan(ConanFile):
    """Conan recipe for DiligentEngine: builds the Vulkan backend only
    (D3D11/D3D12/OpenGL are disabled in package_info defines)."""
    name = 'DiligentEngine'
    version = '2.5'
    homepage = 'https://github.com/DiligentGraphics/DiligentEngine'
    description = 'A Modern Cross-Platform Low-Level 3D Graphics Library and Rendering Framework'
    topics = ('conan', 'DiligentEngine', '3d', 'graphics')
    license = 'Apache-2.0'
    settings = 'os', 'compiler', 'build_type', 'arch'

    def source(self):
        # Download the release archive and unpack it into ./src.
        tools.get('https://github.com/DiligentGraphics/DiligentEngine/releases/download/v' + self.version + '/DiligentEngine_v' + self.version + '.zip', destination='src', strip_root=True)

    def build(self):
        cmake = CMake(self)
        cmake.configure(build_dir='build', source_dir='../src')
        # Only the Vulkan engine and the tools helpers are built.
        cmake.build(target='Diligent-GraphicsEngineVk-static')
        cmake.build(target='Diligent-GraphicsTools')

    def package(self):
        # Public headers live under */interface/ in both sub-repos.
        for ext in ['.h', '.hpp', '.inl']:
            self.copy('*/interface/*' + ext, 'include/DiligentCore', 'src/DiligentCore', keep_path=True)
            self.copy('*/interface/*' + ext, 'include/DiligentTools', 'src/DiligentTools', keep_path=True)
        # ImGui renderer sources are shipped as-is for consumers to compile.
        self.copy('ImGuiDiligentRenderer.cpp', 'res', 'src/DiligentTools/Imgui/src', keep_path=False)
        self.copy('ImGuiImplDiligent.cpp', 'res', 'src/DiligentTools/Imgui/src', keep_path=False)
        for ext in ['.a', '.lib', '.dll', '.so', '.dylib', '.pdb', '.dsym']:
            self.copy('*' + ext, 'lib', 'build/DiligentCore', keep_path=False)

    def package_info(self):
        # Map the conan OS setting to Diligent's PLATFORM_* define and the
        # matching platform support library.
        diligent_platform = None
        if self.settings.os == 'Windows':
            self.cpp_info.defines = ['PLATFORM_WIN32=1']
            diligent_platform = 'Win32'
        elif self.settings.os == 'Macos':
            self.cpp_info.defines = ['PLATFORM_MACOS=1']
            diligent_platform = 'Apple'
        elif self.settings.os == 'Linux':
            self.cpp_info.defines = ['PLATFORM_LINUX=1']
            diligent_platform = 'Linux'
        elif self.settings.os == 'Android':
            self.cpp_info.defines = ['PLATFORM_ANDROID=1']
            diligent_platform = 'Android'
        elif self.settings.os == 'iOS':
            self.cpp_info.defines = ['PLATFORM_IOS=1']
            diligent_platform = 'Apple'
        # Only the Vulkan backend is packaged; disable the others.
        self.cpp_info.defines.append('DILIGENT_NO_DIRECT3D11')
        self.cpp_info.defines.append('DILIGENT_NO_DIRECT3D12')
        self.cpp_info.defines.append('DILIGENT_NO_OPENGL')
        # Expose every packaged */interface directory as an include dir.
        self.cpp_info.includedirs = []
        for (root, dirs, files) in os.walk('include'):
            for d in dirs:
                if d == 'interface':
                    self.cpp_info.includedirs.append(root.replace('\\', '/') + '/' + d)
        # MSVC debug builds of the SPIRV/glslang libs carry a 'd' suffix.
        need_debug_sufix = self.settings.os == 'Windows' and self.settings.build_type == 'Debug'
        # Link order matters: engine libs first, then shader toolchain,
        # then common/platform support.
        self.cpp_info.libs = [
            'Diligent-GraphicsEngineVk-static',
            'Diligent-GraphicsEngineNextGenBase',
            'Diligent-ShaderTools',
            'Diligent-HLSL2GLSLConverterLib',
            'Diligent-GraphicsEngine',
            'spirv-cross-cored' if need_debug_sufix else 'spirv-cross-core',
            'SPIRVd' if need_debug_sufix else 'SPIRV',
            'glslangd' if need_debug_sufix else 'glslang',
            'MachineIndependentd' if need_debug_sufix else 'MachineIndependent',
            'GenericCodeGend' if need_debug_sufix else 'GenericCodeGen',
            'OGLCompilerd' if need_debug_sufix else 'OGLCompiler',
            'OSDependentd' if need_debug_sufix else 'OSDependent',
            'SPIRV-Tools-opt',
            'SPIRV-Tools',
            'Diligent-GraphicsTools',
            'Diligent-GraphicsAccessories',
            'Diligent-Common',
            'Diligent-' + diligent_platform + 'Platform',
            'Diligent-BasicPlatform',
            'Diligent-Primitives',
        ]
| StarcoderdataPython |
3312837 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from adversarial_filtering.bert import optimization
from adversarial_filtering.bert.modeling import get_shape_list, BertModel, get_cls_output, \
get_assignment_map_from_checkpoint, get_masked_lm_output
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, do_mask=False):
    """Returns `model_fn` closure for TPUEstimator.

    Builds a BERT multiple-choice classifier over rank-3 inputs
    (batch, n_way, seq_len), optionally adding a masked-LM auxiliary loss
    during training when `do_mask` is set.
    """

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""

        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        is_real_example = None
        # Padding examples (added to fill TPU batches) carry weight 0.
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        # Create model with aux loss
        print(get_shape_list(input_ids, expected_rank=3))
        batch_size, n_way, seq_length = get_shape_list(input_ids, expected_rank=3)

        # THIS IS JUST FOR bert_experiments/, not for AF.
        assert n_way == num_labels

        # Flatten the n_way choice dimension into the batch so BERT scores
        # each (context, choice) pair independently.
        model = BertModel(
            config=bert_config,
            is_training=is_training,
            input_ids=tf.reshape(input_ids, [batch_size * n_way, seq_length]),
            input_mask=tf.reshape(input_mask, [batch_size * n_way, seq_length]),
            token_type_ids=tf.reshape(segment_ids, [batch_size * n_way, seq_length]),
            use_one_hot_embeddings=use_one_hot_embeddings)

        (cls_loss, per_example_cls_loss, logits) = get_cls_output(
            model.get_pooled_output(),
            is_training=is_training,
            num_labels=n_way,
            labels=label_ids,
        )

        if do_mask and is_training:
            # Auxiliary masked-LM loss, training only.
            masked_lm_positions = features["masked_lm_positions"]
            masked_lm_ids = features["masked_lm_ids"]
            masked_lm_weights = features["masked_lm_weights"]
            masked_shape = get_shape_list(masked_lm_positions, expected_rank=3)
            assert n_way == masked_shape[1]
            assert batch_size == masked_shape[0]
            (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
                bert_config, model.get_sequence_output(), model.get_embedding_table(),
                tf.reshape(masked_lm_positions, [batch_size * n_way, masked_shape[2]]),
                tf.reshape(masked_lm_ids, [batch_size * n_way, masked_shape[2]]),
                tf.reshape(masked_lm_weights, [batch_size * n_way, masked_shape[2]]))
            tf.logging.info("==== Incorporating Mask LM Loss ====")
            total_loss = cls_loss + masked_lm_loss
        else:
            total_loss = cls_loss

        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        # Note: the string 'False' is treated the same as no checkpoint.
        if init_checkpoint and (init_checkpoint != 'False'):
            (assignment_map, initialized_variable_names
             ) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                # On TPU the checkpoint restore must run inside the scaffold.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()

                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            if use_tpu:
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn)
            else:
                # Off-TPU: also attach a periodic console logging hook.
                accuracy = tf.metrics.accuracy(label_ids, tf.argmax(logits, axis=-1, output_type=tf.int32))
                if do_mask:
                    logging_info = {
                        'loss': tf.metrics.mean(per_example_cls_loss)[1] + tf.metrics.mean(masked_lm_loss)[1],
                        'lm_loss': tf.metrics.mean(masked_lm_loss)[1],
                    }
                else:
                    logging_info = {
                        'loss': tf.metrics.mean(per_example_cls_loss)[1],
                    }
                logging_info['cls_loss'] = tf.metrics.mean(per_example_cls_loss)[1]
                logging_info['accuracy'] = accuracy[1]
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    training_hooks=[tf.train.LoggingTensorHook(logging_info, every_n_iter=100)],
                    scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            # Metrics weighted by is_real_example so padding rows are ignored.
            def metric_fn(per_example_loss, label_ids, logits, is_real_example):
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(
                    labels=label_ids, predictions=predictions, weights=is_real_example)
                loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                }

            eval_metrics = (metric_fn,
                            [per_example_cls_loss, label_ids, logits, is_real_example])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # PREDICT mode: just emit the per-choice scores.
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={"scores": logits},
                scaffold_fn=scaffold_fn)
        return output_spec

    return model_fn
| StarcoderdataPython |
4811181 | lista = []
def Datos():
    """Read three integers from stdin and append them to the global ``lista``.

    BUG FIX: the original Python 2 code used ``input()``, which eval()'d the
    user's text (arbitrary-code-execution risk) and is a syntax/semantics
    break on Python 3; ``int(input())`` keeps the "three integers" behavior.
    """
    print("Introduzca 3 numeros enteros: ")
    for _ in range(3):
        lista.append(int(input()))
def Mostrar():
    """Print every number previously stored in the global ``lista``.

    Converted from Python 2 print statements to the Python 3 print function;
    output text is unchanged.
    """
    print("\nNumeros ingresados: \n")
    for elemento in lista:
        print(elemento)
1757343 | <reponame>meetri/cryptolib<filename>libs/modules/tradewallet.py
import random,uuid,os
from datetime import datetime
from mongowrapper import MongoWrapper
from twiliosms import TwilioSms
class TradeWallet(object):
    """Paper-trading / live-trading wallet for a single market.

    Tracks buy and sell orders (simulated, or routed through an attached
    ``exchange`` object), persists its state to MongoDB under ``name`` and
    sends SMS notifications via Twilio when orders execute.
    """

    def __init__(self, config=None):
        """Create a wallet.

        ``config`` keys (all optional): name, market, sync, scale, trades,
        budget, sellGoalPercent, stopLossPercent.
        """
        # Avoid the original mutable default argument ``config={}``.
        if config is None:
            config = {}
        self.buys = []
        self.rejected = []
        self.sells = []
        self.reports = []
        self.name = config.get("name", "sim1")
        self.market = config.get("market", "")
        self.sync = config.get("sync", True)
        self.scale = config.get("scale", 8)
        self.maxtrades = config.get("trades", 5)
        # scale 2 looks like a fiat-quoted market (2 decimals), otherwise
        # a crypto-quoted one (8 decimals) -- defaults differ accordingly.
        if self.scale == 2:
            self.budget = config.get("budget", 100)
        else:
            self.budget = config.get("budget", 0.1)
        # Budget is spread evenly over the maximum number of open trades.
        self.qtyVal = self.budget / self.maxtrades
        self.sellGoalPercent = config.get("sellGoalPercent", 0.05)
        # BUG FIX: this previously read config.get("sellGoalPercent", -0.025),
        # so any configured sell goal silently overwrote the stop loss.
        self.stopLossPercent = config.get("stopLossPercent", -0.025)
        self.sms = TwilioSms.getInstance()
        self.notifyList = os.getenv("TRADEBOT_NOTIFY", "")
        # mongodb
        self.mongo = MongoWrapper.getInstance().getClient()
        self.exchange = None

    def reset(self):
        """Clear all in-memory order state (does not touch MongoDB)."""
        self.buys = []
        self.rejected = []
        self.sells = []
        self.reports = []

    def notify(self, msg):
        """Send ``msg`` by SMS to every number in TRADEBOT_NOTIFY (sync mode only)."""
        if self.sync:
            if len(msg) > 0:
                nl = self.notifyList.split(",")
                for number in nl:
                    self.sms.send(msg, number)

    def getResults(self, lastprice=None):
        """Summarize wallet performance.

        When ``lastprice`` is given, open positions are marked to it
        ("openprofit"); realized profit comes from completed sells.
        Returns a dict of counters plus profits formatted to 8 decimals.
        """
        totalShorts = 0
        for trade in self.sells:
            if trade["type"] in ["short"]:
                totalShorts += 1
        openTrades = 0
        totalprofit = 0
        for trade in self.buys:
            if trade["sell_id"] is None and trade["status"] not in ["cancelled"]:
                openTrades += 1
                if lastprice is not None:
                    totalprofit += (lastprice - trade["price"]) * trade["qty"]
        total = 0
        totalSells = 0
        for trade in self.sells:
            if trade["type"] == "sell":
                totalSells += 1
                profit = (trade["price"] - trade["buy_price"]) * trade["qty"]
                total += profit
        totalTrades = totalSells + len(self.buys)
        return {
            "last": lastprice,
            "totalTrades": totalTrades,
            "totalBuys": len(self.buys),
            "totalSells": totalSells,
            "totalShorts": totalShorts,
            "openTrades": openTrades,
            "sellprofit": "{:.8f}".format(total),
            "openprofit": "{:.8f}".format(totalprofit),
            "totalprofit": "{:.8f}".format(totalprofit + total)
        }

    def exchangeSync(self):
        """Refresh pending/partial orders from the live exchange, notify on
        completion/cancellation and persist the new state."""
        if self.exchange is None:
            return
        for buy in self.buys:
            if buy["status"] in ["pending", "partial"]:
                buy.update(self.exchange.getOrderStatus(buy['id']))
                if buy["status"] in ["completed", "cancelled"]:
                    self.notify("Market {} {} buy of {} units @ {:.8f}".format(self.market, buy["status"], buy["qty"], buy["price"]))
        for sell in self.sells:
            if sell["status"] in ["pending", "partial"]:
                sell.update(self.exchange.getOrderStatus(sell['id']))
                if sell["status"] in ["completed", "cancelled"]:
                    self.notify("Market {} {} sell of {} units @ {:.8f}".format(self.market, sell['status'], sell["qty"], sell["price"]))
        self.update()

    def setup(self):
        """Recreate the MongoDB wallet collection (drops existing data)."""
        res = self.mongo.crypto.drop_collection("wallet")
        # NOTE(review): ``pymongo`` is not imported at module level, so this
        # line raises NameError if ever executed -- confirm intended usage.
        res = self.mongo.crypto.wallet.create_index([("name", pymongo.ASCENDING)], unique=True)

    def update(self):
        """Persist buys/sells/rejected to MongoDB (sync mode only)."""
        if self.sync:
            doc = {'name': self.name, 'buys': self.buys, 'sells': self.sells, 'rejected': self.rejected}
            return self.mongo.crypto.wallet.replace_one({'name': self.name}, doc, upsert=True)

    def load(self):
        """Load previously persisted wallet state from MongoDB (sync mode only)."""
        if self.sync:
            res = self.mongo.crypto.wallet.find_one({'name': self.name})
            if res is not None and 'name' in res:
                self.buys = res['buys']
                self.sells = res['sells']
                if "rejected" in res:
                    self.rejected = res['rejected']
            return res

    def report(self, candle, signals=None, timeIndex=None):
        """Append a diagnostic report entry for ``candle`` (no trading effect)."""
        if self.exchange is None:
            # Simulation: use the candle's own timestamp.
            utcnow = candle['date']
        else:
            utcnow = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        self.reports.append({
            'type': 'report',
            'date': utcnow,
            'index': timeIndex,
            'candle': candle['date'],
            'price': candle['close'],
            'signals': signals
        })

    def short(self, candle, price=None, signals=None, timeIndex=None):
        '''used as an indicator predicting the market will be taking a down turn'''
        self.checkSales(short=True, candle=candle, price=price, timeIndex=timeIndex, signals=signals)

    def getSignals(self):
        """Return buys and sells combined into one list of signal events."""
        sigevent = []
        sigevent.extend(self.buys)
        sigevent.extend(self.sells)
        #sigevent.extend(self.reports)
        return sigevent

    def buyCheck(self, buyobj):
        """Return True when ``buyobj`` is allowed: not duplicating an open
        buy on the same candle, not stacking too many cheaper open buys, and
        not exceeding ``maxtrades`` open positions."""
        reject = False
        rejCount = 2
        for buy in self.buys:
            if buy['sell_id'] is None and buy['status'] not in ['cancelled']:
                if buy['candle'] == buyobj['candle']:
                    reject = True
                if buy['price'] <= buyobj['price']:
                    rejCount -= 1
                #TODO: Add time restraints...
                #if buyobj['price'] > buy['price'] - (buy['price'] * 0.03):
                #    reject = True
        if rejCount <= 0:
            reject = True
        res = self.getResults()
        price = buyobj["price"]
        if res['openTrades'] >= self.maxtrades:
            reject = True
        return not reject

    def createBuy(self, market, price, qty, buydate=None, goalPercent=0.05, stopLossPercent=None):
        """Create a completed "watch" buy order directly (bypasses buyCheck
        and the exchange) and persist it."""
        utcnow = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        if buydate is None:
            buydate = utcnow
        buyid = "watch-{}".format(str(uuid.uuid4()))
        if goalPercent is None:
            goalPercent = self.sellGoalPercent
        stopLossPrice = None
        if stopLossPercent is not None:
            stopLossPrice = self.getPriceFromPercent(price, stopLossPercent)
        goalPrice = self.getPriceFromPercent(price, goalPercent)
        buyObj = {
            'id': buyid,
            'sell_id': None,
            'status': 'completed',
            'type': 'buy',
            'date': utcnow,
            'market': market,
            'candle': buydate,
            'index': 0,
            'price': price,
            'qty': qty,
            'goalPercent': goalPercent,
            'goalPrice': goalPrice,
            'stopLossPrice': stopLossPrice
        }
        self.buys.append(buyObj)
        self.update()

    def buy(self, goalPercent=None, goalPrice=None, price=None, signals=None, timeIndex=None, candle=None, qty=None):
        '''create new buy order'''
        if candle is None:
            candle = {"date": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')}
        if self.exchange is None:
            utcnow = candle['date']
        else:
            utcnow = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        if goalPrice is None:
            if goalPercent is None:
                goalPercent = self.sellGoalPercent
            goalPrice = self.getPriceFromPercent(price, goalPercent)
        stopLossPrice = None
        if self.stopLossPercent is not None:
            stopLossPrice = self.getPriceFromPercent(price, self.stopLossPercent)
        if qty is None:
            # Spend one budget slice at the current price.
            qty = self.qtyVal / price
        buyid = "sim-{}".format(str(uuid.uuid4()))
        buyObj = {
            'id': buyid,
            'sell_id': None,
            'status': 'pending',
            'type': 'buy',
            'date': utcnow,
            'market': self.market,
            'candle': candle['date'],
            'index': timeIndex,
            'price': price,
            'qty': qty,
            'goalPercent': goalPercent,
            'goalPrice': goalPrice,
            'stopLossPrice': stopLossPrice,
            'signals': signals
        }
        if self.buyCheck(buyObj):
            if self.exchange is not None:
                buyObj = self.exchange.buy(buyObj)
            else:
                buyObj["status"] = "completed"
            self.buys.append(buyObj)
            self.update()
            self.notify("Market {} bid {} units @ {:.8f} btc".format(self.market, buyObj["qty"], buyObj["price"]))
        else:
            buyObj["status"] = "rejected"
            self.rejected.append(buyObj)
            #self.update()
        return buyObj

    def sell(self, buydata, saledata=None, price=None, signals=None, timeIndex=None):
        '''place buy order in sell queue'''
        if saledata is None:
            utcnow = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
            if price is not None:
                saleprice = price
            else:
                saleprice = buydata["goalPrice"]
        else:
            saleprice = saledata['price']
            utcnow = saledata['date']
        sellid = "sim-{}".format(str(uuid.uuid4()))
        sellObj = {
            'id': sellid,
            'market': buydata['market'],
            'status': 'pending',
            'type': 'sell',
            'date': utcnow,
            'index': timeIndex,
            'price': saleprice,
            'qty': buydata['qty'],
            'buy_price': buydata['price'],
            'buy_id': buydata['id'],
            'signals': signals
        }
        # Link the buy to this sell so it no longer counts as open.
        buydata['sell_id'] = sellObj["id"]
        if self.exchange is not None:
            sellObj = self.exchange.sell(sellObj)
        else:
            sellObj["status"] = "completed"
        self.sells.append(sellObj)
        self.update()
        self.notify("Market {} ask {} units @ {:.8f} btc ".format(self.market, sellObj["qty"], sellObj["price"]))
        return sellObj

    def getPriceFromPercent(self, price, percent):
        """Return ``price`` adjusted by the signed fraction ``percent``
        (e.g. 0.05 -> +5%, -0.025 -> -2.5%)."""
        # BUG FIX: removed leftover debug print() calls that spammed stdout
        # on every price calculation.
        return (price * percent) + price

    def isForSale(self, candle, price, buydata, short=False, checkStops=False):
        """Decide whether the open buy ``buydata`` should be sold at ``price``.

        With ``checkStops`` the stop-loss price is tested instead of the
        goal price; ``short`` forces a sale regardless of the goal.
        Returns a dict whose 'status' key is the decision.
        """
        if buydata["status"] not in ["completed"] or buydata["sell_id"] is not None:
            return {"status": False}
        goalPrice = buydata['goalPrice']
        if goalPrice is None:
            goalPrice = self.getPriceFromPercent(buydata['price'], buydata['goalPercent'])
        if self.exchange is None:
            utcnow = candle['date']
        else:
            utcnow = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        if checkStops:
            forsale = buydata["stopLossPrice"] is not None and price <= buydata["stopLossPrice"]
        else:
            forsale = short or price >= goalPrice
        return {
            "status": forsale,
            "price": price,
            "date": utcnow,
            "buy": buydata['price'],
            "goal": goalPrice,
            "goalPercent": buydata['goalPercent']
        }

    def checkSales(self, candle, price, timeIndex=None, shortScore=0, short=False, signals=None):
        """Sell every open buy whose goal is reached (or all, when ``short``).
        ``shortScore`` is unused but kept for interface compatibility."""
        sold = False
        for buydata in self.buys:
            sale = self.isForSale(candle, price, buydata, short=short)
            if sale['status']:
                self.sell(buydata, saledata=sale, timeIndex=timeIndex, signals=signals)
                sold = True
        return sold

    def checkStops(self, candle, price, timeIndex=None, shortScore=0, short=False, signals=None):
        """Sell every open buy whose stop-loss price has been breached.
        ``shortScore`` is unused but kept for interface compatibility."""
        sold = False
        for buydata in self.buys:
            sale = self.isForSale(candle, price, buydata, short=short, checkStops=True)
            if sale['status']:
                self.sell(buydata, saledata=sale, timeIndex=timeIndex, signals=signals)
                sold = True
        return sold
| StarcoderdataPython |
1674143 | <filename>faker/providers/color/he_IL/__init__.py<gh_stars>1-10
from collections import OrderedDict
from .. import Provider as ColorProvider
localized = True
class Provider(ColorProvider):
    """Implement color provider for ``he_IL`` locale."""
    """Source : https://he.wikipedia.org/wiki/%D7%95%D7%99%D7%A7%D7%99%D7%A4%D7%93%D7%99%D7%94:%D7%A2%D7%A8%D7%9B%D7%AA_%D7%A6%D7%91%D7%A2%D7%99%D7%9D#%D7%98%D7%91%D7%9C%D7%94_%D7%96%D7%95_%D7%9E%D7%A8%D7%90%D7%94_%D7%90%D7%AA_%D7%98%D7%95%D7%95%D7%97_%D7%94%D7%92%D7%95%D7%95%D7%A0%D7%99%D7%9D_%D7%A9%D7%9C_%D7%9B%D7%9E%D7%94_%D7%A6%D7%91%D7%A2%D7%99%D7%9D_%D7%A0%D7%A4%D7%95%D7%A6%D7%99%D7%9D""" # NOQA
    # Mapping of Hebrew color names (right-to-left text) to hex RGB values.
    # An OrderedDict keeps the iteration order stable so seeded faker runs
    # stay deterministic across interpreter versions.
    all_colors = OrderedDict((
        ("אדום",
         "#FF0000"),
        ("אוכרה",
         "#DDAA33"),
        ("אינדיגו",
         "#4B0082"),
        ("אפור",
         "#7F7F7F"),
        ("ארגמן",
         "#7F003F"),
        ("ורוד",
         "#FF007F"),
        ("זהב",
         "#FFDF00"),
        ("חאקי",
         "#C3B091"),
        ("חום",
         "#7F3F00"),
        ("טורקיז",
         "#40E0D0"),
        ("ירוק",
         "#00FF00"),
        ("כחול",
         "#0000FF"),
        ("כסף",
         "#C0C0C0"),
        ("כתום",
         "#FF7F00"),
        ("לבן",
         "#FFFFFF"),
        ("מג'נטה",
         "#FF00FF"),
        ("סגול",
         "#7F00FF"),
        ("צהוב",
         "#FFFF00"),
        ("ציאן",
         "#00FFFF"),
        ("קרדינל",
         "#C41E3A"),
        ("שחור",
         "#000000"),
        ("שני",
         "#7F0000"),
        ("תכלת",
         "#007FFF"),
    ))

    # Subset of all_colors considered "web safe" by the base provider API.
    safe_colors = (
        "אדום",
        "ירוק",
        "כחול",
        "צהוב",
        "ציאן",
        "מג'נטה",
        "לבן",
    )
| StarcoderdataPython |
3388616 | <filename>src/wms_layers.py
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
from extensions.settings_storage import SettingType, SettingsStorage
from extensions.session_manager import get_session
import copy
import src.log as log
logger = log.get_logger(__name__)
LAYERS_SETTING = 'layers'
class WMSLayers:
    """Manages the catalogue of WMS map layers for the map editor.

    Layers are persisted in a SettingsStorage at three levels -- SYSTEM
    (shared defaults), PROJECT (per user group) and USER (personal) -- all
    under the 'layers' key.  The current user, groups and roles are read
    from the Flask session helpers.
    """
    def __init__(self, settings_storage: SettingsStorage):
        self.settings_storage = settings_storage
        # add initial layers when not in the system settings
        if not self.settings_storage.has_system(LAYERS_SETTING):
            self.settings_storage.set_system(LAYERS_SETTING, default_wms_layers)
        else:
            logger.info('Found WMS layers in User Settings')

    def add_wms_layer(self, layer_id, layer):
        """Store ``layer`` under ``layer_id`` at the level given by the
        layer's own 'setting_type' / 'project_name' fields."""
        setting_type = SettingType(layer['setting_type'])
        project_name = layer['project_name']
        identifier = self._get_identifier(setting_type, project_name)
        layer['visible'] = False  # make sure we don't save the visibility
        if identifier is not None and self.settings_storage.has(setting_type, identifier, LAYERS_SETTING):
            layers = self.settings_storage.get(setting_type, identifier, LAYERS_SETTING)
        else:
            layers = dict()
        layers[layer_id] = layer
        self.settings_storage.set(setting_type, identifier, LAYERS_SETTING, layers)

    def remove_wms_layer(self, layer_id):
        """Delete the layer with ``layer_id`` from whichever level it lives in."""
        # as we only have an ID, we don't know if it is a user, project or system layer
        # get the whole list, so we can find out the setting_type
        layer = self.get_layers()[LAYERS_SETTING][layer_id]
        setting_type = SettingType(layer['setting_type'])
        identifier = self._get_identifier(setting_type, layer['project_name'])
        if identifier is None:
            return
        if self.settings_storage.has(setting_type, identifier, LAYERS_SETTING):
            # update layer dict
            layers = self.settings_storage.get(setting_type, identifier, LAYERS_SETTING)
            logger.info('Deleting layer {}'.format(layers[layer_id]))
            del(layers[layer_id])
            self.settings_storage.set(setting_type, identifier, LAYERS_SETTING, layers)

    def _get_identifier(self, setting_type: SettingType, project_name=None):
        """Map a setting type (+ optional project name) to the storage key.

        Returns None for an unknown setting type; USER keys come from the
        session e-mail, PROJECT keys from the (space-stripped) group name.
        """
        if setting_type is None:
            return
        elif setting_type == SettingType.USER:
            identifier = get_session('user-email')
        elif setting_type == SettingType.PROJECT:
            if project_name is not None:
                identifier = project_name.replace(' ', '_')
            else:
                identifier = 'unnamed project'
        elif setting_type == SettingType.SYSTEM:
            identifier = SettingsStorage.SYSTEM_NAME_IDENTIFIER
        else:
            return None
        return identifier

    def get_layers(self):
        """Return all layers visible to the current session's user, merged
        from SYSTEM, USER and each of the user's PROJECT groups, plus the
        group metadata the front end needs."""
        # gets the default list and adds the user layers
        all_layers = dict()
        if self.settings_storage.has_system(LAYERS_SETTING):
            all_layers.update(self.settings_storage.get_system(LAYERS_SETTING))
        user = get_session('user-email')
        user_group = get_session('user-group')
        role = get_session('user-role')
        mapeditor_role = get_session('user-mapeditor-role')
        # logger.debug('User: {}'.format(user))
        # logger.debug('Groups: {}'.format(user_group))
        # logger.debug('Roles: {}'.format(role))
        # logger.debug('Mapeditor roles: {}'.format(mapeditor_role))
        if user is not None and self.settings_storage.has_user(user, LAYERS_SETTING):
            # add user layers if available
            all_layers.update(self.settings_storage.get_user(user, LAYERS_SETTING))
        if user_group is not None:
            for group in user_group:
                identifier = self._get_identifier(SettingType.PROJECT, group)
                if self.settings_storage.has_project(identifier, LAYERS_SETTING):
                    # add project layers if available
                    all_layers.update(self.settings_storage.get_project(identifier, LAYERS_SETTING))
        # generate message
        # deepcopy so the module-level default group list is never mutated
        message = copy.deepcopy(default_wms_layer_groups)
        possible_groups = message["groups"]
        # if enough rights, mark Standard layers editable
        if 'mapeditor-admin' in mapeditor_role:
            for g in possible_groups:
                if g['setting_type'] == SettingType.SYSTEM.value:
                    g['readonly'] = False
        possible_groups.extend(self._create_group_layers_for_projects(user_group))
        message[LAYERS_SETTING] = all_layers
        # logger.debug(message)
        return message

    def _create_group_layers_for_projects(self, groups):
        """Build an editable front-end group entry per project the user is in."""
        project_list = list()
        if groups is not None:
            for group in groups:
                identifier = self._get_identifier(SettingType.PROJECT, group)
                json = {"setting_type": SettingType.PROJECT.value, "project_name": identifier, "name": "Project Layers for " + group, "readonly": False}
                project_list.append(json)
        return project_list
return project_list
# Built-in grouping of the layer catalogue shown in the front end: personal
# (USER) and standard (SYSTEM) layers.  Per-project groups are appended
# dynamically at request time, which is why only two entries are listed here.
default_wms_layer_groups = {
    "groups": [
        {"setting_type": SettingType.USER.value, "project_name": SettingType.USER.value, "name": "Personal Layers", "readonly": False},
        {"setting_type": SettingType.SYSTEM.value, "project_name": SettingType.SYSTEM.value, "name": "Standard Layers", "readonly": True}
    ]
}
#{"id": SettingType.PROJECT.value, "name": "Project Layers"},
# Default SYSTEM-level WMS layer catalogue, loaded into the settings storage
# on first start.  Each entry maps a layer id to its WMS endpoint, layer
# name, ownership level ('setting_type'/'project_name'), legend and
# attribution.  'visible' and 'layer_ref' are runtime fields and start empty.
# FIX: added the "legend_url" key to AHN2_5m and RVO_Restwarmte -- every
# other active entry carries it (an empty string means "no legend"), and the
# missing key made the schema inconsistent for consumers.
default_wms_layers = {
    "AHN2_5m": {
        "description": "AHN2 5 meter",
        "url": "http://geodata.nationaalgeoregister.nl/ahn2/wms?",
        "layer_name": "ahn2_5m",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": ''
    },
    "RVO_Restwarmte": {
        "description": "Restwarmte (RVO: ligging industrie)",
        "url": "https://geodata.nationaalgeoregister.nl/restwarmte/wms?",
        "layer_name": "liggingindustrieco2",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": ''
    },
    "LianderHS": {
        "description": "Liander hoogspanningskabels",
        "url": "https://geodata.nationaalgeoregister.nl/liander/elektriciteitsnetten/v1/wms?",
        "layer_name": "hoogspanningskabels",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": ''
    },
    "LianderMS": {
        "description": "Liander middenspanningskabels",
        "url": "https://geodata.nationaalgeoregister.nl/liander/elektriciteitsnetten/v1/wms?",
        "layer_name": "middenspanningskabels",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": ''
    },
    "LT_WarmteBronnen_ECW": {
        "description": "LT_WarmteBronnen_ECW",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "LT_WarmteBronnen_ECW",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "WarmteNetten": {
        "description": "WarmteNetten",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "WarmteNetten",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "GasLeidingenEnexis": {
        "description": "GasLeidingenEnexis",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "GasLeidingenEnexis",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "GasLeidingenStedin": {
        "description": "GasLeidingenStedin",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "GasLeidingenStedin",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "CO2EmissieBedrijven": {
        "description": "CO2EmissieBedrijven",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "CO2EmissieBedrijven",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "AardwarmteKrijtJura": {
        "description": "AardwarmteKrijtJura",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "AardwarmteKrijtJura",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    # "AardwarmteTriasMap": {
    #     "description": "AardwarmteTriasMap",
    #     "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
    #     "layer_name": "AardwarmteTriasMap",
    #     "legend_url": "",
    #     "layer_ref": None,
    #     "visible": False,
    #     "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    # },
    "AardwarmteRotliegend": {
        "description": "AardwarmteRotliegend",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "AardwarmteRotliegend",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "CondensWarmte": {
        "description": "Potentieel Restwarmte uit koelinstallaties voor MT warmtenetten",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "CondensWarmte",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    "DataCentraWarmte": {
        "description": "Potentieel Restwarmte uit DataCentra voor LT warmtenetten",
        "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
        "layer_name": "DataCentraWarmte",
        "setting_type": SettingType.SYSTEM.value,
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    },
    # "gs_warm": {
    #     "description": "Potentieel warmte uit vertikale gesloten WKO (warmte koude opslag)",
    #     "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
    #     "layer_name": "gs_warm",
    #     "legend_url": "",
    #     "layer_ref": None,
    #     "visible": False,
    #     "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    # },
    # "gs_koud": {
    #     "description": "Potentieel koude uit vertikale gesloten WKO (warmte koude opslag)",
    #     "url": "https://rvo.b3p.nl/geoserver/WarmteAtlas/wms?",
    #     "layer_name": "gs_koud",
    #     "legend_url": "",
    #     "layer_ref": None,
    #     "visible": False,
    #     "attribution": 'Mapdata <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a> © <a href="http://www.warmteatlas.nl">WarmteAtlas RVO</a>'
    # },
    "PICO Hoogspanningsnet 2018": {
        "description": "PICO Hoogspanningsnet 2018",
        "url": "https://pico.geodan.nl/cgi-bin/qgis_mapserv.fcgi?DPI=120&map=/usr/lib/cgi-bin/projects/Hoogspanningsnet_2018.qgs",
        "layer_name": "Hoogspanningsnet_2018",
        "setting_type": SettingType.PROJECT.value,
        "project_name": "MCS",
        "legend_url": "https://pico.geodan.nl/cgi-bin/qgis_mapserv.fcgi?DPI=96&map=/usr/lib/cgi-bin/projects/Hoogspanningsnet_2018.qgs&request=GetLegendGraphic&service=WMS&itemFONTSIZE=7&format=png&layertitle=false&layer=Hoogspanningsnet_2018",
        "layer_ref": None,
        "visible": False,
        "attribution": 'PICO'
    },
    "TEO": {
        "description": "Thermische energie uit oppervlaktewater",
        "url": "https://stowa.geoapps.nl/proxy?auth=null&path=https://geosrv02a.geoapps.nl/geoserver/b8e2d7c2645c48359cc2994f45f10940/wms?",
        "layer_name": "a3643e0e53fa4174a4ead41f56659a6e",
        "setting_type": SettingType.PROJECT.value,
        "project_name": "MCS",
        "legend_url": "",
        "layer_ref": None,
        "visible": False,
        "attribution": 'Stowa'
    }
}
# zie https://stowa.geoapps.nl/Overzichtskaart voor meer TEO/TEA kaartlagen
# TEA - economische potentie - direct - RWZI: https://stowa.geoapps.nl/proxy?auth=null&path=https://geosrv02c.geoapps.nl/geoserver/b8e2d7c2645c48359cc2994f45f10940/wms?
# LAYERS=2bd449fcb6e74f81af1ccdba770c1b0a
# TEA - economische potentie - direct - Leidingen: https://stowa.geoapps.nl/proxy?auth=null&path=https://geosrv02c.geoapps.nl/geoserver/b8e2d7c2645c48359cc2994f45f10940/wms?
# LAYERS=d21caa0e2cfb4d638aa74b23f5b2b34f
# TEA - economische potentie - direct - Gemalen: https://stowa.geoapps.nl/proxy?auth=null&path=https://geosrv02e.geoapps.nl/geoserver/b8e2d7c2645c48359cc2994f45f10940/wms?
# LAYERS=88c739dc2cac4ab2a0740af5b5513965
"""
PICO Hoogspanningsnet 2018
https://pico.geodan.nl/cgi-bin/qgis_mapserv.fcgi?DPI=120&map=/usr/lib/cgi-bin/projects/Hoogspanningsnet_2018.qgs
Hoogspanningsnet_2018
https://pico.geodan.nl/cgi-bin/qgis_mapserv.fcgi?DPI=96&map=/usr/lib/cgi-bin/projects/Hoogspanningsnet_2018.qgs&request=GetLegendGraphic&service=WMS&itemFONTSIZE=7&format=png&layertitle=false&layer=Hoogspanningsnet_2018
""" | StarcoderdataPython |
1766315 | <gh_stars>0
import itertools, struct
class BinaryObject(object):
    """Base class for declarative binary-structure parsing.

    Subclasses describe their layout in ``_fields_`` as ``(slot, fmt)``
    pairs, where ``fmt`` is either a ``struct`` format string or another
    BinaryObject subclass (nested structure).  A slot name of ``None``
    consumes bytes as padding without storing a value, and a name such as
    ``'vals[n]'`` parses an array whose length is the literal integer ``n``
    or the value of a previously parsed slot named ``n``.

    FIX: converted the Python-2-only ``except Exception,e:`` clauses to the
    portable form and narrowed the bare ``except:`` so that e.g.
    KeyboardInterrupt is no longer swallowed.
    """
    _fields_ = []

    def __init__(self):
        # Number of bytes consumed from the buffer so far (see __len__).
        self._total_size = 0

    def __len__(self):
        return self._total_size

    def __repr__(self):
        result = ['{']
        for slot, fmt in self._fields_:
            if slot is None:
                continue  # padding fields carry no value
            if '[' in slot:
                slot = slot.split('[')[0]
            result.append(repr(slot) + ':')
            result.append(repr(getattr(self, slot)))
        result.append('}')
        return ' '.join(result)

    def _initialize_slots(self, buf, offset, field_specs):
        """Parse ``field_specs`` from ``buf`` starting at ``offset`` plus the
        bytes already consumed, assigning each named slot on ``self``."""
        total = self._total_size
        for slot, fmt in field_specs:
            reps = 1
            try:
                if '[' in slot:
                    reps = slot.split('[')[1][:-1]
                    slot = slot.split('[')[0]
                    try:
                        reps = int(reps)
                    except Exception:
                        # Array length comes from a previously parsed slot.
                        reps = getattr(self, reps)
            except Exception:
                # slot is None (padding) -- '[' in None raises; keep reps=1.
                pass
            x = []
            size = 0
            for i in range(reps):
                if isinstance(fmt, type):
                    # Nested BinaryObject: recurse and advance by its size.
                    val = fmt.from_buf(buf, offset + total + size)
                    assert isinstance(val, BinaryObject)
                    x.append(val)
                    size += len(val)
                else:
                    try:
                        x += struct.unpack_from(fmt, buf, offset + total + size)
                    except Exception:
                        raise Exception("Failed parsing %s from offset %d"%(fmt,offset + total))
                    size += struct.calcsize(fmt)
            total += size
            if reps == 1 and len(x) == 1:
                x = x[0]  # scalar fields are stored unwrapped, not as [value]
            if slot is not None:
                setattr(self, slot, x)
        self._total_size = total

    @classmethod
    def from_buf(cls, buf, offset=0):
        """Parse an instance of ``cls`` from ``buf`` at ``offset``.

        Walks the (single-inheritance) class chain back to BinaryObject so
        that base-class fields are parsed before subclass fields.
        """
        inheritance_chain = []
        pos = cls
        while pos != BinaryObject:
            inheritance_chain.append(pos)
            bases = pos.__bases__
            assert len(bases) == 1
            pos = bases[0]
        inheritance_chain.reverse()
        # Determine all the field names and specs that we need to read.
        all_field_specs = itertools.chain(*[c._fields_
                                            for c in inheritance_chain])
        # Create the actual object and populate its fields.
        obj = cls()
        obj._initialize_slots(buf, offset, all_field_specs)
        return obj
class UserRegsStruct(BinaryObject):
    # x86-64 general-purpose register dump: 27 little-endian unsigned 64-bit
    # fields in the same order as the kernel's user_regs_struct.
    _fields_ = [('r15', '<Q'),
                ('r14', '<Q'),
                ('r13', '<Q'),
                ('r12', '<Q'),
                ('rbp', '<Q'),
                ('rbx', '<Q'),
                ('r11', '<Q'),
                ('r10', '<Q'),
                ('r9', '<Q'),
                ('r8', '<Q'),
                ('rax', '<Q'),
                ('rcx', '<Q'),
                ('rdx', '<Q'),
                ('rsi', '<Q'),
                ('rdi', '<Q'),
                ('orig_rax', '<Q'),
                ('rip', '<Q'),
                ('cs', '<Q'),
                ('eflags', '<Q'),
                ('rsp', '<Q'),
                ('ss', '<Q'),
                ('fs_base', '<Q'),
                ('gs_base', '<Q'),
                ('ds', '<Q'),
                ('es', '<Q'),
                ('fs', '<Q'),
                ('gs', '<Q')]
class FPUStackElement(BinaryObject):
    # One 80-bit x87 stack register (mantissa + exponent) padded to 16 bytes.
    _fields_ = [
        ('mantissa', '<Q'),
        ('exponent', '<H'),
        (None, '6c')]
class FPUStack(BinaryObject):
    # The eight x87 floating-point stack registers st0..st7.
    _fields_ = [
        ('st0', FPUStackElement),
        ('st1', FPUStackElement),
        ('st2', FPUStackElement),
        ('st3', FPUStackElement),
        ('st4', FPUStackElement),
        ('st5', FPUStackElement),
        ('st6', FPUStackElement),
        ('st7', FPUStackElement),]
class XMMReg(BinaryObject):
    # A 128-bit XMM register as two little-endian 64-bit halves.
    _fields_ = [
        ('low', '<Q'),
        ('high', '<Q')]
class XMMSpace(BinaryObject):
    # The sixteen SSE registers xmm0..xmm15.
    _fields_ = [
        ('xmm0', XMMReg),
        ('xmm1', XMMReg),
        ('xmm2', XMMReg),
        ('xmm3', XMMReg),
        ('xmm4', XMMReg),
        ('xmm5', XMMReg),
        ('xmm6', XMMReg),
        ('xmm7', XMMReg),
        ('xmm8', XMMReg),
        ('xmm9', XMMReg),
        ('xmm10', XMMReg),
        ('xmm11', XMMReg),
        ('xmm12', XMMReg),
        ('xmm13', XMMReg),
        ('xmm14', XMMReg),
        ('xmm15', XMMReg),]
class UserFpregsStruct(BinaryObject):
    # FXSAVE-style floating-point state: x87 control/status words, the x87
    # stack, the XMM registers, and trailing reserved/padding bytes.
    _fields_ = [('cwd', '<H'),
                ('swd', '<H'),
                ('ftw', '<H'),
                ('fop', '<H'),
                ('rip', '<Q'),
                ('rdp', '<Q'),
                ('mxcsr', '<L'),
                ('mxcsr_mask', '<L'),
                ('st_space', FPUStack),
                ('xmm_space', XMMSpace),
                (None, '96c'),
                ]
class MappedRange32(BinaryObject):
    # One mapped memory range: file offset, 32-bit begin/end addresses and
    # read/write/execute permission flags, padded to an 8-byte boundary.
    _fields_ = [('fd_offs', '<Q'),
                ('begin', '<L'),
                ('end', '<L'),
                ('is_r', 'B'),
                ('is_w', 'B'),
                ('is_x', 'B'),
                (None, '5c')]
class Grr(BinaryObject):
    # Top-level dump format: magic, executable number, full register state
    # and a fixed-size table of 652 mapped memory ranges.
    _fields_ = [('magic', '4c'),
                ('exe_num','<L'),
                ('gregs', UserRegsStruct),
                ('fpregs', UserFpregsStruct),
                ('ranges[652]', MappedRange32) ]
if __name__ == '__main__':
    import sys
    # FIX: the original Python 2 code used file(...) in text mode and never
    # closed the handle; binary dumps must be opened 'rb' and a context
    # manager guarantees the file is closed.
    with open(sys.argv[1], 'rb') as f:
        mbb = Grr.from_buf(f.read())
    print(mbb)
54023 | <reponame>HBOMAT/AglaUndZufall
#!/usr/bin/python
# -*- coding utf-8 -*-
#
# zufall - Funktionen
#
#
# This file is part of zufall
#
#
# Copyright (c) 2019 <NAME> <EMAIL>
#
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Inhalt:
#
# abs, sqrt, ... - Mathematische Funktionen
# is_zahl - Test auf Zahl
# mit_param - Test auf Parameter
# permutationen, perm - Permutationen
# kombinationen, komb - Kombinationen
# variationen - Variationen
# zuf_zahl - Erzeugung von Zufallszahlen
# anzahl - Anzahl des Vorkommens eines Elementes in
# einer DatenReihe / Liste
# anzahl_treffer - Anzahl Treffer
# summe - Summe der Elemente einer Liste / DatenReihe
# ja_nein - Bewertung logischer Ausdrücke
# auswahlen - k-Auswahlen aus n Objekten
# gesetze - Einige Gesetze der Wahrscheinlichkeitsrechnung
# stochastisch - Test auf stochastischen Vektor / Matrix
# löse - Solver für Gleichungen / Ungleichungen
# einfach - Vereinfachung von Vektoren / Matrizen
# ja, nein, ... - Hilfsgrößen für True/False
# Hilfe - Hilfefunktion
import importlib
from itertools import (product, permutations, combinations,
combinations_with_replacement)
from Lib.random import randint, sample
from IPython.display import display, Math
from sympy import (Symbol, nsimplify, simplify, solve, radsimp, trigsimp,
signsimp)
from sympy.core.compatibility import iterable
from sympy import (Integer, Rational, Float, Add, Mul, Pow, Mod,
N, factorial, binomial as Binomial)
from sympy.core.numbers import Zero, One, NegativeOne, Half, E
from sympy.core.sympify import sympify
from sympy.core.containers import Tuple
from sympy import (
Abs, sqrt as Sqrt, exp as Exp, log as Log,
sin as Sin, cos as Cos, tan as Tan, cot as Cot,
asin as Asin, acos as Acos, atan as Atan, acot as Acot,
sinh as Sinh, cosh as Cosh, tanh as Tanh,
asinh as Asinh, acosh as Acosh, atanh as Atanh,
re as Re, im as Im, conjugate as Conjugate)
from sympy.functions.elementary.miscellaneous import Max, Min
from sympy.printing.latex import latex
from sympy.matrices import Matrix as SympyMatrix
from sympy import solveset, S, pi
from zufall.lib.objekte.basis import ZufallsObjekt
from zufall.lib.objekte.ausnahmen import ZufallError
import zufall
# ---------------------------
# Umrechnung Bogenmaß in Grad
# ---------------------------
def deg(*args, **kwargs):
    """Convert an angle from radians to degrees.

    Keyword ``h`` prints a German help text; keyword ``d`` requests a
    decimal result via ``wert_ausgabe``.  ``grad`` is a synonym.
    """
    if kwargs.get("h"):
        print("\nUmrechnung Bogen- in Gradmaß - Funktion\n")
        print("Aufruf deg( winkel )\n")
        print(" winkel Winkel in Bogenmaß\n")
        print("Synonymer Bezeichner grad\n")
        print("Rückgabe Winkel in Grad\n")
        print("Zusatz d=n Dezimaldarstellung")
        print(" n - Anzahl der Nachkommastellen\n")
        return
    if len(args) != 1:
        print("agla: eine Zahl angeben")
        return
    winkel = args[0]
    if not is_zahl(winkel):
        print("agla: eine Zahl angeben")
        return
    ergebnis = winkel * 180 / pi
    stellen = kwargs.get('d')
    return wert_ausgabe(ergebnis, stellen) if stellen else ergebnis

grad = deg
# ---------------------------
# Umrechnung Grad in Bogenmaß
# ---------------------------
def rad(*number, **kwargs):
    """Convert an angle from degrees to radians.

    Keyword d=n returns a decimal value with n digits; otherwise the
    result is simplified in terms of pi via nsimplify.
    """
    if kwargs.get("h"):
        print("\nUmrechnung Grad- in Bogenmaß - Funktion\n")
        print("Aufruf rad( winkel )\n")
        print(" winkel Winkel in Grad\n")
        print("Synonymer Bezeichner bog\n")
        print("Rückgabe Winkel in Bogenmaß\n")
        print("Zusatz d=n Dezimaldarstellung")
        print(" n - Anzahl der Nachkommastellen\n")
        return
    if len(number) != 1:
        print("agla: eine Zahl angeben")
        return
    number = number[0]
    if not is_zahl(number):
        # fixed: message previously demanded "eine Zahl aus [-1, 1]", which is
        # wrong for a degree->radian conversion (copy/paste from asin/acos)
        print("agla: eine Zahl angeben")
        return
    wert = number / 180 * pi
    d = kwargs.get('d')
    if d:
        return wert_ausgabe(wert, d)
    return nsimplify(wert, [pi])
bog = rad
# -----------------------------------
# Allgemeine mathematische Funktionen
# -----------------------------------
def abs(*number, **kwargs):
    """Absolute value (shadows the builtin by library design)."""
    if kwargs.get("h"):
        print("\nBetrags - Funktion\n")
        print("Aufruf abs( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Abs(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def sqrt(*number, **kwargs):
    """Square root; real-valued for x > 0. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nWurzel - Funktion\n")
        print("Aufruf sqrt( x )\n")
        print(" x Zahl\n")
        print("Rückgabe einer reellen Zahl bei x > 0\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Sqrt(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def exp(*number, **kwargs):
    """Exponential function. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nExponential - Funktion\n")
        print("Aufruf exp( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Exp(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def log(*number, **kwargs):
    """Natural logarithm; real-valued for x > 0. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nNatürlicher Logarithmus - Funktion\n")
        print("Aufruf ln( x )")
        print("oder log( x )\n")
        print(" x Zahl\n")
        print("Rückgabe einer reellen Zahl bei x > 0\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Log(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
ln = log
def lg(*number, **kwargs):
    """Base-10 logarithm; real-valued for x > 0. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nDekadischer Logarithmus - Funktion\n")
        print("Aufruf lg( x )\n")
        print(" x Zahl\n")
        print("Rückgabe einer reellen Zahl bei x > 0\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Log(number[0], 10)
    return N(ergebnis) if kwargs.get("d") else ergebnis
def max(*numbers, **kwargs):
    """Largest number of a sequence (shadows the builtin by library design).

    Accepts either several number arguments or a single container of numbers.
    """
    if kwargs.get("h"):
        print("\nGrößte Zahl in einer Folge von Zahlen\n")
        print("Aufruf max( x1, x2, ... )\n")
        print(" x Zahl\n")
        return
    # fixed: an empty call previously crashed with IndexError on numbers[0]
    if not numbers:
        print("agla: nur Zahlen angeben")
        return
    if isinstance(numbers[0], (list, tuple, Tuple, set, dict)):
        zahlen = [x for x in numbers[0]]
    else:
        zahlen = [x for x in numbers]
    if not all([is_zahl(x) for x in zahlen]):
        print("agla: nur Zahlen angeben")
        return
    wert = Max(*zahlen)
    return wert
def min(*numbers, **kwargs):
    """Smallest number of a sequence (shadows the builtin by library design).

    Accepts either several number arguments or a single container of numbers.
    """
    if kwargs.get("h"):
        print("\nKleinste Zahl in einer Folge von Zahlen\n")
        print("Aufruf min( x1, x2, ... )\n")
        print(" x Zahl\n")
        return
    # fixed: an empty call previously crashed with IndexError on numbers[0]
    if not numbers:
        print("agla: nur Zahlen angeben")
        return
    if isinstance(numbers[0], (list, tuple, Tuple, set, dict)):
        zahlen = [x for x in numbers[0]]
    else:
        zahlen = [x for x in numbers]
    if not all([is_zahl(x) for x in zahlen]):
        print("agla: nur Zahlen angeben")
        return
    wert = Min(*zahlen)
    return wert
def re(*number, **kwargs):
    """Real part of a complex number."""
    if kwargs.get("h"):
        print("\nRealteil einer komplexen Zahl\n")
        print("Aufruf re( z )\n")
        print(" z komplexe Zahl\n")
        return
    if len(number) != 1:
        print("agla: eine Zahl angeben")
        return
    zahl = number[0]
    if not is_zahl(zahl):
        print("agla: eine komplexe Zahl angeben")
        return
    return Re(zahl)
def im(*number, **kwargs):
    """Imaginary part of a complex number."""
    if kwargs.get("h"):
        print("\nImaginärteil einer komplexen Zahl\n")
        print("Aufruf im( z )\n")
        print(" z komplexe Zahl\n")
        return
    if len(number) != 1:
        # fixed for consistency with re(): generic message for the arity check
        print("agla: eine Zahl angeben")
        return
    number = number[0]
    if not is_zahl(number):
        # fixed for consistency with re(): specific message for the type check
        print("agla: eine komplexe Zahl angeben")
        return
    wert = Im(number)
    return wert
def conjugate(*number, **kwargs):
    """Complex conjugate of a number."""
    if kwargs.get("h"):
        print("\nKonjugiert - komplexe Zahl\n")
        print("Aufruf conjugate( z )")
        print(" oder konjugirt( z )\n")
        print(" z komplexe Zahl\n")
        return
    if len(number) != 1:
        # fixed for consistency with re(): generic message for the arity check
        print("agla: eine Zahl angeben")
        return
    number = number[0]
    if not is_zahl(number):
        # fixed for consistency with re(): specific message for the type check
        print("agla: eine komplexe Zahl angeben")
        return
    wert = Conjugate(number)
    return wert
konjugiert = conjugate
# -------------------------------------------------
# Trigonometrische und Umkehr-Funktionen - Bogenmaß
# -------------------------------------------------
def sin(*number, **kwargs):
    """Sine of an angle given in radians. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nSinus - Funktion\n")
        print("Aufruf sin( winkel )\n")
        print(" winkel Winkel in Bogenmaß\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Sin(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def arcsin(*number, **kwargs):
    """Arcsine; real-valued for x in [-1, 1]. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkussinus - Funktion\n")
        print("Aufruf arcsin( x )")
        print(" oder asin( x )\n")
        print(" x Zahl\n")
        print("Rückgabe einer reellen Zahl bei x in [-1, 1]\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Asin(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
asin = arcsin
def cos(*number, **kwargs):
    """Cosine of an angle given in radians. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nKosinus - Funktion\n")
        print("Aufruf cos( winkel )\n")
        print(" winkel Winkel in Bogenmaß\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Cos(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def arccos(*number, **kwargs):
    """Arccosine; real-valued for x in [-1, 1]. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkuskosinus - Funktion\n")
        print("Aufruf arccos( x )")
        print(" oder acos( x )\n")
        print(" x Zahl\n")
        print("Rückgabe einer reellen Zahl bei x in [-1, 1]\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Acos(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
acos = arccos
def tan(*number, **kwargs):
    """Tangent of an angle given in radians. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nTangens - Funktion\n")
        print("Aufruf tan( winkel )\n")
        print(" winkel Winkel in Bogenmaß\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Tan(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def arctan(*number, **kwargs):
    """Arctangent. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkustangens - Funktion\n")
        print("Aufruf arctan( x )")
        print(" oder atan( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Atan(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
atan = arctan
def cot(*number, **kwargs):
    """Cotangent of an angle given in radians. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nKotangens - Funktion\n")
        print("Aufruf cot( winkel )\n")
        print(" winkel Winkel in Bogenmaß\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Cot(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def arccot(*number, **kwargs):
    """Arccotangent. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkuskotangens - Funktion\n")
        print("Aufruf arccot( x )")
        print(" oder acot( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Acot(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
acot = arccot
# ------------------------------------------------
# Trigonometrische und Umkehr-Funktionen - Gradmaß
# ------------------------------------------------
def sing(*number, **kwargs):
    """Sine of an angle given in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nSinus für Gradwerte - Funktion\n")
        print("Aufruf sing( winkel )\n")
        print(" winkel Winkel in Grad\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = sin(number[0] * pi /180)
    return N(ergebnis) if kwargs.get("d") else ergebnis
def cosg(*number, **kwargs):
    """Cosine of an angle given in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nKosinus für Gradwerte - Funktion\n")
        print("Aufruf cosg( winkel )\n")
        print(" winkel Winkel in Grad\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = cos(number[0] * pi /180)
    return N(ergebnis) if kwargs.get("d") else ergebnis
def tang(*number, **kwargs):
    """Tangent of an angle given in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nTangens für Gradwerte - Funktion\n")
        print("Aufruf tang( winkel )\n")
        print(" winkel Winkel in Grad\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = tan(number[0] * pi /180)
    return N(ergebnis) if kwargs.get("d") else ergebnis
def cotg(*number, **kwargs):
    """Cotangent of an angle given in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nKotangens für Gradwerte - Funktion\n")
        print("Aufruf cotg( winkel )\n")
        print(" winkel Winkel in Grad\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    # expressed via 1/tan, as in the rest of this group
    ergebnis = 1 / tan(number[0] * pi /180)
    return N(ergebnis) if kwargs.get("d") else ergebnis
def asing(*number, **kwargs):
    """Arcsine with the result in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkussinus in Grad - Funktion\n")
        print("Aufruf arcsing( x )")
        print("oder asing( x )\n")
        print(" x Zahl \n")
        print("Rückgabe einer reellen Zahl bei x in [-1, 1]\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    try:
        if len(number) != 1:
            # fixed: AglaError is undefined in this module (NameError);
            # ZufallError is the exception type imported here
            raise ZufallError("eine Zahl angeben")
        number = sympify(number[0])
        if not is_zahl(number):
            raise ZufallError("eine Zahl angeben")
    except ZufallError as e:
        print('agla:', str(e))
        return
    # nsimplify can recurse deeply on some inputs; ignore that failure
    try:
        number = nsimplify(number)
    except RecursionError:
        pass
    wert = asin(number)*180/pi
    if kwargs.get("d"):
        return N(wert)
    return wert
arcsing = asing
def acosg(*number, **kwargs):
    """Arccosine with the result in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkuskosinus in Grad - Funktion\n")
        print("Aufruf arccosg( x )")
        print("oder acosg( x )\n")
        print(" zahl Zahl \n")
        print("Rückgabe einer reellen Zahl bei x in [-1, 1]\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    try:
        if len(number) != 1:
            # fixed: AglaError is undefined in this module (NameError);
            # ZufallError is the exception type imported here
            raise ZufallError("eine Zahl angeben")
        number = sympify(number[0])
        if not is_zahl(number):
            raise ZufallError("eine Zahl angeben")
        number = re(number)
    except ZufallError as e:
        print('agla:', str(e))
        return
    # nsimplify can recurse deeply on some inputs; ignore that failure
    try:
        number = nsimplify(number)
    except RecursionError:
        pass
    wert = acos(number)*180/pi
    if kwargs.get("d"):
        return N(wert)
    return wert
arccosg = acosg
def atang(*number, **kwargs):
    """Arctangent with the result in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkustangens in Grad - Funktion\n")
        print("Aufruf arctang( x )")
        print("oder atang( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    if len(number) != 1:
        print("agla: eine Zahl angeben")
        return
    number = sympify(number[0])
    if not is_zahl(number):
        print("agla: eine Zahl angeben")
        return
    # fixed: a redundant, unguarded nsimplify call preceded this guarded one
    # and could itself raise RecursionError, defeating the guard
    try:
        number = nsimplify(number)
    except RecursionError:
        pass
    wert = atan(number) * 180 / pi
    if kwargs.get("d"):
        return N(wert)
    return wert
arctang = atang
def acotg(*number, **kwargs):
    """Arccotangent with the result in degrees. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nArkuskotangens in Grad - Funktion\n")
        print("Aufruf arccotg( x )")
        print("oder acotg( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    if len(number) != 1:
        print("agla: eine Zahl angeben")
        return
    number = sympify(number[0])
    if not is_zahl(number):
        print("agla: eine Zahl angeben")
        return
    # fixed: a redundant, unguarded nsimplify call preceded this guarded one
    # and could itself raise RecursionError, defeating the guard
    try:
        number = nsimplify(number)
    except RecursionError:
        pass
    wert = acot(number) * 180 / pi
    if kwargs.get("d"):
        return N(wert)
    return wert
arccotg = acotg
# -----------------------------------
# Hyperbolische und Umkehr-Funktionen
# -----------------------------------
def sinh(*number, **kwargs):
    """Hyperbolic sine. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nSinus hyperbolikus - Funktion\n")
        print("Aufruf sinh( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Sinh(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def cosh(*number, **kwargs):
    """Hyperbolic cosine. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nKosinus hyperbolikus - Funktion\n")
        print("Aufruf cosh( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Cosh(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def tanh(*number, **kwargs):
    """Hyperbolic tangent. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nTangens hyperbolikus - Funktion\n")
        print("Aufruf tanh( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Tanh(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
def asinh(*number, **kwargs):
    """Inverse hyperbolic sine. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nAreasinus - Funktion\n")
        print("Aufruf asinh( x )")
        print(" oder arsinh( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Asinh(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
arsinh = asinh
def acosh(*number, **kwargs):
    """Inverse hyperbolic cosine. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nAreakosinus - Funktion\n")
        print("Aufruf acosh( x )")
        print(" oder arcosh( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Acosh(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
arcosh = acosh
def atanh(*number, **kwargs):
    """Inverse hyperbolic tangent. d=1 returns a decimal value."""
    if kwargs.get("h"):
        print("\nAreatangens - Funktion\n")
        print("Aufruf atanh( x )")
        print(" oder artanh( x )\n")
        print(" x Zahl\n")
        print("Zusatz d=1 Dezimaldarstellung\n")
        return
    # exactly one numeric argument is required
    if len(number) != 1 or not is_zahl(number[0]):
        print("agla: eine Zahl angeben")
        return
    ergebnis = Atanh(number[0])
    return N(ergebnis) if kwargs.get("d") else ergebnis
artanh = atanh
# Test auf eine zufall-zahl
# -------------------------
def is_zahl(x):
    """Return True if x counts as a number for this library.

    Strings are rejected outright; everything else is sympified and
    accepted when sympy reports it as a number or a function expression.
    """
    if isinstance(x, str):
        return False
    x = sympify(x)
    try:
        if x.is_number:
            return True
        elif x.is_Function:
            return True
    except AttributeError:
        # sympify may yield objects without is_number / is_Function
        pass
    # Fallback whitelist checked by exact type.
    # NOTE(review): several entries (sin, cos, ..., exp, log) are this
    # module's own wrapper *functions*, not sympy classes, so those entries
    # can never match type(x) — confirm whether sympy classes were intended.
    zahlen = (Integer, int, Float, float, Symbol, One, Zero, NegativeOne, Half,
              sin, cos, tan, sinh, cosh, tanh, asin, acos, atan, exp, log,
              Mul, Add, Pow)
    return type(x) in zahlen
isZahl = is_zahl
# ------------------------
# Test auf freie Parameter
# ------------------------
def mit_param(obj):
    """Return True if obj (or any element of an iterable) contains free parameters."""
    # lazy import to avoid a circular dependency with the distribution module
    nv = importlib.import_module('zufall.lib.objekte.normal_verteilung')
    NormalVerteilung = nv.NormalVerteilung
    if iterable(obj):
        # recurse over containers; True if any element is parameterized
        test = [mit_param(el) for el in obj]
        return any(test)
    obj = sympify(obj)
    if is_zahl(obj):
        try:
            # a number is parameterized iff it has free symbols
            return bool(obj.free_symbols)
        except SyntaxError:
            # NOTE(review): free_symbols should not raise SyntaxError;
            # AttributeError was probably intended — confirm
            return False
    elif isinstance(obj, NormalVerteilung):
        return mit_param(obj.mu) or mit_param(obj.sigma)
    # NOTE(review): other object types fall through and return None (falsy)
mitParam = mit_param
# --------------------------
# Ausgabe nummerischer Werte
# --------------------------
def wert_ausgabe(wert, d=None):  # internal helper
    """Format a numeric value for output.

    d : number of decimal places (positive int); any other value means
        "no rounding requested". Parameterized values are evaluated with
        sympy N(); plain numbers are returned as Python floats.
    """
    # normalize d: anything but a positive integer disables rounding
    if not isinstance(d, (Integer, int)):
        d = None
    else:
        if d <= 0:
            d = None
    if not d:
        if mit_param(wert):
            return N(wert)
        else:
            # fixed: was eval(format(...)) — float() round-trips the
            # formatted string identically without evaluating code
            return float(format(float(wert)))
    else:
        if mit_param(wert):
            return N(wert, d)
        else:
            # round to d decimals via string formatting, then back to float
            return float(format(float(wert), ".%df" %d ))
wertAusgabe = wert_ausgabe
# ---------
# Fakultaet
# ---------
def fakultaet(*args, **kwargs):
    """Factorial function; accepts a nonnegative integer or a symbolic value."""
    if kwargs.get('h'):
        print("\nfakultät - Fakultätsfunktion\n")
        print("Kurzform fak\n")
        print("Aufruf fak( n )\n")
        print(" n ganze Zahl >= 0\n")
        return
    if len(args) != 1:
        print('zufall: ein Argument angeben')
        return
    n = args[0]
    # symbolic arguments are passed straight through to sympy
    if not mit_param(n) and not (isinstance(n, (int, Integer)) and n >= 0):
        print ('zufall: ganze nichtnegative Zahl angeben')
        return
    return factorial(n)
fak = fakultaet
# -------------------
# Binomialkoeffizient
# -------------------
def binomial(*args, **kwargs):
    """Binomial coefficient B(n, k); n and k may be symbolic."""
    if kwargs.get('h'):
        print("\nbinomial - Binomialkoeffizient\n")
        print("Kurzform B\n")
        print("Aufruf B( n, k )\n")
        print(" n, k ganze Zahl >= 0\n")
        print("Achtung - der Bezeichner B kann überschrieben werden\n")
        return
    if len(args) != 2:
        print('zufall: zwei Argumente angeben')
        return
    n, k = args
    if mit_param(n):
        # symbolic n: k must be symbolic too, or a nonnegative integer
        if mit_param(k) or (isinstance(k, (int, Integer)) and k >= 0):
            return Binomial(n, k)
    elif isinstance(n, (int, Integer)) and n >= 0:
        return Binomial(n, k)
    print('zufall: positive ganze Zahlen angeben')
    return
B = binomial
# -------------
# Permutationen
# -------------
def permutationen(*args, **kwargs):
    """Permutations of a set of elements.

    Without the l/k keywords only the count is returned; l=ja lists the
    permutations, k=ja lists them in short form; f=ja prints formulas.
    """
    if kwargs.get('h'):
        print("\nPermutationen der Elemente einer Menge\n")
        print("Kurzform perm\n")
        print("Aufruf perm( menge | n )\n")
        print(" menge Liste/Tupel/Menge von Elementen | dictionary ")
        print(" Elemente sind Zahlen, Symbole, Zeichenketten")
        print(" ein dictionary enthält (element:anzahl)-Paare")
        print(" n bei Angabe einer ganzen Zahl >0 wird die Menge")
        print(" {1, 2,...,n} verwendet\n")
        print("Zusatz k=ja Ausgabe der Permutationen in Kurzform")
        print(" l=ja Ausgabe der Permutationen in Listenform")
        print(" f=ja Formeln\n")
        print("Beispiele")
        print("perm( [a, b, c, d], k=ja)")
        print("perm( { 0:3, 1:2 }, l=ja)")
        print("perm( 5)\n")
        return
    if kwargs.get('f'):
        # display the counting formulas as LaTeX (IPython)
        i = Symbol('i')
        print(' ')
        display(Math('Anzahl\; der\; Permutationen\; ohne\; Wiederholungen = n!'))
        display(Math('Anzahl\; der\; Permutationen\; mit\; Wiederholungen = \\frac{n!}{n_1!\: n_2!\: ... \:n_p!}'))
        display(Math('n - Anzahl\; der\; Elemente \; der\; Grundgesamtheit'))
        display(Math('n_i - Anzahl\; des\; Auftretens \; des\;' + latex(i) + \
             '.\; Elementes\; in\; der\; Grundgesamtheit, \\quad \\sum\limits_{i=1}^{p}n_i = n'))
        print(' ')
        return
    if len(args) != 1:
        print('zufall: ein Argument angeben')
        return
    menge = args[0]
    # empty input -> empty result (tested before the type check)
    if not menge:
        return []
    if not isinstance(menge, (list, tuple, set, dict, int, Integer)):
        raise ZufallError('Liste/Tupel/Menge von Elementen oder ganze positive Zahl angeben')
    if isinstance(menge, (list, tuple, set)) and not all(map(lambda x: isinstance(x, \
        (int, Integer, Symbol, str)), menge)):
        raise ZufallError("Listenelemente können ganze Zahlen, Symbole oder Zeichenketten sein")
    if isinstance(menge, dict):
        # dict input: expand (element: count) pairs into a flat multiset list
        if not all(map(lambda x: isinstance(x, (int, Integer)) and x > 0, menge.values())):
            raise ZufallError("im dictionary als Werte Anzahlen angeben")
        m = []
        for it in menge:
            m += [it for i in range(menge[it])]
        menge = m
    if isinstance(menge, (int, Integer)):
        # integer n means the set {1, ..., n}
        if menge <= 0:
            raise ZufallError('ganze positive Zahl angeben')
        else:
            menge = range(1, menge+1)
    menge = list(menge)
    menge.sort(key=str)
    # tally multiplicities; wiederh flags whether any element repeats
    di = {menge[0]:1}
    wiederh = False
    for it in menge[1:]:
        try:
            di[it] += 1
            wiederh = True
        except KeyError:
            di[it] = 1
    kwl = kwargs.get('l')
    kwk = kwargs.get('k')
    if not(kwl or kwk):
        # no listing requested -> return the count only
        if not wiederh:
            return factorial(len(menge))
        else:
            # multinomial count n! / (n1! n2! ... np!)
            # NOTE: local N shadows sympy.N within this branch
            N = factorial(len(menge))
            for it in di:
                N = N / factorial(di[it])
            return nsimplify(N)
    if not wiederh:
        pp = list(permutations(menge))
    else:
        # order-preserving removal of duplicate permutation tuples
        def pmw(iterable):
            L = [iterable[0]]
            for i, it in enumerate(iterable):
                if i == 0 or it not in L:
                    L += [it]
                    yield it
        pp = list(pmw(list(permutations(menge))))
    if kwl:
        return pp
    elif kwk:
        # kurz_form is defined elsewhere in the package
        return [kurz_form(x) for x in pp]
perm = permutationen
# -------------
# Kombinationen
# -------------
def kombinationen(*args, **kwargs):
    """k-combinations from a set of n objects.

    Counts or lists (l=ja / k=ja) the combinations, with/without
    repetitions (wiederh) and with/without regard to order (anordn).
    """
    if kwargs.get('h'):
        print("\nKombinationen - k-Kombinationen aus einer Menge von n Objekten\n")
        print("Kurzform komb\n")
        print("Aufruf komb( menge, k, wiederh, anordn )\n")
        print(" menge Liste/Tupel/Menge von Elementen | dictionary |")
        print(" ganze positive Zahl")
        print(" Listenelemente sind Zahlen, Symbole, strings,")
        print(" aber keine Listen")
        print(" ein dictionary enthält (Objekt:Anzahl)-Paare")
        print(" bei Angabe einer Zahl n wird die Menge")
        print(" {1,2,...,n} verwendet")
        print(" k Anzahl Elemente einer Kombination")
        print(" wiederh Wiederholungen von Elementen in einer Kombina-")
        print(" tion möglich (ja/nein)")
        print(" anordn Beachtung der Anordnung/Reihenfolge der Elemen- ")
        print(" te in einer Kombination (ja/nein)\n")
        print("Zusatz k=ja Ausgabe der Kombinationen in Kurzform")
        print(" l=ja Ausgabe der Kombinationen in Listenform")
        print(" f=ja Formeln")
        print(" b=ja Begriffe\n")
        print("Beispiele")
        print("komb( [a, b, c, d], 2, ja, nein)")
        print("komb( { 0:3, 1:2 }, 4, ja, ja, k=ja)")
        print("komb( 5, 2, nein, nein)\n")
        return
    if kwargs.get('b'):
        print("\nMitunter werden Kombinationen mit Berücksichtigung der Anordnung Varia-")
        print("tionen genannt, die ohne Berücksichtigung der Anordnung heißen dann Kom-")
        print("binationen\n")
        return
    try:
        if len(args) != 4:
            raise ZufallError('vier Argumente angeben')
        menge, k, wiederh, anordn = args
        if not isinstance(menge, (list, tuple, set, dict, int, Integer)):
            raise ZufallError('Liste/Tupel/Menge von Elementen oder ganze positive Zahl angeben')
        if isinstance(menge, (list, tuple, set)) and not all(map(lambda x: isinstance(x, \
            (int, Integer, Symbol, str)), menge)):
            # fixed typo in message: "eoder" -> "oder"
            raise ZufallError("Listenelemente können Zahlen, Symbole oder Zeichenketten sein")
        if isinstance(menge, dict):
            # dict input: expand (object: count) pairs into a flat multiset list
            if not all(map(lambda x: isinstance(x, (int, Integer)) and x > 0, menge.values())):
                raise ZufallError("im dictionary als Werte Anzahlen angeben")
            m = []
            for it in menge:
                m += [it for i in range(menge[it])]
            menge = m
        if isinstance(menge, (int, Integer)):
            # integer n means the set {1, ..., n}
            if menge <= 0:
                raise ZufallError('ganze positive Zahl angeben')
            else:
                menge = range(1, menge+1)
        # fixed operator precedence: was `not isinstance(...) and k > 0`,
        # which let invalid integer k slip through
        if not (isinstance(k, (int, Integer)) and k > 0):
            raise ZufallError('für Anzahl Elemente ganze Zahl > 0 angeben')
        if not isinstance(wiederh, bool):
            raise ZufallError('Zulassen Wiederholungen mit ja/mit oder nein/ohne angeben')
        if not isinstance(anordn, bool):
            raise ZufallError('Beachten der Anordnung mit ja/mit oder nein/ohne angeben')
    except ZufallError as e:
        print('zufall:', str(e))
        return
    if kwargs.get('f'):
        # display the counting formula for the selected mode as LaTeX
        print(' ')
        if wiederh and anordn:
            display(Math('Anzahl\; der\; Kombinationen\; mit\; Wiederholungen, \; mit\; Anordnung = n^k'))
        elif wiederh and not anordn:
            display(Math('Anzahl\; der\; Kombinationen\; mit\; Wiederholungen, \; ohne\; Anordnung'))
            display(Math('\\qquad {n+k-1 \\choose k} = \\frac{(k+n-1)!}{k!\,(n-1)!}'))
        elif not wiederh and anordn:
            display(Math('Anzahl\; der\; Kombinationen\; ohne\; Wiederholungen, \; mit\; Anordnung = ' + \
                '\\frac{n!}{(n-k)! }'))
        elif not wiederh and not anordn:
            display(Math('Anzahl\; der\; Kombinationen\; ohne\; Wiederholungen, \; ohne\; Anordnung'))
            display(Math('\\qquad {n \\choose k} = \\frac{n!}{k!\,(n-k)! }'))
        display(Math('n - Anzahl\; der\; Elemente \; der\; Grundgesamtheit'))
        display(Math('k - Anzahl\; der\; ausgewählten \; Elemente'))
        print(' ')
        return
    if not menge:
        return []
    menge = list(menge)
    menge.sort(key=str)
    # enumerate via itertools according to the selected mode
    if not anordn and not wiederh:
        kk = list(combinations(menge, k))
    elif not anordn and wiederh:
        kk = list(combinations_with_replacement(menge, k))
    elif anordn and not wiederh:
        kk = list(permutations(menge, k))
    elif anordn and wiederh:
        kk = list(product(menge, repeat=k))
    kwl = kwargs.get('l')
    kwk = kwargs.get('k')
    n = len(menge)
    if not(kwl or kwk):
        # no listing requested -> return the count only
        if wiederh and anordn:
            return n**k
        elif wiederh and not anordn:
            N = factorial(k+n-1) / (factorial(k) * factorial(n-1))
            return nsimplify(N)
        elif not wiederh and anordn:
            N = factorial(n) / factorial(n-k)
            return nsimplify(N)
        elif not wiederh and not anordn:
            N = factorial(n) / (factorial(k) * factorial(n-k))
            return nsimplify(N)
    if kwl:
        return kk
    elif kwk:
        # kurz_form is defined elsewhere in the package
        return [kurz_form(x) for x in kk]
komb = kombinationen
# -----------
# Variationen
# -----------
def variationen(*args, **kwargs):
    """k-variations from a set of n objects.

    A variation is a combination with regard to order; the work is
    delegated to kombinationen(..., anordn=True).
    """
    if kwargs.get('h'):
        print("\nVariationen - k-Variationen aus einer Menge von n Objekten\n")
        print("Aufruf variationen( menge, k, wiederh )\n")
        print(" menge Liste/Tupel/Menge von Elementen | dictionary |")
        print(" ganze positive Zahl")
        print(" Listenelemente sind Zahlen, Symbole, strings,")
        print(" aber keine Listen")
        print(" ein dictionary enthält (Objekt:Anzahl)-Paare")
        print(" bei Angabe einer Zahl n wird die Menge")
        print(" {1,2,...,n} verwendet")
        print(" k Anzahl Elemente einer Variation")
        print(" wiederh Wiederholungen von Elementen in einer Variation")
        print(" möglich (ja/nein)\n")
        print("Zusatz k=ja Ausgabe der Variationen in Kurzform")
        print(" l=ja Ausgabe der Variationen in Listenform")
        print(" f=ja Formeln")
        print(" b=ja Begriffe\n")
        print("Beispiele")
        print("variationen( [a, b, c, d], 2, ja)")
        print("variationen( { 0:3, 1:2 }, 4, ja, k=ja)")
        print("variationen( 5, 2, nein)\n")
        return
    if kwargs.get('b'):
        print("\nVariationen sind Kombinationen mit Berücksichtigung der Anordnung/Reihenfolge")
        print("der Elemente; wird der Begriff verwendet, heißen Kombinationen nur diejenigen ")
        print("ohne Berücksichtigung der Anordnung\n")
        return
    try:
        if len(args) != 3:
            raise ZufallError('drei Argumente angeben')
        menge, k, wiederh = args
        if not isinstance(menge, (list, tuple, set, dict, int, Integer)):
            raise ZufallError('Liste/Tupel/Menge von Elementen oder ganze positive Zahl angeben')
        if isinstance(menge, (list, tuple, set)) and not all(map(lambda x: isinstance(x, \
            (int, Integer, Symbol, str)), menge)):
            # fixed typo in message: "eoder" -> "oder"
            raise ZufallError("Listenelemente können Zahlen, Symbole oder Zeichenketten sein")
        if isinstance(menge, dict):
            # dict input: expand (object: count) pairs into a flat multiset list
            if not all(map(lambda x: isinstance(x, (int, Integer)) and x > 0, menge.values())):
                raise ZufallError("im dictionary als Werte Anzahlen angeben")
            m = []
            for it in menge:
                m += [it for i in range(menge[it])]
            menge = m
        if isinstance(menge, (int, Integer)):
            # integer n means the set {1, ..., n}
            if menge <= 0:
                raise ZufallError('ganze positive Zahl angeben')
            else:
                menge = range(1, menge+1)
        # fixed operator precedence: was `not isinstance(...) and k > 0`,
        # which let invalid integer k slip through
        if not (isinstance(k, (int, Integer)) and k > 0):
            raise ZufallError('für Anzahl Elemente ganze Zahl > 0 angeben')
        if not isinstance(wiederh, bool):
            raise ZufallError('Zulassen Wiederholungen mit ja/mit oder nein/ohne angeben')
    except ZufallError as e:
        print('zufall:', str(e))
        return
    return kombinationen(menge, k, wiederh, True, **kwargs)
# -------------
# Zufallszahlen
# -------------
def zuf_zahl(*args, **kwargs):
    """Generate integer pseudo-random numbers.

    One or more (lo, hi) ranges plus an optional trailing count.
    w=nein forbids repetitions, s=ja sorts the result.
    """
    if kwargs.get('h'):
        print("\nzuf_zahl - Erzeugung von ganzzahligen Pseudo-Zufallszahlen\n")
        print("Aufruf zuf_zahl( bereich1 /[, bereich2, ... ] /[, anzahl ] )\n")
        print(" bereich Bereichsangabe z.B. (0, 9); [1, 6]")
        print(" anzahl Anzahl der erzeugten Zahlen; Standard = 1\n")
        print("Zusatz w=nein keine Wiederholung von Zahlen; Standard=ja")
        print(" s=ja sortierte Ausgabe mehrerer Zufallszahlen; ")
        print(" Standard=nein\n")
        print("Rückgabe eine einzelne Zahl oder eine Liste mit anzahl Elementen")
        print(" ist die Anzahl der Bereiche > 1, so ist jedes Element ein")
        print(" Tupel, dessen i. Element aus dem i. Bereich ist\n")
        print("Beispiele zuf_zahl( (0, 9) ) - eine Zufallsziffer 0, 1, ... oder 9")
        print(" zuf_zahl( (1, 365), 6, w=nein ) - 6 Tage eines Jahres, ohne")
        print(" Wiederh.")
        print(" zuf_zahl( [0, 1], 3 ) - zur Simulation des 3-maligen Werfens")
        print(" einer Münze")
        print(" zuf_zahl( [1, 6], [1, 6], 100 ) - zur Simulation des 100-ma-")
        print(" ligen Werfens zweier Würfel\n")
        return
    if not args:
        print('zufall: Mindestens ein Argument angeben')
        return
    if not iterable(args[0]):
        print('zufall: Mindestens einen Bereich angeben')
        return
    # a non-iterable last argument is the count; otherwise count defaults to 1
    if iterable(args[-1]):
        anzahl = 1
        bereich = [*args]
    else:
        anzahl = args[-1]
        bereich = [*args[:-1]]
    for ber in bereich:
        if not (iterable(ber) and len(ber) == 2):
            print('zufall: Bereiche der Länge 2 und eventuell Anzahl angeben')
            return
        if not (isinstance(ber[0], (int, Integer)) and isinstance(ber[1], (int, Integer))):
            print('zufall: die Bereichsgrenzen müssen ganzzahlig sein')
            return
        if ber[0] >= ber[1]:
            print('zufall: es muss 1.Bereichsgrenze < 2.Bereichsgranze sein')
            return
    w = kwargs.get('w')
    if w == None:
        w = True      # default: repetitions allowed
    s = kwargs.get('s')
    if anzahl == 1:
        if len(bereich) == 1:
            return randint(*bereich[0])
        else:
            return [randint(*b) for b in bereich]
    if len(bereich) == 1:
        b = bereich[0]
        if w:
            werte = [randint(*b) for i in range(anzahl)]
            return sorted(werte) if s else werte
        if anzahl > len(range(b[0], b[1]+1)):
            print('zufall: es muss Anzahl <= Bereichsgröße sein')
            return
        werte = sample(range(b[0], b[1]+1), anzahl)
        return sorted(werte) if s else werte
    # several ranges -> a list of tuples
    if w:
        # fixed: this branch was dead code (`if w is None:` could never be
        # True because w was defaulted to True above)
        samp = [tuple(randint(*b) for b in bereich) for i in range(anzahl)]
        return sorted(samp) if s else samp
    # w=nein: repetitions forbidden; first check the supply size
    anz = 1
    for b in bereich:
        anz *= b[1] - b[0] + 1
    if anz < anzahl:
        print('zufall: die angegebene Anzahl ist größer als die Vorratsmenge')
        return
    # fixed: distinctness is now actually enforced (previously the tuples
    # could repeat even with w=nein)
    samp, gesehen = [], set()
    while len(samp) < anzahl:
        t = tuple(randint(*b) for b in bereich)
        if t not in gesehen:
            gesehen.add(t)
            samp.append(t)
    return sorted(samp) if s else samp
zufZahl = zuf_zahl
# -------------------------------------
# Anzahl des Vorkommens eines Elementes
# in einer DatenReihe / Liste
# -------------------------------------
def anzahl(*args, **kwargs):
    """Count occurrences of an element in a list / DatenReihe.

    anzahl(liste)        -> number of elements
    anzahl(daten, elem)  -> occurrences of elem in daten
    anzahl(elem)         -> counter function for elem (elem not a list,
                            or el=ja given)
    """
    if kwargs.get('h'):
        print("\nanzahl - Anzahl des Vorkommens eines Elementes in einer DatenReihe /")
        print(" Liste\n")
        print("Aufruf anzahl( daten /[, elem ] )\n")
        print(" daten Liste von Elementen | DatenReihe")
        print(" elem Listen- / Datenelement")
        print(" bei Fehlen wird die Anzahl der Elemente")
        print(" von daten zurückgegeben")
        print(" oder anzahl( elem )\n")
        print(" es wird eine Funktion zurückgegeben, die die Anzahl")
        print(" des Vorkommens des Elementes elem in einer Liste /")
        print(" DatenReihe zählt")
        print(" bei deren Aufruf ist die Liste / DatenReihe als")
        print(" Argument anzugeben; ist elem selbst eine Liste, ist")
        print(" der Zusatz el=ja anzugeben\n")
        print("Beispiele")
        print("anzahl( [ 1, 0, 0, 1, 1, 1 ], 1 ) ergibt 4")
        print("anzahl( [ a, b, c ] ) ergibt 3")
        print("anzahl(sp, W) ergibt die Anzahl der W[appen] in der Stichprobe sp beim")
        print(" Münzwurf-ZufallsExperiment)")
        print("anzahl( el ) ergibt eine Funktion zum Zählen des Elements el")
        print(" anzahl(0)( [0,1,1,0,0] ) ergibt 3")
        print(" ist el eine Liste, wird der Zusatz el=ja angegeben")
        print(" anzahl([a, b], el=ja)( [[a, a], [a, b], [a, c], [a, b]]")
        print(" ergibt 2\n")
        return
    # lazy import to avoid a circular dependency
    dr = importlib.import_module('zufall.lib.objekte.datenreihe')
    DatenReihe = dr.DatenReihe
    if len(args) == 1:
        a = args[0]
        if isinstance(a, list) and not kwargs.get('el'):
            # plain list -> its length
            return len(a)
        elif isinstance(a, DatenReihe):
            return a.n
        else:
            # a is the element to count: return a counter closure
            def fkt(*li):
                liste = li[0]
                if not liste or not isinstance(liste, (list, DatenReihe, \
                    tuple, Tuple)):
                    print('zufall: Liste oder DatenReihe angeben')
                    return
                if isinstance(liste, DatenReihe):
                    liste = liste.daten
                return len([x for x in liste if x == a])
            return fkt
    elif len(args) == 2:
        liste, elem = args
        if not isinstance(liste, (list, DatenReihe)):
            print('zufall: als 1. Argument Liste oder DatenReihe angeben')
            return
        if isinstance(liste, DatenReihe):
            liste = liste.daten
        return len([x for x in liste if x == elem])
    else:
        print('zufall: ein oder zwei Argumente angeben')
        return
# --------------
# Anzahl Treffer
# --------------
def anzahl_treffer(*args, **kwargs):
    """Counting function for hits/successes in a list or DatenReihe.

    Intended only as a ZG function when creating ZufallsGröße objects;
    delegates the actual counting to ``anzahl``.
    """
    if kwargs.get('h'):
        print("\nanzahl_treffer - Anzahl des Treffer\n")
        print("Aufruf anzahl_treffer( treffer )\n")
        print(" treffer Element, das als Treffer / Erfolg angesehen")
        print(" wird (etwa Wappen oder W beim Münzwurf)\n")
        print("Die Funktion ist nur als ZG-Funktion beim Erzeugen von ZufallsGröße-Ob-")
        print("jekten verwendbar\n")
        return
    # exactly one hit element is expected
    if len(args) == 1:
        (treffer,) = args
        # anzahl(elem) returns a counting function for that element
        return anzahl(treffer)
    print('zufall: ein Element als Treffer angeben')

# synonym in camel case
anzahlTreffer = anzahl_treffer
# -----
# Summe
# -----
def summe(*args, **kwargs):
    """Sum of the elements of a list of numbers or a DatenReihe."""
    if kwargs.get('h'):
        print("\nsumme - Summe der Elemente einer Liste mit Daten / DatenReihe\n")
        print("Aufruf summe( daten )\n")
        print(" daten Liste mit Daten | DatenReihe\n")
        print("Synonyme augen_summe, augenSumme\n")
        print("Beispiel")
        print("summe( [ 1, 0, 0, 1, 1, 1 ] ) ergibt 4\n")
        return
    # lazy import of the project class (as in the rest of the module,
    # presumably to avoid circular imports - unchanged here)
    DatenReihe = importlib.import_module(
                     'zufall.lib.objekte.datenreihe').DatenReihe
    if len(args) != 1 or not isinstance(args[0], (list, tuple, Tuple, DatenReihe)):
        print('zufall: Liste oder Datenreihe angeben')
        return
    daten = args[0]
    if isinstance(daten, DatenReihe):
        daten = daten.daten
    # only (sympy or Python) numbers may be summed
    zahl_typen = (int, Integer, Rational, float, Float)
    if not all(isinstance(el, zahl_typen) for el in daten):
        print('zufall: in der Liste nur Zahlen angeben')
        return
    return Add(*daten)

# synonyms
augen_summe = summe
augenSumme = summe
# ------------------
# Allgemeiner solver
# ------------------
def loese(*args, **kwargs):
    """General solver for equations, equation systems and inequalities.

    loese(gleich [, variable]) - gleich is the left-hand side of an
    equation ``ausdruck = 0``, a list of such expressions (system), a
    Vektor (component-wise system), an equation object (_Gleichung) or
    an inequality (recognised by '<' / '>' in its string form);
    variable is a single Symbol or a list/tuple of symbols.

    With ``set=ja`` SymPy's solveset (over the reals) is used instead of
    solve.  A single solution dict is unpacked, an empty result becomes
    ``set()``.  On bad input a message is printed and None returned.
    """
    if kwargs.get("h") == 1:
        print("\nlöse - Funktion\n")
        print("Zum Lösen von Gleichungen sowie von Ungleichungen\n")
        print(" Aufruf löse( gleich /[, variable ] )\n")
        print(" gleich linke Seite einer Gleichung der Form")
        print(" ausdruck = 0 oder Liste mit solchen")
        print(" Elementen (Gleichungssystem)")
        print(" variable einzelne oder Liste von Variablen")
        print(" ausdruck Ausdruck in den Variablen\n")
        print(" oder löse( ungleich /[, variable ] )\n")
        print(" ungleich Ungleichung der Form ausdruck rel ausdruck1")
        print(" rel Relation '<' | '<=' | '>' | '>='\n")
        print("Zusatz set=ja Verwendung von solveset; standardmäßig wird solve ver-")
        print(" wendet (siehe SymPy-Dokumentation)\n")
        print("Beispiele")
        print("löse( 3*x^2 + 5*x - 3 ) - einzelne Gleichung")
        print("löse( 3*x^2 + 5*x - 3, set=ja )")
        print("löse( (1-1/3)^n > 0.01, set=ja ) - Ungleichung")
        print("löse( [2*x-4*y-2, 3*x+5*y+1] ) - Gleichungssystem\n")
        return

    ve = importlib.import_module('agla.lib.objekte.vektor')
    Vektor = ve.Vektor

    def _ergebnis(res):
        # unify solve results: unpack a single solution, map an empty
        # result to set()  (was copy-pasted four times in the original)
        if isinstance(res, list) and len(res) == 1:
            return res[0]
        if not res:
            return set()
        return res

    def _mit_solveset(g, var):
        # solveset over the reals, with or without an explicit variable
        if not var:
            return solveset(g, domain=S.Reals)
        return solveset(g, var, domain=S.Reals)

    def _gleichung_loesen(g, var, se):
        # single expression g, interpreted as the equation g = 0
        if se:
            return _mit_solveset(g, var)
        if not var:
            res = solve(g, dict=True, rational=True)
        else:
            res = solve(g, var, dict=True, rational=True)
        return _ergebnis(res)

    if len(args) == 1:
        gleich = args[0]
        var = []
    elif len(args) == 2:
        gleich = sympify(args[0])
        var = args[1]
    else:
        print('zufall: ein oder zwei Argumente angeben')
        return
    if not type(var) in (Symbol, list, tuple, Tuple):
        print('zufall: einzelne Variable als Symbol, mehrere in einer' +
              ' Liste angeben')
        return
    se = kwargs.get('set')
    if is_zahl(gleich):
        return _gleichung_loesen(gleich, var, se)
    elif isinstance(gleich, _Gleichung):
        # equation object: bring it to the form lhs - rhs = 0
        return _gleichung_loesen(gleich.lhs - gleich.rhs, var, se)
    elif isinstance(gleich, Vektor):
        # vector equation: solve the system of its components
        komp = [gleich.komp[i] for i in range(gleich.dim)]
        if not var:
            res = solve(komp, dict=True, rational=True)
        else:
            res = solve(komp, var, dict=True, rational=True)
        return _ergebnis(res)
    elif isinstance(gleich, (list, tuple, Tuple)):
        # system of equations; NOTE: a single-element result is NOT
        # unpacked here (behavior kept from the original)
        res = solve(gleich, rational=True)
        if not res:
            return set()
        return res
    elif '<' in str(gleich) or '>' in str(gleich):
        # inequality, recognised by its string representation
        if se:
            return _mit_solveset(gleich, var)
        if not var:
            res = solve(gleich)
        else:
            res = solve(gleich, var)
        return _ergebnis(res)
    else:
        print('zufall: linke Seite einer Gleichung oder einer ' +
              'Vektorgleichung oder Gleichungssystem angeben')
# -------------
# Vereinfachung
# -------------
from zufall.lib.objekte.umgebung import UMG
def einfach(*x, **kwargs):
    """Simplify a numeric expression, a Vektor or a Matrix.

    Keyword switches select the SymPy simplifier (rad/trig/num/sign);
    without any keyword, plain simplify is used.
    """
    if kwargs.get('h') == 1:
        print("\neinfach - Funktion\n")
        print("Vereinfachung von Objekten\n")
        print("Aufruf einfach( objekt )\n")
        print(" objekt numm. Ausdruck, Vektor, Matrix\n")
        print("Zusatz rad=ja Einsatz von radsimp")
        print(" trig=ja Einsatz von trigsimp")
        print(" num=ja Einsatz von nsimplify")
        print(" sign=ja Einsatz von signsimp")
        print(" (siehe SymPy-Dokumentation)\n")
        return
    Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor
    if len(x) != 1:
        print('zufall: ein Objekt angeben')
        return
    obj = x[0]
    # simplification can be switched off globally
    if not UMG.SIMPL:
        return obj
    if not (is_zahl(obj) or isinstance(obj, (Vektor, SympyMatrix))):
        print('zufall: nummerischen Wert, Vektor oder Matrix angeben')
        return
    if isinstance(obj, Vektor):
        # simplify component-wise
        return Vektor([einfach(komp, **kwargs) for komp in obj.komp])
    if isinstance(obj, SympyMatrix):
        Matrix = importlib.import_module('zufall.lib.objekte.matrix').Matrix
        # simplify the column vectors one by one
        return Matrix(*[einfach(sp, **kwargs) for sp in obj.vekt])
    # numeric expression: dispatch on the requested simplifier
    if not kwargs:
        return simplify(obj)
    if kwargs.get('rad'):
        return radsimp(obj)
    if kwargs.get('trig'):
        return trigsimp(obj)
    if kwargs.get('num'):
        try:
            return nsimplify(obj)
        except RecursionError:
            # nsimplify may recurse too deeply; fall back to the input
            return obj
    if kwargs.get('sign'):
        return signsimp(obj)
    return obj
# --------------------------
# k-Auswahlen aus n Objekten
# --------------------------
def auswahlen(**kwargs):
    """Overview of the k-selections from n objects.

    Without keywords a LaTeX table of the counting formulas is shown
    (needs the notebook's display/Math); with ``a=ja`` a plain-text
    pseudocode decision algorithm is printed instead.
    """
    if kwargs.get('h'):
        print("\nk-Auswahlen aus n Objekten (Übersicht)\n")
        print("Aufruf auswahlen( )\n")
        print("Zusatz a=ja Algorithmus als Pseudocode\n")
        return
    if not kwargs.get('a'):
        # table of formulas, rendered as LaTeX in the notebook
        dm = lambda x: display(Math(x))
        print(' ')
        dm('\\text{Tabelle der $k$-Auswahlen aus $n$ Objekten}')
        print(' ')
        dm('\\text{Bezeichnung $\\qquad\qquad\\quad$ Eigenschaften \
            $\\qquad\\quad$ Formel $\\qquad\\quad$ Beispiel}')
        # typo fixed: Pakplatzbelegung -> Parkplatzbelegung
        dm('\\text{$k$-Kombination oW mA } \\quad\:\, k \\lt n \\qquad\\quad \
            \\qquad\quad\; \\dfrac{n!}{(n-k)!} \\qquad\\quad\, \\text{Parkplatzbelegung}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\;\, \\text{15 Autos, 6 Plätze}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\; \, \\Rightarrow n=15, k=6')
        dm('\\text{$k$-Kombination mW mA} \\quad\:\, k, n \; \\text{beliebig} \\qquad\\quad \
            \,\quad n^k \\qquad\\qquad\\quad\, \\text{Fußballtoto}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\; \, \\Rightarrow n=3, k=11')
        dm('\\text{$k$-Permutation oW } \\qquad\\quad\:\, \\text{mA; jedes Element} \\quad\:\;\, \
            n! \\qquad\\qquad\\quad\, \\text{Startaufstellung}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\text{wird benutzt} \
            \\qquad\\qquad\\qquad\\qquad\\quad\:\;\; \, \\text{8 Läufer auf 8 Bahnen}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\, k=n \\qquad\\qquad \
            \\qquad\\qquad\\qquad\\quad\:\:\:\; \, \\Rightarrow n=k=8')
        dm('\\text{$k$-Permutation mW } \\qquad\\quad\:\, \\text{mA; jedes Element} \\quad\:\;\, \
            \\dfrac{n!}{n_1!\cdot \dots \cdot n_p}\\quad\, \\text{Anagramm}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\text{wird benutzt} \
            \\qquad\\qquad\\qquad\\qquad\\quad\:\;\; \, \\text{RENNEN}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\, k>n \\qquad\\qquad \
            \\qquad\\qquad\\qquad\\quad\:\:\:\; \, \\Rightarrow p=3,n=6')
        dm('\\text{$k$-Kombination oW oA } \\quad\:\,\:\; k \\lt n \\qquad\\quad \
            \\qquad\quad\;\; \\dfrac{n!}{(n-k)! \cdot k!} \\quad\, \\text{Zahlenlotto}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\;\, \\text{6 aus 49}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\; \, \\Rightarrow n=49, k=6')
        # missing \; inserted before \text{beliebig} (renders a space,
        # matching the parallel "mW mA" line above)
        dm('\\text{$k$-Kombination mW oA } \\quad\:\,\: k, n \; \\text{beliebig} \\qquad\\quad \
            \\quad \;\; {n+k-1 \choose k} \\quad \\text{Flaschenträger}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\;\, \\text{12 Flaschen aus 3 Sorten}')
        dm('\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad\\qquad \
            \\qquad\\qquad\; \, \\Rightarrow n=3, k=12')
        print(' ')
        dm('\\text{(mW/oW - mit/ohne Wiederholung, mA/oA - mit/ohne Anordnung)}')
        print(' ')
        dm('\\text{Oft werden Kombinationen mit Berücksichtigung der Anordnung Variationen genannt}')
        dm('\\text{die ohne Berücksichtigung der Anordnung heißen dann Kombinationen}')
        print(' ')
        return
    # Algorithmus (typo fixed: Berechnug -> Berechnung)
    print(""" \
Algorithmus zur Berechnung der k-Auswahlen aus n Objekten (Python-ähn-
licher Pseudocode)
Analyse der Aufgabenstellung
WENN die Elemente 'angeordnet' sind:
    WENN einzelne Elemente wiederholt werden dürfen:
        WENN jedes Element mindestens einmal benutzt wird:
            Permutation mW mit n > p
            / Aus n > p ergibt sich die Zuordnung von p und n:
            / Die Länge n der Anordnung ist größer als die Größe p
            / der Vorratsmenge ]
        SONST:
            Kombination mW mA
            / Zuordnung von k und n:
            / Das, was 'wiederholt' werden kann, gehört zur Vorrats-
            / menge
    SONST:
        WENN jedes Element genau einmal benutzt wird:
            WENN k = n ist:
                Permutation oW
            SONST:
                ES WURDE ETWAS ÜBERSEHEN
                neu beginnen
        SONST:
            Kombination oW mA mit k < n
            / Aus k < n ergibt sich die Zuordnung von n und k:
            / Die Länge k der Anordnung ist kleiner als die Größe n
            / der Vorratsmenge
SONST:
    WENN Elemente wiederholt werden dürfen:
        Kombination mW oA
        / Zuordnung von n und k:
        / Das, was 'wiederholt' werden kann, gehört zur Vorratsmenge
    SONST:
        Kombination oW oA mit k < n
        / Aus k < n ergibt sich die Zuordnung von n und k:
        / Die Größe k der Teilmenge ist kleiner als die Größe n
        / der Vorratsmenge
Oft werden Kombinationen mit Berücksichtigung der Anordnung Variationen
genannt, die ohne Berücksichtigung der Anordnung heißen dann Kombinationen
Grundlage:
<NAME>
mentor Abiturhilfe
Mathemat<NAME>
Stochastik
mentor Verlag 2000
""")
    return
# ---------------------------------------
# Gesetze der Wahrscheinlichkeitsrechnung
# ---------------------------------------
def gesetze(**kwargs):
    """Display some laws of probability theory (LaTeX in the notebook)."""
    if kwargs.get('h'):
        print("\nEinige Gesetze der Wahrscheinlichkeitsrechnung\n")
        print("Aufruf gesetze( )\n")
        return
    dm = lambda x: display(Math(x))
    print(' ')
    dm('\\text{Einige Gesetze der Wahrscheinlichkeitsrechnung}')
    print(' ')
    dm('\\text{Additionssatz}')
    # formula fixed: P( \cup B) was missing the event A
    dm('\\qquad\\text{Für beliebige Ereignisse} \; A \\text{ und } B \\text{ gilt } P(A \\cup B) = \
        P(A)+P(B)-P(A \\cap B)')
    dm('\\text{Satz von Bayes}')
    dm('\\qquad\\text{Sei } A \\text{ ein Ereignis und } B \\text{ eine Bedingung, \
        unter der das Ereignis betrachtet}')
    dm('\qquad\\text{wird. Dann berechnet sich die Wahrscheinlichkeit } P_B(A) \
        \\text{ für } A \\text{ unter der Be-}')
    # space added inside \text{...}: previously rendered as "Bnach"
    dm('\\qquad \\text{dingung } B \\text{ nach der Formel }\; P_B(A) = \\dfrac{P(A \\cap B)}{P(B)}')
    dm('\\text{Multiplikationssatz}')
    dm('\\qquad\\text{Ist } P(A) \\neq 0 \\text{, so gilt } P(A \\cap B) = P(A) \cdot \
        P_A(B)')
    dm('\\text{Satz von der totalen Wahrscheinlichkeit}')
    dm('\\qquad\\text{Für beliebige Ereignisse }A \\text{ und }B \\text{ gilt } P(B) = \
        P(A \\cap B) + P(\\overline{A} \\cap B) = ')
    dm('\\qquad P(A) \\cdot P_A(B) + P(\\overline{A}) \\cdot P_\\overline{A}(B)')
    dm('\\qquad\\text{oder allgemeiner}')
    dm('\\qquad\\text{Wenn } A_1 \\cup A_2 \\cup \\dots \\cup A_n = \\Omega, \; A_i\\cap A_j = \
        \\emptyset \\text{ für } i,j=1\dots n, i \\neq j \\text{ gilt, dann ist}')
    dm('\\qquad P(B) = \\sum_{i=1}^n P(A_i)\\cdot P_{A_i}(B)')
    dm('\\text{Empirisches Gesetz der großen Zahlen}')
    dm('\\qquad\\text{Bei langen Versuchsreihen, also bei häufiger Wiederholung eines Zufallsex-}')
    dm('\\qquad\\text{perimentes verändern sich die relativen Häufigkeiten eines Ergebnisses in }')
    dm('\\qquad\\text{der Regel nur noch wenig. Sie stabilisieren sich in der Nähe der Wahrschein-}')
    dm('\\qquad\\text{lichkeit des Ergebnisses.}')
    dm('\\text{Bernoullisches Gesetz der großen Zahlen}')
    dm('\\qquad\\text{Gegeben sei ein } n \\text{-stufiges Bernoulli-Experiment mit der Trefferwahrschein-}')
    dm('\\qquad\\text{lichkeit }p. X \\text{ sei die Zufallsgröße \'Anzahl der Treffer\'. Für jedes beliebige}')
    dm('\\qquad\\text{positive } \\epsilon \\text{ gilt dann }\\lim\\limits_{n \\rightarrow \\infty} P \left( \
        \left| \\frac{X}{n} - p \\right| \\le \
        \\epsilon \\right) = 1')
    dm('\\text{Tschebyschew - Ungleichung}')
    dm('\\qquad\\text{Sei } X \\text{ eine beliebige Zufallsgröße mit Erwartungswert } \\mu \\text{ und Standardabwei-}')
    dm('\\qquad\\text{chung }\\sigma. \\text{ Für die Wahrscheinlichkeit, dass } X \\text{ einen Wert annimmt, der um}')
    dm('\\qquad\\text{mindestens } c\; (c \\gt 0) \\text{ vom Erwartungswert abweicht, gilt}')
    dm('\\qquad P\\left(\\left|X - \\mu\\right| \\ge c \\right) \\le \\dfrac{\\sigma^2}{c^2}. \
        \\qquad \\text{Daraus folgt}')
    dm('\\qquad P(\\mu - \\sigma\cdot c \\le X \\le \\mu + \\sigma\\cdot c ) \\ge 1 -\\dfrac{1}{c^2}')
    dm('\\dfrac{1}{\\sqrt{n}} \\text{ - Gesetz}')
    dm('\\qquad X_1, X_2, \\dots , X_n \\text{ seien identisch verteilte unabhängige Zufallsgrößen mit dem }')
    dm('\\qquad\\text{Erwartungswert } \\mu \\text{ und der Standardabweichung } \\sigma. \\text{ Für die Zufallsgröße }')
    dm('\\qquad\\overline{X} = \\dfrac{1}{n} \, (X_1 + X_2 + \\dots + X_n) \ \\text{ gilt dann:} ')
    dm('\\qquad\\text{Sie hat den Erwartungswert } \\mu \\text{ und die Standardabweichung } \
        \\dfrac{\\sigma}{\\sqrt{n}}')
    dm('\\text{Zentraler Grenzwertsatz}')
    dm('\\qquad X_1, X_2, \\dots , X_n \\text{ seien unabhängige Zufallsgrößen. Die Zufallsgröße } \
        \;X = X_1+')
    dm('\\qquad \\dots + X_n \\text{ habe den Erwartungswert } \\mu \\text{ und die Standardabweichung } \\sigma. \
        \\text{Dann}')
    dm('\\qquad\\text{gilt unter gewissen Bedingungen, die fast immer erfüllt sind (insbesondere}')
    dm('\\qquad\\text{für großes } n \\text{): }')
    # typo fixed: nomalverteilt -> normalverteilt
    dm('\\qquad\\text{Die Zufallsgröße $X$ ist näherungsweise normalverteilt mit } \\mu \\text{ und } \\sigma')
    print(' ')
# ------------------
# ja-nein - Funktion
# ------------------
def ja_nein(*args, **kwargs):
    """Evaluate a logical expression to 1 (True) or 0 (False).

    Prints a message and returns None if no single argument is given or
    if the argument's truth value cannot be determined.
    """
    if kwargs.get('h'):
        print("\nja_nein - Bewertung eines logischen Ausdruckes\n")
        print("Aufruf ja_nein( ausdruck )\n")
        print(" ausdruck Ausdruck mit dem Wert True oder False\n")
        print("Rückgabe 1, wenn ausdruck==True")
        print(" 0, wenn ausdruck==False\n")
        return
    if len(args) != 1:
        print('zufall: ein Argument angeben')
        return
    ausdruck = args[0]
    # bool() always returns a bool, so the original check
    # isinstance(bool(ausdruck), bool) could never fail; instead catch
    # objects whose truth value cannot be evaluated (e.g. symbolic
    # relations raise TypeError in bool())
    try:
        wert = bool(ausdruck)
    except (TypeError, ValueError):
        print('zufall: der Ausdruck hat nicht den Wert True oder False')
        return
    return 1 if wert else 0

jaNein = ja_nein
# --------------------------------------------------------
# stochastisch - Test auf stochastische(n) Vektor / Matrix
# --------------------------------------------------------
def stochastisch(*args, **kwargs):
    """Test whether a Vektor / square Matrix is stochastic.

    A vector is stochastic if all components are non-negative and sum
    to 1; a square matrix is stochastic if all its column vectors are.
    """
    if kwargs.get('h'):
        print("\nstochastisch - Test auf stochastische(n) Vektor / Matrix\n")
        print("Aufruf stochastisch( objekt )\n")
        print(" objekt Vektor, Matrix\n")
        print("Ein Vektor ist stochastisch, wenn alle Komponenten in [0, 1] liegen")
        print("und ihre Summe 1 ist\n")
        print("Eine quadratische Matrix ist stochastisch, wenn alle Spaltenvektoren")
        print("stochastisch sind\n")
        return
    if len(args) != 1:
        print('zufall: Vektor oder Matrix angeben')
        return
    obj = args[0]
    Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor
    if not isinstance(obj, (Vektor, SympyMatrix)):
        print('zufall: Vektor oder Matrix angeben')
        return
    if isinstance(obj, Vektor):
        # non-negative components ...
        if not all(k >= 0 for k in obj.komp):
            return False
        # ... summing to exactly 1 (upper bound 1 then follows)
        if sum(obj.komp) != 1:
            return False
        return True
    # matrix case: must be square, every column vector stochastic
    zeilen, spalten = obj.shape
    if zeilen != spalten:
        return False
    return all(
        stochastisch(Vektor(*[obj[z, sp] for z in range(zeilen)]))
        for sp in range(spalten))
# ------------------
# Kurzform für Tupel
# ------------------
def kurz_form(iterable):
    """Short form of a tuple: its elements concatenated into one Symbol.

    Only applicable if all elements are Symbols or all are integers;
    otherwise None is returned.
    """
    elemente = list(iterable)
    nur_symbole = all(isinstance(el, Symbol) for el in elemente)
    nur_zahlen = all(isinstance(el, (int, Integer)) for el in elemente)
    if nur_symbole or nur_zahlen:
        return Symbol(''.join(str(el) for el in elemente))
    return None
# ------------------------------------------
# Erzeugen der Baumstruktur einer Tupelmenge
# ------------------------------------------
def tupel2baum(liste):
    """Build the tree structure (nested lists) of a set of tuples.

    ``liste`` is a list of tuples represented as lists; tuples sharing a
    common head are merged into one branch.  Each (sub)tree starts with
    the marker 'o'; an empty input yields ``[]``.
    """

    def kopf(liste):
        # head of a tuple; None for an empty list, a non-list object
        # counts as its own head
        if not isinstance(liste, list):
            return liste
        if not liste:
            return None
        return liste[0]

    def rest(liste):
        # tuple without its head; [] for non-lists and for lists of
        # length <= 1
        if not isinstance(liste, list):
            return []
        return liste[1:] if len(liste) > 1 else []

    def ibaum(liste):
        # recursive tree construction
        if not liste:
            return []
        rliste = ['o']
        if all(not isinstance(x, list) for x in liste):
            # only leaves left
            rliste += [[x] for x in liste]
            return rliste
        # group the tuples by their heads, sorted by string
        # representation (as before)
        koepfe = sorted({kopf(x) for x in liste if kopf(x) is not None},
                        key=str)
        for nm in koepfe:
            # build the subtree from the remainders of all tuples
            # starting with nm; its 'o' marker is stripped via rest()
            teilbaum = ibaum([rest(x) for x in liste if kopf(x) == nm])
            rliste += [[nm] + rest(teilbaum)]
        return rliste

    return ibaum(liste)
# --------------
# Hilfe-Funktion
# --------------
def Hilfe(**kwargs):
h = kwargs.get('h')
if not h:
h = 1
if h == 1:
print("h=2 - Einleitung")
print("h=3 - Online-Hilfeinformationen")
print("h=4 - Bezeichner")
print("h=5 - Zugriff auf Eigenschaften und Methoden")
print("h=6 - Klassen")
print("h=7 - Funktionen")
print("h=8 - Operatoren")
print("h=9 - Jupyter-Notebook")
print("h=10 - Nutzung von SymPy-Anweisungen")
print("h=11 - Griechische Buchstaben")
print("h=12 - Kleiner Python-Exkurs")
print("h=13 - Bemerkungen für Programmierer/Entwickler")
return
if h == 2:
print(
"""
Einleitung
Python ist ein leistungsfähiger konventioneller Taschenrechner. Durch das CAS
SymPy werden seine Fähigkeiten vor allem um das symbolische Rechnen erwei-
tert. Mit dem Paket zufall sollen Berechnungen auf dem Gebiet der Stochastik
unterstützt werden, wobei es für den Gebrauch in der Schule vorgesehen ist
zufall ist ein Python-Paket und kann innerhalb von Jupyter-Notebooks benutzt
werden
In zufall werden die Objekte der Stochastik, wie Zufallsexperiment, Bernoul-,
likette, Urne, Binomialverteilung usw. mit entsprechenden Python-Klassen dar-
gestellt. Über eine Konstruktor-/Erzeugerfunktion gleichen Namens können In-
stanzen dieser Klassen (Objekte), erzeugt werden. Mit diesen und ihren Eigen-
schaften + Methoden wird dann interaktiv gearbeitet. Weiterhin unterstützen
einige Funktionen die Arbeit
Das Paket basiert auf dem vollständig in Python geschriebenen CAS SymPy und
ist selbst ebenfalls (mit leichten Modifizierungen) in reinem Python ge-
schrieben. Für Grafiken wird das matplotlib-Paket benutzt
Die Programme von zufall werden im Quellcode für die Benutzung bereitgestellt\n
Die Syntax zur Handhabung von zufall ist so gestaltet, dass sie leicht er-
lernbar ist. Es sind nur geringe Python-Kenntnisse sowie Fähigkeiten zum
Bedienen eines Jupyter-Notebooks notwendig
Bei der Arbeit mit zufall kann auf den gesamten Leistungsumfang von Python
zugegriffen werden, der vor allem duch eine Vielzahl weiterer Pakete reali-
siert wird
""")
return
if h == 3:
print(
"""
Erhalten von Hilfe-Informationen
Unter dem Namen Hilfe steht eine Funktion zur Verfügung, über die zentrale
Hilfeinformationen erhalten werden können. Mit der Eingabe
In [..]: Hilfe() oder Hilfe(h=1)
in eine Zelle des Notebooks wird man auf einzelne Seiten geleitet
Weitere Hilfeinformationen können zu jedem zufall-Objekt und zu den Metho-
den eines Objektes gewonnen werden, indem bei der Erzeugung des Objektes
mit Hilfe seiner Erzeugerfunktion oder beim Aufruf der Methode als letzter
Eintrag in der Argumentenliste h=1 geschrieben wird. Man erhält dann unmit-
telbar die gewünschte Information oder wird auf eine andere Hilfeseite ge-
leitet
Analoges gilt für die Funktionen, die von zufall zur Verfügung gestellt wer-
den
Weiterhin ist für jedes Objekt eine Eigenschaft mit dem Namen h (Kurzform
von hilfe) vorhanden, bei deren Aufruf die verfügbaren Eigenschaften und
Methoden aufgelistet werden
Tritt in einer Syntaxdarstellung die Konstruktion /[...] auf, kann die Anga-
be zwischen den eckigen Klammern entfallen. Ein |-Zeichen bedeutet i.A.,
dass zwischen zwei Angaben ausgewählt werden kann
""")
return
if h == 4:
print(
"""
Bezeichner (Namen)
Die erzeugten zufall-Objekte können einem Bezeichner zugewiesen werden,
z.B. wird mit der Anweisung
In [..]: bv = BV(12, 0.3)
dem Bezeichner bv als Wert ein BinomialVerteilung-Objekt zugewiesen ('=' ist
in Python für Zuweisungen vorgesehen)
Ein Bezeichner kann in zufall aus allen Buchstaben des englischen Alphabets,
allen Ziffern 0, 1, ..., 9 und dem Unterstrich '_' bestehen, wobei er mit
einem Buchstaben beginnen muß. Der Name kann beliebig lang sein, es wird
zwischen großen und kleinen Buchstaben unterschieden. Auf diese Art gebil-
deten Namen kann jederzeit ein Objekt (zufall-Objekt oder anderes, z.B. ei-
ne Zahl) zugewiesen werden. Dabei darf es sich nicht um einen geschützten
Namen handeln (s.u.)
Anders verhält es sich bei den 'freien' Bezeichnern, denen unmittelbar kein
Wert zugewiesen wird und die als Variablen oder als Parameter u.a. in Glei-
chungen auftreten. Im Unterschied zu anderen CAS werden in dem von zufall
benutzten SymPy solche Bezeichner nicht einfach durch Hinschreiben erkannt
und akzeptiert, sondern sie müssen explizit als Symbole deklariert werden.
Für Buchstaben und kleine griechische Buchstaben wird das bereits innerhalb
von zufall erledigt, so dass Bezeichner wie r, g, b, A, X usw. jederzeit
frei verwendet werden können. Soll ein freier Bezeichner länger als ein
Zeichen sein, muss er mittels einer entsprechenden SymPy-Anweisung dekla-
riert werden, etwa durch
In [..]: xyz = Symbol('xyz')
Es gibt eine Reihe von Bezeichnern, die in zufall eine feste Bedeutung ha-
ben und nicht anderweitig verwendet werden können, indem sie einen anderen
Wert bekommen. Beim Versuch, einen anderen Wert an einen solchen Bezeichner
zu binden, warnt zufall mit einem Hinweis und verhindert das Überschreiben.
Ebenfalls in das Warnsystem aufgenommen wurden die Elemente der SymPy-Spra-
che, die innerhalb von zufall zur Verfüung des Nutzers gestellt werden\n
Besondere Beachtung erfordern die Bezeichner E N und I, denen Konstanten zu-
gewiesen sind. Sie werden kommentarlos überschrieben werden, wenn ihnen ein
anderer Wert zugewiesen wird
Viele Eigenschaften/Methoden haben synonyme Bezeichner, die folgendermaßen
gebildet werden:
- ein '_' (Unterstrich) innerhalb des Bezeichners einer Eigenschaft oder
Methode wird eliminiert, indem der nächste Buchstabe groß geschrieben
wird, z.B. sch_el -> schEl ('Kamelschreibweise'; Methode
'Scharelement')
- ein '_' am Ende eines Bezeichners wird elimimiert, indem das erste Zei-
chen groß geschrieben wird, z.B: umfang_ -> Umfang (Methode 'Umfang')
In einem zufall-Notebook kann explizit mit anderen Python-Paketen gearbeitet
werden, speziell mit SymPy, von dem einige Anweisungen dem System bereits
bekannt sind. Soll ein weiteres SymPy-Element benutzt werden, z.B. die Funk-
tion ceiling, so ist dieses mit der üblichen import-Anweisung zu importieren
und kann danach aufgerufen werden
In [..]: from sympy import ceiling
...
In [..]: ceiling(3.12) # das Ergebnis ist 4
""")
return
if h == 5:
print(
"""
Zugriff auf Eigenschaften und Methoden von Objekten
Die zufall-Objekte haben verschiedene Eigenschaften und Methoden (die letz-
teren erwarten für ihre Ausführung Argumente - ein weiteres Objekt, einen
Parameterwert o.ä.). Die implementierten Eigenschaften und Methoden eines
Objektes können über seine Hilfeseite wie etwa
In [..]: BV(h=1)
ermittelt werden. Ein BV-Objekt (BV ist der Kurzname von BinomialVerteilung)
hat z.B. die Eigenschaft erw (Erwartungswert) und die Methode P (zur Berech-
nung von Wahrscheinlichkeiten). Der Zugriff erfolgt mittels des '.' - Ope-
rators, der allgemein in der Objektorientierten Programmierung Verwendung
findet. Sei etwa dem Bezeichner bv ein BV-Objekt zugewiesen, etwa mit der
Anweisung
In [..]: bv = BV(50, 1/3))
so sind die Anweisungen für den Zugriff zu seinem Erwartungswert
In [..]: bv.erw
und zu der Methode für die Berechnung von Wahrscheinlichkeiten
In [..]: bv.P(25)
Eine Methode wird generell über einen Funktionsaufruf realisiert, der Argu-
mente erwartet, die in Klammern eingeschlossen werden. Hier wurde das Argu-
ment 25 angegeben, es soll die Wahrscheinlichkeit dafür berechnet werden,
dass eine Zufallsgröße mit der betrachteten Verteilung diesen Wert annimmt
Zu einer Reihe von Eigenschaften existiert eine Methode mit gleichem Namen,
der auf einen Unterstrich '_' endet. Damit besteht die Möglichkeit, mittels
des entsprechenden Funktionsaufrufes zusätzliche Informationen/Leistungen
anzufordern. Welche das sind, kann über die Hilfeanforderung (h=1 als letz-
ter Eintrag in der Argumentliste) erfahren werden. Diese zu Eigenschaften
gehörenden Methoden können auch über den Namen der Eigenschaft mit großem
Anfangsbuchstaben aufgerufen werden, also z.B. für die Eigenschaft erw von
bv
In [..]: bv.erw_(...) oder
In [..]: bv.Erw(...)
Das Ergebnis eines Eigenschafts-/Methodenaufrufes kann ein Tupel oder eine
Liste sein, etwa die Daten einer DatenReihe dr, die als Liste dr.daten vor-
liegen. Um auf ein einzelnes Element zuzugreifen, wird der Indexzugriff ver-
wendet, etwa
In [..]: dr.daten[3]
für das 4. Element der Liste (gemäß der Python-Konvention beginnt die Zählung
bei 0) oder
In [..]: dr.daten[:3]
für den Zugriff auf die ersten 3 Elemente
Wahrscheinlichkeits- und Häufigkeits-Verteilungen und anderes werden als
dictionary bereitgestellt (Schlüssel/Wert-Paare). Hier erfolgt der Zugriff
auf einen einzelnen Wert über den Schlüssel, z.B. bei der Methode vert
(Wahrscheinlichkeitsverteilung) der betrachteten BinomialVerteilung
In [..]: bv.vert[4]
""")
return
if h == 6:
print(
"""
Klassen in zufall
Kurz- Langname
ZE ZufallsExperiment
= ZV ZufallsVersuch
ZG ZufallsGröße
BK BernoulliKette
BV BinomialVerteilung
HGV HyperGeometrischeVerteilung
GLV GleichVerteilung
GV GeometrischeVerteilung
PV PoissonVerteilung\n
NV NormalVerteilung
EV ExponentialVerteilung
DR DatenReihe
EA EreignisAlgebra
VT VierFelderTafel
HB HäufigkeitsBaum
KI KonfidenzIntervall
AT AlternativTest
STP SignifikanzTestP
Urne
Münze
Würfel
Rad GlücksRad
MK MarkoffKette
Roulette
Chuck ChuckALuck
Craps
Toto FussballToto
Lotto
Skat SkatBlatt
Vektor analog zu agla
Matrix analog zu agla
""")
return
if h == 7:
print(
"""
Funktionen in zufall
Allgemeine Funktionen
Hilfe Hilfefunktion
fakultät = fak Fakultät
binomial = B Binomialkoeffizient
perm = permutationen Permutationen
komb = kombinationen Kombinationen
variationen Variationen
auswahlen Berechnung von k-Auswahlen
zuf_zahl = zufZahl Erzeugen von (Pseudo)-Zufallszahlen
anzahl Anzahl des Vorkommens eines Elementes in einer
Liste/DatenReihe
anzahl_treffer Anzahl Treffer in einer Liste
= anzahlTreffer
summe Summe der Elemente einer Liste/DatenReihe
gesetze Einige Gesetze der Wahrscheinlichkeitsrechnung
löse Allgemeiner Gleichungs-/Ungleichungs-Löser
ja_nein = jaNein Bewertung logischer Ausdrücke
stochastisch Test auf stochastische(n) Vektor/Matrix
einfach Vereinfachen von Objekten
ja, nein, mit, ohne, Hilfsgrößen
Ja, Nein, Mit, Ohne für True/False
Mathematische Funktionen
sqrt, exp, log, ln, lg, abs
sin, arcsin (= asin), sing, arcsing (= asing) / ...g:
cos, arccos (= acos), cosg, arccosg (= acosg) / Funktionen
tan, arctan (= atan), tang, arctang (= atang) / mit Grad-
cot, arccot (= acot), cotg, arccotg (= acotg) / werten
sinh, arsinh (= asinh)
cosh, arcosh (= acosh)
tanh, artanh (= atanh)
deg = grad Umrechnung Bogen- in Gradmaß
rad = bog Umrechnung Grad- in Bogenmaß
kug_koord (= kugKoord) Umrechnung in Kugelkoordinaten
min, max - Minimum bzw. Maximum von zwei oder mehr Zahlen
N oder Methode n - Umwandlung SymPy- in Dezimal-Ausdruck
re - Realteil einer komplexen Zahl
im - Imaginärteil einer komplexen Zahl
conjugate (= konjugiert) - Konjugiert-komplexe Zahl
Konstanten
pi - Zahl Pi (3.1415...)
E - Eulersche Zahl e (2.7182...)
I - imaginäre Einheit
ACHTUNG! B, E, N, I sind kommentarlos überschreibbar
""")
return
if h == 8:
print(
"""
Operatoren
Folgende Operatoren stehen zusätzlich zu den Python-Operatoren zur
Verfügung bzw. ersetzen diese
^ Potenzierung; zusätzlich zum Operator **; Umdefinition des
Python-Operators ^
° Skalarprodukt von Vektoren; zusätzlich zum Operator *
| Verkettung von Vektoren; Umdefinition des Python-Operators |
""")
return
if h == 9:
print(
"""
Jupyter-Notebook
+==================================================================+
| Um in einem Notebook mit zufall arbeiten zu können, muss zu |
| Beginn der Sitzung die (Jupyter-) Anweisung |
| |
| In [..]: %run zufall/start |
| |
| in einer Codezelle ausgeführt werden |
+==================================================================+
zufall benutzt als Bedienoberfläche Jupyter. Dieses wurde unter dem
Namen IPython ursprünglich als Entwicklungsumgebung für Python-
Anwendungen bereitgestellt, unterstützt aber inzwischen eine Vielzahl
weiterer Programmiersprachen. Der Name setzt sich aus den Namen von
drei Sprachen zusammen - Julia (eine Sprache, die sehr schnellen Code
erzeugt), Python und R (inzwischen ein leistungsfähiges Statistikpaket)
Ausschlaggebend für die Wahl dieser Plattform war das hier realisierte
Notebook-Konzept, wie es auch in kommerziellen CAS (z.B. Mathematica)
Verwendung findet
Jupyter läuft als lokale Anwendung auf dem Standardbrowser des
Computers, Kern (kernel) ist der Python-Interpreter
Ein Jupyer-Notebook ist in Zellen (cells) unterteilt, wobei drei
Zelltypen auftreten, die hier interessieren:
- Code-Zellen Kennzeichnung: In [..]
In diese Zellen werden Anweisungen in der benutzten
Programmiersprache (hier Python) geschrieben, also auch
Anweisungen zur Benutzung von zufall; die Zellen sind analog zu
einem Texteditor editierbar; beim Ausführen (run) einer solchen
Zelle wird ihr Inhalt an den Python-Interpreter übergeben, der
für seine Verarbeitung sorgt
Eine neue Zelle wird standardmäßig als Code-Zelle erzeugt; die
Umwandlung einer Markdown-Zelle in eine Code-Zelle ist über das
Code-Menü oder die Platzierung des Cursors im vorderen
Zellbereich und Drücken der Y-Taste erreichbar
-Ausgabe-Zellen Kennzeichnung: Out [..]
Die Zellen entstehen, wenn nach der Auswertung einer Codezelle
durch den Python-Interpreter eine Ausgabe erforderlich ist; in
diese Zellen kann der Benutzer nicht direkt schreiben
- Markdown-Zellen Ohne Kennzeichnung
Die Zellen dienen vor allem zur Aufnahme von Texten, wobei diese
mit Markdown- (eine einfache Auszeichnungssprache) oder HTML-
Anweisungen formatiert werden können; sie können auch
mathematische Formeln enthalten (Nutzung von LATEX), außerdem
können in solchen Zellen Grafiken und Bilder dargestellt und/
oder Audio- und Video-Dateien aktiv sein; beim Ausführen einer
solchen Zelle werden eventuell vorhandene Formatierungs-
Anweisungen ausgeführt und der Inhalt auf dem Ausgabemedium
präsentiert
Die Umwandlung einer Code-Zelle in eine Markdown-Zelle ist über
das entsprechende Menü oder die Platzierung des Cursors im
vorderen Zellbereich und Drücken der M-Taste erreichbar
Code- und Markdown-Zellen können beliebig erzeugt, gelöscht, kopiert,
eingefügt und verschoben werden
Es kann zu jeder dieser Zellen gesprungen werden, um sie zu verändern
und/oder erneut auszuführen
In einem Notebook kann in zwei Modi gearbeitet werden
- Editier-Modus: Einschalten mit Enter; oben rechts ist ein Stift
dargestellt
In diesem Modus kann der Inhalt der aktuellen Zelle editiert
werden
Das Editieren einer bestehenden Markdown-Zelle kann auch mit
einem Doppel-Klick eingeleitet werden
- Kommando-Modus: Einschalten mit ESC; der Stift rechts oben fehlt
in diesem Modus können Aktionen durchgeführt werden, die das
Notebook als Ganzes betreffen (Zellen erzeugen/kopieren/
löschen/verschieben, zwischen ihnen navigieren, Dateien öffnen
und speichern usw.)
Wenn der Kern beschäftigt ist, ist der schwarze Kreis rechts oben
gefüllt; auch in dieser Zeit kann editiert werden, die Ausführung
weiterer Zellen kann aber erst erfolgen, wenn der Kern wieder frei
ist
Eine Datei, in die der Inhalt eines Notebooks gespeichert wird,
erhält die Endung .ipynb
Für den Export eines Notebooks, z.B. in das .html- oder .pdf-
Format, ist das separat zu nutzende Werkzeug nbconvert vorgesehen
Die Bedienung eines Notebooks kann über das Menü und/oder über die Tastatur
erfolgen
Einige Tastatur-Kürzel für das Jupyter-Notebook
Umsch+Enter Zelle ausführen, zur nächsten gehen (diese wird even-
tuell neu angefügt)
Strg+Enter Zelle ausführen, in der Zelle verbleiben
Strg+M B Zelle unterhalb einfügen
Strg+M A Zelle oberhalb einfügen
Strg+M DD Zelle löschen (D 2-mal drücken)
Esc X Zelle löschen
Strg+Z Zurücksetzen beim Editieren\n
Esc Einschalten des Kommando-Modus
Enter Einschalten des Editier-Modus
Strg+M H Anzeigen aller Tastatur-Kürzel für die beiden Modi
Ausführen: (z.B. Strg-M B)
Strg-Taste drücken, dann M-Taste, Strg loslassen, dann B-Taste
durch mehrmaliges Drücken der B-Taste können mehrere Zellen eingefügt
werden
""")
return
if h == 10:
print(
"""
Nutzung von SymPy-Anweisungen
In zufall sind folgende Elemente von SymPy integriert:
Symbol, symbols - zur Definition von (mehrstelligen) Bezeichnern
Rational - zur Erzeugung von rationalen Zahlen (wird in zufall weitgehend
automatisch erledigt)
solve, solveset, expand, collect, factor, simplify, nsimplify
N [der Wert ist überschreibbar]
pi - die Kreiszahl
E - die Basis der natürlichen Logarithmen (e) [der Wert ist überschreibbar]
I - die imaginäre Einheit (i) [der Wert ist überschreibbar]
Sollen weitere Elemente benutzt werden, sind diese zu importieren, z.B.
In [..]: from sympy import Piecewise
(eventuell ist der Pfad im SymPy-Verzeichnis-Baum anzugeben)
""")
return
if h == 11:
print("\nGriechische Buchstaben\n")
print("Es werden die kleinen griechischen Buchstaben\n")
print("alpha, beta, gamma, delta, epsilon, zeta, eta, theta, iota, kappa ")
display(Math("\\alpha \qquad \\beta \qquad \\gamma \qquad \\delta \qquad \\epsilon \qquad \
\\zeta \qquad \\eta \qquad \\theta \qquad \\iota \qquad \\kappa "))
print("lamda (Schreibweise!), mu, nu, xi, omicron, pi, rho, sigma, tau ")
display(Math("\\lambda \qquad \\mu \qquad \\nu \qquad \\xi \qquad \\omicron \qquad \\pi \
\qquad \\rho \qquad \\sigma \qquad \\tau"))
print("upsilon, phi, chi, psi, omega\n")
display(Math("\\upsilon \qquad \\phi \qquad \\chi \qquad \\psi \qquad \\omega"))
print("bereitgestellt. Die Namen sind nicht überschreibbar\n")
return
if h == 12:
print(
"""
Kleiner Python-Exkurs
Eingabe von Code (in eine Code-Zelle des Jupyter-Notebooks):
Die Ausführung einer Zelle wird durch Umsch+Enter bzw. Strg+Enter
veranlaßt
Eine Zuweisung (eines Wertes an einen Bezeichner) wird mittels '='
realisiert:
In [..]: a = 4
Der Wert eines Bezeichners kann über eine Abfrage ermittelt werden
In [..]: a
Mehrere Zuweisungen in einer Zeile sind durch ';' zu trennen
In [..]: a = 4; b = 34; c = -8
Mehrere Abfrageanweisungen in einer Zeile sind durch ',' zu trennen
(ein ';' unterdrückt die Anzeige der vorausgehenden Elemente)
In [..]: a, b, c
Eine neue Zeile (innerhalb einer Zelle des Notebooks) wird über die
Enter-Taste erzeugt; in der neuen Zeile ist ab derselben Stelle zu
schreiben wie in der vorangehenden Zeile, wenn nicht ein eingerückter
Block entstehen soll (bzw. wenn nicht durch ein '\\' am Zeilenende ei-
ne Verlängerung der Zeile erreicht werden soll)
Das ist Teil der Python-Syntax und führt bei Nichtbeachten zu einem
Syntaxfehler
Eingerückte Blöcke sind z.B. bei Kontrollstrukturen (vor allem in Pro-
grammen benutzt) erforderlich. Dabei müssen alle Einrückungen die glei-
che Stellenanzahl (standardmäßig 4 Stellen) haben
Bei der if-else-Anweisung sieht das z.B. folgendermaßen aus:
In [..]: if a < 1:
b = 0 # 4 Stellen eingerückt
c = 3 # ebenso
else:
b = 1 # ebenso
oder bei einer Funktions-Definition:
In [..]: def summe(x):
sum = 0
for y in x:
sum += y # weitere Einrückung
return sum
Die Funktion berechnet die Summe der Elemente des Zahlen-Containers
x (eine Liste, ein Tupel oder eine Menge)
Mittels '#' können in Codezellen Kommentare geschrieben werden, sie wer-
den bei der Ausführung ignoriert
Einige Datentypen:
Zeichenkette (string) z.B.: 'Tab23' oder \"Tab23\"#
Tupel (tuple) z.B.:
In [..]: t = ( 1, 2, 3 ); t1 = ( 'a', a, Rational(1, 2), 2.7 )
Zugriff auf Elemente t[0], t1[-1], Slicing (Zählung ab 0)
Liste (list) z.B.:
In [..]: L = [ 1, 2, 3 ]; L1 = [ 'a', a, Rational(1, 2), 2.7 ]
Zugriff auf Elemente L[0], L1[-1], Slicing (Zählung ab 0)
Schlüssel-Wert-Liste (dictionary, dict) z.B.:
In [..]: d = { a:4, b:34, c:-8 }
Zugriff auf Elemente d[a], d[c]
Menge (set) z.B.:
In [..]: m = { a, b, c }; m1 = set() (leere Menge)
Zugriff auf Elemente m.pop(), Indexzugriff mit list(m)[index] mög-
lich
Weitere nützliche Python-Elemente:
Mittels type(obj) kann der Datentyp eines Objektes obj erfragt werden
List-Comprehension
In [..]: tup = 1, 2, 3, 4, 5, 6 # oder anderer Datencontainer
In [..]: [ x^2 for x in tup ] # sehr mächtige Anweisung
Out[..]: [1, 4, 9, 16, 25, 36]
Funktionsdefinition mit anonymer Funktion
lambda arg1, arg2, ... : ausdruck in arg1, arg2, ...
Klasse Rational: da p/q in Python (und damit auch in SymPy) eine float-
Zahl ergibt, kann bei Bedarf eine rationale Zahl Rational(p, q) ver-
wendet werden (in zufall erfolgt das an den meisten Stellen automa-
tisch)
*liste als Argument einer Funktion packt den Container liste aus
Ersetzen des Wertes eines Bezeichners in einem Ausdruck durch
einen anderen Wert (eine SymPy-Anweisung)
ausdruck.subs(bez, wert)
In [..]: (x+y).subs(x, 2)
Out[..]: y+2
Die Ausgabe '<bound method ...>' weist auf eine an ein Objekt gebundene
Methode (eine Funktion) hin, die zu ihrer Ausführung in Klammern ein-
gefasste Parameter erwartet
""")
return
if h == 13:
print(
"""
Bemerkungen für Programmierer / Entwickler
Zur Unterstützung der Fehlersuche ist im Hauptprogramm die Variable _TEST
vorgesehen, die im Quelltext geändert werden kann; bei _TEST = True werden
bei Fehlern die vollständigen Python-Fehlermeldungen angezeigt
Durch das zufall-Paket wird die Python-Sprache an einigen Stellen modifiziert
(Umdefinition der Operatoren '^' und '|', Unterbinden der Zuweisung eines
Wertes an die Eigenschaft/Methode eines Objektes ('objekt.eigenschaft = wert'-
Konstrukt), Verwenden der deutschen Umlaute in Bezeichnern u.a.m.). Bei Ände-
rungen oder Ergänzungen der zufall-Quelltexte dürfen diese Modifizierungen nicht
benutzt werden. Ebenso ist es nicht ratsam, innerhalb eines zufall-Notebooks
eine allgemeine Python-Programmierung durchzuführen
Aus der Sicht des Autors sollten die Schwerpunkte der weiteren Entwicklung
des Paketes sein:
- Konfiguration der Jupyter-Oberfläche entsprechend den Bedürfnissen von
Lehrern und Schülern\n
- Vereinheitlichung der Schriftart und -größe für Ausgaben\n
- Aufnahme weiterer statistischer Tests in das Paket\n
- Gestaltung der EreignisAlgebra-Klasse auf der Basis von logischen Aus-
drücken\n
- Verbesserung der Fehlererkennung und -mitteilung\n
- Bessere Verknüpfung der Dokumentation mit den Programmen\n
- Eventuelle Anpassung an die SymEngine (nach deren Fertigstellung durch
die Entwickler)
""")
return
# ---------------------------------------------------------------
# Boolean aliases for True / False
# ---------------------------------------------------------------
# German-language convenience names so end users can write e.g.
# ``kopf=Ja`` or ``exakt=nein`` in keyword arguments.
Ja = True
ja = True
Mit = True
mit = True
Nein = False
nein = False
Ohne = False
ohne = False
# ---------------------------
# Helper class for equations
# ---------------------------
class _Gleichung(ZufallsObjekt):
    """Equation helper holding a left-hand side and a right-hand side.

    Both sides must be arithmetic expressions (accepted by ``is_zahl``) or
    ``Vektor`` instances.  The arithmetic operators apply the given number
    to *both* sides and return a new equation, mirroring manual equation
    manipulation; on invalid operands they print a message and return
    ``None`` (the package's user-facing error style).
    """

    # Hook for SymPy's printing machinery: render instances via ``_latex``.
    # (The original assigned this as a dead local variable inside __new__,
    # where it had no effect.)
    printmethod = '_latex'

    def __new__(cls, *args):
        """Create an equation from one or two sides; rhs defaults to 0."""
        try:
            if not args:
                raise ZufallError("mindestens die linke Seite der Gleichung angeben")
            if len(args) > 2:
                raise ZufallError("nur die beiden Seiten der Gleichung angeben")
            lhs = args[0]
            rhs = 0
            if len(args) > 1:
                rhs = args[1]
            # Imported lazily to avoid a circular import with the agla package.
            ve = importlib.import_module('agla.lib.objekte.vektor')
            Vektor = ve.Vektor
            if not ((is_zahl(lhs) or isinstance(lhs, Vektor)) and
                    (is_zahl(rhs) or isinstance(rhs, Vektor))):
                raise ZufallError("nur arithmetische Ausdrücke oder Vektoren angeben")
            # BUG FIX: the original called ``ZufallObjekt.__new__`` (missing
            # an 's'), which raises NameError; the declared base class is
            # ``ZufallsObjekt``.
            return ZufallsObjekt.__new__(cls, lhs, rhs)
        except ZufallError as e:
            print('zufall', str(e))
            return

    def __str__(self):
        return str(self.lhs) + " = " + str(self.rhs)

    def __repr__(self):
        return 'gleichung(' + repr(self.lhs) + ',' + repr(self.rhs) + ')'

    def _latex(self, printer):
        """LaTeX representation used by SymPy's printers."""
        return latex(self.lhs) + '=' + latex(self.rhs)

    @property
    def lhs(self):
        """Left-hand side of the equation."""
        return self.args[0]

    @property
    def rhs(self):
        """Right-hand side of the equation."""
        return self.args[1]

    def __mul__(self, other):
        # equation * number: multiply both sides.
        if not is_zahl(other):
            print('zufall: Zahlenwert als Faktor angeben')
            return
        return gleichung(other * self.lhs, other * self.rhs)

    def __rmul__(self, other):
        # number * equation: multiply both sides.
        if not is_zahl(other):
            print('zufall: Zahlenwert als Faktor angeben')
            return
        return gleichung(other * self.lhs, other * self.rhs)

    def __truediv__(self, other):
        # equation / number: divide both sides.
        if not is_zahl(other):
            print('zufall: Zahlenwert angeben')
            return
        return gleichung(self.lhs / other, self.rhs / other)

    def __add__(self, other):
        # equation + number: add to both sides.
        if not is_zahl(other):
            print('zufall: Zahlenwert als Summand angeben')
            return
        return gleichung(other + self.lhs, other + self.rhs)

    def __radd__(self, other):
        # number + equation: add to both sides.
        if not is_zahl(other):
            print('zufall: Zahlenwert als Summand angeben')
            return
        return gleichung(other + self.lhs, other + self.rhs)

    def __neg__(self):
        # -equation: negate both sides.
        return gleichung(-self.lhs, -self.rhs)

    def __pow__(self, other):
        # equation ** number: raise both sides to the given power.
        if not is_zahl(other):
            print('zufall: Zahlenwert als Exponent angeben')
            return
        return gleichung(self.lhs**other, self.rhs**other)

    def __sub__(self, other):
        # equation - number: subtract from both sides.
        if not is_zahl(other):
            print('zufall: Zahlenwert angeben')
            return
        return gleichung(self.lhs - other, self.rhs - other)
| StarcoderdataPython |
3240402 | """
Unit and regression test for the alchemicalitp package.
"""
# Import package, test suite, and other packages as needed
import alchemicalitp
import pytest
import os
from pkg_resources import resource_filename
from tempfile import NamedTemporaryFile
@pytest.fixture
def urea():
    """Parsed urea example topology shared by all tests in this module."""
    return alchemicalitp.top.Topology(filename=resource_filename(__name__, 'example/urea.itp'))
def test_defaults(urea):
    """The [defaults] section is parsed field-for-field."""
    topology = urea
    defaults = topology.content_dict['defaults']
    assert (defaults.nbfunc, defaults.comb_rule, defaults.gen_pairs, defaults.fudgeLJ, defaults.fudgeQQ) == (
        '1', '2', 'yes', '0.5000', '0.8333')
def test_atomtypes(urea):
    """The second [atomtypes] entry is parsed field-for-field."""
    topology = urea
    atomtype = topology.content_dict['atomtypes'][1]
    assert (atomtype.at_num, atomtype.name, atomtype.mass, atomtype.charge, atomtype.ptype, atomtype.sigma, atomtype.epsilon) == (
        'n4', 'n4', '0.00000', '0.00000', 'A', '3.25000e-01', '7.11280e-01')
def test_moleculetype(urea):
    """The [moleculetype] section exposes name and nrexcl."""
    topology = urea
    moleculetype = topology.content_dict['moleculetype']
    assert (moleculetype.name, moleculetype.nrexcl) == ('URE', '3')
def test_atoms(urea):
    """Atoms parse correctly both without (atom 1) and with (atom 7) B-state fields."""
    topology = urea
    # Atom 1: no alchemical B-state, so typeB/chargeB/massB are empty strings.
    atom = topology.content_dict['atoms'][0]
    assert (atom.nr, atom.type, atom.resnr, atom.residue, atom.atom, atom.cgnr, atom.charge, atom.mass,
            atom.typeB, atom.chargeB, atom.massB) == (
        1, 'C', 1, 'URE', 'C', 1, '0.880229', '12.01000', '', '', '')
    # Atom 7: fully specified B-state (perturbed charge).
    atom = topology.content_dict['atoms'][6]
    assert (atom.nr, atom.type, atom.resnr, atom.residue, atom.atom, atom.cgnr, atom.charge, atom.mass,
            atom.typeB, atom.chargeB, atom.massB) == (
        7, 'N', 2, 'E2J', 'N', 7, '-0.516300', '14.0100', 'N', '-0.415700', '14.0100')
def test_pairs(urea):
    """The first [pairs] entry keeps its atom indices and function type."""
    topology = urea
    pair = topology.content_dict['pairs'][0]
    assert (pair.i, pair.j, pair.func) == (1,7,1)
def test_bonds(urea):
    """Bonds parse with no parameters, A-state only, and A+B states respectively."""
    topology = urea
    # No explicit parameters: all parameter fields are empty strings.
    bond = topology.content_dict['bonds'][0]
    assert (bond.i, bond.j, bond.func, bond.b0, bond.kb, bond.b0B, bond.kbB) == (1,2,1,'','','','')
    # A-state parameters only.
    bond = topology.content_dict['bonds'][1]
    assert (bond.i, bond.j, bond.func, bond.b0, bond.kb, bond.b0B, bond.kbB) == (1,3,1,'0.09572','502416.0','','')
    # Both A- and B-state parameters.
    bond = topology.content_dict['bonds'][2]
    assert (bond.i, bond.j, bond.func, bond.b0, bond.kb, bond.b0B, bond.kbB) == (1,4,1,'0.09572','502416.0','0.09572','502416.0')
def test_comment(urea):
    """Comment lines inside a section survive parsing verbatim."""
    topology = urea
    comment = topology.content_dict['angles'][0]
    assert comment.comment == ' i     j       k       funct   angle   force_constant'
def test_angles(urea):
    """Angles parse with no parameters, A-state only, and A+B states respectively."""
    topology = urea
    # No explicit parameters (entry 0 is a comment, so angles start at index 1).
    angle = topology.content_dict['angles'][1]
    assert (angle.i, angle.j, angle.k, angle.func, angle.th0, angle.cth, angle.th0B, angle.cthB) == (
        2, 1, 3, 1, '', '', '', '')
    # A-state parameters only.
    angle = topology.content_dict['angles'][2]
    assert (angle.i, angle.j, angle.k, angle.func, angle.th0, angle.cth, angle.th0B, angle.cthB) == (
        2, 1, 6, 1, '104.52', '628.02', '', '')
    # Both A- and B-state parameters.
    angle = topology.content_dict['angles'][3]
    assert (angle.i, angle.j, angle.k, angle.func, angle.th0, angle.cth, angle.th0B, angle.cthB) == (
        3, 1, 6, 1, '104.52', '628.02', '104.52', '628.02')
def test_dihedrals(urea):
    """Dihedrals parse for function types 9 (proper), 3 (RB) and 4 (improper),
    with empty, A-only, and A+B parameter sets."""
    topology = urea
    # Type 9, no parameters.
    dihedral = topology.content_dict['dihedrals'][1]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func, dihedral.phase, dihedral.kd, dihedral.pn,
            dihedral.phaseB, dihedral.kdB, dihedral.pnB) == (2, 1, 3, 4, 9, '', '', '', '', '', '')
    # Type 9, A-state parameters only.
    dihedral = topology.content_dict['dihedrals'][2]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func, dihedral.phase, dihedral.kd, dihedral.pn,
            dihedral.phaseB, dihedral.kdB, dihedral.pnB) == (2, 1, 3, 5, 9, 180.0, '0.56484', 4, '', '', '')
    # Type 9, both A- and B-state parameters.
    dihedral = topology.content_dict['dihedrals'][3]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func, dihedral.phase, dihedral.kd, dihedral.pn,
            dihedral.phaseB, dihedral.kdB, dihedral.pnB) == (2, 1, 6, 7, 9, 357.2, '1.48473', 3, 180.0, '1.72381', 3)
    # Type 3 (Ryckaert-Bellemans): six C coefficients, A-state only here.
    dihedral = topology.content_dict['dihedrals'][4]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func,
            dihedral.C0, dihedral.C1, dihedral.C2, dihedral.C3, dihedral.C4, dihedral.C5,
            dihedral.C0B, dihedral.C1B, dihedral.C2B, dihedral.C3B, dihedral.C4B, dihedral.C5B) == \
           (2, 1, 6, 8, 3, 3.68192, -4.35136, 0.00000, 1.33888, 0.00000, 0.00000,
            '', '', '', '', '', '')
    # Type 4 (improper), no parameters.
    dihedral = topology.content_dict['dihedrals'][5]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func, dihedral.phase, dihedral.kd, dihedral.pn,
            dihedral.phaseB, dihedral.kdB, dihedral.pnB) == (2, 1, 6, 3, 4, '', '', '', '', '', '')
    # Type 4, A-state parameters only.
    dihedral = topology.content_dict['dihedrals'][6]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func, dihedral.phase, dihedral.kd, dihedral.pn,
            dihedral.phaseB, dihedral.kdB, dihedral.pnB) == (1, 4, 3, 5, 4, 180.0, '0.56484', 4, '', '', '')
    # Type 4, both A- and B-state parameters.
    dihedral = topology.content_dict['dihedrals'][7]
    assert (dihedral.i, dihedral.j, dihedral.k, dihedral.l, dihedral.func, dihedral.phase, dihedral.kd, dihedral.pn,
            dihedral.phaseB, dihedral.kdB, dihedral.pnB) == (1, 7, 6, 8, 4, 357.2, '1.48473', 3, 180.0, '1.72381', 3)
def test_write(urea):
    """Round-trip: writing (default format) and re-reading preserves all sections."""
    topology = urea
    topology.write(resource_filename(__name__, 'test.itp'))
    test_result = alchemicalitp.top.Topology(filename=resource_filename(__name__, 'test.itp'))
    assert topology.content_dict['atoms'] == test_result.content_dict['atoms']
    assert topology.content_dict['bonds'] == test_result.content_dict['bonds']
    assert topology.content_dict['pairs'] == test_result.content_dict['pairs']
    assert topology.content_dict['angles'] == test_result.content_dict['angles']
    assert topology.content_dict['dihedrals'] == test_result.content_dict['dihedrals']
    os.remove(resource_filename(__name__, 'test.itp'))
def test_writeitp(urea):
    """Round-trip in explicit itp format, with and without an explicit filename.

    When no filename is given, the topology writes to a default path
    ('urea.itp' in the working directory).
    """
    topology = urea
    topology.write(resource_filename(__name__, 'test.itp'), format = 'itp')
    test_result = alchemicalitp.top.Topology(filename=resource_filename(__name__, 'test.itp'))
    assert topology.content_dict['atoms'] == test_result.content_dict['atoms']
    assert topology.content_dict['bonds'] == test_result.content_dict['bonds']
    assert topology.content_dict['pairs'] == test_result.content_dict['pairs']
    assert topology.content_dict['angles'] == test_result.content_dict['angles']
    assert topology.content_dict['dihedrals'] == test_result.content_dict['dihedrals']
    os.remove(resource_filename(__name__, 'test.itp'))
    # Same check with the default output filename.
    topology.write(format = 'itp')
    test_result = alchemicalitp.top.Topology(filename='urea.itp')
    assert topology.content_dict['atoms'] == test_result.content_dict['atoms']
    assert topology.content_dict['bonds'] == test_result.content_dict['bonds']
    assert topology.content_dict['pairs'] == test_result.content_dict['pairs']
    assert topology.content_dict['angles'] == test_result.content_dict['angles']
    assert topology.content_dict['dihedrals'] == test_result.content_dict['dihedrals']
    os.remove('urea.itp')
def test_writetop(urea):
    """Round-trip in top format preserves the [defaults] and [atomtypes] sections,
    with and without an explicit filename."""
    topology = urea
    topology.write(resource_filename(__name__, 'test.top'), format = 'top')
    test_result = alchemicalitp.top.Topology(filename=resource_filename(__name__, 'test.top'))
    assert topology.content_dict['defaults'] == test_result.content_dict['defaults']
    assert topology.content_dict['atomtypes'] == test_result.content_dict['atomtypes']
    os.remove(resource_filename(__name__, 'test.top'))
    # Same check with the default output filename ('urea.top').
    topology.write(format='top')
    test_result = alchemicalitp.top.Topology(filename='urea.top')
    assert topology.content_dict['defaults'] == test_result.content_dict['defaults']
    assert topology.content_dict['atomtypes'] == test_result.content_dict['atomtypes']
    os.remove('urea.top')
def test_cmap_IO():
    """CMAP data survives write/read round-trips in both top and itp formats."""
    topology = alchemicalitp.top.Topology(
        filename=resource_filename(__name__, 'cmap/state_A.top'))
    # top format: the [cmaptypes] grid data must keep its size.
    topology.write(resource_filename(__name__, 'test.top'), format='top')
    new = alchemicalitp.top.Topology(filename=resource_filename(__name__, 'test.top'))
    assert len(topology.content_dict['cmaptypes'].content[0].data) == len(
        new.content_dict['cmaptypes'].content[0].data)
    os.remove(resource_filename(__name__, 'test.top'))
    # itp format: the [cmap] entries must keep their count.
    topology.write(resource_filename(__name__, 'test.itp'), format='itp')
    new = alchemicalitp.top.Topology(
        filename=resource_filename(__name__, 'test.itp'))
    assert len(topology.content_dict['cmap'].content) == len(
        new.content_dict['cmap'].content)
| StarcoderdataPython |
#!/usr/bin/env python
# coding=utf-8
"""Packaging script for fastNLP (``pip install .`` / ``python setup.py sdist``)."""
from setuptools import setup, find_packages

with open('README.md', encoding='utf-8') as f:
    readme = f.read()

# Read the license text for completeness; ``setup()`` below only passes the
# short identifier string.  Renamed from ``license`` so the builtin of the
# same name is not shadowed.
with open('LICENSE', encoding='utf-8') as f:
    license_text = f.read()

with open('requirements.txt', encoding='utf-8') as f:
    reqs = f.read()

# Ship only the fastNLP package tree itself (skip tests, docs, etc.).
pkgs = [p for p in find_packages() if p.startswith('fastNLP')]
print(pkgs)

setup(
    name='FastNLP',
    version='0.7.0',
    url='https://gitee.com/fastnlp/fastNLP',
    description='fastNLP: Deep Learning Toolkit for NLP, developed by Fudan FastNLP Team',
    long_description=readme,
    long_description_content_type='text/markdown',
    license='Apache License',
    author='<NAME>',
    python_requires='>=3.6',
    packages=pkgs,
    install_requires=reqs.strip().split('\n'),
)
| StarcoderdataPython |
# Demonstrate the three str case-conversion methods; each sample is printed
# after applying the matching conversion.
for string, convert in (
    ("THIS SHOULD ALL BE LOWERCASE.", str.lower),
    ("this should all be uppercase.", str.upper),
    ("ThIs ShOuLd Be MiXeD cAsEd.", str.swapcase),
):
    print(convert(string))
1703843 | <reponame>Virksaabnavjot/MapperReducer<filename>train/Mapper.py
#!/usr/bin/env python
import sys
# Mapper to return 10 passengers by age groups
# Data source: https://www.kaggle.com/c/titanic/data
# Data header: "PassengerId" "Survived" "Pclass" "Name" "Sex" "Age" "SibSp" "Parch" "Ticket" "Fare" "Cabin" "Embarked"
myList = []
n = 10 # Number of top N records
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split data values into list
data = line.split(",")
# convert age (currently a string) to int
try:
age = int(data[6])
except ValueError:
# ignore/discard this line
continue
# add (age, record) touple to list
myList.append( (age, line) )
# sort list in reverse order
myList.sort(reverse=True)
# keep only first N records
if len(myList) > n:
myList = myList[:n]
# Print top N records
for (k,v) in myList:
print(v) | StarcoderdataPython |
158111 | import copy
from musicscore.basic_functions import flatten
class Tree(object):
    """
    A simple general-purpose tree.

    Every node keeps a list of children, a reference to its parent and an
    arbitrary ``label`` payload.  Node indices (see ``index``/``goto``) are
    1-based per level; ``[0]`` denotes the root.
    """
    def __init__(self, label=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._children = []
        self._up = None
        # NOTE(review): ``_leaves`` is initialised but never read or written
        # anywhere else in this class -- candidate for removal.
        self._leaves = []
        self.label = label

    @property
    def up(self):
        """Parent node, or None for the root."""
        return self._up

    @property
    def previous_sibling(self):
        """Sibling immediately before this node, or None (also None on the root)."""
        try:
            siblings = self.up.get_children()
            index = siblings.index(self)
            if index != 0:
                return siblings[index - 1]
            else:
                return None
        # AttributeError: self.up is None (root); IndexError kept defensively.
        except (IndexError, AttributeError):
            return None

    @property
    def next_sibling(self):
        """Sibling immediately after this node, or None (also None on the root)."""
        try:
            siblings = self.up.get_children()
            index = siblings.index(self)
            if index != len(siblings) - 1:
                return siblings[index + 1]
            else:
                return None
        except (IndexError, AttributeError):
            return None

    @property
    def is_root(self):
        """True if this node has no parent."""
        if self._up is None:
            return True
        else:
            return False

    @property
    def index(self):
        """Path of 1-based child positions from the root to this node; [0] for the root."""
        if self.is_root:
            _index = [0]
        elif self.get_distance() == 1:
            _index = [self.up.get_children().index(self) + 1]
        else:
            _index = self.up.index
            _index.append(self.up.get_children().index(self) + 1)
        return _index

    def get_root(self):
        """Return the root of the tree this node belongs to."""
        if self.is_root:
            return self
        root = self.up
        while not root.is_root:
            root = root.get_root()
        return root

    def get_children(self):
        """Return the (mutable) list of direct children."""
        return self._children

    def add_child(self, child):
        """Append *child*, set its parent to self, and return it."""
        if not isinstance(child, Tree):
            raise TypeError('child must be of type Tree and not {}'.format(type(child)))
        self._children.append(child)
        child._up = self
        return child

    def remove_child(self, child):
        """Remove *child* from the child list; silently ignores non-children."""
        try:
            self._children.remove(child)
        except ValueError:
            # NOTE(review): deliberate best-effort removal -- a missing child
            # is not treated as an error.
            pass

    def remove(self):
        """Remove this node, splicing its children into the parent at its position."""
        if self.is_root:
            raise Exception()
        parent = self.up
        insert_index = self.up._children.index(self)
        self.up._children.remove(self)
        # Reversed iteration + insert at a fixed index preserves the
        # children's original left-to-right order in the parent.
        for child in self.get_children().__reversed__():
            parent.add_child(child)
            new_child = parent._children.pop(-1)
            parent._children.insert(insert_index, new_child)

    def clear_children(self):
        """Drop all children (their parent pointers are left untouched)."""
        self._children.clear()

    def get_branch(self):
        """Return the list of nodes from the root down to (and including) self."""
        output = [self]
        node = self
        while node.up is not None:
            output.append(node.up)
            node = node.up
        output.reverse()
        return output

    def get_common_ancestor(self, *other_nodes):
        """Return the deepest node that lies on the branch of self and all *other_nodes*.

        All nodes must share the same root; otherwise an Exception is raised.
        """
        for other_node in other_nodes:
            if self.get_root() != other_node.get_root():
                raise Exception('{} has not the same root'.format(other_node))
        me_branch = self.get_branch()[:]
        other_branches = [other_node.get_branch()[:] for other_node in other_nodes]
        # Walk own branch bottom-up; the first node present in every other
        # branch is the deepest common ancestor.
        for node in reversed(me_branch):
            node_in_all_branches = True
            for branch in other_branches:
                if node not in branch:
                    node_in_all_branches = False
                    break
            if node_in_all_branches:
                return node

    @property
    def is_leaf(self):
        """True if this node has no children."""
        if len(self.get_children()) == 0:
            return True
        else:
            return False

    def get_leaves(self, key=None):
        """Return the leaves as a nested list mirroring the tree structure.

        If *key* is given it is applied to each leaf.  Use
        ``traverse_leaves`` for a flat iteration.
        """
        output = []
        for child in self.get_children():
            if not child.is_leaf:
                output.append(child.get_leaves(key=key))
            else:
                if key is not None:
                    output.append(key(child))
                else:
                    output.append(child)
        return output

    def traverse(self):
        """Yield self and all descendants in depth-first pre-order."""
        yield self
        for child in self.get_children():
            for grand_child in child.traverse():
                yield grand_child

    def dump(self):
        """Return all nodes of the subtree as a list (pre-order)."""
        output = []
        for node in self.traverse():
            output.append(node)
        return output

    def get_distance(self, reference=None):
        """Number of edges from self up to *reference* (default: the root).

        Returns None if *reference* is not an ancestor of self.
        """
        if reference is None:
            reference = self.get_root()
        if self.is_root:
            return 0
        parent = self.up
        count = 1
        while parent is not reference:
            parent = parent.up
            count += 1
            # Reached the root without meeting the reference: not an ancestor.
            if parent.is_root and parent is not reference:
                return None
        return count

    def get_farthest_leaf(self):
        """Return the leaf with the greatest distance from the root (self if a leaf)."""
        leaves = list(self.traverse_leaves())
        if not leaves:
            leaves = [self]
        return max(leaves, key=lambda leaf: leaf.get_distance())

    def get_layer(self, layer, key=None):
        """Return the nodes at depth *layer* below self.

        Leaves shallower than *layer* are carried down, so the result always
        covers the whole subtree width.  *key* is applied to each node if given.
        """
        if layer == 0:
            output = [self]
        elif layer == 1:
            output = self.get_children()
        else:
            output = []
            for child in self.get_layer(layer - 1):
                if child.is_leaf:
                    output.append(child)
                else:
                    output.extend(child.get_children())
        if key is None:
            return output
        else:
            return [key(child) for child in output]

    def get_siblings(self):
        """Return all other children of this node's parent ([] for the root)."""
        if self.is_root:
            return []
        output = []
        for child in self.up.get_children():
            if child != self:
                output.append(child)
        return output

    def detach(self):
        """Disconnect this node from its parent, making it a root."""
        self.up._children.remove(self)
        self._up = None

    def clone(self, except_nodes=[]):
        """Return a shallow-copied subtree; nodes in *except_nodes* are shared, not copied.

        NOTE(review): mutable default argument; harmless here because it is
        only read, but ``except_nodes=None`` would be the idiomatic form.
        """
        if self not in except_nodes:
            cloned = copy.copy(self)
            cloned._children = []
            for child in self.get_children():
                cloned.add_child(child.clone(except_nodes=except_nodes))
            return cloned
        else:
            return self

    def goto(self, index):
        """Return the node addressed by *index* (as produced by the ``index`` property)."""
        if index == [0]:
            return self
        if len(index) == 1:
            return self.get_children()[index[0] - 1]
        return self.goto(index[:1]).goto(index[1:])

    def replace_node(self, new_node):
        """Replace self with *new_node* in the parent's child list (self keeps its children)."""
        if self.is_root:
            raise Exception('root cannot be replaced')
        else:
            index = self.up.get_children().index(self)
            new_node._up = self._up
            self.up.get_children()[index] = new_node
            self._up = None

    def traverse_leaves(self):
        """Yield all leaves of the subtree left-to-right (flat)."""
        for leaf in flatten(self.get_leaves()):
            yield leaf

    def find_leaf(self, condition):
        """Return the first leaf for which condition(leaf) is True.

        NOTE(review): returns False (not None) when no leaf matches.
        """
        for leaf in self.traverse_leaves():
            if condition(leaf) is True:
                return leaf
        return False

    def __deepcopy__(self, *args, **kwargs):
        """Deep-copy the subtree; attribute values are copied via their own
        ``__deepcopy__`` when available, otherwise shared.

        NOTE(review): nonstandard signature -- ``copy.deepcopy`` passes a memo
        dict positionally, which would be forwarded to the constructor here.
        Apparently intended for direct ``node.__deepcopy__()`` calls; verify
        before using with ``copy.deepcopy``.
        """
        copied = self.__class__(*args, **kwargs)
        for item in self.__dict__.items():
            key = item[0]
            value = item[1]
            if key not in ('_children', '_up'):
                try:
                    new_value = value.__deepcopy__()
                except (AttributeError, TypeError):
                    new_value = value
                copied.__dict__[key] = new_value
        for child in self.get_children():
            copied.add_child(child.__deepcopy__())
        return copied

    def no_child_copy(self, *arguments, **kwargs):
        """Copy this single node (attributes via ``__copy__`` where available),
        without children and without a parent."""
        # Tree.COUNT_COPYING += 1
        # print(Tree.COUNT_COPYING)
        # print(self)
        copied = self.__class__(*arguments, **kwargs)
        for item in self.__dict__.items():
            key = item[0]
            value = item[1]
            if key not in ('_children', '_up'):
                try:
                    new_value = value.__copy__()
                except (AttributeError, TypeError):
                    new_value = value
                copied.__dict__[key] = new_value
        return copied

    @property
    def __next__(self):
        """The leaf following this leaf in the whole tree's left-to-right order.

        NOTE(review): implemented as a *property* named ``__next__`` -- this is
        not the iterator protocol; plain attribute access triggers the walk,
        and StopIteration propagates from the last leaf.
        """
        if not self.is_leaf:
            raise Exception('__next__ is only possible for leaves')
        leave_iterator = self.get_root().traverse_leaves()

        def forward_leaves(leave_iterator):
            # Advance the iterator until it has produced self.
            leaf = leave_iterator.__next__()
            if leaf != self:
                forward_leaves(leave_iterator)
        forward_leaves(leave_iterator)
        return leave_iterator.__next__()
class TreeNode(Tree):
    """Concrete tree node; currently identical to Tree (kept as an extension point)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
| StarcoderdataPython |
103773 | # Copyright 2020 Softwerks LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http
import logging
from typing import List, Mapping, Iterable, Optional, Tuple, Union
import urllib.parse
import aioredis
import websockets
from joust import redis
logger: logging.Logger = logging.getLogger(__name__)
class ServerProtocol(websockets.WebSocketServerProtocol):
    """WebSocket server protocol that authenticates the HTTP upgrade request.

    The client supplies a ``token`` query parameter; it is looked up in Redis
    under ``websocket:<token>`` and must map to a session token, which is then
    stored on the connection.  Unauthenticated requests are rejected with 401
    before the WebSocket handshake completes.
    """

    # Token taken from the ``token`` query parameter of the connection URL.
    auth_token: str
    # Session token resolved from Redis for the authenticated connection.
    session_token: str

    async def process_request(
        self, path: str, request_headers: websockets.http.Headers
    ) -> Optional[
        Tuple[
            http.HTTPStatus,
            Union[
                websockets.http.Headers, Mapping[str, str], Iterable[Tuple[str, str]]
            ],
            bytes,
        ]
    ]:
        """Authenticate the upgrade request.

        Returns a (status, headers, body) triple to abort with 401, or
        delegates to the superclass (returning its result) to proceed with
        the handshake.
        """
        parsed_url: urllib.parse.ParseResult = urllib.parse.urlparse(path)
        # parse_qs maps each parameter name to a *list* of values.
        query_params: dict = urllib.parse.parse_qs(parsed_url.query)
        try:
            self.auth_token = query_params["token"][0]
        except KeyError:
            logger.info(f"Missing credentials: {query_params}")
            return (
                http.HTTPStatus.UNAUTHORIZED,
                [("WWW-Authenticate", "Token")],
                b"Missing credentials\n",
            )
        conn: aioredis.Redis = await redis.get_connection()
        session_token: Optional[str] = await conn.get(
            f"websocket:{self.auth_token}", encoding="utf-8"
        )
        if session_token is None:
            # Token not present in Redis (unknown or expired).
            logger.info(f"Invalid token: {self.auth_token}")
            return (
                http.HTTPStatus.UNAUTHORIZED,
                [("WWW-Authenticate", "Token")],
                b"Invalid credentials\n",
            )
        self.session_token = session_token
        # Authenticated: continue with the normal WebSocket handshake.
        return await super().process_request(path, request_headers)
| StarcoderdataPython |
4832568 | <filename>pyglet/libs/x11/xinput.py<gh_stars>1000+
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 <NAME>
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Wrapper for Xi
Generated with:
tools/genwrappers.py xinput
Do not modify this file.
"""
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('Xi')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
    """Stand-in for C ``void`` so ``POINTER(c_void)`` behaves as a real pointer."""
    # c_void_p is a buggy return type, converting to int, so
    # POINTER(None) == c_void_p is actually written as
    # POINTER(c_void), so it can be treated as a real pointer.
    _fields_ = [('dummy', c_int)]
import pyglet.libs.x11.xlib
# ---------------------------------------------------------------------------
# Wire sizes (in bytes) of the XInput protocol request/reply packets, as
# declared in <X11/extensions/XI.h>.
# ---------------------------------------------------------------------------
sz_xGetExtensionVersionReq = 8  # /usr/include/X11/extensions/XI.h:56
sz_xGetExtensionVersionReply = 32  # /usr/include/X11/extensions/XI.h:57
sz_xListInputDevicesReq = 4  # /usr/include/X11/extensions/XI.h:58
sz_xListInputDevicesReply = 32  # /usr/include/X11/extensions/XI.h:59
sz_xListDevicePropertiesReq = 8  # /usr/include/X11/extensions/XI.h
sz_xListDevicePropertiesReply = 32  # /usr/include/X11/extensions/XI.h
sz_xGetDevicePropertyReq = 24  # /usr/include/X11/extensions/XI.h
sz_xGetDevicePropertyReply = 32  # /usr/include/X11/extensions/XI.h
sz_xOpenDeviceReq = 8  # /usr/include/X11/extensions/XI.h:60
sz_xOpenDeviceReply = 32  # /usr/include/X11/extensions/XI.h:61
sz_xCloseDeviceReq = 8  # /usr/include/X11/extensions/XI.h:62
sz_xSetDeviceModeReq = 8  # /usr/include/X11/extensions/XI.h:63
sz_xSetDeviceModeReply = 32  # /usr/include/X11/extensions/XI.h:64
sz_xSelectExtensionEventReq = 12  # /usr/include/X11/extensions/XI.h:65
sz_xGetSelectedExtensionEventsReq = 8  # /usr/include/X11/extensions/XI.h:66
sz_xGetSelectedExtensionEventsReply = 32  # /usr/include/X11/extensions/XI.h:67
sz_xChangeDeviceDontPropagateListReq = 12  # /usr/include/X11/extensions/XI.h:68
sz_xGetDeviceDontPropagateListReq = 8  # /usr/include/X11/extensions/XI.h:69
sz_xGetDeviceDontPropagateListReply = 32  # /usr/include/X11/extensions/XI.h:70
sz_xGetDeviceMotionEventsReq = 16  # /usr/include/X11/extensions/XI.h:71
sz_xGetDeviceMotionEventsReply = 32  # /usr/include/X11/extensions/XI.h:72
sz_xChangeKeyboardDeviceReq = 8  # /usr/include/X11/extensions/XI.h:73
sz_xChangeKeyboardDeviceReply = 32  # /usr/include/X11/extensions/XI.h:74
sz_xChangePointerDeviceReq = 8  # /usr/include/X11/extensions/XI.h:75
sz_xChangePointerDeviceReply = 32  # /usr/include/X11/extensions/XI.h:76
sz_xGrabDeviceReq = 20  # /usr/include/X11/extensions/XI.h:77
sz_xGrabDeviceReply = 32  # /usr/include/X11/extensions/XI.h:78
sz_xUngrabDeviceReq = 12  # /usr/include/X11/extensions/XI.h:79
sz_xGrabDeviceKeyReq = 20  # /usr/include/X11/extensions/XI.h:80
sz_xGrabDeviceKeyReply = 32  # /usr/include/X11/extensions/XI.h:81
sz_xUngrabDeviceKeyReq = 16  # /usr/include/X11/extensions/XI.h:82
sz_xGrabDeviceButtonReq = 20  # /usr/include/X11/extensions/XI.h:83
sz_xGrabDeviceButtonReply = 32  # /usr/include/X11/extensions/XI.h:84
sz_xUngrabDeviceButtonReq = 16  # /usr/include/X11/extensions/XI.h:85
sz_xAllowDeviceEventsReq = 12  # /usr/include/X11/extensions/XI.h:86
sz_xGetDeviceFocusReq = 8  # /usr/include/X11/extensions/XI.h:87
sz_xGetDeviceFocusReply = 32  # /usr/include/X11/extensions/XI.h:88
sz_xSetDeviceFocusReq = 16  # /usr/include/X11/extensions/XI.h:89
sz_xGetFeedbackControlReq = 8  # /usr/include/X11/extensions/XI.h:90
sz_xGetFeedbackControlReply = 32  # /usr/include/X11/extensions/XI.h:91
sz_xChangeFeedbackControlReq = 12  # /usr/include/X11/extensions/XI.h:92
sz_xGetDeviceKeyMappingReq = 8  # /usr/include/X11/extensions/XI.h:93
sz_xGetDeviceKeyMappingReply = 32  # /usr/include/X11/extensions/XI.h:94
sz_xChangeDeviceKeyMappingReq = 8  # /usr/include/X11/extensions/XI.h:95
sz_xGetDeviceModifierMappingReq = 8  # /usr/include/X11/extensions/XI.h:96
sz_xSetDeviceModifierMappingReq = 8  # /usr/include/X11/extensions/XI.h:97
sz_xSetDeviceModifierMappingReply = 32  # /usr/include/X11/extensions/XI.h:98
sz_xGetDeviceButtonMappingReq = 8  # /usr/include/X11/extensions/XI.h:99
sz_xGetDeviceButtonMappingReply = 32  # /usr/include/X11/extensions/XI.h:100
sz_xSetDeviceButtonMappingReq = 8  # /usr/include/X11/extensions/XI.h:101
sz_xSetDeviceButtonMappingReply = 32  # /usr/include/X11/extensions/XI.h:102
sz_xQueryDeviceStateReq = 8  # /usr/include/X11/extensions/XI.h:103
sz_xQueryDeviceStateReply = 32  # /usr/include/X11/extensions/XI.h:104
sz_xSendExtensionEventReq = 16  # /usr/include/X11/extensions/XI.h:105
sz_xDeviceBellReq = 8  # /usr/include/X11/extensions/XI.h:106
sz_xSetDeviceValuatorsReq = 8  # /usr/include/X11/extensions/XI.h:107
sz_xSetDeviceValuatorsReply = 32  # /usr/include/X11/extensions/XI.h:108
sz_xGetDeviceControlReq = 8  # /usr/include/X11/extensions/XI.h:109
sz_xGetDeviceControlReply = 32  # /usr/include/X11/extensions/XI.h:110
sz_xChangeDeviceControlReq = 8  # /usr/include/X11/extensions/XI.h:111
sz_xChangeDeviceControlReply = 32  # /usr/include/X11/extensions/XI.h:112
# Extension version milestones, used with XGetExtensionVersion checks.
Dont_Check = 0  # /usr/include/X11/extensions/XI.h:135
XInput_Initial_Release = 1  # /usr/include/X11/extensions/XI.h:136
XInput_Add_XDeviceBell = 2  # /usr/include/X11/extensions/XI.h:137
XInput_Add_XSetDeviceValuators = 3  # /usr/include/X11/extensions/XI.h:138
XInput_Add_XChangeDeviceControl = 4  # /usr/include/X11/extensions/XI.h:139
XInput_Add_DevicePresenceNotify = 5  # /usr/include/X11/extensions/XI.h:140
XI_Absent = 0  # /usr/include/X11/extensions/XI.h:142
XI_Present = 1  # /usr/include/X11/extensions/XI.h:143
XI_Initial_Release_Major = 1  # /usr/include/X11/extensions/XI.h:145
XI_Initial_Release_Minor = 0  # /usr/include/X11/extensions/XI.h:146
XI_Add_XDeviceBell_Major = 1  # /usr/include/X11/extensions/XI.h:148
XI_Add_XDeviceBell_Minor = 1  # /usr/include/X11/extensions/XI.h:149
XI_Add_XSetDeviceValuators_Major = 1  # /usr/include/X11/extensions/XI.h:151
XI_Add_XSetDeviceValuators_Minor = 2  # /usr/include/X11/extensions/XI.h:152
XI_Add_XChangeDeviceControl_Major = 1  # /usr/include/X11/extensions/XI.h:154
XI_Add_XChangeDeviceControl_Minor = 3  # /usr/include/X11/extensions/XI.h:155
XI_Add_DevicePresenceNotify_Major = 1  # /usr/include/X11/extensions/XI.h:157
XI_Add_DevicePresenceNotify_Minor = 4  # /usr/include/X11/extensions/XI.h:158
# Device-control identifiers for XGetDeviceControl/XChangeDeviceControl.
DEVICE_RESOLUTION = 1  # /usr/include/X11/extensions/XI.h:160
DEVICE_ABS_CALIB = 2  # /usr/include/X11/extensions/XI.h:161
DEVICE_CORE = 3  # /usr/include/X11/extensions/XI.h:162
DEVICE_ENABLE = 4  # /usr/include/X11/extensions/XI.h:163
DEVICE_ABS_AREA = 5  # /usr/include/X11/extensions/XI.h:164
NoSuchExtension = 1  # /usr/include/X11/extensions/XI.h:166
COUNT = 0  # /usr/include/X11/extensions/XI.h:168
CREATE = 1  # /usr/include/X11/extensions/XI.h:169
# Device-use / device-role constants reported in XDeviceInfo.use.
NewPointer = 0  # /usr/include/X11/extensions/XI.h:171
NewKeyboard = 1  # /usr/include/X11/extensions/XI.h:172
XPOINTER = 0  # /usr/include/X11/extensions/XI.h:174
XKEYBOARD = 1  # /usr/include/X11/extensions/XI.h:175
UseXKeyboard = 255  # /usr/include/X11/extensions/XI.h:177
IsXPointer = 0  # /usr/include/X11/extensions/XI.h:179
IsXKeyboard = 1  # /usr/include/X11/extensions/XI.h:180
IsXExtensionDevice = 2  # /usr/include/X11/extensions/XI.h:181
IsXExtensionKeyboard = 3  # /usr/include/X11/extensions/XI.h:182
IsXExtensionPointer = 4  # /usr/include/X11/extensions/XI.h:183
# Event-mode arguments for XAllowDeviceEvents.
AsyncThisDevice = 0  # /usr/include/X11/extensions/XI.h:185
SyncThisDevice = 1  # /usr/include/X11/extensions/XI.h:186
ReplayThisDevice = 2  # /usr/include/X11/extensions/XI.h:187
AsyncOtherDevices = 3  # /usr/include/X11/extensions/XI.h:188
AsyncAll = 4  # /usr/include/X11/extensions/XI.h:189
SyncAll = 5  # /usr/include/X11/extensions/XI.h:190
FollowKeyboard = 3  # /usr/include/X11/extensions/XI.h:192
RevertToFollowKeyboard = 3  # /usr/include/X11/extensions/XI.h:194
# Bit masks selecting which feedback-control members are valid.
DvAccelNum = 1  # /usr/include/X11/extensions/XI.h:197
DvAccelDenom = 2  # /usr/include/X11/extensions/XI.h:198
DvThreshold = 4  # /usr/include/X11/extensions/XI.h:199
DvKeyClickPercent = 1  # /usr/include/X11/extensions/XI.h:201
DvPercent = 2  # /usr/include/X11/extensions/XI.h:202
DvPitch = 4  # /usr/include/X11/extensions/XI.h:203
DvDuration = 8  # /usr/include/X11/extensions/XI.h:204
DvLed = 16  # /usr/include/X11/extensions/XI.h:205
DvLedMode = 32  # /usr/include/X11/extensions/XI.h:206
DvKey = 64  # /usr/include/X11/extensions/XI.h:207
DvAutoRepeatMode = 128  # /usr/include/X11/extensions/XI.h:208
DvString = 1  # /usr/include/X11/extensions/XI.h:210
DvInteger = 1  # /usr/include/X11/extensions/XI.h:212
# Device mode (XSetDeviceMode) and proximity values.
DeviceMode = 1  # /usr/include/X11/extensions/XI.h:214
Relative = 0  # /usr/include/X11/extensions/XI.h:215
Absolute = 1  # /usr/include/X11/extensions/XI.h:216
ProximityState = 2  # /usr/include/X11/extensions/XI.h:218
InProximity = 0  # /usr/include/X11/extensions/XI.h:219
OutOfProximity = 2  # /usr/include/X11/extensions/XI.h:220
AddToList = 0  # /usr/include/X11/extensions/XI.h:222
DeleteFromList = 1  # /usr/include/X11/extensions/XI.h:223
# Input class identifiers (value of the 'class' field in class records).
KeyClass = 0  # /usr/include/X11/extensions/XI.h:225
ButtonClass = 1  # /usr/include/X11/extensions/XI.h:226
ValuatorClass = 2  # /usr/include/X11/extensions/XI.h:227
FeedbackClass = 3  # /usr/include/X11/extensions/XI.h:228
ProximityClass = 4  # /usr/include/X11/extensions/XI.h:229
FocusClass = 5  # /usr/include/X11/extensions/XI.h:230
OtherClass = 6  # /usr/include/X11/extensions/XI.h:231
KbdFeedbackClass = 0  # /usr/include/X11/extensions/XI.h:233
PtrFeedbackClass = 1  # /usr/include/X11/extensions/XI.h:234
StringFeedbackClass = 2  # /usr/include/X11/extensions/XI.h:235
IntegerFeedbackClass = 3  # /usr/include/X11/extensions/XI.h:236
LedFeedbackClass = 4  # /usr/include/X11/extensions/XI.h:237
BellFeedbackClass = 5  # /usr/include/X11/extensions/XI.h:238
# Per-device event indices used to build XEventClass selections.
_devicePointerMotionHint = 0  # /usr/include/X11/extensions/XI.h:240
_deviceButton1Motion = 1  # /usr/include/X11/extensions/XI.h:241
_deviceButton2Motion = 2  # /usr/include/X11/extensions/XI.h:242
_deviceButton3Motion = 3  # /usr/include/X11/extensions/XI.h:243
_deviceButton4Motion = 4  # /usr/include/X11/extensions/XI.h:244
_deviceButton5Motion = 5  # /usr/include/X11/extensions/XI.h:245
_deviceButtonMotion = 6  # /usr/include/X11/extensions/XI.h:246
_deviceButtonGrab = 7  # /usr/include/X11/extensions/XI.h:247
_deviceOwnerGrabButton = 8  # /usr/include/X11/extensions/XI.h:248
_noExtensionEvent = 9  # /usr/include/X11/extensions/XI.h:249
_devicePresence = 0  # /usr/include/X11/extensions/XI.h:251
# 'devchange' values delivered in XDevicePresenceNotifyEvent.
DeviceAdded = 0  # /usr/include/X11/extensions/XI.h:253
DeviceRemoved = 1  # /usr/include/X11/extensions/XI.h:254
DeviceEnabled = 2  # /usr/include/X11/extensions/XI.h:255
DeviceDisabled = 3  # /usr/include/X11/extensions/XI.h:256
DeviceUnrecoverable = 4  # /usr/include/X11/extensions/XI.h:257
# XInput-specific protocol error codes.
XI_BadDevice = 0  # /usr/include/X11/extensions/XI.h:259
XI_BadEvent = 1  # /usr/include/X11/extensions/XI.h:260
XI_BadMode = 2  # /usr/include/X11/extensions/XI.h:261
XI_DeviceBusy = 3  # /usr/include/X11/extensions/XI.h:262
XI_BadClass = 4  # /usr/include/X11/extensions/XI.h:263
XEventClass = c_ulong  # /usr/include/X11/extensions/XI.h:272
# Mirrors struct XExtensionVersion from <X11/extensions/XI.h>:
# the reply payload of XGetExtensionVersion.
class struct_anon_93(Structure):
    # __slots__ mirrors the field names so instances reject stray attributes.
    __slots__ = ['present', 'major_version', 'minor_version']

struct_anon_93._fields_ = [
    ('present', c_int),          # XI_Present or XI_Absent
    ('major_version', c_short),
    ('minor_version', c_short),
]

XExtensionVersion = struct_anon_93  # /usr/include/X11/extensions/XI.h:285
# Relative event offsets within a device's event_type_base; combined with
# XDevice.classes[i].event_type_base to form concrete event type codes.
_deviceKeyPress = 0  # /usr/include/X11/extensions/XInput.h:4902
_deviceKeyRelease = 1  # /usr/include/X11/extensions/XInput.h:4903
_deviceButtonPress = 0  # /usr/include/X11/extensions/XInput.h:4905
_deviceButtonRelease = 1  # /usr/include/X11/extensions/XInput.h:4906
_deviceMotionNotify = 0  # /usr/include/X11/extensions/XInput.h:4908
_deviceFocusIn = 0  # /usr/include/X11/extensions/XInput.h:4910
_deviceFocusOut = 1  # /usr/include/X11/extensions/XInput.h:4911
_proximityIn = 0  # /usr/include/X11/extensions/XInput.h:4913
_proximityOut = 1  # /usr/include/X11/extensions/XInput.h:4914
_deviceStateNotify = 0  # /usr/include/X11/extensions/XInput.h:4916
_deviceMappingNotify = 1  # /usr/include/X11/extensions/XInput.h:4917
_changeDeviceNotify = 2  # /usr/include/X11/extensions/XInput.h:4918
# struct XDeviceKeyEvent: key press/release from an extension input device.
# _fields_ is attached after the class statement (generated-code convention
# used throughout this file).
class struct_anon_94(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'root',
    'subwindow',
    'time',
    'x',
    'y',
    'x_root',
    'y_root',
    'state',
    'keycode',
    'same_screen',
    'device_state',
    'axes_count',
    'first_axis',
    'axis_data',
    ]
# Re-export the core Xlib types the event structures below are built from.
Display = pyglet.libs.x11.xlib.Display
Window = pyglet.libs.x11.xlib.Window
XID = pyglet.libs.x11.xlib.XID
Time = pyglet.libs.x11.xlib.Time
struct_anon_94._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('root', Window),
    ('subwindow', Window),
    ('time', Time),
    ('x', c_int),
    ('y', c_int),
    ('x_root', c_int),
    ('y_root', c_int),
    ('state', c_uint),
    ('keycode', c_uint),
    ('same_screen', c_int),
    ('device_state', c_uint),
    ('axes_count', c_ubyte),
    ('first_axis', c_ubyte),
    ('axis_data', c_int * 6),
]
XDeviceKeyEvent = struct_anon_94  # /usr/include/X11/extensions/XInput.h:5043
XDeviceKeyPressedEvent = XDeviceKeyEvent  # /usr/include/X11/extensions/XInput.h:5045
XDeviceKeyReleasedEvent = XDeviceKeyEvent  # /usr/include/X11/extensions/XInput.h:5046
# struct XDeviceButtonEvent: button press/release from an extension device.
# Identical layout to XDeviceKeyEvent except 'button' replaces 'keycode'.
class struct_anon_95(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'root',
    'subwindow',
    'time',
    'x',
    'y',
    'x_root',
    'y_root',
    'state',
    'button',
    'same_screen',
    'device_state',
    'axes_count',
    'first_axis',
    'axis_data',
    ]
struct_anon_95._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('root', Window),
    ('subwindow', Window),
    ('time', Time),
    ('x', c_int),
    ('y', c_int),
    ('x_root', c_int),
    ('y_root', c_int),
    ('state', c_uint),
    ('button', c_uint),
    ('same_screen', c_int),
    ('device_state', c_uint),
    ('axes_count', c_ubyte),
    ('first_axis', c_ubyte),
    ('axis_data', c_int * 6),
]
XDeviceButtonEvent = struct_anon_95  # /usr/include/X11/extensions/XInput.h:5075
XDeviceButtonPressedEvent = XDeviceButtonEvent  # /usr/include/X11/extensions/XInput.h:5077
XDeviceButtonReleasedEvent = XDeviceButtonEvent  # /usr/include/X11/extensions/XInput.h:5078
# struct XDeviceMotionEvent: pointer/valuator motion from an extension
# device; 'is_hint' signals motion-hint compression.
class struct_anon_96(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'root',
    'subwindow',
    'time',
    'x',
    'y',
    'x_root',
    'y_root',
    'state',
    'is_hint',
    'same_screen',
    'device_state',
    'axes_count',
    'first_axis',
    'axis_data',
    ]
struct_anon_96._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('root', Window),
    ('subwindow', Window),
    ('time', Time),
    ('x', c_int),
    ('y', c_int),
    ('x_root', c_int),
    ('y_root', c_int),
    ('state', c_uint),
    ('is_hint', c_char),
    ('same_screen', c_int),
    ('device_state', c_uint),
    ('axes_count', c_ubyte),
    ('first_axis', c_ubyte),
    ('axis_data', c_int * 6),
]
XDeviceMotionEvent = struct_anon_96  # /usr/include/X11/extensions/XInput.h:5108
# struct XDeviceFocusChangeEvent: focus-in/focus-out for an extension device.
class struct_anon_97(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'mode',
    'detail',
    'time',
    ]
struct_anon_97._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('mode', c_int),
    ('detail', c_int),
    ('time', Time),
]
XDeviceFocusChangeEvent = struct_anon_97  # /usr/include/X11/extensions/XInput.h:5133
XDeviceFocusInEvent = XDeviceFocusChangeEvent  # /usr/include/X11/extensions/XInput.h:5135
XDeviceFocusOutEvent = XDeviceFocusChangeEvent  # /usr/include/X11/extensions/XInput.h:5136
# struct XProximityNotifyEvent: proximity-in/out (e.g. stylus near tablet).
class struct_anon_98(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'root',
    'subwindow',
    'time',
    'x',
    'y',
    'x_root',
    'y_root',
    'state',
    'same_screen',
    'device_state',
    'axes_count',
    'first_axis',
    'axis_data',
    ]
struct_anon_98._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('root', Window),
    ('subwindow', Window),
    ('time', Time),
    ('x', c_int),
    ('y', c_int),
    ('x_root', c_int),
    ('y_root', c_int),
    ('state', c_uint),
    ('same_screen', c_int),
    ('device_state', c_uint),
    ('axes_count', c_ubyte),
    ('first_axis', c_ubyte),
    ('axis_data', c_int * 6),
]
XProximityNotifyEvent = struct_anon_98  # /usr/include/X11/extensions/XInput.h:5164
XProximityInEvent = XProximityNotifyEvent  # /usr/include/X11/extensions/XInput.h:5165
XProximityOutEvent = XProximityNotifyEvent  # /usr/include/X11/extensions/XInput.h:5166
# Mirrors struct XInputClass: the two-byte header shared by every
# per-class record in a device-state reply.  'class' is a Python keyword,
# so _fields_ is attached after the class statement and the attribute is
# read back via getattr(obj, 'class').
class struct_anon_99(Structure):
    __slots__ = ['class', 'length']

struct_anon_99._fields_ = [
    ('class', c_ubyte),   # input class identifier (KeyClass, ButtonClass, ...)
    ('length', c_ubyte),  # byte length of the full class record
]

XInputClass = struct_anon_99  # /usr/include/X11/extensions/XInput.h:5183
# struct XDeviceStateNotifyEvent: device state snapshot; 'data' is a raw
# 64-byte buffer holding packed per-class records.
class struct_anon_100(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'time',
    'num_classes',
    'data',
    ]
struct_anon_100._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('time', Time),
    ('num_classes', c_int),
    ('data', c_char * 64),
]
XDeviceStateNotifyEvent = struct_anon_100  # /usr/include/X11/extensions/XInput.h:5195
# Per-class status records (XInput.h:5207-5229).  'class' is a Python
# keyword, so every _fields_ list is attached after the class statement;
# read that attribute back via getattr(obj, 'class').

class struct_anon_101(Structure):
    """Valuator status: values of up to six axes, plus the device mode."""
    __slots__ = ['class', 'length', 'num_valuators', 'mode', 'valuators']

struct_anon_101._fields_ = [
    ('class', c_ubyte),
    ('length', c_ubyte),
    ('num_valuators', c_ubyte),
    ('mode', c_ubyte),        # Absolute or Relative
    ('valuators', c_int * 6),
]

XValuatorStatus = struct_anon_101  # /usr/include/X11/extensions/XInput.h:5207

class struct_anon_102(Structure):
    """Key status: fixed 32-byte key state array."""
    __slots__ = ['class', 'length', 'num_keys', 'keys']

struct_anon_102._fields_ = [
    ('class', c_ubyte),
    ('length', c_ubyte),
    ('num_keys', c_short),
    ('keys', c_char * 32),
]

XKeyStatus = struct_anon_102  # /usr/include/X11/extensions/XInput.h:5218

class struct_anon_103(Structure):
    """Button status: fixed 32-byte button state array."""
    __slots__ = ['class', 'length', 'num_buttons', 'buttons']

struct_anon_103._fields_ = [
    ('class', c_ubyte),
    ('length', c_ubyte),
    ('num_buttons', c_short),
    ('buttons', c_char * 32),
]

XButtonStatus = struct_anon_103  # /usr/include/X11/extensions/XInput.h:5229
# struct XDeviceMappingEvent: a device's key/modifier/button mapping changed.
class struct_anon_104(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'time',
    'request',
    'first_keycode',
    'count',
    ]
struct_anon_104._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('time', Time),
    ('request', c_int),
    ('first_keycode', c_int),
    ('count', c_int),
]
XDeviceMappingEvent = struct_anon_104  # /usr/include/X11/extensions/XInput.h:5250
# struct XChangeDeviceNotifyEvent: the core pointer/keyboard was swapped
# with an extension device.
class struct_anon_105(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'deviceid',
    'time',
    'request',
    ]
struct_anon_105._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('deviceid', XID),
    ('time', Time),
    ('request', c_int),
]
XChangeDeviceNotifyEvent = struct_anon_105  # /usr/include/X11/extensions/XInput.h:5268
# struct XDevicePresenceNotifyEvent: a device was added/removed/enabled/
# disabled; 'devchange' holds one of the Device* constants above.
class struct_anon_106(Structure):
    __slots__ = [
    'type',
    'serial',
    'send_event',
    'display',
    'window',
    'time',
    'devchange',
    'deviceid',
    'control',
    ]
struct_anon_106._fields_ = [
    ('type', c_int),
    ('serial', c_ulong),
    ('send_event', c_int),
    ('display', POINTER(Display)),
    ('window', Window),
    ('time', Time),
    ('devchange', c_int),
    ('deviceid', XID),
    ('control', XID),
]
XDevicePresenceNotifyEvent = struct_anon_106  # /usr/include/X11/extensions/XInput.h:5293
# Feedback *state* structures returned by XGetFeedbackControl.  Each starts
# with the generic (class, length, id) header of XFeedbackState.
class struct_anon_107(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    ]
struct_anon_107._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
]
XFeedbackState = struct_anon_107  # /usr/include/X11/extensions/XInput.h:5311
# struct XKbdFeedbackState: keyboard feedback (click, bell, LEDs, repeat).
class struct_anon_108(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'click',
    'percent',
    'pitch',
    'duration',
    'led_mask',
    'global_auto_repeat',
    'auto_repeats',
    ]
struct_anon_108._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('click', c_int),
    ('percent', c_int),
    ('pitch', c_int),
    ('duration', c_int),
    ('led_mask', c_int),
    ('global_auto_repeat', c_int),
    ('auto_repeats', c_char * 32),
]
XKbdFeedbackState = struct_anon_108  # /usr/include/X11/extensions/XInput.h:5328
# struct XPtrFeedbackState: pointer acceleration and threshold.
class struct_anon_109(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'accelNum',
    'accelDenom',
    'threshold',
    ]
struct_anon_109._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('accelNum', c_int),
    ('accelDenom', c_int),
    ('threshold', c_int),
]
XPtrFeedbackState = struct_anon_109  # /usr/include/X11/extensions/XInput.h:5341
# struct XIntegerFeedbackState: range of an integer feedback display.
class struct_anon_110(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'resolution',
    'minVal',
    'maxVal',
    ]
struct_anon_110._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('resolution', c_int),
    ('minVal', c_int),
    ('maxVal', c_int),
]
XIntegerFeedbackState = struct_anon_110  # /usr/include/X11/extensions/XInput.h:5354
# struct XStringFeedbackState: keysyms supported by a string feedback.
class struct_anon_111(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'max_symbols',
    'num_syms_supported',
    'syms_supported',
    ]
KeySym = pyglet.libs.x11.xlib.KeySym
struct_anon_111._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('max_symbols', c_int),
    ('num_syms_supported', c_int),
    ('syms_supported', POINTER(KeySym)),
]
XStringFeedbackState = struct_anon_111  # /usr/include/X11/extensions/XInput.h:5367
# struct XBellFeedbackState: bell volume/pitch/duration.
class struct_anon_112(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'percent',
    'pitch',
    'duration',
    ]
struct_anon_112._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('percent', c_int),
    ('pitch', c_int),
    ('duration', c_int),
]
XBellFeedbackState = struct_anon_112  # /usr/include/X11/extensions/XInput.h:5380
# struct XLedFeedbackState: LED values and mask.
class struct_anon_113(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'led_values',
    'led_mask',
    ]
struct_anon_113._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('led_values', c_int),
    ('led_mask', c_int),
]
XLedFeedbackState = struct_anon_113  # /usr/include/X11/extensions/XInput.h:5392
# Generic header for the feedback *control* structures passed to
# XChangeFeedbackControl.
class struct_anon_114(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    ]
struct_anon_114._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
]
XFeedbackControl = struct_anon_114  # /usr/include/X11/extensions/XInput.h:5402
# struct XPtrFeedbackControl: set pointer acceleration/threshold.
class struct_anon_115(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'accelNum',
    'accelDenom',
    'threshold',
    ]
struct_anon_115._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('accelNum', c_int),
    ('accelDenom', c_int),
    ('threshold', c_int),
]
XPtrFeedbackControl = struct_anon_115  # /usr/include/X11/extensions/XInput.h:5415
# struct XKbdFeedbackControl: set keyboard click/bell/LED/auto-repeat;
# which members apply is selected by the Dv* masks defined above.
class struct_anon_116(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'click',
    'percent',
    'pitch',
    'duration',
    'led_mask',
    'led_value',
    'key',
    'auto_repeat_mode',
    ]
struct_anon_116._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('click', c_int),
    ('percent', c_int),
    ('pitch', c_int),
    ('duration', c_int),
    ('led_mask', c_int),
    ('led_value', c_int),
    ('key', c_int),
    ('auto_repeat_mode', c_int),
]
XKbdFeedbackControl = struct_anon_116  # /usr/include/X11/extensions/XInput.h:5433
# struct XStringFeedbackControl: keysyms to display on a string feedback.
class struct_anon_117(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'num_keysyms',
    'syms_to_display',
    ]
struct_anon_117._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('num_keysyms', c_int),
    ('syms_to_display', POINTER(KeySym)),
]
XStringFeedbackControl = struct_anon_117  # /usr/include/X11/extensions/XInput.h:5445
# struct XIntegerFeedbackControl: integer to display on the feedback.
class struct_anon_118(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'int_to_display',
    ]
struct_anon_118._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('int_to_display', c_int),
]
XIntegerFeedbackControl = struct_anon_118  # /usr/include/X11/extensions/XInput.h:5456
# struct XBellFeedbackControl: set bell volume/pitch/duration.
class struct_anon_119(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'percent',
    'pitch',
    'duration',
    ]
struct_anon_119._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('percent', c_int),
    ('pitch', c_int),
    ('duration', c_int),
]
XBellFeedbackControl = struct_anon_119  # /usr/include/X11/extensions/XInput.h:5469
# struct XLedFeedbackControl: set LED values via mask.
class struct_anon_120(Structure):
    __slots__ = [
    'class',
    'length',
    'id',
    'led_mask',
    'led_values',
    ]
struct_anon_120._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('id', XID),
    ('led_mask', c_int),
    ('led_values', c_int),
]
XLedFeedbackControl = struct_anon_120  # /usr/include/X11/extensions/XInput.h:5481
# Generic header for device-control structures (XChangeDeviceControl);
# 'control' holds one of the DEVICE_* constants defined above.
class struct_anon_121(Structure):
    __slots__ = [
    'control',
    'length',
    ]
struct_anon_121._fields_ = [
    ('control', XID),
    ('length', c_int),
]
XDeviceControl = struct_anon_121  # /usr/include/X11/extensions/XInput.h:5492
# Concrete device control/state structures.  Each *Control variant is sent
# with XChangeDeviceControl; the matching *State variant is returned by
# XGetDeviceControl.
# struct XDeviceResolutionControl: set valuator resolutions.
class struct_anon_122(Structure):
    __slots__ = [
    'control',
    'length',
    'first_valuator',
    'num_valuators',
    'resolutions',
    ]
struct_anon_122._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('first_valuator', c_int),
    ('num_valuators', c_int),
    ('resolutions', POINTER(c_int)),
]
XDeviceResolutionControl = struct_anon_122  # /usr/include/X11/extensions/XInput.h:5500
# struct XDeviceResolutionState: current/min/max valuator resolutions.
class struct_anon_123(Structure):
    __slots__ = [
    'control',
    'length',
    'num_valuators',
    'resolutions',
    'min_resolutions',
    'max_resolutions',
    ]
struct_anon_123._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('num_valuators', c_int),
    ('resolutions', POINTER(c_int)),
    ('min_resolutions', POINTER(c_int)),
    ('max_resolutions', POINTER(c_int)),
]
XDeviceResolutionState = struct_anon_123  # /usr/include/X11/extensions/XInput.h:5509
# struct XDeviceAbsCalibControl: absolute-axis calibration parameters.
class struct_anon_124(Structure):
    __slots__ = [
    'control',
    'length',
    'min_x',
    'max_x',
    'min_y',
    'max_y',
    'flip_x',
    'flip_y',
    'rotation',
    'button_threshold',
    ]
struct_anon_124._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('min_x', c_int),
    ('max_x', c_int),
    ('min_y', c_int),
    ('max_y', c_int),
    ('flip_x', c_int),
    ('flip_y', c_int),
    ('rotation', c_int),
    ('button_threshold', c_int),
]
XDeviceAbsCalibControl = struct_anon_124  # /usr/include/X11/extensions/XInput.h:5522
# struct XDeviceAbsCalibState: identical layout to the control variant
# (the C header declares both names for the same struct).
class struct_anon_125(Structure):
    __slots__ = [
    'control',
    'length',
    'min_x',
    'max_x',
    'min_y',
    'max_y',
    'flip_x',
    'flip_y',
    'rotation',
    'button_threshold',
    ]
struct_anon_125._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('min_x', c_int),
    ('max_x', c_int),
    ('min_y', c_int),
    ('max_y', c_int),
    ('flip_x', c_int),
    ('flip_y', c_int),
    ('rotation', c_int),
    ('button_threshold', c_int),
]
XDeviceAbsCalibState = struct_anon_125  # /usr/include/X11/extensions/XInput.h:5522
# struct XDeviceAbsAreaControl: map an absolute device to a screen area.
class struct_anon_126(Structure):
    __slots__ = [
    'control',
    'length',
    'offset_x',
    'offset_y',
    'width',
    'height',
    'screen',
    'following',
    ]
struct_anon_126._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('offset_x', c_int),
    ('offset_y', c_int),
    ('width', c_int),
    ('height', c_int),
    ('screen', c_int),
    ('following', XID),
]
XDeviceAbsAreaControl = struct_anon_126  # /usr/include/X11/extensions/XInput.h:5533
# struct XDeviceAbsAreaState: identical layout to the control variant.
class struct_anon_127(Structure):
    __slots__ = [
    'control',
    'length',
    'offset_x',
    'offset_y',
    'width',
    'height',
    'screen',
    'following',
    ]
struct_anon_127._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('offset_x', c_int),
    ('offset_y', c_int),
    ('width', c_int),
    ('height', c_int),
    ('screen', c_int),
    ('following', XID),
]
XDeviceAbsAreaState = struct_anon_127  # /usr/include/X11/extensions/XInput.h:5533
# struct XDeviceCoreControl: attach/detach a device to the core pointer.
class struct_anon_128(Structure):
    __slots__ = [
    'control',
    'length',
    'status',
    ]
struct_anon_128._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('status', c_int),
]
XDeviceCoreControl = struct_anon_128  # /usr/include/X11/extensions/XInput.h:5539
# struct XDeviceCoreState: core-attachment status plus 'iscore' flag.
class struct_anon_129(Structure):
    __slots__ = [
    'control',
    'length',
    'status',
    'iscore',
    ]
struct_anon_129._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('status', c_int),
    ('iscore', c_int),
]
XDeviceCoreState = struct_anon_129  # /usr/include/X11/extensions/XInput.h:5546
# struct XDeviceEnableControl / XDeviceEnableState: enable flag only.
class struct_anon_130(Structure):
    __slots__ = [
    'control',
    'length',
    'enable',
    ]
struct_anon_130._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('enable', c_int),
]
XDeviceEnableControl = struct_anon_130  # /usr/include/X11/extensions/XInput.h:5552
class struct_anon_131(Structure):
    __slots__ = [
    'control',
    'length',
    'enable',
    ]
struct_anon_131._fields_ = [
    ('control', XID),
    ('length', c_int),
    ('enable', c_int),
]
XDeviceEnableState = struct_anon_131  # /usr/include/X11/extensions/XInput.h:5552
# Generated forward-declaration pattern: each named struct is first defined
# as an opaque placeholder (twice), a POINTER alias is taken, and then the
# name is redefined with the real field layout.  NOTE(review): the pointer
# alias (e.g. XAnyClassPtr) was created from the earlier opaque class
# object, not the final one that carries _fields_.
class struct__XAnyClassinfo(Structure):
    __slots__ = [
    ]
struct__XAnyClassinfo._fields_ = [
    ('_opaque_struct', c_int)
]
class struct__XAnyClassinfo(Structure):
    __slots__ = [
    ]
struct__XAnyClassinfo._fields_ = [
    ('_opaque_struct', c_int)
]
XAnyClassPtr = POINTER(struct__XAnyClassinfo)  # /usr/include/X11/extensions/XInput.h:5564
# Real layout: the (class, length) header common to all input class infos.
class struct__XAnyClassinfo(Structure):
    __slots__ = [
    'class',
    'length',
    ]
struct__XAnyClassinfo._fields_ = [
    ('class', XID),
    ('length', c_int),
]
XAnyClassInfo = struct__XAnyClassinfo  # /usr/include/X11/extensions/XInput.h:5573
class struct__XDeviceInfo(Structure):
    __slots__ = [
    ]
struct__XDeviceInfo._fields_ = [
    ('_opaque_struct', c_int)
]
class struct__XDeviceInfo(Structure):
    __slots__ = [
    ]
struct__XDeviceInfo._fields_ = [
    ('_opaque_struct', c_int)
]
XDeviceInfoPtr = POINTER(struct__XDeviceInfo)  # /usr/include/X11/extensions/XInput.h:5575
# struct XDeviceInfo: one entry of the XListInputDevices result.
class struct__XDeviceInfo(Structure):
    __slots__ = [
    'id',
    'type',
    'name',
    'num_classes',
    'use',
    'inputclassinfo',
    ]
Atom = pyglet.libs.x11.xlib.Atom
struct__XDeviceInfo._fields_ = [
    ('id', XID),
    ('type', Atom),
    ('name', c_char_p),
    ('num_classes', c_int),
    ('use', c_int),
    ('inputclassinfo', XAnyClassPtr),
]
XDeviceInfo = struct__XDeviceInfo  # /usr/include/X11/extensions/XInput.h:5585
# Same forward-declaration pattern as above for the per-class info structs
# reachable through XDeviceInfo.inputclassinfo.
class struct__XKeyInfo(Structure):
    __slots__ = [
    ]
struct__XKeyInfo._fields_ = [
    ('_opaque_struct', c_int)
]
class struct__XKeyInfo(Structure):
    __slots__ = [
    ]
struct__XKeyInfo._fields_ = [
    ('_opaque_struct', c_int)
]
XKeyInfoPtr = POINTER(struct__XKeyInfo)  # /usr/include/X11/extensions/XInput.h:5587
# struct XKeyInfo: keycode range of a device's key class.
class struct__XKeyInfo(Structure):
    __slots__ = [
    'class',
    'length',
    'min_keycode',
    'max_keycode',
    'num_keys',
    ]
struct__XKeyInfo._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('min_keycode', c_ushort),
    ('max_keycode', c_ushort),
    ('num_keys', c_ushort),
]
XKeyInfo = struct__XKeyInfo  # /usr/include/X11/extensions/XInput.h:5600
class struct__XButtonInfo(Structure):
    __slots__ = [
    ]
struct__XButtonInfo._fields_ = [
    ('_opaque_struct', c_int)
]
class struct__XButtonInfo(Structure):
    __slots__ = [
    ]
struct__XButtonInfo._fields_ = [
    ('_opaque_struct', c_int)
]
XButtonInfoPtr = POINTER(struct__XButtonInfo)  # /usr/include/X11/extensions/XInput.h:5602
# struct XButtonInfo: button count of a device's button class.
class struct__XButtonInfo(Structure):
    __slots__ = [
    'class',
    'length',
    'num_buttons',
    ]
struct__XButtonInfo._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('num_buttons', c_short),
]
XButtonInfo = struct__XButtonInfo  # /usr/include/X11/extensions/XInput.h:5612
class struct__XAxisInfo(Structure):
    __slots__ = [
    ]
struct__XAxisInfo._fields_ = [
    ('_opaque_struct', c_int)
]
class struct__XAxisInfo(Structure):
    __slots__ = [
    ]
struct__XAxisInfo._fields_ = [
    ('_opaque_struct', c_int)
]
XAxisInfoPtr = POINTER(struct__XAxisInfo)  # /usr/include/X11/extensions/XInput.h:5614
# struct XAxisInfo: resolution and value range of one valuator axis.
class struct__XAxisInfo(Structure):
    __slots__ = [
    'resolution',
    'min_value',
    'max_value',
    ]
struct__XAxisInfo._fields_ = [
    ('resolution', c_int),
    ('min_value', c_int),
    ('max_value', c_int),
]
XAxisInfo = struct__XAxisInfo  # /usr/include/X11/extensions/XInput.h:5620
class struct__XValuatorInfo(Structure):
    __slots__ = [
    ]
struct__XValuatorInfo._fields_ = [
    ('_opaque_struct', c_int)
]
class struct__XValuatorInfo(Structure):
    __slots__ = [
    ]
struct__XValuatorInfo._fields_ = [
    ('_opaque_struct', c_int)
]
XValuatorInfoPtr = POINTER(struct__XValuatorInfo)  # /usr/include/X11/extensions/XInput.h:5622
# struct XValuatorInfo: axis count/mode plus a pointer to XAxisInfo records.
class struct__XValuatorInfo(Structure):
    __slots__ = [
    'class',
    'length',
    'num_axes',
    'mode',
    'motion_buffer',
    'axes',
    ]
struct__XValuatorInfo._fields_ = [
    ('class', XID),
    ('length', c_int),
    ('num_axes', c_ubyte),
    ('mode', c_ubyte),
    ('motion_buffer', c_ulong),
    ('axes', XAxisInfoPtr),
]
XValuatorInfo = struct__XValuatorInfo  # /usr/include/X11/extensions/XInput.h:5636
# Mirrors struct XInputClassInfo: one (input class, event type base) pair
# stored in an opened XDevice's 'classes' array.
class struct_anon_132(Structure):
    __slots__ = ['input_class', 'event_type_base']

struct_anon_132._fields_ = [
    ('input_class', c_ubyte),
    ('event_type_base', c_ubyte),
]

XInputClassInfo = struct_anon_132  # /usr/include/X11/extensions/XInput.h:5653
# struct XDevice: handle returned by XOpenDevice.
class struct_anon_133(Structure):
    __slots__ = [
    'device_id',
    'num_classes',
    'classes',
    ]
struct_anon_133._fields_ = [
    ('device_id', XID),
    ('num_classes', c_int),
    ('classes', POINTER(XInputClassInfo)),
]
XDevice = struct_anon_133  # /usr/include/X11/extensions/XInput.h:5659
# struct XEventList: (event class, device) pair used when selecting events.
class struct_anon_134(Structure):
    __slots__ = [
    'event_type',
    'device',
    ]
struct_anon_134._fields_ = [
    ('event_type', XEventClass),
    ('device', XID),
]
XEventList = struct_anon_134  # /usr/include/X11/extensions/XInput.h:5672
# struct XDeviceTimeCoord: one motion-history sample (timestamp + axis data).
class struct_anon_135(Structure):
    __slots__ = [
    'time',
    'data',
    ]
struct_anon_135._fields_ = [
    ('time', Time),
    ('data', POINTER(c_int)),
]
XDeviceTimeCoord = struct_anon_135  # /usr/include/X11/extensions/XInput.h:5685
# struct XDeviceState: XQueryDeviceState result; 'data' points at packed
# per-class records headed by XInputClass.
class struct_anon_136(Structure):
    __slots__ = [
    'device_id',
    'num_classes',
    'data',
    ]
struct_anon_136._fields_ = [
    ('device_id', XID),
    ('num_classes', c_int),
    ('data', POINTER(XInputClass)),
]
XDeviceState = struct_anon_136  # /usr/include/X11/extensions/XInput.h:5699
# XValuatorState: per-class valuator state ('class' is the C member name,
# hence the string entries).
class struct_anon_137(Structure):
    __slots__ = [
        'class',
        'length',
        'num_valuators',
        'mode',
        'valuators',
    ]
struct_anon_137._fields_ = [
    ('class', c_ubyte),
    ('length', c_ubyte),
    ('num_valuators', c_ubyte),
    ('mode', c_ubyte),
    ('valuators', POINTER(c_int)),
]

XValuatorState = struct_anon_137 	# /usr/include/X11/extensions/XInput.h:5722
# XKeyState: per-class key state; 'keys' is a fixed 32-byte bitmask.
class struct_anon_138(Structure):
    __slots__ = [
        'class',
        'length',
        'num_keys',
        'keys',
    ]
struct_anon_138._fields_ = [
    ('class', c_ubyte),
    ('length', c_ubyte),
    ('num_keys', c_short),
    ('keys', c_char * 32),
]

XKeyState = struct_anon_138 	# /usr/include/X11/extensions/XInput.h:5733
# XButtonState: per-class button state; 'buttons' is a fixed 32-byte bitmask.
class struct_anon_139(Structure):
    __slots__ = [
        'class',
        'length',
        'num_buttons',
        'buttons',
    ]
struct_anon_139._fields_ = [
    ('class', c_ubyte),
    ('length', c_ubyte),
    ('num_buttons', c_short),
    ('buttons', c_char * 32),
]

XButtonState = struct_anon_139 	# /usr/include/X11/extensions/XInput.h:5744
# ---------------------------------------------------------------------------
# Function prototypes for the XInput C API.  Each entry binds a symbol from
# the shared library (_lib) and declares its ctypes restype/argtypes so calls
# are type-checked and arguments converted.  The header-line comments are the
# generator's record of where each prototype came from.
# ---------------------------------------------------------------------------
# /usr/include/X11/extensions/XInput.h:5754
XChangeKeyboardDevice = _lib.XChangeKeyboardDevice
XChangeKeyboardDevice.restype = c_int
XChangeKeyboardDevice.argtypes = [POINTER(Display), POINTER(XDevice)]

# /usr/include/X11/extensions/XInput.h:5759
XChangePointerDevice = _lib.XChangePointerDevice
XChangePointerDevice.restype = c_int
XChangePointerDevice.argtypes = [POINTER(Display), POINTER(XDevice), c_int, c_int]

# /usr/include/X11/extensions/XInput.h:5766
XGrabDevice = _lib.XGrabDevice
XGrabDevice.restype = c_int
XGrabDevice.argtypes = [POINTER(Display), POINTER(XDevice), Window, c_int, c_int, POINTER(XEventClass), c_int, c_int, Time]

# /usr/include/X11/extensions/XInput.h:5778
XUngrabDevice = _lib.XUngrabDevice
XUngrabDevice.restype = c_int
XUngrabDevice.argtypes = [POINTER(Display), POINTER(XDevice), Time]

# /usr/include/X11/extensions/XInput.h:5784
XGrabDeviceKey = _lib.XGrabDeviceKey
XGrabDeviceKey.restype = c_int
XGrabDeviceKey.argtypes = [POINTER(Display), POINTER(XDevice), c_uint, c_uint, POINTER(XDevice), Window, c_int, c_uint, POINTER(XEventClass), c_int, c_int]

# /usr/include/X11/extensions/XInput.h:5798
XUngrabDeviceKey = _lib.XUngrabDeviceKey
XUngrabDeviceKey.restype = c_int
XUngrabDeviceKey.argtypes = [POINTER(Display), POINTER(XDevice), c_uint, c_uint, POINTER(XDevice), Window]

# /usr/include/X11/extensions/XInput.h:5807
XGrabDeviceButton = _lib.XGrabDeviceButton
XGrabDeviceButton.restype = c_int
XGrabDeviceButton.argtypes = [POINTER(Display), POINTER(XDevice), c_uint, c_uint, POINTER(XDevice), Window, c_int, c_uint, POINTER(XEventClass), c_int, c_int]

# /usr/include/X11/extensions/XInput.h:5821
XUngrabDeviceButton = _lib.XUngrabDeviceButton
XUngrabDeviceButton.restype = c_int
XUngrabDeviceButton.argtypes = [POINTER(Display), POINTER(XDevice), c_uint, c_uint, POINTER(XDevice), Window]

# /usr/include/X11/extensions/XInput.h:5830
XAllowDeviceEvents = _lib.XAllowDeviceEvents
XAllowDeviceEvents.restype = c_int
XAllowDeviceEvents.argtypes = [POINTER(Display), POINTER(XDevice), c_int, Time]

# /usr/include/X11/extensions/XInput.h:5837
XGetDeviceFocus = _lib.XGetDeviceFocus
XGetDeviceFocus.restype = c_int
XGetDeviceFocus.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(Window), POINTER(c_int), POINTER(Time)]

# /usr/include/X11/extensions/XInput.h:5845
XSetDeviceFocus = _lib.XSetDeviceFocus
XSetDeviceFocus.restype = c_int
XSetDeviceFocus.argtypes = [POINTER(Display), POINTER(XDevice), Window, c_int, Time]

# /usr/include/X11/extensions/XInput.h:5853
XGetFeedbackControl = _lib.XGetFeedbackControl
XGetFeedbackControl.restype = POINTER(XFeedbackState)
XGetFeedbackControl.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(c_int)]

# /usr/include/X11/extensions/XInput.h:5859
XFreeFeedbackList = _lib.XFreeFeedbackList
XFreeFeedbackList.restype = None
XFreeFeedbackList.argtypes = [POINTER(XFeedbackState)]

# /usr/include/X11/extensions/XInput.h:5863
XChangeFeedbackControl = _lib.XChangeFeedbackControl
XChangeFeedbackControl.restype = c_int
XChangeFeedbackControl.argtypes = [POINTER(Display), POINTER(XDevice), c_ulong, POINTER(XFeedbackControl)]

# /usr/include/X11/extensions/XInput.h:5870
XDeviceBell = _lib.XDeviceBell
XDeviceBell.restype = c_int
XDeviceBell.argtypes = [POINTER(Display), POINTER(XDevice), XID, XID, c_int]

# Re-exported from xlib so the prototypes below can reference it.
KeyCode = pyglet.libs.x11.xlib.KeyCode
# /usr/include/X11/extensions/XInput.h:5878
XGetDeviceKeyMapping = _lib.XGetDeviceKeyMapping
XGetDeviceKeyMapping.restype = POINTER(KeySym)
XGetDeviceKeyMapping.argtypes = [POINTER(Display), POINTER(XDevice), KeyCode, c_int, POINTER(c_int)]

# /usr/include/X11/extensions/XInput.h:5890
XChangeDeviceKeyMapping = _lib.XChangeDeviceKeyMapping
XChangeDeviceKeyMapping.restype = c_int
XChangeDeviceKeyMapping.argtypes = [POINTER(Display), POINTER(XDevice), c_int, c_int, POINTER(KeySym), c_int]

# Re-exported from xlib so the prototypes below can reference it.
XModifierKeymap = pyglet.libs.x11.xlib.XModifierKeymap
# /usr/include/X11/extensions/XInput.h:5899
XGetDeviceModifierMapping = _lib.XGetDeviceModifierMapping
XGetDeviceModifierMapping.restype = POINTER(XModifierKeymap)
XGetDeviceModifierMapping.argtypes = [POINTER(Display), POINTER(XDevice)]

# /usr/include/X11/extensions/XInput.h:5904
XSetDeviceModifierMapping = _lib.XSetDeviceModifierMapping
XSetDeviceModifierMapping.restype = c_int
XSetDeviceModifierMapping.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(XModifierKeymap)]

# /usr/include/X11/extensions/XInput.h:5910
XSetDeviceButtonMapping = _lib.XSetDeviceButtonMapping
XSetDeviceButtonMapping.restype = c_int
XSetDeviceButtonMapping.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(c_ubyte), c_int]

# /usr/include/X11/extensions/XInput.h:5917
XGetDeviceButtonMapping = _lib.XGetDeviceButtonMapping
XGetDeviceButtonMapping.restype = c_int
XGetDeviceButtonMapping.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(c_ubyte), c_uint]

# /usr/include/X11/extensions/XInput.h:5924
XQueryDeviceState = _lib.XQueryDeviceState
XQueryDeviceState.restype = POINTER(XDeviceState)
XQueryDeviceState.argtypes = [POINTER(Display), POINTER(XDevice)]

# /usr/include/X11/extensions/XInput.h:5929
XFreeDeviceState = _lib.XFreeDeviceState
XFreeDeviceState.restype = None
XFreeDeviceState.argtypes = [POINTER(XDeviceState)]

# /usr/include/X11/extensions/XInput.h:5933
XGetExtensionVersion = _lib.XGetExtensionVersion
XGetExtensionVersion.restype = POINTER(XExtensionVersion)
XGetExtensionVersion.argtypes = [POINTER(Display), c_char_p]

# /usr/include/X11/extensions/XInput.h:5938
XListInputDevices = _lib.XListInputDevices
XListInputDevices.restype = POINTER(XDeviceInfo)
XListInputDevices.argtypes = [POINTER(Display), POINTER(c_int)]

# /usr/include/X11/extensions/XInput.h
XListDeviceProperties = _lib.XListDeviceProperties
XListDeviceProperties.restype = POINTER(Atom)
XListDeviceProperties.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(c_int)]

# /usr/include/X11/extensions/XInput.h
XGetDeviceProperty = _lib.XGetDeviceProperty
XGetDeviceProperty.restype = c_int
XGetDeviceProperty.argtypes = [POINTER(Display), POINTER(XDevice), Atom, c_long, c_long, c_bool, Atom, POINTER(Atom), POINTER(c_int), POINTER(c_ulong), POINTER(c_ulong), POINTER(c_char_p)]

# /usr/include/X11/extensions/XInput.h:5943
XFreeDeviceList = _lib.XFreeDeviceList
XFreeDeviceList.restype = None
XFreeDeviceList.argtypes = [POINTER(XDeviceInfo)]

# /usr/include/X11/extensions/XInput.h:5947
XOpenDevice = _lib.XOpenDevice
XOpenDevice.restype = POINTER(XDevice)
XOpenDevice.argtypes = [POINTER(Display), XID]

# /usr/include/X11/extensions/XInput.h:5952
XCloseDevice = _lib.XCloseDevice
XCloseDevice.restype = c_int
XCloseDevice.argtypes = [POINTER(Display), POINTER(XDevice)]

# /usr/include/X11/extensions/XInput.h:5957
XSetDeviceMode = _lib.XSetDeviceMode
XSetDeviceMode.restype = c_int
XSetDeviceMode.argtypes = [POINTER(Display), POINTER(XDevice), c_int]

# /usr/include/X11/extensions/XInput.h:5963
XSetDeviceValuators = _lib.XSetDeviceValuators
XSetDeviceValuators.restype = c_int
XSetDeviceValuators.argtypes = [POINTER(Display), POINTER(XDevice), POINTER(c_int), c_int, c_int]

# /usr/include/X11/extensions/XInput.h:5971
XGetDeviceControl = _lib.XGetDeviceControl
XGetDeviceControl.restype = POINTER(XDeviceControl)
XGetDeviceControl.argtypes = [POINTER(Display), POINTER(XDevice), c_int]

# /usr/include/X11/extensions/XInput.h:5977
XChangeDeviceControl = _lib.XChangeDeviceControl
XChangeDeviceControl.restype = c_int
XChangeDeviceControl.argtypes = [POINTER(Display), POINTER(XDevice), c_int, POINTER(XDeviceControl)]

# /usr/include/X11/extensions/XInput.h:5984
XSelectExtensionEvent = _lib.XSelectExtensionEvent
XSelectExtensionEvent.restype = c_int
XSelectExtensionEvent.argtypes = [POINTER(Display), Window, POINTER(XEventClass), c_int]

# /usr/include/X11/extensions/XInput.h:5991
XGetSelectedExtensionEvents = _lib.XGetSelectedExtensionEvents
XGetSelectedExtensionEvents.restype = c_int
XGetSelectedExtensionEvents.argtypes = [POINTER(Display), Window, POINTER(c_int), POINTER(POINTER(XEventClass)), POINTER(c_int), POINTER(POINTER(XEventClass))]

# /usr/include/X11/extensions/XInput.h:6000
XChangeDeviceDontPropagateList = _lib.XChangeDeviceDontPropagateList
XChangeDeviceDontPropagateList.restype = c_int
XChangeDeviceDontPropagateList.argtypes = [POINTER(Display), Window, c_int, POINTER(XEventClass), c_int]

# /usr/include/X11/extensions/XInput.h:6008
XGetDeviceDontPropagateList = _lib.XGetDeviceDontPropagateList
XGetDeviceDontPropagateList.restype = POINTER(XEventClass)
XGetDeviceDontPropagateList.argtypes = [POINTER(Display), Window, POINTER(c_int)]

# Re-exported from xlib so the prototypes below can reference it.
XEvent = pyglet.libs.x11.xlib.XEvent
# /usr/include/X11/extensions/XInput.h:6014
XSendExtensionEvent = _lib.XSendExtensionEvent
XSendExtensionEvent.restype = c_int
XSendExtensionEvent.argtypes = [POINTER(Display), POINTER(XDevice), Window, c_int, c_int, POINTER(XEventClass), POINTER(XEvent)]

# /usr/include/X11/extensions/XInput.h:6024
XGetDeviceMotionEvents = _lib.XGetDeviceMotionEvents
XGetDeviceMotionEvents.restype = POINTER(XDeviceTimeCoord)
XGetDeviceMotionEvents.argtypes = [POINTER(Display), POINTER(XDevice), Time, Time, POINTER(c_int), POINTER(c_int), POINTER(c_int)]

# /usr/include/X11/extensions/XInput.h:6034
XFreeDeviceMotionEvents = _lib.XFreeDeviceMotionEvents
XFreeDeviceMotionEvents.restype = None
XFreeDeviceMotionEvents.argtypes = [POINTER(XDeviceTimeCoord)]

# /usr/include/X11/extensions/XInput.h:6038
XFreeDeviceControl = _lib.XFreeDeviceControl
XFreeDeviceControl.restype = None
XFreeDeviceControl.argtypes = [POINTER(XDeviceControl)]
# Public API of this module: limits 'from ... import *' to the generated
# request sizes, constants, structs and function prototypes defined above.
__all__ = ['sz_xGetExtensionVersionReq', 'sz_xGetExtensionVersionReply',
'sz_xListInputDevicesReq', 'sz_xListInputDevicesReply', 'sz_xOpenDeviceReq',
'sz_xOpenDeviceReply', 'sz_xCloseDeviceReq', 'sz_xSetDeviceModeReq',
'sz_xSetDeviceModeReply', 'sz_xSelectExtensionEventReq',
'sz_xGetSelectedExtensionEventsReq', 'sz_xGetSelectedExtensionEventsReply',
'sz_xChangeDeviceDontPropagateListReq', 'sz_xGetDeviceDontPropagateListReq',
'sz_xGetDeviceDontPropagateListReply', 'sz_xGetDeviceMotionEventsReq',
'sz_xGetDeviceMotionEventsReply', 'sz_xChangeKeyboardDeviceReq',
'sz_xChangeKeyboardDeviceReply', 'sz_xChangePointerDeviceReq',
'sz_xChangePointerDeviceReply', 'sz_xGrabDeviceReq', 'sz_xGrabDeviceReply',
'sz_xUngrabDeviceReq', 'sz_xGrabDeviceKeyReq', 'sz_xGrabDeviceKeyReply',
'sz_xUngrabDeviceKeyReq', 'sz_xGrabDeviceButtonReq',
'sz_xGrabDeviceButtonReply', 'sz_xUngrabDeviceButtonReq',
'sz_xAllowDeviceEventsReq', 'sz_xGetDeviceFocusReq',
'sz_xGetDeviceFocusReply', 'sz_xSetDeviceFocusReq',
'sz_xGetFeedbackControlReq', 'sz_xGetFeedbackControlReply',
'sz_xChangeFeedbackControlReq', 'sz_xGetDeviceKeyMappingReq',
'sz_xGetDeviceKeyMappingReply', 'sz_xChangeDeviceKeyMappingReq',
'sz_xGetDeviceModifierMappingReq', 'sz_xSetDeviceModifierMappingReq',
'sz_xSetDeviceModifierMappingReply', 'sz_xGetDeviceButtonMappingReq',
'sz_xGetDeviceButtonMappingReply', 'sz_xSetDeviceButtonMappingReq',
'sz_xSetDeviceButtonMappingReply', 'sz_xQueryDeviceStateReq',
'sz_xQueryDeviceStateReply', 'sz_xSendExtensionEventReq', 'sz_xDeviceBellReq',
'sz_xSetDeviceValuatorsReq', 'sz_xSetDeviceValuatorsReply',
'sz_xGetDeviceControlReq', 'sz_xGetDeviceControlReply',
'sz_xChangeDeviceControlReq', 'sz_xChangeDeviceControlReply', 'Dont_Check',
'XInput_Initial_Release', 'XInput_Add_XDeviceBell',
'XInput_Add_XSetDeviceValuators', 'XInput_Add_XChangeDeviceControl',
'XInput_Add_DevicePresenceNotify', 'XI_Absent', 'XI_Present',
'XI_Initial_Release_Major', 'XI_Initial_Release_Minor',
'XI_Add_XDeviceBell_Major', 'XI_Add_XDeviceBell_Minor',
'XI_Add_XSetDeviceValuators_Major', 'XI_Add_XSetDeviceValuators_Minor',
'XI_Add_XChangeDeviceControl_Major', 'XI_Add_XChangeDeviceControl_Minor',
'XI_Add_DevicePresenceNotify_Major', 'XI_Add_DevicePresenceNotify_Minor',
'DEVICE_RESOLUTION', 'DEVICE_ABS_CALIB', 'DEVICE_CORE', 'DEVICE_ENABLE',
'DEVICE_ABS_AREA', 'NoSuchExtension', 'COUNT', 'CREATE', 'NewPointer',
'NewKeyboard', 'XPOINTER', 'XKEYBOARD', 'UseXKeyboard', 'IsXPointer',
'IsXKeyboard', 'IsXExtensionDevice', 'IsXExtensionKeyboard',
'IsXExtensionPointer', 'AsyncThisDevice', 'SyncThisDevice',
'ReplayThisDevice', 'AsyncOtherDevices', 'AsyncAll', 'SyncAll',
'FollowKeyboard', 'RevertToFollowKeyboard', 'DvAccelNum', 'DvAccelDenom',
'DvThreshold', 'DvKeyClickPercent', 'DvPercent', 'DvPitch', 'DvDuration',
'DvLed', 'DvLedMode', 'DvKey', 'DvAutoRepeatMode', 'DvString', 'DvInteger',
'DeviceMode', 'Relative', 'Absolute', 'ProximityState', 'InProximity',
'OutOfProximity', 'AddToList', 'DeleteFromList', 'KeyClass', 'ButtonClass',
'ValuatorClass', 'FeedbackClass', 'ProximityClass', 'FocusClass',
'OtherClass', 'KbdFeedbackClass', 'PtrFeedbackClass', 'StringFeedbackClass',
'IntegerFeedbackClass', 'LedFeedbackClass', 'BellFeedbackClass',
'_devicePointerMotionHint', '_deviceButton1Motion', '_deviceButton2Motion',
'_deviceButton3Motion', '_deviceButton4Motion', '_deviceButton5Motion',
'_deviceButtonMotion', '_deviceButtonGrab', '_deviceOwnerGrabButton',
'_noExtensionEvent', '_devicePresence', 'DeviceAdded', 'DeviceRemoved',
'DeviceEnabled', 'DeviceDisabled', 'DeviceUnrecoverable', 'XI_BadDevice',
'XI_BadEvent', 'XI_BadMode', 'XI_DeviceBusy', 'XI_BadClass', 'XEventClass',
'XExtensionVersion', '_deviceKeyPress', '_deviceKeyRelease',
'_deviceButtonPress', '_deviceButtonRelease', '_deviceMotionNotify',
'_deviceFocusIn', '_deviceFocusOut', '_proximityIn', '_proximityOut',
'_deviceStateNotify', '_deviceMappingNotify', '_changeDeviceNotify',
'XDeviceKeyEvent', 'XDeviceKeyPressedEvent', 'XDeviceKeyReleasedEvent',
'XDeviceButtonEvent', 'XDeviceButtonPressedEvent',
'XDeviceButtonReleasedEvent', 'XDeviceMotionEvent', 'XDeviceFocusChangeEvent',
'XDeviceFocusInEvent', 'XDeviceFocusOutEvent', 'XProximityNotifyEvent',
'XProximityInEvent', 'XProximityOutEvent', 'XInputClass',
'XDeviceStateNotifyEvent', 'XValuatorStatus', 'XKeyStatus', 'XButtonStatus',
'XDeviceMappingEvent', 'XChangeDeviceNotifyEvent',
'XDevicePresenceNotifyEvent', 'XFeedbackState', 'XKbdFeedbackState',
'XPtrFeedbackState', 'XIntegerFeedbackState', 'XStringFeedbackState',
'XBellFeedbackState', 'XLedFeedbackState', 'XFeedbackControl',
'XPtrFeedbackControl', 'XKbdFeedbackControl', 'XStringFeedbackControl',
'XIntegerFeedbackControl', 'XBellFeedbackControl', 'XLedFeedbackControl',
'XDeviceControl', 'XDeviceResolutionControl', 'XDeviceResolutionState',
'XDeviceAbsCalibControl', 'XDeviceAbsCalibState', 'XDeviceAbsAreaControl',
'XDeviceAbsAreaState', 'XDeviceCoreControl', 'XDeviceCoreState',
'XDeviceEnableControl', 'XDeviceEnableState', 'XAnyClassPtr', 'XAnyClassInfo',
'XDeviceInfoPtr', 'XDeviceInfo', 'XKeyInfoPtr', 'XKeyInfo', 'XButtonInfoPtr',
'XButtonInfo', 'XAxisInfoPtr', 'XAxisInfo', 'XValuatorInfoPtr',
'XValuatorInfo', 'XInputClassInfo', 'XDevice', 'XEventList',
'XDeviceTimeCoord', 'XDeviceState', 'XValuatorState', 'XKeyState',
'XButtonState', 'XChangeKeyboardDevice', 'XChangePointerDevice',
'XGrabDevice', 'XUngrabDevice', 'XGrabDeviceKey', 'XUngrabDeviceKey',
'XGrabDeviceButton', 'XUngrabDeviceButton', 'XAllowDeviceEvents',
'XGetDeviceFocus', 'XSetDeviceFocus', 'XGetFeedbackControl',
'XFreeFeedbackList', 'XChangeFeedbackControl', 'XDeviceBell',
'XGetDeviceKeyMapping', 'XChangeDeviceKeyMapping',
'XGetDeviceModifierMapping', 'XSetDeviceModifierMapping',
'XSetDeviceButtonMapping', 'XGetDeviceButtonMapping', 'XQueryDeviceState',
'XFreeDeviceState', 'XGetExtensionVersion', 'XListInputDevices',
'XListDeviceProperties', 'XGetDeviceProperty', 'XFreeDeviceList',
'XOpenDevice', 'XCloseDevice', 'XSetDeviceMode',
'XSetDeviceValuators', 'XGetDeviceControl', 'XChangeDeviceControl',
'XSelectExtensionEvent', 'XGetSelectedExtensionEvents',
'XChangeDeviceDontPropagateList', 'XGetDeviceDontPropagateList',
'XSendExtensionEvent', 'XGetDeviceMotionEvents', 'XFreeDeviceMotionEvents',
'XFreeDeviceControl']
# | StarcoderdataPython |
# 1743531 | <reponame>pg42870/SIB <gh_stars>0
import numpy as np
from .model import Model
from ..util.util import sigmoid, add_intersect
class LogisticRegression(Model):
    """Logistic regression classifier trained with batch gradient descent."""

    def __init__(self, gd=False, epochs=1000, lr=0.001):
        """Logistic regression model.

        :param gd: unused flag kept for interface compatibility.
        :param epochs: number of gradient-descent epochs.
        :param lr: learning rate for gradient descent.
        """
        super(LogisticRegression, self).__init__()
        self.theta = None
        self.epochs = epochs
        self.lr = lr

    def fit(self, dataset):
        """Fit the model to a dataset via gradient descent."""
        X, Y = dataset.getXy()
        # Prepend a column of ones for the intercept term (same helper as
        # LogisticRegressionReg uses, replacing the manual np.hstack).
        X = add_intersect(X)
        self.X = X
        self.Y = Y
        self.train(X, Y)
        self.is_fitted = True

    def train(self, X, Y):
        """Run gradient descent, recording [theta, cost] per epoch in self.history."""
        n = X.shape[1]
        self.history = {}
        self.theta = np.zeros(n)
        for epoch in range(self.epochs):
            z = np.dot(X, self.theta)
            h = sigmoid(z)
            gradient = np.dot(X.T, (h - Y)) / Y.size
            self.theta -= self.lr * gradient
            # .copy() is required: theta is updated in place above, so storing
            # the view self.theta[:] would make every history entry alias (and
            # therefore equal) the final theta.
            self.history[epoch] = [self.theta.copy(), self.cost()]

    def predict(self, x):
        """Predict the class (0 or 1) of a single sample ``x``."""
        assert self.is_fitted, 'Model must be fit before predicting'
        hs = np.hstack(([1], x))
        p = sigmoid(np.dot(self.theta, hs))
        return 1 if p >= 0.5 else 0

    def cost(self, X=None, y=None, theta=None):
        """Mean cross-entropy cost over (X, y) for the given (or fitted) theta."""
        X = add_intersect(X) if X is not None else self.X
        y = y if y is not None else self.Y
        theta = theta if theta is not None else self.theta
        m, n = X.shape
        h = sigmoid(np.dot(X, theta))
        cost = (-y * np.log(h) - (1 - y) * np.log(1 - h))
        res = np.sum(cost) / m
        return res
class LogisticRegressionReg:
    """Logistic regression with L2 regularisation, trained by gradient descent."""

    def __init__(self, epochs=1000, lr=0.1, lbd=1):
        """
        :param epochs: number of gradient-descent epochs.
        :param lr: learning rate.
        :param lbd: regularisation strength (lambda).
        """
        super(LogisticRegressionReg, self).__init__()
        self.theta = None
        self.epochs = epochs
        self.lr = lr
        self.lbd = lbd  # lbd = lambda (regularisation weight)

    def fit(self, dataset):
        """Fit the model to a dataset via regularised gradient descent."""
        X, Y = dataset.getXy()
        X = add_intersect(X)
        # Kept so cost() can later be called without passing the data again.
        self.X = X
        self.Y = Y
        self.train(X, Y)
        self.is_fitted = True

    def train(self, X, y):
        """Run gradient descent; the bias term theta[0] is not penalised."""
        m, n = X.shape
        self.history = {}
        self.theta = np.zeros(n)
        for epoch in range(self.epochs):
            z = np.dot(X, self.theta)
            h = sigmoid(z)
            grad = np.dot(X.T, (h - y)) / y.size
            # L2 penalty applied to all weights except the intercept.
            reg = (self.lbd / m) * self.theta[1:]
            grad[1:] = grad[1:] + reg
            self.theta -= self.lr * grad
            # .copy() is required: theta is updated in place above, so storing
            # the view self.theta[:] would make every history entry alias (and
            # therefore equal) the final theta.
            self.history[epoch] = [self.theta.copy(), self.cost()]

    def predict(self, x):
        """Predict the class (0 or 1) of a single sample ``x``."""
        assert self.is_fitted, 'Model must be fit before predicting'
        hs = np.hstack(([1], x))
        p = sigmoid(np.dot(self.theta, hs))
        return 1 if p >= 0.5 else 0

    def cost(self, X=None, y=None, theta=None):
        """Regularised mean cross-entropy cost (intercept excluded from penalty)."""
        X = add_intersect(X) if X is not None else self.X
        y = y if y is not None else self.Y
        theta = theta if theta is not None else self.theta
        m = X.shape[0]
        h = sigmoid(np.dot(X, theta))
        cost = (-y * np.log(h) - (1 - y) * np.log(1 - h))
        reg = np.dot(theta[1:], theta[1:]) * self.lbd / (2 * m)
        res = np.sum(cost) / m
        return res + reg
# | StarcoderdataPython |
# 3297198 | <filename>BPt/dataset/tests/test_encoding.py
import numpy as np
import pandas as pd
import pytest
from .datasets import (get_fake_dataset, get_fake_dataset7,
get_fake_multi_index_dataset)
from ..Dataset import Dataset
def test_to_category():
    """Adding the 'category' scope converts the column dtype to category."""
    dataset = Dataset([1, 2, 3], columns=['0'])
    dataset = dataset.add_scope('0', 'category')
    assert dataset['0'].dtype.name == 'category'
def test_add_unique_overlap():
    """add_unique_overlap combines two columns into one and merges metadata."""
    df = get_fake_dataset()
    df.add_scope('1', 'q', inplace=True)
    df.add_scope('1', 'b', inplace=True)
    df.add_scope('2', 'q', inplace=True)
    df = df.ordinalize(scope='2')
    df.add_unique_overlap(cols=['1', '2'], new_col='combo',
                          decode_values=True, inplace=True)
    assert df['combo'].nunique() == 3

    # Scopes shared by both source columns propagate; unshared ones do not.
    assert 'category' in df.scopes['combo']
    assert 'q' in df.scopes['combo']
    assert 'b' not in df.scopes['combo']
    assert df.roles['combo'] == 'data'

    # Invalid arguments: cols must be a list of at least two existing
    # columns, and new_col must not already exist.
    with pytest.raises(RuntimeError):
        df.add_unique_overlap(cols='1', new_col='combo')

    with pytest.raises(RuntimeError):
        df.add_unique_overlap(cols=['1'], new_col='combo')

    with pytest.raises(KeyError):
        df.add_unique_overlap(cols=['does not exist', '1'], new_col='combo')

    with pytest.raises(KeyError):
        df.add_unique_overlap(cols=['1', '2'], new_col='1')
def test_multi_index_add_unique_overlap():
    """Overlap of two columns on a multi-index dataset has 6 unique values."""
    data = get_fake_multi_index_dataset()
    data = data.add_unique_overlap(cols=['0', '1'],
                                   new_col='new',
                                   decode_values=True)
    assert data['new'].nunique() == 6
def test_multi_index_add_unique_overlap_inplace():
    """Same as the non-inplace variant, but mutating the dataset directly."""
    data = get_fake_multi_index_dataset()
    data.add_unique_overlap(cols=['0', '1'],
                            new_col='new',
                            decode_values=True, inplace=True)
    assert data['new'].nunique() == 6
def test_to_binary_object():
    """to_binary reduces an object column to two classes and drops bad rows."""
    df = get_fake_dataset()
    df = df.to_binary('2')
    # Fixed: the original asserted len(df['2'].unique() == 2), which takes the
    # length of a boolean array and is always truthy.
    assert len(df['2'].unique()) == 2

    df = get_fake_dataset7()
    df['2'] = [' ', 1, 1, 1, 2, 2, 2]
    df = df.to_binary('2')
    # Fixed: same misplaced-parenthesis bug as above.
    assert len(df['2'].unique()) == 2
    # The row holding the stray ' ' value (index 0) is dropped.
    assert 0 not in df.index
    assert 1 in df.index
    assert df.shape == (6, 2)
def test_to_binary_inplace_copy():
    """With inplace=False the original dataset must stay untouched."""
    data = get_fake_dataset()
    n_before = len(data['2'].unique())
    assert n_before == 3
    data.to_binary('2', inplace=False)
    assert len(data['2'].unique()) == 3
def test_to_binary_inplace_copy2():
    """Only inplace=True introduces the NaN produced by drop=False."""
    data = get_fake_dataset()
    assert len(data['2'].unique()) == 3

    data.to_binary('2', drop=False, inplace=False)
    assert np.nan not in data['2'].values

    data.to_binary('2', drop=False, inplace=True)
    assert np.nan in data['2'].values
def test_to_binary_from_bool():
    """A bool column converts cleanly into a two-class binary column."""
    data = Dataset()
    data['1'] = [True, False, True, True]
    data['1'] = data['1'].astype('bool')
    converted = data.to_binary('1')
    assert len(pd.unique(converted['1'])) == 2
def test_to_binary_from_bool_inplace_copy():
    """to_binary returns a copy whose dtype differs from the bool source."""
    data = Dataset()
    data['1'] = [True, False, True, True]
    data['1'] = data['1'].astype('bool')
    converted = data.to_binary('1')
    assert data['1'].dtype.name != converted['1'].dtype.name
def test_to_binary():
    """Core to_binary behavior: NaN dropping, drop=False encoding, constants."""
    df = get_fake_dataset7()
    assert len(df) == 7

    # The NaN row is dropped by default.
    df = df.to_binary(scope='1')
    assert len(df) == 6
    assert 0 in df['1'].unique()
    assert 1 in df['1'].unique()
    assert len(df['1'].unique()) == 2

    # drop=False keeps the NaN row and records the value encoding.
    df = get_fake_dataset7()
    df = df.to_binary(scope='2', drop=False)
    assert df['2'].dtype.name == 'category'
    assert pd.isnull(df.loc[0, '2'])
    assert len(df) == 7
    assert df.encoders['2'][0] == 1
    assert df.encoders['2'][1] == 2

    # A constant column stays a single class.
    df = get_fake_dataset7()
    df['2'] = [1, 1, 1, 1, 1, 1, 1]
    df = df.to_binary(scope='2')
    # Fixed: the original asserted len(df['2'].unique() == 1), which takes the
    # length of a boolean array and is always truthy.
    assert len(df['2'].unique()) == 1
def test_to_binary_inplace():
    """inplace=False returns a modified copy; inplace=True mutates df."""
    df = get_fake_dataset7()
    df_copy = df.to_binary(scope='1', inplace=False)
    assert len(df) == 7
    assert len(df_copy) == 6

    df = get_fake_dataset7()
    df['2'] = [1, 1, 1, 1, 1, 1, 1]
    df.to_binary(scope='2', inplace=True)
    # Fixed: the original asserted len(df['2'].unique() == 1), which takes the
    # length of a boolean array and is always truthy.
    assert len(df['2'].unique()) == 1
def test_nan_to_class():
    """nan_to_class maps NaN values to a new ordinal class."""
    df = get_fake_dataset7()
    df = df.to_binary(scope='1', drop=False)
    assert len(df) == 7
    assert pd.isnull(df.loc[0, '1'])

    # NaN becomes class 2, after the binary classes 0 and 1.
    df = df.nan_to_class(scope='1')
    assert df.loc[0, '1'] == 2

    df = get_fake_dataset7()
    df.loc[6, '2'] = np.nan
    df = df.to_binary(scope='2', drop=False)
    df = df.nan_to_class(scope='2')
    assert df.loc[6, '2'] == 2
    assert df.loc[0, '2'] == 2
    # The encoder maps the NaN class back to NaN for decoding.
    assert pd.isnull(df.encoders['2'][2])
def test_nan_to_class_inplace_copy():
    """inplace=False converts the NaN only in the returned copy."""
    data = Dataset([1, 2, np.nan], columns=['1'])
    converted = data.nan_to_class(scope='1', inplace=False)
    assert not pd.isna(converted['1'].values[2])
    assert pd.isna(data['1'].values[2])
def test_k_bin():
    """k_bin discretizes columns into n_bins classes and records encoders."""
    df = get_fake_dataset7()
    df = df.k_bin(scope='1', n_bins=2, strategy='uniform')
    assert df['1'].nunique() == 2

    df = df.k_bin(scope='2', n_bins=2, strategy='uniform')
    assert df['2'].nunique() == 2

    df._check_scopes()
    assert 'category' in df.scopes['2']
    assert len(df.encoders['1']) == 2

    # Test with nans
    df = get_fake_dataset7()
    df.loc[1, '1'] = np.nan
    df.k_bin(scope='1', n_bins=2, strategy='uniform', inplace=True)
    assert df['1'].nunique(dropna=True) == 2

    # Test compat with nan to class
    df = df.nan_to_class(scope='1')
    assert df['1'].nunique(dropna=True) == 3
    assert len(df.encoders['1']) == 3
def test_k_bin_quantile():
    """k_bin with the quantile strategy yields exactly n_bins values."""
    values = [.1, .2, .3, .4, .5, .6, .7, .8, .9]
    data = Dataset(values, columns=['feat'])
    data = data.k_bin('feat', n_bins=3, strategy='quantile')
    assert len(np.unique(data['feat'])) == 3
def test_k_bin_quantile_inplace():
    """With inplace=False, k_bin must leave the original column untouched."""
    data = Dataset([.1, .2, .3, .4, .5, .6, .7, .8, .9], columns=['feat'])
    assert len(np.unique(data['feat'])) == 9
    # Fixed: use the quantile strategy so the call matches the test name (the
    # original accidentally passed 'uniform'); the assertion — that the
    # source column is unchanged — is strategy-independent.
    data.k_bin('feat', n_bins=3, strategy='quantile', inplace=False)
    assert len(np.unique(data['feat'])) == 9
def test_ordinalize_inplace():
    """ordinalize with inplace=False returns a copy and keeps the original."""
    data = Dataset(['a', 'b', 'c'], columns=['feat'])
    assert 'a' in data['feat'].values

    data.ordinalize(scope='all', inplace=False)
    assert 'a' in data['feat'].values

    encoded = data.ordinalize(scope='all', inplace=False)
    assert 'a' not in encoded['feat'].values
    assert 'a' in data['feat'].values
def test_ordinalize():
    """ordinalize with inplace=True replaces the string values in place."""
    data = Dataset(['a', 'b', 'c'], columns=['feat'])
    assert 'a' in data['feat'].values
    data.ordinalize(scope='all', inplace=True)
    assert 'a' not in data['feat'].values
def test_binarize_threshold():
    """A scalar threshold splits values into 0 (below) and 1 (at/above)."""
    data = get_fake_dataset()
    data = data.binarize('1', threshold=1.5)
    assert data.loc[0, '1'] == 0
    assert data.loc[1, '1'] == 1
    assert 'category' in data.scopes['1']
    assert data.encoders['1'] == {0: '<1.5', 1: '>=1.5'}
def test_binarize_inplace_copy():
    """binarize with inplace=False must not modify the original values."""
    data = get_fake_dataset()
    assert data.loc[0, '1'] != 0
    assert data.loc[1, '1'] != 1

    data.binarize('1', threshold=1.5, inplace=False)
    assert data.loc[0, '1'] != 0
    assert data.loc[1, '1'] != 1
def test_binarize_replace():
    """replace=False appends the binarized column instead of overwriting."""
    data = get_fake_dataset()
    data = data.binarize('1', threshold=1.5, replace=False)
    assert data.shape == (3, 4)
    assert 3 in data['1'].values
def test_binarize_with_nans():
    """NaN entries survive binarization as NaN."""
    data = get_fake_dataset()
    data = data.binarize('3', threshold=2.5)
    assert pd.isnull(data.loc[0, '3'])
    assert data.loc[1, '3'] == 0
    assert data.loc[2, '3'] == 1
def test_binarize_upper_lower():
    """A (lower, upper) threshold binarizes the extremes and drops the band."""
    data = get_fake_dataset()
    data = data.binarize('1', threshold=(2, 2))
    assert len(data) == 2
    assert data.loc[0, '1'] == 0
    assert data.loc[2, '1'] == 1
    assert 'category' in data.scopes['1']
    assert data.encoders['1'] == {0: '<2', 1: '>2'}
def test_binarize_upper_lower_drop():
    """drop controls whether rows inside the (lower, upper) band are removed."""
    # Test with drop True
    df = get_fake_dataset()
    df = df.binarize('1', threshold=(1.1, 2.2), drop=True)
    assert len(df) == 2
    assert pd.isnull(df.loc[0, '3'])
    assert df.loc[0, '1'] == 0
    assert df.loc[2, '1'] == 1

    # With drop False — the in-band row is kept as NaN instead.
    df = get_fake_dataset()
    df.binarize('1', threshold=(1.1, 2.2), drop=False, inplace=True)
    assert len(df) == 3
    assert df.loc[0, '1'] == 0
    assert pd.isnull(df.loc[1, '1'])
    assert df.loc[2, '1'] == 1
def test_copy_as_non_input():
    """copy_as_non_input duplicates a column with the 'non input' role."""
    df = get_fake_dataset()
    df.add_scope('1', 'bleh', inplace=True)
    df.copy_as_non_input(col='1', new_col='1_copy',
                         copy_scopes=False, inplace=True)
    df._check_scopes()

    assert df.shape == ((3, 4))
    assert df.roles['1_copy'] == 'non input'
    assert df.roles['1'] != 'non input'
    assert np.max(np.array(df['1_copy'])) == 2
    # copy_scopes=False: the custom scope is not carried over.
    assert 'bleh' not in df.scopes['1_copy']

    # Make sure copy scopes works
    df = get_fake_dataset()
    df.add_scope('1', 'bleh', inplace=True)
    df = df.copy_as_non_input(col='1', new_col='1_copy', copy_scopes=True)
    assert df.shape == ((3, 4))
    assert 'bleh' in df.scopes['1_copy']
def test_copy_as_non_input_not_inplace():
    """inplace=False adds the copy only to the returned dataset."""
    df = get_fake_dataset()
    df.add_scope('1', 'bleh', inplace=True)
    df_copy = df.copy_as_non_input(col='1', new_col='1_copy',
                                   copy_scopes=True, inplace=False)
    df._check_scopes()
    df_copy._check_scopes()

    assert df.shape == ((3, 3))
    assert df_copy.shape == ((3, 4))
    assert '1_copy' not in df
    assert '1_copy' in df_copy
    assert 'bleh' in df_copy.scopes['1_copy']
    assert '1_copy' not in df.scopes
# | StarcoderdataPython |
# 1679486
import boto3
import boto3.session
import json
from keydra.providers.base import BaseProvider
from keydra.providers.base import exponential_backoff_retry
from keydra.exceptions import DistributionException
from typing import Dict, NamedTuple, Optional
from keydra.exceptions import RotationException
from keydra.clients.aws.secretsmanager import GetSecretException
from keydra.clients.aws.secretsmanager import InsertSecretException
from keydra.clients.aws.secretsmanager import SecretsManagerClient
from keydra.clients.aws.secretsmanager import UpdateSecretException
from keydra.logging import get_logger
# Module-level logger shared by the provider class below.
LOGGER = get_logger()
class SecretsManagerProvider(BaseProvider):
def __init__(
self,
session=None,
client: SecretsManagerClient = None,
region_name=None,
# credentials must be present for the loader to init the provider
credentials=None):
if session is None: # pragma: no cover
session = boto3.session.Session()
self._client = client or SecretsManagerClient(
session=session,
region_name=region_name
)
class Options(NamedTuple):
bypass: bool = False
rotate_attribute: Optional[str] = None
length: int = 32
exclude_char: str = ''
exclude_num: bool = False
exclude_punct: bool = False
exclude_upper: bool = False
exclude_lower: bool = False
include_space: bool = False
require_each_type: bool = True
def _distribute_secret(self, secret, dest):
current_secret = None
try:
current_secret = self._client.describe_secret(
secret_id=dest['key']
)
except GetSecretException as e:
print('not existing, ', e)
LOGGER.info(
'{} not present in SecretsManager: {}'.format(dest['key'], e)
)
try:
if current_secret:
print('present, updating ', dest)
LOGGER.info(
'{} is present in SecretsManager, updating.'.format(
dest['key']
)
)
self._client.update_secret(
secret_id=dest['key'],
secret_value=json.dumps(secret)
)
else:
print('not present, creating')
LOGGER.info(
'{} not present in SecretsManager, adding.'.format(
dest['key'],
)
)
self._client.create_secret(
secret_name=dest['key'],
secret_value=json.dumps(secret),
description='Keydra managed secret {}'.format(
dest['key']
)
)
except (InsertSecretException, UpdateSecretException) as e:
raise DistributionException(
'Error distributing {}: {}'.format(dest['key'], e)
)
LOGGER.info(
'Successfully distributed {} to SecretsManager'.format(
dest['key']
)
)
return dest
def _get_current_secret(self, secret_key):
try:
value = self._client.get_secret_value(secret_key)
# Assume that all secret values are formatted as valid JSON objects
obj = json.loads(value)
obj['provider'] = 'secretsmanager'
return obj
except Exception as e:
raise RotationException(e)
def _generate_secret_value(self, opts: Options) -> str:
return self._client.generate_random_password(
length=opts.length,
ExcludeCharacters=opts.exclude_char,
ExcludeNumbers=opts.exclude_num,
ExcludePunctuation=opts.exclude_punct,
ExcludeUppercase=opts.exclude_upper,
ExcludeLowercase=opts.exclude_lower,
IncludeSpace=opts.include_space,
RequireEachIncludedType=opts.require_each_type)
def rotate(self, spec) -> Dict[str, str]:
current_secret = self._get_current_secret(spec['key'])
options = SecretsManagerProvider.Options(**spec['config'])
if options.bypass:
LOGGER.debug('Bypassing rotation for {}'
.format(spec['key']))
return current_secret
LOGGER.debug('Rotating {} with options: {}'
.format(spec['key'], options))
current_secret[options.rotate_attribute] = \
self._generate_secret_value(options)
self._client.update_secret(
spec['key'],
json.dumps(current_secret))
return current_secret
@exponential_backoff_retry(3)
def distribute(self, secret, destination):
try:
return self._distribute_secret(secret, destination)
except Exception as e:
raise DistributionException(e)
@classmethod
def validate_spec(cls, spec):
valid, msg = BaseProvider.validate_spec(spec)
if not valid:
return False, msg
if ('config' in spec) == ('source' in spec):
return False, 'Must specify either "config" or \
"source", not both'
if 'source' in spec:
return True, 'All good!'
options = SecretsManagerProvider.Options(**spec['config'])
if options.bypass == (options.rotate_attribute is not None):
return False, 'Must specify either "bypass" or \
"rotate_attribute", not both'
return True, 'All good!'
@classmethod
def redact_result(cls, result, spec=None):
if 'value' in result:
for key, value in result['value'].items():
if key != 'provider':
result['value'][key] = '***'
return result
    @classmethod
    def has_creds(cls):
        """Always False for this provider.

        NOTE(review): presumably indicates whether the provider carries its
        own stored credentials (rather than using ambient AWS credentials) —
        confirm against the BaseProvider contract.
        """
        return False
| StarcoderdataPython |
6382 | <filename>utils/functions.py
import torch
from torch import nn
import math
#0 left hip
#1 left knee
#2 left foot
#3 right hip
#4 right knee
#5 right foot
#6 middle hip
#7 neck
#8 nose
#9 head
#10 left shoulder
#11 left elbow
#12 left wrist
#13 right shoulder
#14 right elbow
#15 right wrist
def random_rotation(J3d):
    """Rotate every skeleton in the batch about the y axis by a random angle.

    Uses joint 8 (the nose) as the rotation root.  Returns the rotated
    joints together with the sampled angles and the root joints, which the
    caller needs later to invert the rotation.
    """
    joints = J3d  # rotation() allocates a new tensor, so the input survives
    batch = joints.shape[0]
    full_turn = 2 * torch.tensor(math.pi).cuda()
    angles = torch.rand(batch).cuda() * full_turn  # uniform in [0, 2*pi)
    roots = joints[:, :, 8]  # joint 8 = nose is the rotation root
    rotated = rotation(joints.cuda(), angles.cuda(), roots.unsqueeze(-1).cuda(), False)
    return rotated, angles, roots
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
    # J:     (batch, 3, 16) joint positions
    # theta: (batch,) rotation angles in radians
    # root:  (batch, 3, 1) pivot joint (or the translation when reversing)
    # NOTE(review): shapes inferred from the callers in this file — confirm.
    D = root[:,2].cuda() # absolute depth of the root joint
    batch_size = root.shape[0]
    v_t = torch.zeros((batch_size, 3, 1)).cuda()
    v_t[:, 2, :] = D.cuda() # translation vector: restores the root depth after rotating
    if is_reversed:
        # Undoing a previous rotation: swap pivot and translation, and rotate
        # by the opposite angle.  Order matters: theta is negated after the swap.
        root, v_t = v_t, root # swap
        theta = -theta
    # R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
    R = torch.zeros((batch_size, 3, 3)).cuda() # rotation matrix over y by theta radians
    R[:, 0, 0] = torch.cos(theta)
    R[:, 0, 2] = torch.sin(theta)
    R[:, 1, 1] = torch.ones(batch_size)
    R[:, 2, 0] = -torch.sin(theta)
    R[:, 2, 2] = torch.cos(theta)
    # R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]) # rotation matrix over y by theta degrees
    # R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
    # Rotate about the pivot, then translate back to the original depth.
    J_R = torch.matmul(R, J - root) + v_t # rotation
    return J_R
def reverse_rotation(J3d_R, theta, root):
    """Invert a rotation produced by :func:`random_rotation`.

    *theta* and *root* must be the values that random_rotation returned for
    the same batch.
    """
    return rotation(J3d_R.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), True)
def temporal_loss(J, K, J_R, K_R):
    """Temporal-consistency loss between two time steps.

    J / K are 3D poses at times t and t+k; J_R / K_R are their
    reverse-rotated counterparts.  The residual (J - K) - (J_R - K_R)
    should vanish when rotation is consistent over time, so its MSE
    against zero is returned.
    """
    batch = J.shape[0]

    def as_joints(t):
        # Flatten each pose into (batch, 3 coords, 16 joints).
        return t.reshape(batch, 3, 16)

    residual = as_joints(J) - as_joints(K) - as_joints(J_R) + as_joints(K_R)
    return nn.MSELoss()(residual, torch.zeros(batch, 3, 16).cuda())
'''
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
return torch.norm(J - K - J_R + K_R, dim=1)**2
'''
'''
def random_rotation(J3d):
# J = torch.transpose(J3d, 1, 2)
J = J3d
root = torch.zeros(J.shape[0:2])
for i in range(J.shape[0]):
theta = torch.rand(1).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root[i] = J[i,:,8] # joint 8 = nose is root
temp = rotation(J[i,:,:], theta, root[i].unsqueeze(1), False)
# print(temp.shape)
J[i,:,:] = temp
return J, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[2] # absolute depth of the root joint
v_t = torch.tensor([[0], [0], [D]]).cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]).cuda() # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J.cuda() - root.cuda()) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
# J = torch.transpose(J3d_R, 1, 2)
J = J3d_R
for i in range(J.shape[0]):
J[i,:,:] = rotation(J[i,:,:].cuda(), theta.cuda(), root[i].unsqueeze(1).cuda(), True)
return J
''' | StarcoderdataPython |
3392247 | <reponame>deeplycloudy/brawl4d
""" Support for LMA data display in brawl4d.
These are meant to be lightweight wrappers to coordinate data formats
understood by the lmatools package.
"""
import numpy as np
from lmatools.flashsort.autosort.LMAarrayFile import LMAdataFile
from stormdrain.bounds import Bounds, BoundsFilter
from stormdrain.data import NamedArrayDataset, indexed
from stormdrain.pipeline import Branchpoint, coroutine, ItemModifier
from stormdrain.support.matplotlib.artistupdaters import PanelsScatterController
from stormdrain.support.matplotlib.poly_lasso import LassoPayloadController
class LMAAnimator(object):
    """Skeleton animator for LMA displays.

    ``draw_frame``/``init_draw`` are no-op hooks meant to be overridden or
    filled in later.
    """

    def __init__(self, duration, variable='time'):
        # Bug fix: ``time`` was never imported at module level in this file,
        # so instantiation raised NameError.  Import locally to stay
        # self-contained.
        import time
        self.tstart = time.time()
        self.duration = duration
        # NOTE(review): ``variable`` is accepted but currently unused —
        # confirm intended animation variable handling.

    def draw_frame(self, animator, time_fraction):
        pass

    def init_draw(self, animator):
        pass
class LMAController(object):
    """Manages a bounds object with LMA-specific criteria and provides
    convenience functions for loading LMA data (.dat and HDF5) into
    brawl4d panels.
    """
    # Map the panels' 'z' coordinate (km) onto the data's 'alt' (m), with
    # 1 km of padding on each side to absorb projection distortion.
    z_alt_mapping = {'z':('alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) ) }

    def __init__(self, *args, **kwargs):
        super(LMAController, self).__init__(*args, **kwargs)
        # Default quality criteria: chi2 in [0, 1], at least 6 stations.
        self.bounds = Bounds(chi2=(0.0, 1.0), stations=(6, 99))
        self.default_color_bounds = Bounds(parent=self.bounds, charge=(-1,1), power=(-10,50))
        self.datasets = set()
        self.flash_datasets = set()

    def pipeline_for_dataset(self, d, panels,
                             names4d=('lon', 'lat', 'alt', 'time'),
                             transform_mapping=None,
                             scatter_kwargs=None
                             ):
        """Wire dataset *d* into a filter/transform/scatter pipeline.

        *names4d* gives the spatial coordinate names in *d* providing
        longitude, latitude, altitude and time (deg, deg, m, s by default).
        Entries in *scatter_kwargs* are passed to the matplotlib scatter call.
        Returns (branchpoint, scatter_controller).
        """
        # Avoid the mutable-default-argument pitfall of the original.
        if scatter_kwargs is None:
            scatter_kwargs = {}
        # Set up dataset -> time-height bound filter -> brancher
        branch = Branchpoint([])
        brancher = branch.broadcast()

        # Strictly speaking, z in the map projection and MSL alt aren't the
        # same - z is somewhat distorted by the projection, hence the padding
        # in z_alt_mapping; the data are filtered again after projection.
        quality_filter = BoundsFilter(target=brancher, bounds=self.bounds).filter()
        if transform_mapping is None:
            transform_mapping = self.z_alt_mapping
        # Use 'time', which is the name in panels.bounds, and not names4d[3],
        # which is linked to 'time' by transform_mapping if necessary.
        # NOTE(review): restrict_to=('time') is just the string 'time', not a
        # 1-tuple — confirm whether BoundsFilter expects ('time',).
        bound_filter = BoundsFilter(target=quality_filter, bounds=panels.bounds,
                                    restrict_to=('time'), transform_mapping=transform_mapping)
        filterer = bound_filter.filter()
        d.target = filterer

        # Set up brancher -> coordinate transform -> final_filter -> multi-axis scatter updater
        scatter_ctrl = PanelsScatterController(
                            panels=panels,
                            color_field=names4d[3],
                            default_color_bounds=self.default_color_bounds,
                            **scatter_kwargs)
        scatter_outlet_broadcaster = scatter_ctrl.branchpoint
        scatter_updater = scatter_outlet_broadcaster.broadcast()
        final_bound_filter = BoundsFilter(target=scatter_updater, bounds=panels.bounds)
        final_filterer = final_bound_filter.filter()
        cs_transformer = panels.cs.project_points(
                            target=final_filterer,
                            x_coord='x', y_coord='y', z_coord='z',
                            lat_coord=names4d[1], lon_coord=names4d[0], alt_coord=names4d[2],
                            distance_scale_factor=1.0e-3)
        branch.targets.add(cs_transformer)
        # Return each broadcaster so that other things can tap into the
        # results of the transformation of this dataset.
        return branch, scatter_ctrl

    @coroutine
    def flash_stat_printer(self, min_points=10):
        """Coroutine: receives (events, flashes) pairs and prints summary
        statistics for flashes with at least *min_points* points.
        """
        while True:
            ev, fl = (yield)
            if fl is not None:
                # NOTE(review): the template says "> {3}" but the filter below
                # uses >= — confirm which is intended.
                template = "{0} of {1} flashes have > {3} points. Their average area = {2:5.1f} km^2"
                N = len(fl)
                good = (fl['n_points'] >= min_points)
                N_good = len(fl[good])
                area = np.mean(fl['area'][good])
                print(template.format(N_good, N, area, min_points))

    def flash_stats_for_dataset(self, d, selection_broadcaster):
        """Attach a flash-statistics printer to *selection_broadcaster*.

        Returns the flash-stat branchpoint so other consumers can tap the
        (events, flashes) stream.
        """
        flash_stat_branchpoint = Branchpoint([self.flash_stat_printer()])
        flash_stat_brancher = flash_stat_branchpoint.broadcast()

        @coroutine
        def flash_data_for_selection(target, flash_id_key='flash_id'):
            """Accepts an array of event data from the pipeline, and sends
            (event, flash) data downstream.
            """
            while True:
                ev = (yield)  # array of event data
                try:
                    fl_dat = d.flash_data
                    flash_ids = set(ev[flash_id_key])
                    flashes = np.fromiter(
                                (fl for fl in fl_dat if fl[flash_id_key] in flash_ids),
                                dtype=fl_dat.dtype)
                except AttributeError:
                    # There are no flash data in the dataset
                    flashes = None
                target.send((ev, flashes))

        selection_broadcaster.targets.add(flash_data_for_selection(flash_stat_brancher))
        return flash_stat_branchpoint

    @indexed()
    def read_dat(self, *args, **kwargs):
        """All args and kwargs are passed to the LMAdataFile object from lmatools."""
        lma = LMAdataFile(*args, **kwargs)
        stn = lma.stations  # adds stations to lma.data as a side-effect
        d = NamedArrayDataset(lma.data)
        self.datasets.add(d)
        return d

    def load_dat_to_panels(self, panels, *args, **kwargs):
        """Read a .dat file (args passed to LMAdataFile) and wire it into
        *panels*, including a charge-classification lasso controller.
        """
        d = self.read_dat(*args, **kwargs)
        post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels)
        branch_to_scatter_artists = scatter_ctrl.branchpoint
        # Ask for a copy of the array from each selection operation, so that
        # it's saved and ready for any lasso operations.
        charge_lasso = LassoChargeController(
                            target=ItemModifier(
                                target=d.update(field_names=['charge']),
                                item_name='charge').modify())
        branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
        return d, post_filter_brancher, scatter_ctrl, charge_lasso

    @indexed(index_name='hdf_row_idx')
    def read_hdf5(self, LMAfileHDF):
        """Open an LMA HDF5 file as an HDF5Dataset, or None if pytables is
        unavailable.
        """
        try:
            import tables
        except ImportError:
            print("couldn't import pytables")
            return None
        from hdf5_lma import HDF5Dataset

        # Get the HDF5 table name.
        # NOTE(review): tables.openFile is the legacy PyTables spelling
        # (open_file in PyTables >= 3.0) — confirm the pinned version.
        LMAh5 = tables.openFile(LMAfileHDF, 'r')
        # list() so the result is indexable on both Python 2 and 3.
        table_names = list(LMAh5.root.events._v_children.keys())
        table_path = '/events/' + table_names[0]
        LMAh5.close()
        d = HDF5Dataset(LMAfileHDF, table_path=table_path, mode='a')
        self.datasets.add(d)
        if d.flash_table is not None:
            print("found flash data")
        return d

    def load_hdf5_to_panels(self, panels, LMAfileHDF, scatter_kwargs=None):
        """Read an HDF5 LMA file and wire it into *panels*, including a
        charge-classification lasso controller.
        """
        if scatter_kwargs is None:
            scatter_kwargs = {}
        d = self.read_hdf5(LMAfileHDF)
        post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels, scatter_kwargs=scatter_kwargs)
        branch_to_scatter_artists = scatter_ctrl.branchpoint
        charge_lasso = LassoChargeController(
                            target=ItemModifier(
                                target=d.update(index_name='hdf_row_idx',
                                                field_names=['charge']),
                                item_name='charge').modify())
        branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
        return d, post_filter_brancher, scatter_ctrl, charge_lasso

    def load_hdf5_flashes_to_panels(self, panels, hdf5dataset, min_points=10):
        """Set up a flash dataset display. *hdf5dataset* is usually the HDF5
        LMA dataset returned by a call to self.load_hdf5_to_panels.
        """
        from hdf5_lma import HDF5FlashDataset
        if hdf5dataset.flash_table is not None:
            point_count_dtype = hdf5dataset.flash_data['n_points'].dtype
            # NOTE(review): np.iinfo(dtype) is an info object, not a number —
            # the upper bound probably wants np.iinfo(dtype).max; confirm
            # against Bounds semantics.
            self.bounds.n_points = (min_points, np.iinfo(point_count_dtype))
            flash_d = HDF5FlashDataset(hdf5dataset)
            transform_mapping = {}
            transform_mapping['time'] = ('start', (lambda v: (v[0], v[1])) )
            transform_mapping['lat'] = ('init_lat', (lambda v: (v[0], v[1])) )
            transform_mapping['lon'] = ('init_lon', (lambda v: (v[0], v[1])) )
            transform_mapping['z'] = ('init_alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) )
            flash_post_filter_brancher, flash_scatter_ctrl = self.pipeline_for_dataset(flash_d, panels,
                                            transform_mapping=transform_mapping,
                                            names4d=('init_lon', 'init_lat', 'init_alt', 'start') )
            for art in flash_scatter_ctrl.artist_outlet_controllers:
                # There is no time variable, but the artist updater is set to
                # expect time. Patch that up.
                if art.coords == ('time', 'z'):
                    art.coords = ('start', 'z')
                # Draw flash markers in a different style.
                art.artist.set_edgecolor('k')
            self.flash_datasets.add(flash_d)
            # Return inside the branch so the names are always bound; when
            # there is no flash table there is nothing meaningful to return.
            return flash_d, flash_post_filter_brancher, flash_scatter_ctrl
class LassoChargeController(LassoPayloadController):
    """ Lasso controller that tags selected points with a charge class.

        The "charge" attribute is one of {-1, 0, 1} to set
        negative, unclassified, or positive charge, or None
        to do nothing.
    """
    # Payload descriptor provided by LassoPayloadController; holds the charge
    # value applied to points captured by the next lasso selection.
    charge = LassoPayloadController.Payload()
1735149 | # Generated by Django 2.1.2 on 2019-01-04 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration.

    Creates the ``Component`` and ``Configuration`` models; ``Configuration``
    holds a many-to-many link to ``Component``.  Generated by Django — avoid
    hand-editing the operations below.
    """

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Component",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name",
                    models.CharField(default="undefined", max_length=255),
                ),
                (
                    # Grouped choices: [group_label, [[value, label], ...]]
                    "type",
                    models.CharField(
                        choices=[
                            [
                                "converter",
                                [
                                    ["hp", "heat pump"],
                                    ["pv", "photovoltaic"],
                                    ["sol_col", "solar collector"],
                                ],
                            ],
                            [
                                "storage",
                                [
                                    ["hp_tank", "heat pump tank"],
                                    ["sol_tank", "solar storage tank"],
                                ],
                            ],
                            [
                                "distribution",
                                [
                                    ["inv", "inverter"],
                                    ["dist_pump", "circulator pump"],
                                    ["sol_pump", "solar pump"],
                                ],
                            ],
                        ],
                        default="undefined",
                        max_length=255,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                (
                    "description",
                    models.TextField(
                        default="This is an example description what the model is doing, how it is implemented and anything else, which might be of interest to know about the model.\n\nThis can be a rather longer text, since it may contain relevant information about the model, which can't be stored elsewhere."
                    ),
                ),
                (
                    # Free-form parameter blob stored as text.
                    "params",
                    models.TextField(
                        default="{'param' : 123., 'param_2' : 'abc'}"
                    ),
                ),
                ("size", models.FloatField(default=1.0)),
            ],
        ),
        migrations.CreateModel(
            name="Configuration",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name",
                    models.CharField(default="undefined", max_length=255),
                ),
                (
                    "description",
                    models.TextField(
                        default="This is an example description what the model is doing, how it is implemented and anything else, which might be of interest to know about the model.\n\nThis can be a rather longer text, since it may contain relevant information about the model, which can't be stored elsewhere."
                    ),
                ),
                ("result", models.CharField(default="3.0", max_length=255)),
                (
                    "components",
                    models.ManyToManyField(blank=True, to="system.Component"),
                ),
            ],
        ),
    ]
| StarcoderdataPython |
108739 | <gh_stars>1-10
from django.http import JsonResponse
def index_view(request):
    """Return basic metadata about this API service as a JSON payload."""
    return JsonResponse({
        'name': 'Mikaponics API Web-Service',
        'version': 1.0,
    })
| StarcoderdataPython |
1624367 | r"""
This folder contains all of the Tortoise ORM model classes
"""
from .guilds import GuildModel
from .members import MemberModel
__all__ = ("GuildModel", "MemberModel")
| StarcoderdataPython |
1789011 | <gh_stars>0
from .... pyaz_utils import _call_az
def set(account_name, container_name, tags, allow_protected_append_writes_all=None, resource_group=None):
    '''
    Set legal hold tags.

    Required Parameters:
    - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.
    - container_name -- The container name.
    - tags -- Space-separated tags. Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case

    Optional Parameters:
    - allow_protected_append_writes_all -- When enabled, new blocks can be written to both Append and Block Blobs while maintaining legal hold protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # NOTE: named after the CLI verb, so it shadows the builtin ``set`` —
    # kept for generated-API compatibility.
    # _call_az reads the argument names/values via locals(); do not introduce
    # local variables before this call.
    return _call_az("az storage container legal-hold set", locals())
def clear(account_name, container_name, tags, allow_protected_append_writes_all=None, resource_group=None):
    '''
    Clear legal hold tags.

    Required Parameters:
    - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.
    - container_name -- The container name.
    - tags -- Space-separated tags. Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case

    Optional Parameters:
    - allow_protected_append_writes_all -- When enabled, new blocks can be written to both Append and Block Blobs while maintaining legal hold protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az reads the argument names/values via locals(); do not introduce
    # local variables before this call.
    return _call_az("az storage container legal-hold clear", locals())
def show(account_name, container_name, resource_group=None):
    '''
    Get the legal hold properties of a container.

    Required Parameters:
    - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.
    - container_name -- The container name.

    Optional Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az reads the argument names/values via locals(); do not introduce
    # local variables before this call.
    return _call_az("az storage container legal-hold show", locals())
| StarcoderdataPython |
103785 | import pickle
from hashlib import sha256
from urlparse import urlparse
from xml.dom import minidom
from fetch_remote_file import *
class Reader():
    """Minimal RSS/Atom aggregator with on-disk caching.

    Fixes over the original: Python-3-compatible ``except``/``print`` syntax,
    pickle files opened in binary mode and closed deterministically,
    hashing fed bytes (required on Python 3), per-instance state instead of
    shared mutable class attributes, and the duplicated Atom/RSS parsing
    collapsed into a single helper.
    """

    cache = '/tmp/'  # directory for cached feed bodies and state pickles
    expire = 5       # cache lifetime (passed through to fetch_remote_file)

    def __init__(self):
        # Per-instance state. The original stored these on the class, which
        # silently shared feeds/hashes/stories between all Reader instances.
        self.feeds = []
        self.hashes = []
        self.stories = []
        if not os.path.exists(self.cache):
            os.makedirs(self.cache)
        hashes_path = self.cache + 'hashes.dict'
        stories_path = self.cache + 'stories.dict'
        if os.path.isfile(hashes_path) and os.path.isfile(stories_path):
            # Binary mode is required by pickle on Python 3; the context
            # managers also close the handles the original leaked.
            with open(hashes_path, 'rb') as fh:
                self.hashes = pickle.load(fh)
            with open(stories_path, 'rb') as fh:
                self.stories = pickle.load(fh)

    def add(self, url):
        """Register a feed URL for the next run()."""
        self.feeds.append(url)

    def run(self):
        """Fetch every registered feed, parse it, and persist state."""
        for url in self.feeds:
            cached = self.cache + sha256(url.encode('utf-8')).hexdigest()
            self.parse(url, fetch_remote_file(url, cached, self.expire))
        # Only the 1000 most recent entries are persisted.
        with open(self.cache + 'hashes.dict', 'wb') as fh:
            pickle.dump(self.hashes[0:1000], fh)
        with open(self.cache + 'stories.dict', 'wb') as fh:
            pickle.dump(self.stories[0:1000], fh)

    def _remember(self, origin_url, title, link):
        """Record one story unless its link has been seen before."""
        digest = sha256(link.encode('utf-8')).hexdigest()
        # self.hashes is a list for pickle-format compatibility; membership
        # tests are O(n) but feed sizes are small.
        if digest in self.hashes:
            return
        self.stories.insert(0, {
            'origin': urlparse(origin_url).netloc,
            'site': urlparse(link).netloc,
            'title': title,
            'link': link,
            'hash': digest,
        })
        self.hashes.append(digest)

    def parse(self, url, content):
        """Extract stories from an Atom (<entry>) or RSS (<item>) document."""
        try:
            dom = minidom.parseString(content)
            for entry in dom.getElementsByTagName('entry'):
                title = entry.getElementsByTagName('title')[0].childNodes[0].nodeValue
                # Atom carries the link in an href attribute.
                link = entry.getElementsByTagName('link')[0].getAttribute('href')
                self._remember(url, title, link)
            for item in dom.getElementsByTagName('item'):
                title = item.getElementsByTagName('title')[0].childNodes[0].nodeValue
                # RSS carries the link as element text.
                link = item.getElementsByTagName('link')[0].childNodes[0].nodeValue
                self._remember(url, title, link)
        except Exception as e:
            # Best-effort parsing: a malformed feed is reported, not fatal.
            print(e)
| StarcoderdataPython |
27086 | <reponame>victor-gil-sepulveda/PhD-HIVProteaseMutation
"""
Created on 25/8/2014
@author: victor
"""
import prody
import numpy
class CurationSelections():
    """Prody selection strings used throughout the curation pipeline.

    (The pointless no-op ``__init__`` of the original has been removed.)
    """

    # Hetero atoms that are neither water nor ions, i.e. candidate ligands.
    LIGAND_SELECTION = "hetero not water not ion"
    # Same as LIGAND_SELECTION but additionally excluding hydrogens.
    HEAVY_LIGAND_SELECTION = "hetero and not water and not ion and not hydrogen"
    # Template for selecting protein chains by a space-separated chain-id list.
    PROTEIN_CHAIN_TEMPLATE = "protein chain %s"
def choose_main_chains(initial_pdb):
    """Pick the two main protein chains of the structure.

    A model can contain attached complexes or duplicated chains covering the
    same space; the two longest protein chains are taken to be the main
    (symmetric) pair.

    :param initial_pdb: The pdb (prody structure) to extract the chains from.
    :return: A list with the chain ids of the two longest chains.
    """
    hierarchy = prody.HierView(initial_pdb.select("protein"))
    sized = [(len(chain.getSequence()), chain.getChid())
             for chain in hierarchy.iterChains()]
    # Sorting by (length, chain id) and taking the tail yields the two
    # longest chains; only their ids are returned.
    return [chain_id for _length, chain_id in sorted(sized)[-2:]]
def process_water_structures(initial_pdb, main_chains, ligand):
    """
    Detect the water molecules that must be kept for the simulation and
    return structures holding them.

    For each main chain, the water chosen is the one minimising the summed
    distance to (a) the residue at index 50 of that chain (residue numbering
    is relied upon since the amino acid is not guaranteed to be conserved)
    and (b) the binding site (the ligand's centre, or the whole structure's
    centre when no ligand is given).

    :param initial_pdb: The pdb (prody structure) to extract the waters from.
    :param main_chains: Chain ids of the two main chains.
    :param ligand: Prody selection of the ligand, or None.
    :return: A dictionary indexed by water id ("resnum:chainid") holding the
             prody structure of each selected water.
    """
    hw = prody.HierView(initial_pdb.select("protein"))
    water_structs = {}
    for chain in hw.iterChains():
        if chain.getChid() in main_chains:
            # We cannot do a direct selection, instead we iterate
            # NOTE(review): the break leaves ``residue`` bound to the residue
            # at 0-based index 50 (the 51st residue) via loop-variable
            # leakage — confirm this matches the intended "residue 50".
            for i, residue in enumerate(chain.iterResidues()):
                if i == 50: # 50th residue
                    break
            residue_com = prody.calcCenter(residue)
            if ligand is None:
                # No ligand: fall back to the whole structure's centre.
                ligand_com = prody.calcCenter(initial_pdb)
            else:
                ligand_com =prody.calcCenter(ligand)
            # Identify closer water
            waters = initial_pdb.select("name O and water")
            if waters is not None:
                distance_to_R50 = numpy.sqrt(((residue_com - waters.getCoords())**2).sum(axis=1))
                distance_to_BindSite = numpy.sqrt(((ligand_com - waters.getCoords())**2).sum(axis=1))
                distances = distance_to_R50 + distance_to_BindSite
                min_dist = numpy.min(distances)
                min_dist_index = numpy.where(distances == min_dist)
                water_resnum = waters.getResnums()[min_dist_index]
                water_chid = waters.getChids()[min_dist_index][0]
                water_id = "%d:%s"%(water_resnum, water_chid)
                # We use a dict in order to get rid of repeats
                selection_string = "resnum %d and chain %s"%(water_resnum,
                                                             water_chid)
                # NOTE(review): ``initial_pdb.water`` relies on prody's
                # attribute-based selection shortcut — confirm availability.
                water_structs[water_id] = initial_pdb.water.select(selection_string).copy()
    return water_structs
def curate_struct(initial_pdb, main_chains, pdb_alignment, parameters):
    """Return the "curated" pdb and its ligand.

    A curated pdb keeps only the two main (symmetric) chains, the ligand (if
    any, and only if it has at least parameters["min_ligand_atoms"] atoms),
    and the "important" waters near residue 50 of each chain. The function
    also works when ligand or waters are absent, which can happen for the
    "mandatory" structures that pass the filters automatically.
    """
    # Record chain count (protein only — no ligand or waters) in the alignment.
    hierarchy = prody.HierView(initial_pdb.select("protein"))
    pdb_alignment["pdb"]["num_chains"] = hierarchy.numChains()

    # Start from the two main chains.
    chain_selection = CurationSelections.PROTEIN_CHAIN_TEMPLATE % (" ".join(main_chains))
    curated = initial_pdb.select(chain_selection).copy()

    # Append the ligand when present and big enough (it belongs to chains
    # other than the main ones).
    ligand_struct = initial_pdb.select(CurationSelections.LIGAND_SELECTION)
    if ligand_struct is not None and ligand_struct.numAtoms() >= parameters["min_ligand_atoms"]:
        curated = curated + ligand_struct.copy()

    # Append the "important" waters and track them in the alignment record.
    waters = process_water_structures(initial_pdb, main_chains, ligand_struct)
    pdb_alignment["pdb"]["waters"] = waters.keys()
    for water_id in waters:
        curated = curated + waters[water_id]

    return curated, ligand_struct
| StarcoderdataPython |
1660328 | <reponame>AutoDash/AutoDash<gh_stars>1-10
class IndexedRect(object):
    """Axis-aligned rectangle tagged with an index.

    Corner coordinates are normalised so that (x1, y1) is the minimum corner
    and (x2, y2) the maximum.
    """

    def __init__(self, i, x1, y1, x2, y2):
        # Normalise each axis independently.
        self.x1, self.x2 = min(x1, x2), max(x1, x2)
        self.y1, self.y2 = min(y1, y2), max(y1, y2)
        self.i = i

    def __str__(self):
        return "[({0},{1}),({2},{3})]".format(self.x1, self.y1, self.x2, self.y2)

    def get_points(self):
        """Corner points as two (x, y) tuples."""
        return [(self.x1, self.y1), (self.x2, self.y2)]

    def get_flat_points(self):
        """Corner coordinates flattened to [x1, y1, x2, y2]."""
        return [self.x1, self.y1, self.x2, self.y2]
4824951 | #!/usr/bin/env python2
from pwn import *
# Silence pwntools output during the automated check.
context.log_level = 1000
with tempfile.NamedTemporaryFile() as fd:
    # Write 12 random bytes to a temp file; the exploit's shell will be asked
    # to base64 it back so we can verify arbitrary command execution.
    s = randoms(12)
    fd.write(s)
    fd.flush()
    # Serve the target binary on an ephemeral local port, then launch the
    # exploit script (doit.py) against it.
    l = listen(0)
    l.spawn_process(['./ropasaurusrex-85a84f36f81e11f720b1cf5ea0d1fb0d5a603c0d'])
    p = process(["./doit.py", "SILENT", "HOST=localhost", "PORT=" + str(l.lport)])
    # If exploitation succeeded, this runs in the obtained shell and the
    # file's contents come back base64-encoded.
    p.sendline("base64 " + fd.name)
    if p.recvline().strip() == b64e(s):
        print "ok"
    else:
        print "not ok"
1766425 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, <NAME>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author: <NAME> (@techbeck03)
description: Enables creation, modification, deletion and query of an application
extends_documentation_fragment: tetration
module: tetration_application
notes:
- Requires the tetpyclient Python module.
- Supports check mode.
options:
alternate_query_mode:
description: Indicates if dynamic mode is used for the application. In the dynamic
mode, an ADM run creates one or more candidate queries for each cluster. Default
value is false
type: bool
app_id:
description:
- The id for the Application
- Require one of [C(app_name), C(app_id)]
- Mutually exclusive to C(app_name)
type: string
app_name:
description:
- The name for the Application
- Require one of [C(app_name), C(app_id)]
- Mutually exclusive to C(app_id)
type: string
app_scope_id:
description:
- The id for the Scope associated with the application
- Require one of [C(app_scope_name), C(app_scope_id), C(app_id)]
- Mutually exclusive to C(app_scope_name)
type: string
app_scope_name:
description:
- The name for the Scope associated with the application
- Require one of [C(app_scope_name), C(app_scope_id), C(app_id)]
- Mutually exclusive to C(app_scope_id)
type: string
description:
description: User specified description of the application
type: string
force:
description: If set to True will allow removing workspace with enforcement enabled
type: bool
primary:
description: Indicates if the application is primary for its scope
type: bool
query_level:
choices: '[top, details]'
default: top
description: The level of detail of returned query data
type: string
query_type:
choices: '[single, scope, tenant]'
default: single
description: Options for expanding query search
type: string
state:
choices: '[present, absent, query]'
description: Add, change, remove or query for application
required: true
type: string
strict_validation:
description: Unix timestamp of when the application was created (Epoch Timestamp)
type: bool
requirements: tetpyclient
version_added: '2.8'
'''
EXAMPLES = r'''
# Add or Modify application
tetration_application:
app_name: ACME InfoSec Policies
app_scope_name: ACME:Example:Application
description: InfoSec Policies for Acme Application
primary: yes
state: present
provider:
host: "<EMAIL>"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
# Delete application
tetration_application:
app_name: ACME InfoSec Policies
app_scope_name: ACME:Example:Application
primary: yes
state: absent
provider:
host: "<EMAIL>"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
# Query for application details
tetration_application:
app_name: ACME InfoSec Policies
app_scope_name: ACME:Example:Application
query_type: single
query_level: details
state: query
provider:
host: "<EMAIL>"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
'''
RETURN = r'''
---
object:
contains:
alternate_query_mode:
description: Indicates if dynamic mode is used for the application
returned: when C(state) is present or query
sample: 'false'
type: bool
app_scope_id:
description: Unique identifier of app scope associated with application workspace
returned: when C(state) is present or query
sample: 596d5215497d4f3eaef1fd04
type: int
author:
description: Author of application workspace
returned: when C(state) is present or query
sample: <NAME>
type: string
created_at:
description: Date this application was created (Unix Epoch)
returned: when C(state) is present or query
sample: 1500402190
type: string
description:
description: A description for the application
returned: when C(state) is present or query
sample: Security policies for my application
type: string
enforced_version:
description: The policy version to enforce
returned: when C(state) is present or query
sample: 7
type: int
enforcement_enabled:
description: Sets whether enforcement is enabled on this application
returned: when C(state) is present or query
sample: 'true'
type: bool
id:
description: Unique identifier for the application workspace
returned: when C(state) is present or query
sample: 5c93da83497d4f33d7145960
type: int
latest_adm_version:
description: Latest policy version
returned: when C(state) is present or query
sample: 8
type: int
name:
description: Name of application workspace
returned: when C(state) is present or query
sample: My Application Policy
type: string
primary:
description: Sets whether this application should be primary for the given scope
returned: when C(state) is present or query
sample: 'true'
type: bool
description: the changed or modified object
returned: always
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.tetration.api import TetrationApiModule
from ansible.module_utils.tetration.api import TETRATION_API_APPLICATIONS
from ansible.module_utils.tetration.api import TETRATION_API_SCOPES
from ansible.utils.display import Display
display = Display()
from time import sleep
def main():
    """Ansible module entry point.

    Creates, deletes, or queries Tetration application workspaces through the
    Tetration REST API, honoring Ansible check mode where possible.  Results
    are returned via ``module.exit_json``; unrecoverable conditions call
    ``module.fail_json``.
    """
    tetration_spec = dict(
        app_name=dict(type='str', required=False),
        app_id=dict(type='str', required=False),
        app_scope_id=dict(type='str', required=False),
        app_scope_name=dict(type='str', required=False),
        description=dict(type='str', required=False),
        alternate_query_mode=dict(type='bool', required=False, default=False),
        strict_validation=dict(type='bool', required=False, default=False),
        primary=dict(type='bool', required=False, default=True),
        force=dict(type='bool', required=False, default=False),
        query_type=dict(type='str', required=False, choices=['single', 'scope', 'tenant'], default='single'),
        query_level=dict(type='str', choices=['top', 'details'], default='top')
    )

    argument_spec = dict(
        provider=dict(required=True),
        state=dict(required=True, choices=['present', 'absent', 'query'])
    )

    argument_spec.update(tetration_spec)
    argument_spec.update(TetrationApiModule.provider_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['app_scope_name', 'app_scope_id'],
        ],
        required_one_of=[
            # ['app_name', 'app_id'],
            ['app_scope_name', 'app_scope_id'],
        ],
    )

    tet_module = TetrationApiModule(module)

    # These are all elements we put in our return JSON object for clarity
    result = dict(
        changed=False,
        object=None,
    )

    state = module.params['state']
    app_name = module.params['app_name']
    app_id = module.params['app_id']
    app_scope_name = module.params['app_scope_name']
    app_scope_id = module.params['app_scope_id']
    description = module.params['description']
    alternate_query_mode = module.params['alternate_query_mode']
    strict_validation = module.params['strict_validation']
    primary = module.params['primary']
    force = module.params['force']
    query_type = module.params['query_type']
    query_level = module.params['query_level']
    existing_app = None
    existing_app_scope = None

    # =========================================================================
    # Get current state of the object: resolve the app scope first (by id or
    # by name), then the application (by id or by name within that scope).
    if app_scope_id:
        existing_app_scope = tet_module.run_method(
            method_name='get',
            target='%s/%s' % (TETRATION_API_SCOPES, app_scope_id)
        )
    else:
        existing_app_scope = tet_module.get_object(
            target=TETRATION_API_SCOPES,
            filter=dict(name=app_scope_name)
        )
        if not existing_app_scope:
            module.fail_json(msg='Unable to find existing app scope named: %s' % app_scope_name)

    if app_id:
        existing_app = tet_module.run_method(
            method_name='get',
            target='%s/%s' % (TETRATION_API_APPLICATIONS, app_id)
        )
    else:
        existing_app = tet_module.get_object(
            target=TETRATION_API_APPLICATIONS,
            filter=dict(name=app_name, app_scope_id=existing_app_scope['id'])
        )

    # =========================================================================
    # Now enforce the desired state (present, absent, query)

    # ---------------------------------
    # STATE == 'present'
    # ---------------------------------
    if state == 'present':
        # Payload shared by create and update paths.
        new_object = dict(
            app_scope_id=existing_app_scope['id'],
            name=app_name,
            description=description,
            alternate_query_mode=alternate_query_mode,
            primary=primary,
        )

        if existing_app:
            new_object['id'] = existing_app['id']
            # check_only compares desired vs. actual without mutating anything.
            result['changed'] = tet_module.filter_object(new_object, existing_app, check_only=True)
            if result['changed']:
                if not module.check_mode:
                    del new_object['id']
                    tet_module.run_method(
                        method_name='put',
                        target='%s/%s' % (TETRATION_API_APPLICATIONS, existing_app['id']),
                        req_payload=dict(
                            name=app_name,
                            description=description,
                            primary=primary
                        )
                    )
                else:
                    result['object'] = new_object
        else:
            if not module.check_mode:
                # Bug fix: the original line ended with a trailing comma
                # ("= strict_validation,"), which stored a one-element tuple
                # instead of the boolean.
                new_object['strict_validation'] = strict_validation
                app_object = tet_module.run_method(
                    method_name='post',
                    target=TETRATION_API_APPLICATIONS,
                    req_payload=new_object
                )
                existing_app = dict(id=app_object['id'])
            result['changed'] = True

        # Re-read the object after any change so the caller sees server state.
        if result['changed']:
            if not module.check_mode:
                result['object'] = tet_module.run_method(
                    method_name='get',
                    target='%s/%s' % (TETRATION_API_APPLICATIONS, existing_app['id'])
                )
            else:
                result['object'] = new_object
        else:
            result['changed'] = False
            result['object'] = existing_app

    # ---------------------------------
    # STATE == 'absent'
    # ---------------------------------
    elif state == 'absent':
        if existing_app:
            if existing_app['enforcement_enabled'] and not force:
                module.fail_json(msg='Cannot delete workspace with enforcement. Try disabling enforcement or use the force option')
            elif existing_app['primary'] and not force:
                module.fail_json(msg='Cannot delete primary application. Try making application secondary or use the force option')
            elif existing_app['primary'] and force:
                # NOTE(review): these demote/disable calls run even in check
                # mode, and the demotion payload reuses app_name/description
                # which may be None when the app was looked up by id — confirm
                # against the API behavior.
                if existing_app['enforcement_enabled']:
                    tet_module.run_method(
                        method_name='post',
                        target='%s/%s/disable_enforce' % (TETRATION_API_APPLICATIONS, existing_app['id'])
                    )
                    # Give the backend time to apply the enforcement change.
                    sleep(10)
                tet_module.run_method(
                    method_name='put',
                    target='%s/%s' % (TETRATION_API_APPLICATIONS, existing_app['id']),
                    req_payload=dict(
                        name=app_name,
                        description=description,
                        primary=False
                    )
                )
                sleep(2)
            result['changed'] = True
            if not module.check_mode:
                tet_module.run_method(
                    method_name='delete',
                    target='%s/%s' % (TETRATION_API_APPLICATIONS, existing_app['id'])
                )

    # ---------------------------------
    # STATE == 'query'
    # ---------------------------------
    elif state == 'query':
        if query_type == 'tenant':
            if existing_app_scope['id'] != existing_app_scope['root_app_scope_id']:
                module.fail_json(msg='query_type `tenant` is only allowed on root scopes')
            app_scopes = tet_module.get_object(
                target=TETRATION_API_SCOPES,
                filter=dict(root_app_scope_id=existing_app_scope['root_app_scope_id']),
                allow_multiple=True
            )
            scope_ids = [scope['id'] for scope in app_scopes]
            applications = tet_module.run_method(
                method_name='get',
                target=TETRATION_API_APPLICATIONS
            )
            if applications:
                applications = [valid_item for valid_item in applications if valid_item['app_scope_id'] in scope_ids]
            if query_level == 'details':
                app_details = []
                for application in applications:
                    app_details.append(
                        tet_module.run_method(
                            method_name='get',
                            target='%s/%s/details' % (TETRATION_API_APPLICATIONS, application['id'])
                        )
                    )
                result['object'] = app_details
            else:
                result['object'] = applications if applications else []
        elif query_type == 'scope':
            applications = tet_module.run_method(
                method_name='get',
                target=TETRATION_API_APPLICATIONS,
            )
            if applications:
                applications = [valid_item for valid_item in applications if valid_item['app_scope_id'] == existing_app_scope['id']]
            if query_level == 'details':
                app_details = []
                for application in applications:
                    app_details.append(
                        tet_module.run_method(
                            method_name='get',
                            target='%s/%s/details' % (TETRATION_API_APPLICATIONS, application['id'])
                        )
                    )
                result['object'] = app_details
            else:
                result['object'] = applications if applications else []
        else:
            # query_type == 'single'
            if query_level == 'details':
                # Bug fix: the original referenced the undefined loop variable
                # `application` (and a misspelled `existin_app`); use the
                # application resolved above, and tolerate a missing one.
                if existing_app:
                    result['object'] = tet_module.run_method(
                        method_name='get',
                        target='%s/%s/details' % (TETRATION_API_APPLICATIONS, existing_app['id'])
                    )
                else:
                    result['object'] = None
            else:
                result['object'] = existing_app

    # Return result
    module.exit_json(**result)
# Invoke the module entry point only when executed directly (as Ansible does).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1725523 | '''
Implement a first in first out (FIFO) queue using only two stacks. The implemented queue should support all the functions of a normal queue (push, peek, pop, and empty).
Implement the MyQueue class:
void push(int x) Pushes element x to the back of the queue.
int pop() Removes the element from the front of the queue and returns it.
int peek() Returns the element at the front of the queue.
boolean empty() Returns true if the queue is empty, false otherwise.
Notes:
You must use only standard operations of a stack, which means only push to top, peek/pop from top, size, and is empty operations are valid.
Depending on your language, the stack may not be supported natively. You may simulate a stack using a list or deque (double-ended queue) as long as you use only a stack's standard operations.
'''
class MyQueue:
    """FIFO queue implemented with two LIFO stacks.

    ``self.stack`` is the inbox (newest element on top); ``self.transit_stack``
    is the outbox (oldest element on top).  Elements migrate from inbox to
    outbox only when the outbox runs dry, which makes every operation
    amortized O(1) instead of the original O(n) shuffle on every push.
    """

    def __init__(self):
        self.stack = []          # inbox: receives pushes
        self.transit_stack = []  # outbox: serves pops/peeks in FIFO order
        self.size = 0            # number of queued elements

    def push(self, x: int) -> None:
        """Push element x to the back of the queue."""
        # Bug fix: the original incremented size before checking it, making
        # its `if self.size == 0` branch unreachable dead code.
        self.stack.append(x)
        self.size += 1

    def _refill(self) -> None:
        # Move the inbox onto the outbox (reversing order) only when the
        # outbox is empty; this preserves FIFO order at amortized O(1) cost.
        if not self.transit_stack:
            while self.stack:
                self.transit_stack.append(self.stack.pop())

    def pop(self) -> int:
        """Remove and return the front element, or None when empty."""
        if self.size == 0:
            return None
        self.size -= 1
        self._refill()
        return self.transit_stack.pop()

    def peek(self) -> int:
        """Return the front element without removing it, or None when empty."""
        if self.size == 0:
            return None
        self._refill()
        return self.transit_stack[-1]

    def empty(self) -> bool:
        """Return True if the queue is empty (the original omitted this
        method even though the problem statement requires it)."""
        return self.size == 0
91970 | '''The framework module contains the logic used in building the graph and
inferring the order that the nodes have to be executed in forward and backward
direction.
Based on FrEIA (https://github.com/VLL-HD/FrEIA)'''
import torch.nn as nn
from torch.autograd import Variable
import FrEIA.dummy_modules as dummys
class Node:
    '''The Node class represents one transformation in the graph, with an
    arbitrary number of in- and outputs.'''

    def __init__(self, inputs, module_type, module_args, name=None):
        """
        inputs:      list of (node, output_channel) pairs feeding this node.
        module_type: nn.Module subclass instantiated lazily in build_modules.
        module_args: kwargs passed to module_type.
        name:        optional label; defaults to a short hex id of the object.
        """
        self.inputs = inputs
        self.outputs = []            # filled in by consumers during run_forward
        self.module_type = module_type
        self.module_args = module_args

        self.input_dims, self.module = None, None
        self.computed = None         # cached forward output variable ids
        self.computed_rev = None     # cached backward output variable ids
        self.id = None               # assigned by ReversibleGraphNet

        if name:
            self.name = name
        else:
            self.name = hex(id(self))[-6:]
        # Pre-create out0 ... out254 handles so users can write `node.out3`.
        # Idiom fix: setattr replaces the original exec(...) call — same
        # attributes, no dynamic code compilation.
        for i in range(255):
            setattr(self, 'out{0}'.format(i), (self, i))

    def build_modules(self, verbose=True):
        ''' Returns a list with the dimension of each output of this node,
        recursively calling build_modules of the nodes connected to the input.
        Use this information to initialize the pytorch nn.Module of this node.
        '''
        if not self.input_dims:  # Only do it if this hasn't been computed yet
            self.input_dims = [n.build_modules(verbose=verbose)[c]
                               for n, c in self.inputs]
            try:
                self.module = self.module_type(self.input_dims,
                                               **self.module_args)
            except Exception as e:
                print('Error in node %s' % (self.name))
                raise e

            if verbose:
                print("Node %s has following input dimensions:" % (self.name))
                for d, (n, c) in zip(self.input_dims, self.inputs):
                    print("\t Output #%i of node %s:" % (c, n.name), d)
                print()

            self.output_dims = self.module.output_dims(self.input_dims)
            self.n_outputs = len(self.output_dims)

        return self.output_dims

    def run_forward(self, op_list):
        '''Determine the order of operations needed to reach this node. Calls
        run_forward of parent nodes recursively. Each operation is appended to
        the global list op_list, in the form (node ID, input variable IDs,
        output variable IDs)'''
        if not self.computed:
            # Compute all nodes which provide inputs, filter out the
            # channels you need
            self.input_vars = []
            for i, (n, c) in enumerate(self.inputs):
                self.input_vars.append(n.run_forward(op_list)[c])
                # Register yourself as an output in the input node
                n.outputs.append((self, i))

            # All outputs could now be computed
            self.computed = [(self.id, i) for i in range(self.n_outputs)]
            op_list.append((self.id, self.input_vars, self.computed))

        # Return the variables you have computed (this happens multiple times
        # without recomputing if called repeatedly)
        return self.computed

    def run_backward(self, op_list):
        '''See run_forward, this is the same, only for the reverse computation.
        Need to call run_forward first, otherwise this function will not
        work'''
        assert len(self.outputs) > 0, "Call run_forward first"
        if not self.computed_rev:
            # These are the input variables that must be computed first
            output_vars = [(self.id, i) for i in range(self.n_outputs)]

            # Recursively compute these
            for n, c in self.outputs:
                n.run_backward(op_list)

            # The variables that this node computes are the input variables
            # from the forward pass
            self.computed_rev = self.input_vars
            op_list.append((self.id, output_vars, self.computed_rev))

        return self.computed_rev
class InputNode(Node):
    '''Special node type standing for the data fed into the whole net (or
    produced by it when the net is run in reverse).'''

    def __init__(self, *dims, name='node'):
        # An input node has no parents and exactly one output channel; it
        # carries a dummy data blob only so its shape can be queried later.
        self.name = name
        self.out0 = (self, 0)
        self.n_outputs = 1
        self.data = dummys.dummy_data(*dims)
        self.module = None
        self.computed_rev = None
        self.outputs = []
        self.input_vars = []

    def build_modules(self, verbose=True):
        '''The single output's dimension is simply the shape of the data.'''
        return [self.data.shape]

    def run_forward(self, op_list):
        '''Input data is available from the start, so no op is appended.'''
        return [(self.id, 0)]
class OutputNode(Node):
    '''Special type of node that represents the output of the whole net (of the
    input when running in reverse)'''

    # Placeholder module: the graph machinery expects every node to own an
    # nn.Module, so this dummy simply echoes whatever it is given.
    class dummy(nn.Module):

        def __init__(self, *args):
            super(OutputNode.dummy, self).__init__()

        # NOTE: these deliberately take bare *args (args[0] is self) and
        # return the argument tuple unchanged — do not "fix" the signatures.
        def __call__(*args):
            return args

        def output_dims(*args):
            return args

    def __init__(self, inputs, name='node'):
        # inputs: list of (node, output_channel) pairs to be collected as the
        # net's outputs.
        self.module_type, self.module_args = self.dummy, {}
        self.output_dims = []
        self.inputs = inputs
        self.input_dims, self.module = None, None
        self.computed = None
        self.id = None
        self.name = name

        # Register this node as a consumer on each of its input nodes so
        # run_backward can traverse the graph from the inputs.
        for c, inp in enumerate(self.inputs):
            inp[0].outputs.append((self, c))

    def run_backward(self, op_list):
        # The output is the starting point of the reverse pass; nothing to do.
        return [(self.id, 0)]
class ReversibleGraphNet(nn.Module):
    '''This class represents the invertible net itself. It is a subclass of
    torch.nn.Module and supports the same methods. The forward method has an
    additional option 'rev', with which the net can be computed in reverse.'''

    def __init__(self, node_list, ind_in=None, ind_out=None, verbose=True):
        '''node_list should be a list of all nodes involved, and ind_in,
        ind_out are the indexes of the special nodes InputNode and OutputNode
        in this list.'''
        super(ReversibleGraphNet, self).__init__()

        # Gather lists of input and output nodes
        if ind_in is not None:
            if isinstance(ind_in, int):
                self.ind_in = list([ind_in])
            else:
                self.ind_in = ind_in
        else:
            self.ind_in = [i for i in range(len(node_list))
                           if isinstance(node_list[i], InputNode)]
            assert len(self.ind_in) > 0, "No input nodes specified."
        if ind_out is not None:
            if isinstance(ind_out, int):
                self.ind_out = list([ind_out])
            else:
                self.ind_out = ind_out
        else:
            self.ind_out = [i for i in range(len(node_list))
                            if isinstance(node_list[i], OutputNode)]
            assert len(self.ind_out) > 0, "No output nodes specified."

        self.return_vars = []
        self.input_vars = []

        # Assign each node a unique ID
        self.node_list = node_list
        for i, n in enumerate(node_list):
            n.id = i

        # Recursively build the nodes nn.Modules and determine order of
        # operations
        ops = []
        for i in self.ind_out:
            node_list[i].build_modules(verbose=verbose)
            node_list[i].run_forward(ops)

        # create list of Pytorch variables that are used
        variables = set()
        for o in ops:
            variables = variables.union(set(o[1] + o[2]))
        self.variables_ind = list(variables)

        self.indexed_ops = self.ops_to_indexed(ops)

        self.module_list = nn.ModuleList([n.module for n in node_list])
        self.variable_list = [Variable(requires_grad=True) for v in variables]

        # Find out the order of operations for reverse calculations
        ops_rev = []
        for i in self.ind_in:
            node_list[i].run_backward(ops_rev)
        self.indexed_ops_rev = self.ops_to_indexed(ops_rev)

    def ops_to_indexed(self, ops):
        '''Helper function to translate the list of variables (origin ID, channel),
        to variable IDs.'''
        result = []

        for o in ops:
            try:
                vars_in = [self.variables_ind.index(v) for v in o[1]]
            except ValueError:
                # NOTE(review): a missing input variable collapses the whole
                # list to -1; presumably this only happens for source nodes —
                # confirm before relying on it.
                vars_in = -1

            vars_out = [self.variables_ind.index(v) for v in o[2]]

            # Collect input/output nodes in separate lists, but don't add to
            # indexed ops
            if o[0] in self.ind_out:
                self.return_vars.append(self.variables_ind.index(o[1][0]))
                continue
            if o[0] in self.ind_in:
                self.input_vars.append(self.variables_ind.index(o[1][0]))
                continue

            result.append((o[0], vars_in, vars_out))

        # Sort input/output variables so they correspond to initial node list
        # order
        self.return_vars.sort(key=lambda i: self.variables_ind[i][0])
        self.input_vars.sort(key=lambda i: self.variables_ind[i][0])

        return result

    def forward(self, x, rev=False):
        '''Forward or backward computation of the whole net.'''
        # Select the pre-computed operation schedule for the chosen direction;
        # inputs and outputs swap roles when running in reverse.
        if rev:
            use_list = self.indexed_ops_rev
            input_vars, output_vars = self.return_vars, self.input_vars
        else:
            use_list = self.indexed_ops
            input_vars, output_vars = self.input_vars, self.return_vars

        # Seed the variable list with the provided input tensor(s).
        if isinstance(x, (list, tuple)):
            assert len(x) == len(input_vars), (
                f"Got list of {len(x)} input tensors for "
                f"{'inverse' if rev else 'forward'} pass, but expected "
                f"{len(input_vars)}."
            )
            for i in range(len(input_vars)):
                self.variable_list[input_vars[i]] = x[i]
        else:
            assert len(input_vars) == 1, (f"Got single input tensor for "
                                          f"{'inverse' if rev else 'forward'} "
                                          f"pass, but expected list of "
                                          f"{len(input_vars)}.")
            self.variable_list[input_vars[0]] = x

        # Execute each module in schedule order, storing results back into the
        # shared variable list.
        for o in use_list:
            try:
                results = self.module_list[o[0]]([self.variable_list[i]
                                                 for i in o[1]], rev=rev)
            except TypeError:
                raise RuntimeError("Are you sure all used Nodes are in the "
                                   "Node list?")
            for i, r in zip(o[2], results):
                self.variable_list[i] = r
            # self.variable_list[o[2][0]] = self.variable_list[o[1][0]]

        out = [self.variable_list[output_vars[i]]
               for i in range(len(output_vars))]
        if len(out) == 1:
            return out[0]
        else:
            return out

    def jacobian(self, x=None, rev=False, run_forward=True):
        '''Compute the jacobian determinant of the whole net.'''
        jacobian = 0

        if rev:
            use_list = self.indexed_ops_rev
        else:
            use_list = self.indexed_ops

        if run_forward:
            if x is None:
                raise RuntimeError("You need to provide an input if you want "
                                   "to run a forward pass")
            self.forward(x, rev=rev)

        # Sum the per-module jacobian contributions over the same schedule
        # used by forward().
        for o in use_list:
            try:
                jacobian += self.module_list[o[0]].jacobian(
                    [self.variable_list[i] for i in o[1]], rev=rev
                )
            except TypeError:
                raise RuntimeError("Are you sure all used Nodes are in the "
                                   "Node list?")

        return jacobian
# Testing example
if __name__ == '__main__':
    # Build a small toy graph with the dummy modules:
    # input -> mux -> split -> (transform / split -> transform -> merge) -> merge -> output
    inp = InputNode(4, 64, 64, name='input')
    t1 = Node([(inp, 0)], dummys.dummy_mux, {}, name='t1')
    s1 = Node([(t1, 0)], dummys.dummy_2split, {}, name='s1')

    t2 = Node([(s1, 0)], dummys.dummy_module, {}, name='t2')
    s2 = Node([(s1, 1)], dummys.dummy_2split, {}, name='s2')
    t3 = Node([(s2, 0)], dummys.dummy_module, {}, name='t3')

    m1 = Node([(t3, 0), (s2, 1)], dummys.dummy_2merge, {}, name='m1')
    m2 = Node([(t2, 0), (m1, 0)], dummys.dummy_2merge, {}, name='m2')
    outp = OutputNode([(m2, 0)], name='output')

    all_nodes = [inp, outp, t1, s1, t2, s2, t3, m1, m2]

    # 0 and 1 are the indices of inp and outp within all_nodes.
    net = ReversibleGraphNet(all_nodes, 0, 1)
1609420 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Hpcviewer(AutotoolsPackage):
    """Uses version-test-pkg, as a build dependency"""

    # Project landing page and download URL used by Spack's fetcher.
    homepage = "http://www.spack.org"
    url = "http://www.spack.org/downloads/aml-1.0.tar.gz"

    # Known release and its checksum.
    version('2019.02', '0123456789abcdef0123456789abcdef')

    # Java requirement changes by release series: 11+ from 2021.0 onward,
    # exactly Java 8 for anything up to 2020.
    depends_on('java@11:', type=('build', 'run'), when='@2021.0:')
    depends_on('java@8', type=('build', 'run'), when='@:2020')
3247214 | import os
import argparse
# late import of alembic because it destroys loggers
def get_config(directory, x_arg=None, opts=None):
    """Build an Alembic ``Config`` rooted at *directory*.

    directory: migrations directory containing ``alembic.ini``.
    x_arg:     optional ``-x`` value(s); a single value or a list/tuple.
    opts:      optional iterable of flag names to set True on ``cmd_opts``.
    Returns the configured ``Config`` instance.
    """
    # Late import of alembic because importing it destroys loggers.
    from alembic.config import Config as AlembicConfig

    class Config(AlembicConfig):
        def get_template_directory(self):
            # Serve templates bundled next to this module instead of
            # alembic's built-in defaults.
            package_dir = os.path.abspath(os.path.dirname(__file__))
            return os.path.join(package_dir, "templates")

    config = Config(os.path.join(directory, "alembic.ini"))
    config.set_main_option("script_location", directory)
    if config.cmd_opts is None:
        config.cmd_opts = argparse.Namespace()
    for opt in opts or []:
        setattr(config.cmd_opts, opt, True)
    if not hasattr(config.cmd_opts, "x"):
        if x_arg is not None:
            # Normalize to a list: accept either a single value or a sequence.
            # Idiom fix: isinstance with a tuple of types instead of an
            # isinstance(...) or isinstance(...) chain and an append loop.
            if isinstance(x_arg, (list, tuple)):
                config.cmd_opts.x = list(x_arg)
            else:
                config.cmd_opts.x = [x_arg]
        else:
            config.cmd_opts.x = None
    return config
| StarcoderdataPython |
3357537 | import functools
from copy import deepcopy
from abc import abstractmethod, ABCMeta
from ..compare import compare
class BaseMatcher(object, metaclass=ABCMeta):
    """
    BaseMatcher implements the basic HTTP request matching interface.

    Bug fix: the original assigned ``__metaclass__ = ABCMeta``, a Python 2
    construct that is inert under Python 3, so ``@abstractmethod`` was never
    enforced.  Declaring the metaclass in the class header restores the
    intended abstract-base-class behavior.
    """

    # Negate matching if necessary
    negate = False

    def __init__(self, expectation, negate=False):
        """
        expectation: value the matcher compares requests against (required).
        negate:      when True, ``matcher``-decorated results are inverted.
        Raises ValueError if expectation is falsy.
        """
        if not expectation:
            raise ValueError('expectation argument cannot be empty')

        self.negate = negate
        self._expectation = expectation

    @property
    def name(self):
        # The matcher's public name is simply its class name.
        return type(self).__name__

    @property
    def expectation(self):
        return self._expectation

    @expectation.setter
    def expectation(self, value):
        self._expectation = value

    @abstractmethod
    def match(self, request):
        """
        Match performs the value matching.

        This is an abstract method that must be implemented by child classes.

        Arguments:
            request (pook.Request): request object to match.
        """
        pass

    def compare(self, value, expectation, regex_expr=False):
        """
        Compares two values with regular expression matching support.

        Arguments:
            value (mixed): value to compare.
            expectation (mixed): value to match.
            regex_expr (bool, optional): enables string based regex matching.

        Returns:
            bool
        """
        return compare(value, expectation, regex_expr=regex_expr)

    def to_dict(self):
        """
        Returns the current matcher representation as dictionary.

        Returns:
            dict
        """
        return {self.name: deepcopy(self.expectation)}

    def __repr__(self):
        return '{}({})'.format(self.name, self.expectation)

    def __str__(self):
        return self.expectation

    @staticmethod
    def matcher(fn):
        # Decorator for match implementations: applies the instance's
        # ``negate`` flag to the boolean result.
        @functools.wraps(fn)
        def wrapper(self, *args):
            result = fn(self, *args)
            return not result if self.negate else result
        return wrapper
48150 | import numpy as np
import pandas as pa
import time
from sklearn.metrics import pairwise_distances
from scipy.sparse import csr_matrix
class Kmeans:
    """K-means clustering over a sparse cells-x-genes expression matrix.

    data:          scipy.sparse matrix (rows = cells, columns = genes).
    k:             number of clusters.
    geneNames:     labels for the columns.
    cellNames:     labels for the rows (array-like, indexable by boolean mask).
    cluster_label: optional externally supplied labels (stored only).
    seed:          optional RNG seed for reproducible initialization.
    """

    def __init__(self, data, k, geneNames, cellNames, cluster_label=None, seed=None):
        self.data = data
        self.k = k
        self.geneNames = geneNames
        self.cellNames = cellNames
        self.seed = seed
        self.centroids = None
        self.cluster_assignment = None
        self.cluster_label = cluster_label
        self.heterogeneity = 0.0
        # Pick random initial centroids immediately so the object is usable.
        self.get_initial_centroids()
        self.heterogeneities = None

    # -- simple accessors (names kept for backward compatibility) ----------
    def getCentroids(self):
        return self.centroids

    def getCluster_assignment(self):
        return self.cluster_assignment

    def getHeterogenity(self):
        return self.heterogeneity

    def getHetrogenities(self):
        return self.heterogeneities

    def get_initial_centroids(self):
        '''Randomly choose k data points as initial centroids'''
        if self.seed is not None:  # useful for obtaining consistent results
            np.random.seed(self.seed)
        n = self.data.shape[0]  # number of data points

        # Pick K indices from range [0, N).
        rand_indices = np.random.randint(0, n, self.k)

        # Keep centroids as dense format, as many entries will be nonzero due
        # to averaging.
        centroids = self.data[rand_indices, :].toarray()
        self.centroids = centroids
        return centroids

    def smart_initialize(self):
        '''Use k-means++ to initialize a good set of centroids'''
        if self.seed is not None:  # useful for obtaining consistent results
            np.random.seed(self.seed)
        centroids = np.zeros((self.k, self.data.shape[1]))

        # Randomly choose the first centroid.
        # Since we have no prior knowledge, choose uniformly at random
        idx = np.random.randint(self.data.shape[0])
        centroids[0] = self.data[idx, :].toarray()
        # Compute distances from the first centroid chosen to all the other
        # data points
        squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten()**2

        for i in range(1, self.k):
            # Choose the next centroid randomly, so that the probability for
            # each data point to be chosen is directly proportional to its
            # squared distance from the nearest centroid.  Roughly speaking, a
            # new centroid should be as far from other centroids as possible.
            idx = np.random.choice(self.data.shape[0], 1, p=squared_distances / sum(squared_distances))
            centroids[i] = self.data[idx, :].toarray()
            # Now compute distances from the centroids to all data points
            squared_distances = np.min(pairwise_distances(self.data, centroids[0:i + 1], metric='euclidean')**2, axis=1)

        self.centroids = centroids
        return centroids

    def assign_clusters(self):
        """Assign every data point to its nearest current centroid."""
        distances_from_centroids = pairwise_distances(self.data, self.centroids, metric='euclidean')
        cluster_assignment = np.apply_along_axis(np.argmin, 1, distances_from_centroids)
        self.cluster_assignment = cluster_assignment
        return cluster_assignment

    def revise_centroids(self):
        """Recompute each centroid as the mean of its member points."""
        new_centroids = []
        for i in range(self.k):
            # Select all data points that belong to cluster i.
            member_data_points = self.data[self.cluster_assignment == i]
            # Mean over members; .A1 converts the numpy.matrix row to ndarray.
            centroid = member_data_points.mean(axis=0)
            centroid = centroid.A1
            new_centroids.append(centroid)
        new_centroids = np.array(new_centroids)

        self.centroids = new_centroids
        return new_centroids

    def kmeans(self, maxiter, record_heterogeneity=None, verbose=False):
        '''This function runs k-means on given data and initial set of centroids.
           maxiter: maximum number of iterations to run.
           record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
                                 if None, do not store the history.
           verbose: if True, print how many data points changed their cluster labels in each iteration'''
        centroids = self.centroids[:]
        prev_cluster_assignment = None

        for itr in range(int(maxiter)):
            if verbose:
                print(itr)

            # 1. Make cluster assignments using nearest centroids
            cluster_assignment = self.assign_clusters()

            # 2. Compute a new centroid for each of the k clusters, averaging
            #    all data points assigned to that cluster.
            centroids = self.revise_centroids()

            # Check for convergence: if none of the assignments changed, stop
            if prev_cluster_assignment is not None and \
               (prev_cluster_assignment == self.cluster_assignment).all():
                break

            # Print number of new assignments
            if prev_cluster_assignment is not None:
                num_changed = np.sum(prev_cluster_assignment != self.cluster_assignment)
                if verbose:
                    print('    {0:5d} elements changed their cluster assignment.'.format(num_changed))

            # Record heterogeneity convergence metric
            if record_heterogeneity is not None:
                # Bug fix: the original called a nonexistent module-level
                # compute_heterogeneity(data, k, centroids, assignment);
                # the method computes the same score from instance state.
                score = self.compute_heterogeneity()
                record_heterogeneity.append(score)

            prev_cluster_assignment = cluster_assignment[:]

        self.centroids = centroids
        self.cluster_assignment = cluster_assignment
        return centroids, cluster_assignment

    def kmeans_multiple_runs(self, maxiter, num_runs, seed_list=None, verbose=False):
        """Run k-means num_runs times and keep the lowest-heterogeneity run."""
        import sys  # bug fix: sys was used below but never imported

        heterogeneity = {}
        min_heterogeneity_achieved = float('inf')
        best_seed = None
        final_centroids = None
        final_cluster_assignment = None

        for i in range(num_runs):
            # Use UTC time if no seeds are provided
            if seed_list is not None:
                seed = seed_list[i]
                np.random.seed(seed)
            else:
                seed = int(time.time())
                np.random.seed(seed)

            # Use k-means++ initialization
            self.initial_centroids = self.smart_initialize()

            # Run k-means
            centroids, cluster_assignment = self.kmeans(maxiter, record_heterogeneity=None, verbose=False)

            # To save time, compute heterogeneity only once in the end
            heterogeneity[seed] = self.compute_heterogeneity()

            if verbose:
                print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
                sys.stdout.flush()

            # if current measurement of heterogeneity is lower than previously
            # seen, update the minimum record of heterogeneity.
            if heterogeneity[seed] < min_heterogeneity_achieved:
                min_heterogeneity_achieved = heterogeneity[seed]
                best_seed = seed
                final_centroids = centroids
                final_cluster_assignment = cluster_assignment

        self.centroids = final_centroids
        self.cluster_assignment = final_cluster_assignment
        self.heterogeneities = heterogeneity
        return final_centroids, final_cluster_assignment

    def clusterEvaluation(self):
        """Plot max/min/mean intra-cluster squared distances per cluster and
        return the sum of the per-cluster mean distances."""
        import matplotlib.pyplot as plt  # bug fix: plt was used but never imported

        clustMaxDist = {}
        clustMinDist = {}
        clustMeanDist = {}
        for i in range(self.k):
            binMaxDist = []
            binMinDist = []
            binMeanDist = []
            for j in np.concatenate(np.argwhere(self.cluster_assignment == i)):
                dist = pairwise_distances(self.data[np.concatenate(np.argwhere(self.cluster_assignment == i))], self.data[j], metric='euclidean').flatten()
                dist = dist**2
                binMaxDist.append(np.max(dist))
                binMinDist.append(np.min(dist))
                binMeanDist.append(np.mean(dist))
            clustMaxDist[i] = np.max(binMaxDist)
            clustMinDist[i] = np.min(binMinDist)
            clustMeanDist[i] = np.mean(binMeanDist)
        plt.figure(figsize=(7, 4.5))
        plt.plot(list(clustMaxDist.keys()), list(clustMaxDist.values()), linewidth=2, label='Maximum distance among clusters')
        plt.plot(list(clustMaxDist.keys()), list(clustMinDist.values()), linewidth=2, label='Minimum distance among clusters')
        plt.plot(list(clustMaxDist.keys()), list(clustMeanDist.values()), linewidth=2, label='average distance among clusters')
        plt.xlabel('Cluster number')
        plt.ylabel('Euclidean distance')
        plt.legend(loc='best', prop={'size': 15})
        plt.rcParams.update({'font.size': 16})
        plt.tight_layout()
        plt.show()
        # Bug fix: np.sum(dict) sums the dict's KEYS; the intent here is the
        # sum of the per-cluster mean distances.
        return np.sum(list(clustMeanDist.values()))

    def compute_heterogeneity(self):
        """Sum of squared distances of every point to its cluster centroid."""
        heterogeneity = 0.0
        for i in range(self.k):
            # Select all data points that belong to cluster i.
            member_data_points = self.data[self.cluster_assignment == i, :]

            if member_data_points.shape[0] > 0:  # check if i-th cluster is non-empty
                # Compute distances from centroid to data points
                distances = pairwise_distances(member_data_points, [self.centroids[i]], metric='euclidean')
                squared_distances = distances**2
                heterogeneity += np.sum(squared_distances)
        self.heterogeneity = heterogeneity
        return heterogeneity

    def plot_k_vs_heterogeneity(self, k_values, heterogeneity_values):
        """Line plot of heterogeneity as a function of k."""
        import matplotlib.pyplot as plt  # bug fix: plt was used but never imported

        plt.figure(figsize=(7, 4))
        plt.plot(k_values, heterogeneity_values, linewidth=4)
        plt.xlabel('K')
        plt.ylabel('Heterogeneity')
        plt.title('K vs. Heterogeneity')
        plt.rcParams.update({'font.size': 16})
        plt.tight_layout()
        plt.show()
        return None

    def get_cluster_data(self, cluster_number):
        """Return (rows, cell names) belonging to the given cluster(s)."""
        return self.data[np.in1d(np.array(self.cluster_assignment), cluster_number), :], self.cellNames[np.in1d(np.array(self.cluster_assignment), cluster_number)]

    def select_K(self):
        """Scan k = 1..self.k, keep the clustering at the k where the
        heterogeneity improvement flattens out, and return that k."""
        cluster_centroids = {}
        cluster_assignments = {}
        hetroginity_score = float('inf')
        delta_k = {}
        max_K_value = self.k
        hetro_Per_K = {}
        for i in range(max_K_value):
            self.k = i + 1
            print("going for k=", i + 1)
            cluster_centroid, cluster_assignment = self.kmeans_multiple_runs(5, 100)
            hetro = self.compute_heterogeneity()
            hetro_Per_K[i + 1] = hetro
            if hetro < hetroginity_score:
                deltaHetro = 0 if hetroginity_score == float('inf') else hetroginity_score - hetro
                hetroginity_score = hetro
                cluster_centroids[i + 1] = cluster_centroid
                cluster_assignments[i + 1] = cluster_assignment
                delta_k[i + 1] = deltaHetro
        # Bug fix: dict views are not subscriptable under Python 3 and the
        # original list-vs-scalar '>' comparison was meaningless; do the
        # intended elementwise comparison with numpy: count the improvements
        # (excluding the first) that exceed half the average improvement.
        deltas = np.array(list(delta_k.values()))
        threshold = deltas.sum() / (2 * len(deltas))
        best_k = int(np.sum(deltas[1:] > threshold))
        if best_k not in cluster_centroids:
            # Fall back to the largest recorded k if the heuristic lands on a
            # k that was never recorded (e.g. 0).
            best_k = max(cluster_centroids)
        print("best k value:", best_k, delta_k)
        self.centroids = cluster_centroids[best_k]
        self.cluster_assignment = cluster_assignments[best_k]
        self.k = best_k
        if hasattr(self, 'getVisualization'):
            # NOTE(review): getVisualization is not defined in this class;
            # guarded so select_K does not crash when it is absent.
            self.getVisualization(method="tsne")
        self.plot_k_vs_heterogeneity(list(hetro_Per_K.keys()), list(hetro_Per_K.values()))
        return self.k
| StarcoderdataPython |
3321654 | import six
from .node import Node
@six.python_2_unicode_compatible
class StockExchange(Node):
    """Represents a stock exchange on CrunchBase."""

    # Properties the CrunchBase API exposes for this node type.
    KNOWN_PROPERTIES = [
        'name',
        'short_name',
        'symbol',
        'created_at',
        'updated_at',
    ]

    def __str__(self):
        # Human-readable form: exchange name followed by its ticker symbol.
        return u'%s %s' % (self.name, self.symbol)

    def __repr__(self):
        return self.__str__()
| StarcoderdataPython |
4818149 | """
Compute language embeddings for the labels
NB: we release our language embeddings so you won't need to run this script. We only provide it as indication and in case you wish to compute word embedding for new words.
We use a Word2vec model trained on GoogleNews:
https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing
"""
# Get language embedding : in dev -> to put in data loader
from __future__ import division
from gensim.models import KeyedVectors
import pickle
import os.path as osp
import numpy as np
# Load model
path_to_word2vec = './data/GoogleNews-vectors-negative300.bin'
word_vectors = KeyedVectors.load_word2vec_format(path_to_word2vec, binary=True)  # C binary format

# Example for HICO-DET
data_path = '/sequoia/data2/jpeyre/iccv19_final/datasets/hico'
# Bug fix: close the vocab file deterministically instead of leaking the handle.
with open(osp.join(data_path, 'vocab.pkl'), 'rb') as f:
    vocab = pickle.load(f)
print('Found %d words in vocab' % len(vocab))

# Manual mapping of some of COCO objects
vocab[vocab.index('teddy bear')] = 'teddybear'
vocab[vocab.index('fire hydrant')] = 'fire_hydrant'
vocab[vocab.index('tennis racket')] = 'tennis_racket'
vocab[vocab.index('hot dog')] = 'hotdog'
vocab[vocab.index('cell phone')] = 'cellphone'
vocab[vocab.index('wine glass')] = 'wineglass'

# For the rest do average and map 'no_interaction' and 'background' directly
# to random (xavier init)
embeddings = np.zeros((len(vocab), 300))
for j, word in enumerate(vocab):
    if word in word_vectors.vocab:
        embeddings[j, :] = word_vectors[word]
    # Map 'no_interaction' and 'background' directly to random (xavier init)
    elif word in ['no interaction', 'background']:
        embeddings[j, :] = np.random.normal(0, scale=1 / np.sqrt(300), size=300)
    else:
        # Usually a compound word -> separated by _ or space. Take the average
        # embedding of the two parts.
        # Bug fix: the original's final `else` (the failure message) was
        # unreachable, and a compound whose parts were missing from the model
        # raised KeyError instead of being reported.
        words = word.split('_') if '_' in word else word.split(' ')
        if len(words) == 2 and all(w in word_vectors.vocab for w in words):
            embeddings[j, :] = (word_vectors[words[0]] + word_vectors[words[1]]) / 2
        else:
            print('Could not find embedding for word %s' % word)

save_file = osp.join(data_path, 'pretrained_embeddings_w2v.pkl')
if osp.exists(save_file):
    # Bug fix: raw_input does not exist under Python 3; fall back to input().
    try:
        ask = raw_input  # Python 2
    except NameError:
        ask = input  # Python 3
    answer = ask("File %s already exists. Continue: yes/no?" % save_file)
    assert answer == 'yes', 'Please specify another file name'
# Bug fix: reuse save_file instead of recomputing the path, and close the
# output handle deterministically.
with open(save_file, 'wb') as f:
    pickle.dump(embeddings, f, protocol=2)
| StarcoderdataPython |
190433 | <reponame>nishi-yuki/trimtr-api
import unittest
import random
from trimtr.trimmer import Trimmer
class TestTrimmer(unittest.TestCase):
    """Behavioural tests for the singleton Trimmer sentence formatter."""
    def setUp(self):
        # Trimmer is a singleton; every test shares the same instance.
        self.trimmer = Trimmer.get_instance()
    # A newline is inserted between sentences.
    def test_new_line_between_sentences(self):
        original_sentence = "How are you? I am fine thank you."
        expected_sentence = "How are you?\nI am fine thank you."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # No newline is inserted after an abbreviation.
    def test_abbreviation(self):
        abbreviations = set(['dr', 'vs', 'mr', 'mrs', 'prof', 'inc', 'i.e', 'e.g', 'u.s', 'etc'])
        # NOTE(review): picking a random abbreviation makes this test
        # non-deterministic across runs; consider iterating over all of them.
        random_abbreviation = random.sample(abbreviations, 1)[0]
        expected_sentence = random_abbreviation
        trimmed_sentence = self.trimmer.trim(random_abbreviation)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Unnecessary whitespace inside a sentence is removed.
    def test_shape_unnecessary_white_space(self):
        original_sentence = "I am fine    thank you."
        expected_sentence = "I am fine thank you."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Unnecessary newlines inside a sentence are removed.
    def test_shape_unnecessary_new_line(self):
        original_sentence = "I am fine\n thank you."
        expected_sentence = "I am fine thank you."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Three or more consecutive newlines are collapsed to exactly two.
    def test_format_new_line(self):
        original_sentence = "How are you?\n\n\nI am fine thank you."
        expected_sentence = "How are you?\n\nI am fine thank you."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # An existing blank-line break between sentence blocks is preserved.
    def test_sentence_block(self):
        original_sentence = "How are you?\n\nI am fine thank you."
        expected_sentence = "How are you?\n\nI am fine thank you."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # A colon followed by a space becomes a colon followed by a newline.
    def test_colon(self):
        original_sentence = "I like this: "
        expected_sentence = "I like this:\n"
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # A colon already followed by a newline stays unchanged.
    def test_colon_not_new_line(self):
        original_sentence = "This is:\n"
        expected_sentence = original_sentence
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # A colon followed by an alphanumeric character does not break the line.
    def test_colon_alphanumeric_character(self):
        original_sentence = "This:is"
        expected_sentence = original_sentence
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Leading whitespace is removed.
    def test_shape_head_white_space(self):
        original_sentence = " How are you?"
        expected_sentence = "How are you?"
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Periods inside personal-name abbreviations do not split the sentence.
    def test_not_break_one_sentence(self):
        original_sentence = "Punkt knows that the periods in Mr.Smith and Johann S.Bach do not mark sentence boundaries."
        expected_sentence = "Punkt knows that the periods in Mr. Smith and Johann S. Bach do not mark sentence boundaries."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Periods inside numbers do not split the sentence.
    def test_number_period(self):
        original_sentence = "For the quarter that ended March 31, Nokia earned $1.9 billion (1.2 euros), up 25% from the same quarter last year but short of an expected $2.3 billion."
        expected_sentence = original_sentence
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Empty input passes through unchanged.
    def test_empty(self):
        original_sentence = ""
        expected_sentence = original_sentence
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # A single word passes through unchanged.
    def test_simple_word(self):
        original_sentence = "hello"
        expected_sentence = original_sentence
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    def test_singleton_trimmer(self):
        # get_instance() must always return the same object.
        new_trimmer = Trimmer.get_instance()
        self.assertEqual(id(self.trimmer), id(new_trimmer))
    # Sentences running together without whitespace are still split.
    def test_continuing_sentences_without_white_space(self):
        last_words = set([')', '\"', '>', 'p'])
        last_word = random.sample(last_words, 1)[0]
        original_sentence = "A clinical study that lacks a 2.2 comparison (i.e., a control) grou" + last_word + ".I like it."
        expected_sentence = "A clinical study that lacks a 2.2 comparison (i.e., a control) grou" + last_word + ".\nI like it."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # A colon plus sentences running together without whitespace.
    def test_colon_and_continuing_sentence(self):
        original_sentence = "This obviously has drawbacks: paper is wasted, manual vote counting takes time and is potentially more error-prone than electronic vote counting.As tempting as electronic voting may seem, it is important to realize the potential risks and drawbacks."
        expected_sentence = "This obviously has drawbacks:\npaper is wasted, manual vote counting takes time and is potentially more error-prone than electronic vote counting.\nAs tempting as electronic voting may seem, it is important to realize the potential risks and drawbacks."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Run-together sentences that also contain abbreviations.
    def test_continuing_sentences_without_white_space_contain_abbrev(self):
        original_sentence = "A clinical study that lacks a 2.2 comparison (i.e., a control) group.I like it. I am e.g. hoge."
        expected_sentence = "A clinical study that lacks a 2.2 comparison (i.e., a control) group.\nI like it.\nI am e.g. hoge."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Abbreviations with two or more periods (e.g. d.i.y) in run-together text.
    def test_continuing_sentences_without_white_space_contain_abbrev_more_two(self):
        original_sentence = "I like d.i.y in Monday."
        expected_sentence = original_sentence
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
    # Two- and three-dot ellipses are displayed as-is (spaced dots are joined).
    def test_dot(self):
        original_sentence1 = "A1,...,A3,..,A4 is array."
        expected_sentence1 = original_sentence1
        original_sentence2 = "A1,. . .,A3,. .,A4 is array."
        expected_sentence2 = "A1,...,A3,..,A4 is array."
        trimmed_sentence1 = self.trimmer.trim(original_sentence1)
        self.assertEqual(expected_sentence1, trimmed_sentence1)
        trimmed_sentence2 = self.trimmer.trim(original_sentence2)
        self.assertEqual(expected_sentence2, trimmed_sentence2)
    # Newlines are still inserted when a sentence block (blank line) is present.
    def test_sentences_contain_sentence_block(self):
        original_sentence = "Hello.\n\nHow are you? I am fine."
        expected_sentence = "Hello.\n\nHow are you?\nI am fine."
        trimmed_sentence = self.trimmer.trim(original_sentence)
        self.assertEqual(expected_sentence, trimmed_sentence)
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3330780 | """ Display some information about a user """
from registrar import AbstractCommand, bot_command
from discord import embeds
from discord.enums import Status
@bot_command
class Command(AbstractCommand):
    """'tellmeabout' command: sends an embed describing a user's profile."""
    _name = 'tellmeabout'
    _aliases = ['tellmeabout']
    _enabled = True

    @staticmethod
    async def execute(shards, client, msg):
        """ Executes this command.

        Builds and sends an embed for the first mentioned user (or the
        message author): user ID, presence, nickname, number of servers
        shared with the bot, account creation date and guild join date.

        :param shards: shard collection (unused by this command)
        :type client: discord.Client
        :type msg: discord.Message
        """
        # Target the first mentioned user, defaulting to the author.
        if msg.mentions:
            target = msg.mentions[0]
        else:
            target = msg.author
        # Number of guilds the bot shares with the target (one membership
        # entry per shared guild).
        shared_guilds = len([i for i in client.get_all_members() if i.id == target.id])
        if target.status == Status.online:
            status = ':sunny: Online'
        elif target.status == Status.offline:
            status = ':sleeping_accommodation: Offline'
        elif target.status == Status.idle:
            status = ':zzz: Idle'
        elif target.status == Status.dnd:
            status = ':octagonal_sign: DnD'
        else:
            # Fallback for any other presence value (e.g. invisible) so that
            # `status` is always bound before use (previously it could be
            # referenced unassigned and raise UnboundLocalError).
            status = str(target.status)
        nick = target.display_name if target.name != target.display_name else "None"
        created_at = "{:%B %d %Y @ %I:%M%p}".format(target.created_at)
        joined_at = "{:%B %d %Y @ %I:%M%p}".format(target.joined_at)
        embed = embeds.Embed()
        embed.color = target.color
        embed.set_author(name=f'{target.name}#{target.discriminator}',
                         icon_url='https://i.imgur.com/s2r7jp7.png')
        embed.set_thumbnail(url=target.avatar_url)
        embed.add_field(name='User ID', value=target.id)
        embed.add_field(name='Status', value=status)
        embed.add_field(name='Nick', value=nick)
        embed.add_field(name='Shared Servers', value=shared_guilds)
        embed.add_field(name='Account Created', value=created_at, inline=False)
        embed.add_field(name='Join Date', value=joined_at, inline=False)
        embed.set_footer(text=f'Requested by {msg.author.display_name}#{msg.author.discriminator}',
                         icon_url=msg.author.avatar_url)
        await client.send_message(msg.channel, embed=embed)

    @property
    def name(self):
        """ The name of this command """
        return self._name

    @property
    def aliases(self):
        """ The aliases that can be used to call this command """
        return self._aliases

    @property
    def enabled(self):
        """ Controls whether the command is allowed to be executed. """
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        """ Setter for `enabled`.

        Fixed: the previous body assigned to ``self.enabled``, which
        re-invoked this setter and recursed until RecursionError. It must
        write the backing attribute ``self._enabled`` instead.
        """
        self._enabled = value
| StarcoderdataPython |
# Read n (array length) and k (window size), then the n integers.
n, k = map(int, input().split())
values = [int(token) for token in input().split()]
# Maximum sum over any k consecutive elements, via a sliding window.
best = window = sum(values[:k])
for right in range(k, n):
    window += values[right] - values[right - k]
    if window > best:
        best = window
print(best)
| StarcoderdataPython |
1791425 | '''
<NAME>
Rule to detect binding to 0.0.0.0
Jan 21, 2019
'''
from ansiblelint import AnsibleLintRule
class InvalidBindingRule(AnsibleLintRule):
    """ansible-lint rule that flags tasks binding a service to 0.0.0.0."""
    id = 'SECURITY:::BINDING_TO_ALL:::'
    shortdesc = 'Binding to 0.0.0.0'
    description = 'Check for use for binding to 0.0.0.0'
    tags = { 'security' }

    def match(self, file, line):
        """Return True when *line* is a 'key: value' task line containing 0.0.0.0.

        Fixed two defects: the message used the Python-2-only ``print``
        statement, and the method never returned a value, so ansible-lint
        (which treats a truthy return as a match) could never report the rule.
        """
        # Skip commented lines; only inspect 'key: value' style lines.
        if ( ('#' not in line) and (':' in line) ):
            line = line.lower()
            attr = line.split(':')[0]
            if (len(attr) > 0):
                if ( '0.0.0.0' in line ):
                    print('SECURITY:::BINDING_TO_ALL:::Do not bind to 0.0.0.0. This may cause a DDOS attack. Restrict your available IPs.')
                    return True
        return False
3234533 | import requests
import argparse
import os
from bs4 import BeautifulSoup
import youtube_dl
from termcolor import colored
from banner import banner
def downloader(courseTitle, dl, qualityCode="18"):
    """Download every lecture of a course into <courseTitle>/<module>/<lecture>.mp4.

    :param courseTitle: course name, used as the root output directory
    :param dl: mapping {module title: {lecture title: YouTube video id}}
    :param qualityCode: youtube-dl format code (default "18", i.e. mp4 360p)
    """
    if not os.path.exists(courseTitle):
        os.mkdir(courseTitle)
    for mod, lecs in dl.items():
        # Sanitise the module name: a '/' would create nested directories.
        mod = courseTitle + "/" + mod.strip().replace("/", " - ")
        if not os.path.exists(mod):
            os.mkdir(mod)
        print(colored("\nDownloading "+mod+" ...\n", 'green'))
        for lecname, videoid in lecs.items():
            lecname = lecname.strip()
            print(colored("\nDownloading "+lecname+" ...\n", 'green'))
            videoUrl = "https://www.youtube.com/watch?v="+videoid
            filepath = mod+"/"+lecname+".mp4"
            ydl_opts = {
                'outtmpl': filepath,
                'format': str(qualityCode)
            }
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                try:
                    ydl.download([videoUrl])
                except:
                    # Bare except is (apparently) deliberate: it also catches
                    # KeyboardInterrupt so Ctrl-C aborts the whole run.
                    print(colored("Some error occured or user interrupted", 'red'))
                    exit()
def createJSON(courseId, outputDict):
    """Serialize *outputDict* to a file named '<courseId>.json'.

    Fixed: the original wrote ``str(outputDict).replace("'", '"')``, which
    corrupts any value containing an apostrophe and emits Python literals
    (``True``/``None``) that are not valid JSON. ``json.dump`` produces
    correct JSON for the same dict-of-dicts data.
    """
    import json  # local import: the module does not import json at top level
    with open(str(courseId) + ".json", 'w') as f:
        json.dump(outputDict, f)
def getCourseData(courseId):
    """Scrape the NPTEL course page and collect its lecture video ids.

    :param courseId: numeric NPTEL course id
    :return: tuple ``(outputDict, courseTitle)`` where outputDict maps
        "<i>-<module title>" -> {"<j>-<lecture title>": youtube_video_id}
    """
    outputDict = {}
    url = "https://nptel.ac.in/courses/"+str(courseId)
    resonse = requests.get(url)
    soup = BeautifulSoup(resonse.content, 'lxml')
    # The page title looks like "...NOC:<course name>"; keep only the name
    # and make it filesystem-safe.
    courseTitle = soup.select_one('title').text.split(
        'NOC:')[-1].replace('/', '-')
    print(colored("\n"+courseTitle+"\n", 'green'))
    # One <li> per module in the lecture list.
    mainHeads = soup.select("#div_lm>ul>li")
    # print(len(mainHeads))
    i = 0
    j = 0
    for mainTitle in mainHeads:
        i += 1
        try:
            # Module heading (anchor with href='#').
            head = mainTitle.select("li>a[href='#']")[0].text
            outputDict[str(i)+"-"+head.replace(":", "-")] = {}
            # Lecture anchors carry the video id inside their onclick handler.
            subs = mainTitle.select("li>a[href='']")
            for sub in subs:
                j += 1
                outputDict[str(i)+"-"+head.replace(":", "-")][str(j)+"-"+sub.text.replace(
                    ":", "-").replace("/", "-")] = sub.get('onclick').split(',')[1].replace("'", "").replace(")", "")
        except:
            # NOTE(review): this bare except also hides scraping errors other
            # than "no videos" (e.g. page-layout changes); consider narrowing.
            print(colored("the course with id: "+str(courseId) +
                " does not have any videos...", 'red'))
    return outputDict, courseTitle
# --- CLI entry: parse arguments, scrape the course page, download videos. ---
argsParser = argparse.ArgumentParser(description='Nptel Course Downloader')
argsParser.add_argument("--c", default='', required=True,
                        help="Course ID to download the course")
argsParser.add_argument("--q", default='18', help="Video quality code")
args = argsParser.parse_args()
courseID = args.c
videoQuality = args.q
print(banner())
(courseDict, courseTitle) = getCourseData(int(courseID))
# createJSON(int(courseID), courseDict)
# Quality is passed as int here although downloader's default is the string
# "18"; downloader str()s it before use, so both forms work.
downloader(courseTitle, courseDict, int(videoQuality))
# 106106169  (example course id)
| StarcoderdataPython |
3260310 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
# Prepare object points: inner-corner counts of the chessboard pattern.
nx = 8  # corners per row
ny = 6  # corners per column
# Load the calibration image.
fname = '../images/calibration_test.png'
img = cv2.imread(fname)
# Convert to grayscale (findChessboardCorners expects a single channel).
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret , corners = cv2.findChessboardCorners(gray_img, (nx, ny), None)
# If found, draw the corners and show the result until a key is pressed.
# (If detection fails, nothing is displayed and the script just exits.)
if ret:
    cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
    cv2.imshow("Test window", img)
    cv2.waitKey(0)
| StarcoderdataPython |
3395107 | from ftmstore import Dataset, settings
def get_dataset(name, origin, database_uri=None):
    """Return an ftmstore Dataset, falling back to the configured DATABASE_URI.

    Any falsy *database_uri* (None or empty string) selects the default from
    ``ftmstore.settings``.
    """
    effective_uri = database_uri or settings.DATABASE_URI
    return Dataset(name, origin, database_uri=effective_uri)
| StarcoderdataPython |
3252127 | #!/usr/bin/python
#
# Simple subunit testrunner for python
# Copyright (C) <NAME> <<EMAIL>> 2007
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Run a unittest testcase reporting results as Subunit.
$ python -m subunit.run mylib.tests.test_suite
"""
import sys
from subunit import TestProtocolClient, get_default_formatter
from testtools.run import (
BUFFEROUTPUT,
CATCHBREAK,
FAILFAST,
TestProgram,
USAGE_AS_MAIN,
)
class SubunitTestRunner(object):
    """Minimal runner that reports results on *stream* via the subunit protocol."""

    def __init__(self, stream=sys.stdout):
        self.stream = stream

    def run(self, test):
        """Run *test* (a case or suite) and return the subunit result object."""
        reporter = TestProtocolClient(self.stream)
        test(reporter)
        return reporter
class SubunitTestProgram(TestProgram):
    """TestProgram variant whose usage text describes subunit reporting."""

    USAGE = USAGE_AS_MAIN

    def usageExit(self, msg=None):
        """Print usage information (preceded by *msg*, if given) and exit(2)."""
        if msg:
            # Fixed: was the Python-2-only statement form ``print msg``; the
            # call form below is equivalent and matches the print() call used
            # at the end of this method.
            print(msg)
        # Only advertise the options this program actually supports.
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        usage_text = self.USAGE % usage
        usage_lines = usage_text.split('\n')
        # Insert a subunit-specific description after the synopsis line.
        usage_lines.insert(2, "Run a test suite with a subunit reporter.")
        usage_lines.insert(3, "")
        print('\n'.join(usage_lines))
        sys.exit(2)
if __name__ == '__main__':
    # Report through the default subunit formatter for this environment.
    stream = get_default_formatter()
    runner = SubunitTestRunner(stream)
    # module=None so the test id given on the command line selects what runs.
    SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
        stdout=sys.stdout)
| StarcoderdataPython |
3256990 | <gh_stars>0
import pytest
import requests
from typing import Dict, Union, List
def _print_kg(kg: Dict[str, Dict[str, Dict[str, Dict[str, Union[List[str], str, None]]]]]):
nodes_by_qg_id = kg["nodes"]
edges_by_qg_id = kg["edges"]
for qnode_key, node_ids in sorted(nodes_by_qg_id.items()):
print(f"{qnode_key}: {node_ids}")
for qedge_key, edge_ids in sorted(edges_by_qg_id.items()):
print(f"{qedge_key}: {edge_ids}")
def _run_query(trapi_qg: Dict[str, Dict[str, Dict[str, Union[List[str], str, None]]]]):
    """POST the TRAPI query graph to the KP's /query endpoint.

    Returns the parsed JSON body on HTTP 200, otherwise logs the failure and
    returns an empty dict.
    """
    response = requests.post(f"{pytest.endpoint}/query", json=trapi_qg, headers={'accept': 'application/json'})
    if response.status_code != 200:
        print(f"Response status code was {response.status_code}. Response was: {response.text}")
        return dict()
    return response.json()
def test_1():
    """One-hop: pinned chemical to any related chemical."""
    # Simplest one-hop
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:related_to"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"],
                "categories": ["biolink:ChemicalEntity"]
            },
            "n01": {
                "categories": ["biolink:ChemicalEntity"]
            }
        }
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_2():
    """One-hop where the output qnode has no category constraint."""
    # Output qnode is lacking a category
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:related_to"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"],
                "categories": ["biolink:ChemicalEntity"]
            },
            "n01": {
            }
        }
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_3():
    """One-hop with no predicate constraint on the edge."""
    # No predicate is specified
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"],
                "categories": ["biolink:ChemicalEntity"]
            },
            "n01": {
                "categories": ["biolink:ChemicalEntity"]
            }
        }
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_4():
    """One-hop with multiple allowed output categories."""
    # Multiple output categories
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"]
            },
            "n01": {
                "categories": ["biolink:Protein", "biolink:Procedure"]
            }
        }
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_5():
    """One-hop with multiple allowed predicates."""
    # Multiple predicates
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:physically_interacts_with", "biolink:related_to"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"]
            },
            "n01": {
                "categories": ["biolink:Protein", "biolink:Gene"]
            }
        }
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_6():
    """One-hop where both ends are pinned to curies."""
    # Curie-to-curie query
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"]
            },
            "n01": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL833", "CHEMBL.COMPOUND:CHEMBL4128999", "CHEMBL.COMPOUND:CHEMBL112"]
            }
        }
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_7():
    """One-hop with multiple input curies and an unconstrained output node."""
    # Multiple curie to multiple curie query
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25", "UniProtKB:P04070"]
            },
            "n01": {
            }
        },
        "include_metadata": True
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_8():
    """Edgeless query with a single pinned node."""
    # Single-node query
    query = {
        "edges": {
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"]
            }
        }
    }
    kg = _run_query(query)
    assert len(kg["nodes"]["n00"]) == 1
    _print_kg(kg)
def test_9():
    """Edgeless query: one qnode pinned to two curies returns both."""
    # Single-node query with multiple curies
    query = {
        "edges": {
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25", "CHEMBL.COMPOUND:CHEMBL112"]
            }
        }
    }
    kg = _run_query(query)
    assert len(kg["nodes"]["n00"]) == 2
    _print_kg(kg)
def test_10():
    """Edgeless query with two independent pinned qnodes."""
    # Edgeless query with multiple nodes
    query = {
        "edges": {
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"]
            },
            "n01": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            }
        }
    }
    kg = _run_query(query)
    assert len(kg["nodes"]["n00"]) == 1
    assert len(kg["nodes"]["n01"]) == 1
    _print_kg(kg)
def test_11():
    """A query graph with two edges must be rejected (empty response)."""
    # Verify catches larger than one-hop query
    query = {
        "edges": {
            "e00": {},
            "e01": {}
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25"]
            },
            "n01": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL411"]
            }
        }
    }
    kg = _run_query(query)
    assert not kg
def test_12():
    """Edge directionality: enforced vs. ignored, with either end as input.

    The edge counts of the four variants must be consistent: the two
    direction-ignoring runs match, and the two direction-enforcing runs sum
    to the direction-ignoring total.
    """
    ids = ["CHEMBL.COMPOUND:CHEMBL25", "CHEMBL.COMPOUND:CHEMBL2106453"]
    # Test subject as input node with enforced direction
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ids
            },
            "n01": {
            }
        },
        "include_metadata": True,
        "enforce_directionality": True
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    num_edges_enforce_direction_subject = len(kg['edges']['e00'])
    print(f"Got back {num_edges_enforce_direction_subject} edges")
    # NOTE(review): iterating .items() makes edge[0] the edge *key*, not the
    # edge subject, and all() over a filtered generator is vacuously True when
    # nothing matches — confirm these assertions check what was intended.
    assert all(edge for edge in kg["edges"]["e00"].items() if edge[0] in ids)
    # Test object as input node with enforced direction
    query = {
        "edges": {
            "e00": {
                "subject": "n01",
                "object": "n00"
            }
        },
        "nodes": {
            "n00": {
                "ids": ids
            },
            "n01": {
            }
        },
        "include_metadata": True,
        "enforce_directionality": True
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    num_edges_enforce_direction_object = len(kg['edges']['e00'])
    print(f"Got back {num_edges_enforce_direction_object} edges")
    assert all(edge for edge in kg["edges"]["e00"].items() if edge[1] in ids)
    # Test subject as input node with ignored direction
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ids
            },
            "n01": {
            }
        },
        "include_metadata": True,
        "enforce_directionality": False
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    num_edges_ignore_direction_subject = len(kg['edges']['e00'])
    print(f"Got back {num_edges_ignore_direction_subject} edges")
    assert any(edge for edge in kg["edges"]["e00"].values() if edge[0] in ids)
    assert any(edge for edge in kg["edges"]["e00"].values() if edge[1] in ids)
    # Test object as input node with ignored direction
    query = {
        "edges": {
            "e00": {
                "subject": "n01",
                "object": "n00"
            }
        },
        "nodes": {
            "n00": {
                "ids": ids
            },
            "n01": {
            }
        },
        "include_metadata": True
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    num_edges_ignore_direction_object = len(kg['edges']['e00'])
    print(f"Got back {num_edges_ignore_direction_object} edges")
    assert any(edge for edge in kg["edges"]["e00"].values() if edge[0] in ids)
    assert any(edge for edge in kg["edges"]["e00"].values() if edge[1] in ids)
    # Final checks on edge counts to make sure all makes sense
    assert num_edges_ignore_direction_subject == num_edges_ignore_direction_object
    assert num_edges_enforce_direction_subject + num_edges_enforce_direction_object == num_edges_ignore_direction_object
def test_13():
    """TRAPI 1.1-style property names (ids/categories/predicates) work."""
    # TRAPI 1.1 property names
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:physically_interacts_with"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL25", "UniProtKB:P04070"]
            },
            "n01": {
                "categories": ["biolink:Protein"]
            }
        },
        "include_metadata": True
    }
    kg = _run_query(query)
    assert kg["nodes"]["n00"] and kg["nodes"]["n01"] and kg["edges"]["e00"]
    _print_kg(kg)
def test_14():
    """allow_subclasses expands a pinned curie to its subclass nodes."""
    # Test subclass_of reasoning
    query_subclass = {
        "edges": {
        },
        "nodes": {
            "n00": {
                "ids": ["MONDO:0005015"],  # Diabetes mellitus
                "allow_subclasses": True
            }
        }
    }
    kg = _run_query(query_subclass)
    assert len(kg["nodes"]["n00"]) > 1
    _print_kg(kg)
    query_no_subclass = {
        "edges": {
        },
        "nodes": {
            "n00": {
                "ids": ["MONDO:0005015"]  # Diabetes mellitus
            }
        }
    }
    kg = _run_query(query_no_subclass)
    assert len(kg["nodes"]["n00"]) == 1
    _print_kg(kg)
def test_15():
    """Predicate symmetry: asymmetric 'treats' must not match when reversed."""
    # Test predicate symmetry enforcement
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:treats"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:Disease"]
            }
        }
    }
    kg = _run_query(query)
    query_respecting_symmetry = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:treats"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:Disease"]
            }
        },
        "respect_predicate_symmetry": True
    }
    kg_symmetry = _run_query(query_respecting_symmetry)
    query_symmetry_backwards = {
        "edges": {
            "e00": {
                "subject": "n01",
                "object": "n00",
                "predicates": ["biolink:treats"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:Disease"]
            }
        },
        "respect_predicate_symmetry": True
    }
    kg_symmetry_backwards = _run_query(query_symmetry_backwards)
    assert not kg_symmetry_backwards["nodes"]["n01"]
    assert len(kg_symmetry["nodes"]["n01"]) == len(kg["nodes"]["n01"])
def test_16():
    """A mixin category (PhysicalEssence) in the QG still returns answers."""
    # Test mixins in the QG
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01"
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:PhysicalEssence"]
            }
        }
    }
    kg = _run_query(query)
    assert len(kg["nodes"]["n01"])
def test_17():
    """Canonical predicate handling: treated_by ≡ reversed treats."""
    # Test canonical predicate handling
    query_non_canonical = {
        "edges": {
            "e00": {
                "subject": "n01",
                "object": "n00",
                "predicates": ["biolink:treated_by"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:Disease"]
            }
        },
        "respect_predicate_symmetry": True
    }
    kg_non_canonical = _run_query(query_non_canonical)
    assert len(kg_non_canonical["nodes"]["n01"])
    query_canonical = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:treats"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:Disease"]
            }
        },
        "respect_predicate_symmetry": True
    }
    kg_canonical = _run_query(query_canonical)
    assert len(kg_canonical["nodes"]["n01"])
    assert len(kg_canonical["nodes"]["n01"]) == len(kg_non_canonical["nodes"]["n01"])
def test_18():
    """Hierarchical categories: NamedThing matches more specific node types."""
    # Test hierarchical category reasoning
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:interacts_with"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:NamedThing"]
            }
        },
        "include_metadata": True
    }
    kg = _run_query(query)
    assert len(kg["nodes"]["n01"])
    assert any(node_tuple[1] != "biolink:NamedThing" for node_tuple in kg["nodes"]["n01"].values())
def test_19():
    """Hierarchical predicates: related_to matches more specific predicates."""
    # Test hierarchical predicate reasoning
    query = {
        "edges": {
            "e00": {
                "subject": "n00",
                "object": "n01",
                "predicates": ["biolink:related_to"]
            }
        },
        "nodes": {
            "n00": {
                "ids": ["CHEMBL.COMPOUND:CHEMBL112"]
            },
            "n01": {
                "categories": ["biolink:Protein"]
            }
        },
        "include_metadata": True
    }
    kg = _run_query(query)
    assert len(kg["edges"]["e00"])
    assert any(edge_tuple[2] != "biolink:related_to" for edge_tuple in kg["edges"]["e00"].values())
if __name__ == "__main__":
    pytest.main(['-v', 'test.py'])
| StarcoderdataPython |
3287941 | # -*- coding: utf-8 -*-
import pandas as pd
from py2neo import Graph, Node, Relationship, NodeMatcher
# Read the CSV files.
movies_df = pd.read_csv(r'./movies.csv')
actors_df = pd.read_csv(r'./actors.csv')
# Connect to the Neo4j service.
# NOTE(review): credentials are hard-coded, and "localhost://7474" mixes host
# and port — py2neo normally expects host="localhost" with a separate port;
# confirm this connection string actually works in your py2neo version.
graph = Graph(host="localhost://7474", auth=("neo4j", "jc147369"))
# Create the movie nodes (one per CSV row).
for i in range(movies_df.shape[0]):
    rank = str(movies_df.iloc[i, :]['rank'])
    name = movies_df.iloc[i, :]['name']
    box_office = movies_df.iloc[i, :]['box_office']
    avg_price = str(movies_df.iloc[i, :]['avg_price'])
    avg_people = str(movies_df.iloc[i, :]['avg_people'])
    begin_date = movies_df.iloc[i, :]['begin_date']
    node = Node("Movie",
                name=name,
                rank=rank,
                box_office=box_office,
                avg_price=avg_price,
                avg_people=avg_people,
                begin_date=begin_date
                )
    # print(movies_df.iloc[i, :]['rank'])
    graph.create(node)
print('create movie nodes successfully!')
# Create the actor nodes (deduplicated via a set).
all_actors = set()
for i in range(actors_df.shape[0]):
    actor_list = actors_df.iloc[i, :]['actors'].split(',')
    for actor in actor_list:
        all_actors.add(actor)
for actor in all_actors:
    node = Node("Actor", name=actor)
    graph.create(node)
print('create actor nodes successfully!')
# Create the actor -> movie ACT_IN relationships.
for i in range(actors_df.shape[0]):
    name = actors_df.iloc[i, :]['name']
    matcher = NodeMatcher(graph)
    movie_node = matcher.match("Movie", name=name).first()
    actors = actors_df.iloc[i, :]['actors'].split(',')
    # print(name, actors)
    for actor in actors:
        actor_node = matcher.match("Actor", name=actor).first()
        relationship = Relationship(actor_node, 'ACT_IN', movie_node)
        graph.create(relationship)
print('create relationships successfully!')
print('You can check Neo4j now!')
| StarcoderdataPython |
1739218 | #!/usr/bin/env python3
import argparse
import torch
import torch.jit
import torch.nn as nn
import torch.nn.functional as F
from generate_cnn_model import PolicyHead, ValueHead
class TransformerModel(nn.Module):
    """Transformer encoder over board squares, with policy and value heads.

    Each of the board_size**2 squares becomes one token of dimension
    channel_num; a learned positional encoding is added before the encoder.
    """
    def __init__(self, input_channel_num, block_num, channel_num, policy_channel_num, board_size):
        super(TransformerModel, self).__init__()
        # Per-square linear embedding: input planes -> channel_num features.
        self.first_encoding_ = torch.nn.Linear(input_channel_num, channel_num)
        # NOTE(review): a single encoder layer is applied block_num times in
        # encode(), so all "blocks" share one set of weights — confirm this
        # weight sharing is intentional (vs. using nn.TransformerEncoder).
        self.encoder_layer_ = torch.nn.TransformerEncoderLayer(channel_num, nhead=8, dim_feedforward=channel_num * 4)
        self.board_size = board_size
        self.block_num = block_num
        square_num = board_size ** 2
        self.policy_head_ = PolicyHead(channel_num, policy_channel_num)
        # 51 value outputs — presumably a categorical value distribution;
        # TODO confirm the bin count against the training code.
        self.value_head_ = ValueHead(channel_num, 51)
        # Learned positional encoding, one vector per square: (seq, 1, channel).
        self.positional_encoding_ = torch.nn.Parameter(torch.zeros([square_num, 1, channel_num]), requires_grad=True)
    def encode(self, x):
        # (batch, C_in, H, W) -> (batch, C_in, H*W)
        x = x.view([x.shape[0], x.shape[1], x.shape[2] * x.shape[3]])
        # -> (H*W, batch, C_in): sequence-first layout expected by the encoder layer.
        x = x.permute([2, 0, 1])
        x = self.first_encoding_(x)
        x = F.relu(x)
        x = x + self.positional_encoding_
        for _ in range(self.block_num):
            x = self.encoder_layer_(x)
        # Back to (batch, C, board_size, board_size) for the conv-style heads.
        x = x.permute([1, 2, 0])
        x = x.view([x.shape[0], x.shape[1], self.board_size, self.board_size])
        return x
    def decode(self, representation):
        # Map an encoded board representation to (policy, value).
        policy = self.policy_head_(representation)
        value = self.value_head_(representation)
        return policy, value
    def forward(self, x):
        return self.decode(self.encode(x))
    @torch.jit.export
    def get_representations(self, x):
        """Return the intermediate representation after every encoder pass.

        Runs block_num + 5 passes of the (shared) encoder layer and collects
        the state after each pass, reshaped to (batch, C, H, W).
        """
        x = x.view([x.shape[0], x.shape[1], x.shape[2] * x.shape[3]])
        x = x.permute([2, 0, 1])
        x = self.first_encoding_(x)
        x = F.relu(x)
        x = x + self.positional_encoding_
        representations = list()
        for _ in range(self.block_num + 5):
            x = self.encoder_layer_(x)
            curr_x = x
            curr_x = curr_x.permute([1, 2, 0])
            curr_x = curr_x.view([curr_x.shape[0], curr_x.shape[1], self.board_size, self.board_size])
            representations.append(curr_x)
        return representations
def main():
    """Build a TransformerModel from CLI args and save it as TorchScript."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-game", default="shogi", choices=["shogi", "othello", "go"])
    # NOTE(review): value_type is parsed but never used below.
    parser.add_argument("-value_type", default="cat", choices=["sca", "cat"])
    parser.add_argument("--block_num", type=int, default=10)
    parser.add_argument("--channel_num", type=int, default=256)
    args = parser.parse_args()
    if args.game == "shogi":
        input_channel_num = 42
        board_size = 9
        policy_channel_num = 27
    elif args.game == "othello":
        input_channel_num = 2
        board_size = 8
        policy_channel_num = 2
    else:
        # NOTE(review): "go" is an accepted --game choice above but has no
        # configuration here, so selecting it just exits with an error.
        exit(1)
    model = TransformerModel(input_channel_num, args.block_num, args.channel_num, policy_channel_num, board_size)
    # Count trainable parameters.
    params = 0
    for p in model.parameters():
        if p.requires_grad:
            params += p.numel()
    print(f"パラメータ数 : {params:,}")
    input_data = torch.randn([8, input_channel_num, board_size, board_size])
    # NOTE(review): the traced module is immediately overwritten by the
    # scripted one; the trace call effectively serves only as a sanity check.
    script_model = torch.jit.trace(model, input_data)
    script_model = torch.jit.script(model)
    model_path = f"./{args.game}_cat_transformer_bl{args.block_num}_ch{args.channel_num}.model"
    script_model.save(model_path)
    print(f"{model_path}にパラメータを保存")
    representations = model.get_representations(input_data)
    print(f"len(representations) = {len(representations)}")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1788768 | <gh_stars>10-100
"""
This script constructs a MILP model for AES-like primitives, which can aid in
finding optimal parameter sets against differential attacks by counting the
minimum number of active S-boxes in a differential trail.
It uses the Gurobi Solver to solve the MILP instance, hence you need
a Gurobi license (free for Academic use).
"""
from gurobipy import *
from models.milpconstraints import addAESrndconstraints
def buildmodel(config):
    """
    Constructs the model for the Gurobi Solver

    :param config: dict with keys "statedimension", "rounds", "branchnumber"
    :return: a gurobipy Model that minimises the number of active S-boxes
        over the given number of rounds, excluding the all-zero trail
    """
    model = Model("aeslike")
    # Parameters
    state_dim = config["statedimension"]
    num_rounds = config["rounds"]
    branch_number = config["branchnumber"]
    # Initialize all variables
    var_x = [] # state: one binary per byte, 1 = active S-box
    var_d = [] # dummy variable for MixColumns (one per column per round)
    state_words = state_dim * state_dim
    for byte in range((num_rounds + 1) * state_words):
        var_x.append(model.addVar(vtype=GRB.BINARY, name="x[{}]".format(byte)))
    for col in range(num_rounds * state_dim):
        var_d.append(model.addVar(name="dummy[{}]".format(col)))
    activesboxes = model.addVar(name="Active S-boxes")
    model.update()
    # Constraints
    # Optimize number of active S-boxes
    model.setObjective(activesboxes, GRB.MINIMIZE)
    # Count Active S-boxes (only the first num_rounds states pass an S-box layer)
    model.addConstr(quicksum(var_x[i] for i in range(num_rounds * state_words))
                    - activesboxes == 0, "Count Active S-boxes")
    # Add constraints from AES round function
    model = addAESrndconstraints(model, state_dim, var_x, var_d,
                                 branch_number, num_rounds)
    # No Zero Characteristic
    model.addConstr(quicksum(var_x[i] for i in range((num_rounds + 1) *
                    state_words)) >= 1, "Avoid trivial solutions")
    return model
def printmodel(model, config):
    """
    Print the solution and the corresponding differential trail.

    Args:
        model: a solved Gurobi model produced by ``buildmodel``.
        config: the same configuration dict used to build the model.

    Returns:
        The objective value (minimum number of active S-boxes).
    """
    state_dim = config["statedimension"]
    num_rounds = config["rounds"]
    print("Rounds:", num_rounds)
    print("State dimension:", state_dim)
    print("Branch number:", config["branchnumber"])
    print("Minimum number of active S-boxes: {}".format(model.objVal))
    print("Best differential trail:")
    # Print differential trail
    # Print Header
    # Each round's state prints as 2*state_dim characters ("x " / ". " per
    # cell) plus one separating space, so pad each round number to that width.
    header = ""
    for rnd in range(num_rounds + 1):
        header += str(rnd) + " " * (2 * state_dim + 1 - len(str(rnd)))
    print(header)
    # Print State
    # All round states are printed side by side, one text row at a time;
    # active bytes (solution value > 0) appear as a red 'x' via ANSI escapes.
    for row in range(state_dim):
        for rnd in range(num_rounds + 1):
            for col in range(state_dim):
                # Column-major byte index within round `rnd`'s state block.
                cur_index = row + col * state_dim + rnd * state_dim * state_dim
                if model.getVarByName("x[{}]".format(cur_index)).x > 0.0:
                    print("\033[91mx\033[0m", end=" ")
                else:
                    print(".", end=" ")
            print(" ", end="")
        print("")
    return model.objVal
| StarcoderdataPython |
1762544 | # Generated by Django 2.0.5 on 2018-05-05 23:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes `article.description` and
    # `article.document` optional (blank=True, empty-string default) with a
    # 1000-character limit.
    # NOTE(review): generated by Django 2.0.5 -- avoid hand-editing.

    dependencies = [
        ('work', '0005_auto_20180505_2310'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='description',
            field=models.CharField(blank=True, default='', max_length=1000),
        ),
        migrations.AlterField(
            model_name='article',
            name='document',
            field=models.URLField(blank=True, default='', max_length=1000),
        ),
    ]
| StarcoderdataPython |
1738684 | import sys
import string
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from aster.core import standard_fields as fields
from aster.c_ops import ops
def _apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  # switch() routes `x` down the branch whose case index equals rand_sel;
  # the other branches receive dead tensors, and merge() picks the single
  # live output, so only one branch actually executes per session run.
  return control_flow_ops.merge([func(
      control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)
      for case in range(num_cases)])[0]
def _apply_with_random_selector_tuples(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: A tuple of input tensors.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  num_inputs = len(x)
  rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  # tuples[j] collects, per case, the j-th output of func; merge() later
  # selects the single live branch result for each output position.
  tuples = [list() for t in x]
  for case in range(num_cases):
    # switch() forwards each input tensor only down the branch whose index
    # matches rand_sel; the other branches receive dead tensors.
    new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]
    output = func(tuple(new_x), case)
    for j in range(num_inputs):
      tuples[j].append(output[j])
  for i in range(num_inputs):
    tuples[i] = control_flow_ops.merge(tuples[i])[0]
  return tuple(tuples)
def _random_integer(minval, maxval, seed):
  """Draws a scalar int32 tensor uniformly between minval and maxval.

  Args:
    minval: minimum value of the random tensor.
    maxval: maximum value of the random tensor.
    seed: random seed.

  Returns:
    A random 0-D tensor between minval and maxval.
  """
  return tf.random_uniform(shape=[],
                           minval=minval,
                           maxval=maxval,
                           dtype=tf.int32,
                           seed=seed)
def resize_image_random_method(image, target_size):
  """Resizes an image using a randomly chosen interpolation method.

  Args:
    image: rank 3 tensor of shape [image_height, image_width, 3]
    target_size: [target_height, target_width]

  Returns:
    resized_image
  """
  with tf.name_scope('ResizeRandomMethod', values=[image]):
    # tf.image.ResizeMethod enumerates four interpolation methods (0..3);
    # delegate the random choice to _apply_with_random_selector.
    return _apply_with_random_selector(
        image,
        lambda img, method: tf.image.resize_images(img, target_size, method),
        num_cases=4)
def resize_image(image,
                 target_size,
                 method=tf.image.ResizeMethod.BILINEAR,
                 align_corners=False):
  """Resizes `image` to `target_size` using the given interpolation method.

  Args:
    image: rank 3 image tensor.
    target_size: [target_height, target_width].
    method: a tf.image.ResizeMethod value (default: bilinear).
    align_corners: forwarded to tf.image.resize_images.

  Returns:
    The resized image tensor.
  """
  with tf.name_scope('ResizeImage', values=[image, target_size]):
    resized = tf.image.resize_images(image,
                                     target_size,
                                     method=method,
                                     align_corners=align_corners)
    return resized
def normalize_image(image, original_minval, original_maxval, target_minval,
                    target_maxval):
  """Linearly rescales pixel values from one range to another.

  Moves the pixel values from the current
  [original_minval, original_maxval] range to the
  [target_minval, target_maxval] range.

  Args:
    image: rank 3 float32 tensor containing 1
           image -> [height, width, channels].
    original_minval: current image minimum value.
    original_maxval: current image maximum value.
    target_minval: target image minimum value.
    target_maxval: target image maximum value.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('NormalizeImage', values=[image]):
    orig_min = float(original_minval)
    orig_max = float(original_maxval)
    tgt_min = float(target_minval)
    tgt_max = float(target_maxval)
    # Scale factor is a Python float, so it folds into a single multiply op.
    scale = (tgt_max - tgt_min) / (orig_max - orig_min)
    image = tf.to_float(image)
    image = tf.subtract(image, orig_min)
    image = tf.multiply(image, scale)
    image = tf.add(image, tgt_min)
    return image
def random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None):
  """Scales every pixel value by an independent random factor.

  For each value in the image tensor, draws a random number between
  minval and maxval and multiplies the value by it, then clips the
  result back to [0, 1].

  Args:
    image: rank 3 float32 tensor containing 1 image ->
      [height, width, channels] with pixel values varying between [0, 1].
    minval: lower ratio of scaling pixel values.
    maxval: upper ratio of scaling pixel values.
    seed: random seed.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomPixelValueScale', values=[image]):
    # One independent scale factor per pixel/channel value.
    scale = tf.random_uniform(tf.shape(image),
                              minval=minval,
                              maxval=maxval,
                              dtype=tf.float32,
                              seed=seed)
    scaled = tf.multiply(image, scale)
    return tf.clip_by_value(scaled, 0.0, 1.0)
def random_rgb_to_gray(image, probability=0.1, seed=None):
  """Changes the image from RGB to Grayscale with the given probability.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 1].
    probability: the probability of returning a grayscale image.
            The probability should be a number between [0, 1].
    seed: random seed.

  Returns:
    image: image which is the same shape as input image.
  """
  def _image_to_gray(image):
    # Collapse to one gray channel, then tile back to 3 identical channels
    # so the output shape matches the RGB input.
    image_gray1 = tf.image.rgb_to_grayscale(image)
    image_gray3 = tf.image.grayscale_to_rgb(image_gray1)
    return image_gray3

  with tf.name_scope('RandomRGBtoGray', values=[image]):
    # random variable defining whether to do flip or not
    do_gray_random = tf.random_uniform([], seed=seed)
    # Keep the original image when the uniform draw exceeds `probability`;
    # otherwise convert to grayscale (so P(grayscale) == probability).
    image = tf.cond(
        tf.greater(do_gray_random, probability), lambda: image,
        lambda: _image_to_gray(image))

  return image
def random_adjust_brightness(image, max_delta=0.2):
  """Randomly shifts image brightness, clipping the result to [0, 1].

  Args:
    image: rank 3 float32 tensor containing 1 image ->
      [height, width, channels] with pixel values varying between [0, 1].
    max_delta: how much to change the brightness. A value between [0, 1).

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustBrightness', values=[image]):
    adjusted = tf.image.random_brightness(image, max_delta)
    return tf.clip_by_value(adjusted, clip_value_min=0.0, clip_value_max=1.0)
def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25):
  """Randomly scales image contrast, clipping the result to [0, 1].

  Args:
    image: rank 3 float32 tensor containing 1 image ->
      [height, width, channels] with pixel values varying between [0, 1].
    min_delta: lower bound of the random contrast factor.
    max_delta: upper bound of the random contrast factor. The current
      contrast is multiplied by a factor drawn between these bounds.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustContrast', values=[image]):
    adjusted = tf.image.random_contrast(image, min_delta, max_delta)
    return tf.clip_by_value(adjusted, clip_value_min=0.0, clip_value_max=1.0)
def random_adjust_hue(image, max_delta=0.02):
  """Randomly shifts image hue, clipping the result to [0, 1].

  Args:
    image: rank 3 float32 tensor containing 1 image ->
      [height, width, channels] with pixel values varying between [0, 1].
    max_delta: change hue randomly with a value between 0 and max_delta.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustHue', values=[image]):
    adjusted = tf.image.random_hue(image, max_delta)
    return tf.clip_by_value(adjusted, clip_value_min=0.0, clip_value_max=1.0)
def random_adjust_saturation(image, min_delta=0.8, max_delta=1.25):
  """Randomly scales image saturation, clipping the result to [0, 1].

  Args:
    image: rank 3 float32 tensor containing 1 image ->
      [height, width, channels] with pixel values varying between [0, 1].
    min_delta: lower bound of the random saturation factor.
    max_delta: upper bound of the random saturation factor. The current
      saturation is multiplied by a factor drawn between these bounds.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustSaturation', values=[image]):
    adjusted = tf.image.random_saturation(image, min_delta, max_delta)
    return tf.clip_by_value(adjusted, clip_value_min=0.0, clip_value_max=1.0)
def random_distort_color(image, color_ordering=0):
  """Randomly distorts color.

  Applies random brightness, saturation, hue and contrast changes in one
  of two fixed orders, then clips the result back to [0, 1].

  Args:
    image: rank 3 float32 tensor containing 1 image ->
      [height, width, channels] with pixel values varying between [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0, 1).

  Returns:
    image: image which is the same shape as input image.

  Raises:
    ValueError: if color_ordering is not in {0, 1}.
  """
  with tf.name_scope('RandomDistortColor', values=[image]):
    if color_ordering not in (0, 1):
      raise ValueError('color_ordering must be in {0, 1}')
    # Both orderings start with a random brightness shift.
    image = tf.image.random_brightness(image, max_delta=32. / 255.)
    if color_ordering == 0:
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    else:
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
def image_to_float(image):
  """Casts image pixel values to float32 (used in Faster R-CNN).

  Args:
    image: input image, possibly in tf.uint8 or another dtype.

  Returns:
    image: the same image as a tf.float32 tensor.
  """
  with tf.name_scope('ImageToFloat', values=[image]):
    return tf.to_float(image)
def subtract_channel_mean(image, means=None):
  """Normalizes an image by subtracting a mean from each channel.

  Args:
    image: A 3D tensor of shape [height, width, channels]
    means: float list containing a mean for each channel

  Returns:
    normalized_images: a tensor of shape [height, width, channels]

  Raises:
    ValueError: if the image is not rank 3 or if the number of means is
      not equal to the number of channels.
  """
  with tf.name_scope('SubtractChannelMean', values=[image, means]):
    shape = image.get_shape()
    if len(shape) != 3:
      raise ValueError('Input must be of size [height, width, channels]')
    if len(means) != shape[-1]:
      raise ValueError('len(means) must match the number of channels')
    # [[means]] has shape [1, 1, channels] and broadcasts over height/width.
    return image - [[means]]
def rgb_to_gray(image, three_channels=False):
  """Converts a 3 channel RGB image to a grayscale image.

  Args:
    image: Rank 3 float32 tensor containing 1 image -> [height, width, 3]
           with pixel values varying between [0, 1].
    three_channels: if True, the single gray channel is tiled back to 3
      identical channels so the output keeps an RGB-compatible shape.

  Returns:
    image: A grayscale image of shape [height, width, 1], or
      [height, width, 3] when `three_channels` is True.
  """
  gray_image = tf.image.rgb_to_grayscale(image)
  if three_channels:
    gray_image = tf.tile(gray_image, [1,1,3])
  return gray_image
def string_filtering(text, lower_case=False, include_charset=""):
  """Filters a single string tensor through the custom string_filtering op."""
  filtered = ops.string_filtering([text],
                                  lower_case=lower_case,
                                  include_charset=include_charset)
  return filtered[0]
def get_default_func_arg_map():
  """Returns the default mapping from preprocessing functions to the
  tensor-dict field names they consume (and write back)."""
  image_field = (fields.InputDataFields.image,)
  return {
      resize_image: image_field,
      resize_image_random_method: image_field,
      random_pixel_value_scale: image_field,
      random_rgb_to_gray: image_field,
      random_adjust_brightness: image_field,
      random_adjust_contrast: image_field,
      random_adjust_hue: image_field,
      random_adjust_saturation: image_field,
      random_distort_color: image_field,
      image_to_float: image_field,
      subtract_channel_mean: image_field,
      rgb_to_gray: image_field,
      # Text filtering operates on the ground-truth transcription instead.
      string_filtering: (fields.InputDataFields.groundtruth_text,),
  }
def preprocess(tensor_dict, preprocess_options, func_arg_map=None):
  """Preprocess images and bounding boxes.

  Various types of preprocessing (to be implemented) based on the
  preprocess_options dictionary e.g. "crop image" (affects image and possibly
  boxes), "white balance image" (affects only image), etc. If self._options
  is None, no preprocessing is done.

  Args:
    tensor_dict: dictionary that contains images, boxes, and can contain other
                 things as well.
                 images-> rank 4 float32 tensor contains
                          1 image -> [1, height, width, 3].
                          with pixel values varying between [0, 1]
                 boxes-> rank 2 float32 tensor containing
                         the bounding boxes -> [N, 4].
                         Boxes are in normalized form meaning
                         their coordinates vary between [0, 1].
                         Each row is in the form
                         of [ymin, xmin, ymax, xmax].
    preprocess_options: It is a list of tuples, where each tuple contains a
                        function and a dictionary that contains arguments and
                        their values.
    func_arg_map: mapping from preprocessing functions to arguments that they
                  expect to receive and return.

  Returns:
    tensor_dict: which contains the preprocessed images, bounding boxes, etc.

  Raises:
    ValueError: (a) If the functions passed to Preprocess
                    are not in func_arg_map.
                (b) If the arguments that a function needs
                    do not exist in tensor_dict.
                (c) If image in tensor_dict is not rank 4
  """
  if func_arg_map is None:
    func_arg_map = get_default_func_arg_map()

  # changes the images to image (rank 4 to rank 3) since the functions
  # receive rank 3 tensor for image
  # NOTE(review): the rank-4 squeeze described above is commented out below;
  # the image is currently required to already be rank 3.
  if fields.InputDataFields.image in tensor_dict:
    image = tensor_dict[fields.InputDataFields.image]
    # if len(images.get_shape()) != 4:
    #   raise ValueError('images in tensor_dict should be rank 4')
    # image = tf.squeeze(images, squeeze_dims=[0])
    if len(image.get_shape()) != 3:
      raise ValueError('images in tensor_dict should be rank 3')
    tensor_dict[fields.InputDataFields.image] = image

  # Preprocess inputs based on preprocess_options
  for option in preprocess_options:
    func, params = option
    if func not in func_arg_map:
      raise ValueError('The function %s does not exist in func_arg_map' %
                       (func.__name__))
    # Validate that every field the function reads is present.
    arg_names = func_arg_map[func]
    for a in arg_names:
      if a is not None and a not in tensor_dict:
        raise ValueError('The function %s requires argument %s' %
                         (func.__name__, a))

    def get_arg(key):
      # None placeholders in the arg map become None positional arguments.
      return tensor_dict[key] if key is not None else None
    args = [get_arg(a) for a in arg_names]
    results = func(*args, **params)
    if not isinstance(results, (list, tuple)):
      results = (results,)
    # Removes None args since the return values will not contain those.
    arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
    # Write each returned tensor back under the field it replaces.
    for res, arg_name in zip(results, arg_names):
      tensor_dict[arg_name] = res

  # # changes the image to images (rank 3 to rank 4) to be compatible to what
  # # we received in the first place
  # if fields.InputDataFields.image in tensor_dict:
  #   image = tensor_dict[fields.InputDataFields.image]
  #   images = tf.expand_dims(image, 0)
  #   tensor_dict[fields.InputDataFields.image] = images

  return tensor_dict
| StarcoderdataPython |
1762119 | <reponame>KarrLab/bpforms<gh_stars>1-10
""" Test of bpforms.core
:Author: <NAME> <<EMAIL>>
:Date: 2019-01-31
:Copyright: 2019, Karr Lab
:License: MIT
"""
from bpforms import core
from bpforms.alphabet import dna
from bpforms.alphabet import protein
from bpforms.alphabet import rna
from wc_utils.util.chem import EmpiricalFormula, OpenBabelUtils
import copy
import imghdr
import lark.exceptions
import mock
import openbabel
import os
import requests
import shutil
import tempfile
import unittest
# Reference structure strings for canonical DNA monomers, exported once from
# the alphabet so individual tests can compare against them.
dAMP_inchi = dna.canonical_dna_alphabet.monomers.A.export('inchi')
dCMP_inchi = dna.canonical_dna_alphabet.monomers.C.export('inchi')
dGMP_inchi = dna.canonical_dna_alphabet.monomers.G.export('inchi')
dAMP_smiles = dna.canonical_dna_alphabet.monomers.A.export('smiles')
dCMP_smiles = dna.canonical_dna_alphabet.monomers.C.export('smiles')
dGMP_smiles = dna.canonical_dna_alphabet.monomers.G.export('smiles')
# SMILES literal for a monomer outside the canonical alphabet
# (presumably deoxyinosine, per the name -- TODO confirm).
dIMP_smiles = 'OCC1OC(CC1O)N1C=NC2=C1N=CN=C2O'
class IdentifierTestCase(unittest.TestCase):
    """Tests for :class:`core.Identifier` construction, equality, hashing
    and use inside sets."""

    def test_constructor(self):
        """Valid namespace/id pairs are stored; empty or non-string values
        raise :class:`ValueError` and leave the identifier unchanged."""
        id = core.Identifier('ec-code', '1.1.1.2')
        self.assertEqual(id.ns, 'ec-code')
        self.assertEqual(id.id, '1.1.1.2')

        id = core.Identifier('kegg.compound', 'C00001')
        self.assertEqual(id.ns, 'kegg.compound')
        self.assertEqual(id.id, 'C00001')

        id = core.Identifier('chebi', 'CHEBI:57566')
        self.assertEqual(id.ns, 'chebi')
        self.assertEqual(id.id, 'CHEBI:57566')

        id = core.Identifier('metacyc.compound', 'DAMP')
        self.assertEqual(id.ns, 'metacyc.compound')
        self.assertEqual(id.id, 'DAMP')

        id = core.Identifier('pubchem.compound', '22848660')
        self.assertEqual(id.ns, 'pubchem.compound')
        self.assertEqual(id.id, '22848660')

        with self.assertRaises(ValueError):
            id.ns = ''
        with self.assertRaises(ValueError):
            id.ns = 0
        with self.assertRaises(ValueError):
            id.ns = None
        with self.assertRaises(ValueError):
            id.id = ''
        with self.assertRaises(ValueError):
            id.id = 0
        with self.assertRaises(ValueError):
            id.id = None
        # The failed assignments above must not have modified the identifier.
        self.assertEqual(id.ns, 'pubchem.compound')
        self.assertEqual(id.id, '22848660')

    def test_eq(self):
        """Identifiers compare equal iff both namespace and id match; they
        are never equal to non-Identifier values."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.2')
        id_3 = core.Identifier('ec-code', '1.1.1.1')
        id_4 = core.Identifier('kegg.compound', '1.1.1.2')
        self.assertEqual(id_1, id_2)
        self.assertEqual(id_2, id_1)
        self.assertNotEqual(id_1, id_3)
        self.assertNotEqual(id_1, id_4)
        self.assertNotEqual(id_1, '')
        self.assertNotEqual(id_1, 0)

    def test_hash(self):
        """Equal identifiers share a hash; changing ns or id changes it."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.2')
        id_3 = core.Identifier('ec-code', '1.1.1.1')
        id_4 = core.Identifier('kegg.compound', '1.1.1.2')
        self.assertEqual(id_1.__hash__(), id_2.__hash__())
        self.assertNotEqual(id_1.__hash__(), id_3.__hash__())
        self.assertNotEqual(id_1.__hash__(), id_4.__hash__())

    def test_set(self):
        """Duplicate identifiers collapse to one entry inside a set."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.2')
        id_3 = core.Identifier('ec-code', '1.1.1.1')
        id_4 = core.Identifier('kegg.compound', '1.1.1.2')
        ids = set([id_1, id_2, id_3, id_4])
        self.assertEqual(len(ids), 3)
class IdentifiersTestCase(unittest.TestCase):
    """Tests for :class:`core.IdentifierSet`: construction, element addition
    and set operations reject non-Identifier members."""

    def test_constructor(self):
        """The set accepts Identifier instances and rejects raw strings."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.1')
        id_3 = core.Identifier('kegg.compound', '1.1.1.2')

        ids = core.IdentifierSet()
        self.assertEqual(len(ids), 0)

        ids = core.IdentifierSet([id_1, id_2, id_3])
        self.assertEqual(len(ids), 3)

        with self.assertRaises(ValueError):
            core.IdentifierSet(['ec-code'])

    def test_add(self):
        """add() accepts Identifiers only; tuples raise ValueError."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.1')
        id_3 = core.Identifier('kegg.compound', '1.1.1.2')

        ids = core.IdentifierSet()
        ids.add(id_1)
        ids.add(id_2)
        ids.add(id_3)
        self.assertEqual(len(ids), 3)

        with self.assertRaises(ValueError):
            ids.add(('ec-code', '1.1.1.2'))

    def test_update(self):
        """update() validates each element the same way add() does."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.1')
        id_3 = core.Identifier('kegg.compound', '1.1.1.2')

        ids = core.IdentifierSet()
        ids.update([id_1, id_2])
        self.assertEqual(len(ids), 2)

        with self.assertRaises(ValueError):
            ids.update([('ec-code', '1.1.1.2')])

    def test_symmetric_difference_update(self):
        """Works against IdentifierSet or plain set operands; other operand
        types raise TypeError."""
        id_1 = core.Identifier('ec-code', '1.1.1.2')
        id_2 = core.Identifier('ec-code', '1.1.1.1')
        id_3 = core.Identifier('kegg.compound', '1.1.1.2')

        ids_1 = core.IdentifierSet([id_1, id_2])
        ids_2 = core.IdentifierSet([id_1, id_3])
        ids_1.symmetric_difference_update(ids_2)
        self.assertEqual(ids_1, core.IdentifierSet([id_2, id_3]))

        ids_1 = core.IdentifierSet([id_1, id_2])
        ids_2 = set([id_1, id_3])
        ids_1.symmetric_difference_update(ids_2)
        self.assertEqual(ids_1, core.IdentifierSet([id_2, id_3]))

        with self.assertRaises(TypeError):
            ids_1.symmetric_difference_update(id_1)
class SynonymsTestCase(unittest.TestCase):
    """Tests for :class:`core.SynonymSet`: only non-empty strings are valid
    members, for construction and for every mutating operation."""

    def test_constructor(self):
        """Accepts iterables of non-empty strings; rejects bare strings,
        empty strings, numbers and None."""
        syn_1 = 'a'
        syn_2 = 'b'
        syn_3 = 'c'

        syns = core.SynonymSet()
        self.assertEqual(len(syns), 0)

        syns = core.SynonymSet([syn_1, syn_2, syn_3])
        self.assertEqual(len(syns), 3)

        with self.assertRaises(ValueError):
            core.SynonymSet('')
        with self.assertRaises(ValueError):
            core.SynonymSet('a')
        with self.assertRaises(ValueError):
            core.SynonymSet([''])
        with self.assertRaises(ValueError):
            core.SynonymSet([0])
        with self.assertRaises(ValueError):
            core.SynonymSet([None])

    def test_add(self):
        """add() validates that the new member is a non-empty string."""
        syn_1 = 'a'
        syn_2 = 'b'
        syn_3 = 'c'

        syns = core.SynonymSet()
        syns.add(syn_1)
        syns.add(syn_2)
        syns.add(syn_3)
        self.assertEqual(len(syns), 3)

        with self.assertRaises(ValueError):
            syns.add(0)
        with self.assertRaises(ValueError):
            syns.add(None)
        with self.assertRaises(ValueError):
            syns.add('')

    def test_update(self):
        """update() validates each element the same way add() does."""
        syn_1 = 'a'
        syn_2 = 'b'
        syn_3 = 'c'

        syns = core.SynonymSet()
        syns.update([syn_1, syn_2])
        self.assertEqual(len(syns), 2)

        with self.assertRaises(ValueError):
            syns.update([0])

    def test_symmetric_difference_update(self):
        """Works against SynonymSet or plain set operands; other operand
        types raise TypeError."""
        syn_1 = 'a'
        syn_2 = 'b'
        syn_3 = 'c'

        syns_1 = core.SynonymSet([syn_1, syn_2])
        syns_2 = core.SynonymSet([syn_1, syn_3])
        syns_1.symmetric_difference_update(syns_2)
        self.assertEqual(syns_1, core.SynonymSet([syn_2, syn_3]))

        syns_1 = core.SynonymSet([syn_1, syn_2])
        syns_2 = set([syn_1, syn_3])
        syns_1.symmetric_difference_update(syns_2)
        self.assertEqual(syns_1, core.SynonymSet([syn_2, syn_3]))

        with self.assertRaises(TypeError):
            syns_1.symmetric_difference_update(0)
class MonomerTestCase(unittest.TestCase):
def test_init(self):
identifiers = set([
core.Identifier('chebi', 'CHEBI:58245'),
core.Identifier('pubchem.compound', '22848660'),
core.Identifier('metacyc.compound', 'DAMP'),
])
synonyms = set(['A', 'dAMP', 'deoxyadenosine monophosphate'])
monomer_0 = core.Monomer()
monomer = core.Monomer(id='dAMP', name='deoxyadenosine monophosphate', synonyms=synonyms, identifiers=identifiers,
structure=dAMP_smiles, delta_mass=1., delta_charge=-1, start_position=2, end_position=10,
base_monomers=[monomer_0],
comments='Long string')
self.assertEqual(monomer.id, 'dAMP')
self.assertEqual(monomer.name, 'deoxyadenosine monophosphate')
self.assertEqual(monomer.synonyms, synonyms)
self.assertEqual(monomer.identifiers, identifiers)
self.assertEqual(monomer.export('inchi'), dAMP_inchi)
self.assertEqual(monomer.delta_mass, 1.)
self.assertEqual(monomer.delta_charge, -1)
self.assertEqual(monomer.start_position, 2)
self.assertEqual(monomer.end_position, 10)
self.assertEqual(monomer.base_monomers, set([monomer_0]))
self.assertEqual(monomer.comments, 'Long string')
def test_id_setter(self):
monomer = core.Monomer()
monomer.id = None
monomer.id = ''
monomer.id = 'A'
with self.assertRaises(ValueError):
monomer.id = 1
def test_name_setter(self):
monomer = core.Monomer()
monomer.name = None
monomer.name = ''
monomer.name = 'A'
with self.assertRaises(ValueError):
monomer.name = 1
def test_synonyms_setter(self):
monomer = core.Monomer()
monomer.synonyms = core.SynonymSet()
monomer.synonyms = set(['A'])
monomer.synonyms = ['A']
with self.assertRaises(ValueError):
monomer.synonyms = None
with self.assertRaises(ValueError):
monomer.synonyms = 'A'
def test_identifiers_setter(self):
monomer = core.Monomer()
monomer.identifiers = core.IdentifierSet()
monomer.identifiers = set([core.Identifier('ns', 'id')])
monomer.identifiers = [core.Identifier('ns', 'id')]
with self.assertRaises(ValueError):
monomer.identifiers = None
with self.assertRaises(ValueError):
monomer.identifiers = 'A'
with self.assertRaises(TypeError):
monomer.identifiers = core.Identifier('ns', 'id')
def test_structure_setter(self):
monomer = core.Monomer()
monomer.structure = dAMP_smiles
ob_mol = openbabel.OBMol()
conversion = openbabel.OBConversion()
conversion.SetInFormat('smi')
conversion.ReadString(ob_mol, dAMP_smiles)
monomer.structure = ob_mol
monomer.structure = ''
monomer.structure = None
with self.assertRaises(ValueError):
monomer.structure = 'InChI'
def test_delta_mass_setter(self):
monomer = core.Monomer()
monomer.delta_mass = None
monomer.delta_mass = 1
monomer.delta_mass = 1.
with self.assertRaises(ValueError):
monomer.delta_mass = 'a'
def test_delta_charge_setter(self):
monomer = core.Monomer()
monomer.delta_charge = None
monomer.delta_charge = 1
monomer.delta_charge = 1.
with self.assertRaises(ValueError):
monomer.delta_charge = 1.5
with self.assertRaises(ValueError):
monomer.delta_charge = 'a'
def test_start_position_setter(self):
monomer = core.Monomer()
monomer.start_position = None
monomer.start_position = 1
monomer.start_position = 1.
with self.assertRaises(ValueError):
monomer.start_position = 1.5
with self.assertRaises(ValueError):
monomer.start_position = -1
with self.assertRaises(ValueError):
monomer.start_position = 'a'
def test_end_position_setter(self):
monomer = core.Monomer()
monomer.end_position = None
monomer.end_position = 1
monomer.end_position = 1.
with self.assertRaises(ValueError):
monomer.end_position = 1.5
with self.assertRaises(ValueError):
monomer.end_position = -1
with self.assertRaises(ValueError):
monomer.end_position = 'a'
def test_monomers_position_setter(self):
monomer = core.Monomer()
monomer.monomers_position = []
monomer.monomers_position = set([core.Monomer()])
with self.assertRaises(ValueError):
monomer.monomers_position = 'A'
def test_base_monomers_setter(self):
monomer = core.Monomer()
monomer.base_monomers = []
monomer.base_monomers = set([core.Monomer()])
with self.assertRaises(ValueError):
monomer.base_monomers = 'A'
def test_set_backbone_bond_atoms(self):
monomer = core.Monomer()
monomer.backbone_bond_atoms = []
monomer.backbone_bond_atoms = core.AtomList()
with self.assertRaises(ValueError):
monomer.backbone_bond_atoms = None
def test_set_backbone_displaced_atoms(self):
monomer = core.Monomer()
monomer.backbone_displaced_atoms = []
monomer.backbone_displaced_atoms = core.AtomList()
with self.assertRaises(ValueError):
monomer.backbone_displaced_atoms = None
def test_set_r_bond_atoms(self):
monomer = core.Monomer()
monomer.r_bond_atoms = []
monomer.r_bond_atoms = core.AtomList()
with self.assertRaises(ValueError):
monomer.r_bond_atoms = None
def test_set_bond_bond_atoms(self):
monomer = core.Monomer()
monomer.l_bond_atoms = []
monomer.l_bond_atoms = core.AtomList()
with self.assertRaises(ValueError):
monomer.l_bond_atoms = None
def test_set_r_displaced_atoms(self):
monomer = core.Monomer()
monomer.r_displaced_atoms = []
monomer.r_displaced_atoms = core.AtomList()
with self.assertRaises(ValueError):
monomer.r_displaced_atoms = None
def test_set_bond_displaced_atoms(self):
monomer = core.Monomer()
monomer.l_displaced_atoms = []
monomer.l_displaced_atoms = core.AtomList()
with self.assertRaises(ValueError):
monomer.l_displaced_atoms = None
def test_comments_setter(self):
monomer = core.Monomer()
monomer.comments = None
monomer.comments = '1'
with self.assertRaises(ValueError):
monomer.comments = 1
def test_get_major_micro_species(self):
monomer = core.Monomer()
monomer.get_major_micro_species(7.)
monomer = core.Monomer(structure=dAMP_smiles)
monomer.get_major_micro_species(7.)
monomer.get_major_micro_species(10.)
def test_export(self):
monomer = core.Monomer()
self.assertEqual(monomer.export('inchi'), None)
monomer = core.Monomer(structure=dAMP_smiles)
self.assertEqual(monomer.export('inchi'), dAMP_inchi)
def test_get_formula(self):
monomer = core.Monomer(structure=dAMP_smiles)
self.assertEqual(monomer.get_formula(), EmpiricalFormula('C10H12N5O6P'))
with self.assertRaises(ValueError):
monomer = core.Monomer()
monomer.get_formula()
def test_get_mol_wt(self):
monomer = core.Monomer()
self.assertEqual(monomer.get_mol_wt(), None)
monomer = core.Monomer(structure=dAMP_smiles)
self.assertEqual(monomer.get_mol_wt(), 329.208761998)
monomer.delta_mass = 1.
self.assertEqual(monomer.get_mol_wt(), 330.208761998)
def test_get_charge(self):
monomer = core.Monomer(structure=dAMP_smiles)
self.assertEqual(monomer.get_charge(), -2)
monomer = core.Monomer(structure=dAMP_smiles)
monomer.delta_charge = 1
self.assertEqual(monomer.get_charge(), -1)
with self.assertRaises(ValueError):
monomer = core.Monomer()
monomer.get_charge()
def test_to_from_dict(self):
alphabet = dna.dna_alphabet
monomer = core.Monomer()
monomer.monomers_position = [alphabet.monomers.C]
monomer.base_monomers = [alphabet.monomers.A]
monomer.backbone_bond_atoms = [core.Atom(core.Monomer, 'C', charge=3), core.Atom(core.Monomer, 'H', position=3, charge=2)]
monomer_as_dict = monomer.to_dict(alphabet=alphabet)
self.assertEqual(monomer_as_dict, {
'monomers_position': ['C'],
'base_monomers': ['A'],
'backbone_bond_atoms': [
{'molecule': 'Monomer', 'element': 'C', 'charge': 3},
{'molecule': 'Monomer', 'element': 'H', 'position': 3, 'charge': 2},
],
})
monomer_2 = core.Monomer()
monomer_2.from_dict(monomer_as_dict, alphabet=alphabet)
self.assertEqual(monomer_2.monomers_position, set([alphabet.monomers.C]))
self.assertEqual(monomer_2.base_monomers, set([alphabet.monomers.A]))
self.assertTrue(monomer.is_equal(monomer_2))
def test_str(self):
monomer = core.Monomer()
self.assertEqual(str(monomer), '[]')
monomer.id = 'dAMP'
self.assertEqual(str(monomer), '[id: "dAMP"]')
monomer.name = 'deoxyadenosine monophosphate'
self.assertEqual(str(monomer), '[id: "dAMP" | name: "deoxyadenosine monophosphate"]')
monomer.synonyms = set(['A', 'dAMP'])
self.assertIn(' | synonym: "A"', str(monomer))
self.assertIn(' | synonym: "dAMP"', str(monomer))
monomer.identifiers = set([core.Identifier('chebi', 'CHEBI:58245'), core.Identifier('biocyc.compound', 'DAMP')])
self.assertIn(' | identifier: "CHEBI:58245" @ "chebi"', str(monomer))
self.assertIn(' | identifier: "DAMP" @ "biocyc.compound"', str(monomer))
monomer.structure = dAMP_smiles
self.assertIn(' | structure: "{}"]'.format(dAMP_smiles), str(monomer))
monomer.backbone_bond_atoms.append(core.Atom(core.Monomer, 'C', 2, -3))
self.assertIn(' | backbone-bond-atom: C2-3]', str(monomer))
monomer.backbone_bond_atoms.append(core.Atom(core.Monomer, 'C', 2, +3))
self.assertIn(' | backbone-bond-atom: C2+3]', str(monomer))
monomer.backbone_bond_atoms.append(core.Atom(core.Monomer, 'C', 2, 0))
self.assertIn(' | backbone-bond-atom: C2]', str(monomer))
monomer.delta_mass = 1.
monomer.delta_charge = -1
self.assertIn(' | delta-mass: 1', str(monomer))
self.assertIn(' | delta-charge: -1', str(monomer))
monomer.start_position = 3
self.assertIn(' | position: 3-]', str(monomer))
monomer.end_position = 5
self.assertIn(' | position: 3-5]', str(monomer))
monomer.start_position = None
self.assertIn(' | position: -5]', str(monomer))
monomer.comments = 'help "me"'
self.assertIn(' | comments: "help \\"me\\""', str(monomer))
    def test_is_equal(self):
        """``Monomer.is_equal`` compares id, structure, monomer positions,
        base monomers, and backbone bond atoms; differing in any one field
        makes the monomers unequal."""
        monomer_1 = core.Monomer(id='A', structure=dAMP_smiles)
        monomer_2 = core.Monomer(id='A', structure=dAMP_smiles)
        monomer_3 = core.Monomer(id='B', structure=dAMP_smiles)
        monomer_4 = core.Monomer(id='A', structure=dCMP_smiles)
        monomer_5 = core.Monomer(id='A', structure=dAMP_smiles, monomers_position=[core.Monomer(id='A')])
        monomer_6 = core.Monomer(id='A', structure=dAMP_smiles, monomers_position=[core.Monomer(id='A')])
        monomer_7 = core.Monomer(id='A', structure=dAMP_smiles, monomers_position=[core.Monomer(id='B')])
        monomer_8 = core.Monomer(id='A', structure=dAMP_smiles, base_monomers=[core.Monomer(id='A')])
        monomer_9 = core.Monomer(id='A', structure=dAMP_smiles, base_monomers=[core.Monomer(id='A')])
        monomer_10 = core.Monomer(id='A', structure=dAMP_smiles, base_monomers=[core.Monomer(id='B')])
        monomer_11 = core.Monomer(id='A', structure=dAMP_smiles, backbone_bond_atoms=[core.Atom(None, 'S')])
        self.assertTrue(monomer_1.is_equal(monomer_1))
        self.assertTrue(monomer_1.is_equal(monomer_2))
        self.assertTrue(monomer_2.is_equal(monomer_1))
        # a non-Monomer object with matching attributes is still unequal
        self.assertFalse(monomer_1.is_equal(mock.Mock(id='A', structure=dAMP_smiles)))
        self.assertFalse(monomer_1.is_equal(monomer_3))
        self.assertFalse(monomer_1.is_equal(monomer_4))
        self.assertFalse(monomer_1.is_equal(monomer_5))
        self.assertTrue(monomer_5.is_equal(monomer_6))
        self.assertFalse(monomer_5.is_equal(monomer_7))
        self.assertFalse(monomer_1.is_equal(monomer_8))
        self.assertTrue(monomer_8.is_equal(monomer_9))
        self.assertFalse(monomer_8.is_equal(monomer_10))
        self.assertFalse(monomer_1.is_equal(monomer_11))
    def test_get_image_url(self):
        """An alphabet monomer yields a fetchable image URL; a bare monomer yields ``None``.

        NOTE(review): performs a live HTTP GET, so this test requires network access.
        """
        url = dna.dna_alphabet.monomers.A.get_image_url()
        self.assertNotEqual(url, None)
        response = requests.get(url)
        response.raise_for_status()
        self.assertEqual(core.Monomer().get_image_url(), None)
    def test_get_image(self):
        """Images render as SVG (with or without the XML header) and PNG;
        a monomer without a structure renders to ``None``."""
        smi = dna.dna_alphabet.monomers.A.export('smi')
        svg = dna.dna_alphabet.monomers.A.get_image()
        self.assertTrue(svg.startswith('<?xml'))
        # rendering must not perturb the underlying structure
        smi2 = dna.dna_alphabet.monomers.A.export('smi')
        self.assertEqual(smi2, smi)
        svg = dna.dna_alphabet.monomers.A.get_image(include_xml_header=False)
        self.assertTrue(svg.startswith('<svg'))
        self.assertEqual(core.Monomer().get_image(), None)
        # verify the PNG bytes by writing them to disk and sniffing the file type;
        # NOTE(review): imghdr is deprecated (PEP 594) and removed in Python 3.13
        png = dna.dna_alphabet.monomers.A.get_image(image_format='png', width=250, height=150)
        tempdir = tempfile.mkdtemp()
        tmpfile = os.path.join(tempdir, 'test.png')
        with open(tmpfile, 'wb') as file:
            file.write(png)
        self.assertEqual(imghdr.what(tmpfile), 'png')
        shutil.rmtree(tempdir)
def test_blend_color_opacity(self):
self.assertEqual(core.Monomer._blend_color_opacity(0xff0000, 255), 0xff0000)
self.assertEqual(core.Monomer._blend_color_opacity(0xff0000, 0), 0xffffff)
self.assertEqual(core.Monomer._blend_color_opacity(0x00ff00, 255), 0x00ff00)
self.assertEqual(core.Monomer._blend_color_opacity(0x00ff00, 0), 0xffffff)
self.assertEqual(core.Monomer._blend_color_opacity(0x0000ff, 255), 0x0000ff)
self.assertEqual(core.Monomer._blend_color_opacity(0x0000ff, 0), 0xffffff)
self.assertEqual(core.Monomer._blend_color_opacity(0x000000, 255), 0x000000)
self.assertEqual(core.Monomer._blend_color_opacity(0x000000, 0), 0xffffff)
    def test_get_fasta(self):
        """Canonical codes resolve through chains of ``base_monomers``.

        NOTE(review): despite the name, this exercises
        ``Monomer.get_canonical_code``, not a FASTA export.
        """
        alphabet = core.Alphabet()
        alphabet.monomers.A = core.Monomer()
        alphabet.monomers.C = core.Monomer()
        alphabet.monomers.G = core.Monomer()
        alphabet.monomers.T = core.Monomer()
        # chain of single-parent modifications: each should resolve back to A
        alphabet.monomers.m2A = core.Monomer(base_monomers=[alphabet.monomers.A])
        alphabet.monomers.m22A = core.Monomer(base_monomers=[alphabet.monomers.m2A])
        alphabet.monomers.m222A = core.Monomer(base_monomers=[alphabet.monomers.m22A])
        # two parents with a common root (A) still resolve to A
        alphabet.monomers.m2222A = core.Monomer(base_monomers=[alphabet.monomers.A, alphabet.monomers.m222A])
        # two parents with different roots (C and A) are ambiguous
        alphabet.monomers.m2222C = core.Monomer(base_monomers=[alphabet.monomers.C, alphabet.monomers.m222A])
        monomer_codes = {monomer: code for code, monomer in alphabet.monomers.items()}
        self.assertEqual(alphabet.monomers.A.get_canonical_code(monomer_codes), 'A')
        self.assertEqual(alphabet.monomers.C.get_canonical_code(monomer_codes), 'C')
        self.assertEqual(alphabet.monomers.G.get_canonical_code(monomer_codes), 'G')
        self.assertEqual(alphabet.monomers.T.get_canonical_code(monomer_codes), 'T')
        self.assertEqual(alphabet.monomers.m2A.get_canonical_code(monomer_codes), 'A')
        self.assertEqual(alphabet.monomers.m22A.get_canonical_code(monomer_codes), 'A')
        self.assertEqual(alphabet.monomers.m222A.get_canonical_code(monomer_codes), 'A')
        self.assertEqual(alphabet.monomers.m2222A.get_canonical_code(monomer_codes), 'A')
        # ambiguous ancestry falls back to the default code ('?' unless overridden)
        self.assertEqual(alphabet.monomers.m2222C.get_canonical_code(monomer_codes), '?')
        self.assertEqual(alphabet.monomers.m2222C.get_canonical_code(monomer_codes, default_code='X'), 'X')
        # a monomer outside the alphabet has no code at all
        self.assertEqual(core.Monomer().get_canonical_code(monomer_codes), '?')
        self.assertEqual(core.Monomer().get_canonical_code(monomer_codes, default_code='X'), 'X')
class MonomerSequenceTestCase(unittest.TestCase):
    """Tests for :class:`core.MonomerSequence`: a list-like container that
    only accepts :class:`core.Monomer` elements."""

    def test_init(self):
        """The constructor accepts None, nothing, or an iterable of Monomers;
        anything containing non-Monomers raises ``ValueError``."""
        seq = core.MonomerSequence(None)
        seq = core.MonomerSequence()
        self.assertEqual(len(seq), 0)
        seq = core.MonomerSequence([core.Monomer(), core.Monomer()])
        self.assertEqual(len(seq), 2)
        with self.assertRaises(ValueError):
            core.MonomerSequence('A')
        with self.assertRaises(ValueError):
            core.MonomerSequence(['A'])

    def test_append(self):
        """``append`` accepts Monomers and rejects other values."""
        seq = core.MonomerSequence()
        seq.append(core.Monomer())
        seq.append(core.Monomer())
        self.assertEqual(len(seq), 2)
        with self.assertRaises(ValueError):
            seq.append('A')

    def test_extend(self):
        """``extend`` accepts iterables of Monomers and rejects other elements."""
        seq = core.MonomerSequence()
        seq.extend([core.Monomer(), core.Monomer()])
        self.assertEqual(len(seq), 2)
        with self.assertRaises(ValueError):
            seq.extend(['A'])

    def test_insert(self):
        """``insert`` accepts Monomers and rejects other values."""
        seq = core.MonomerSequence()
        seq.insert(0, core.Monomer())
        with self.assertRaises(ValueError):
            seq.insert(0, 'A')

    def test_setitem(self):
        """Index and slice assignment are validated like the constructor."""
        seq = core.MonomerSequence([core.Monomer(), core.Monomer()])
        seq[0] = core.Monomer()
        seq[0:1] = [core.Monomer()]
        seq[0:1] = core.MonomerSequence([core.Monomer()])
        with self.assertRaises(ValueError):
            seq[0] = 'A'
        with self.assertRaises(ValueError):
            seq[0] = ['A']
        with self.assertRaises(ValueError):
            seq[0:1] = 'A'
        with self.assertRaises(ValueError):
            seq[0:1] = ['A']

    def test_get_monomer_counts(self):
        """Counts are keyed by Monomer identity (object), not by id string --
        the three distinct Monomers below all share id 'A'."""
        monomer_1 = core.Monomer(id='A')
        monomer_2 = core.Monomer(id='A')
        monomer_3 = core.Monomer(id='A')
        seq = core.MonomerSequence([monomer_1, monomer_2, monomer_3, monomer_3, monomer_3, monomer_2, monomer_2, monomer_3])
        self.assertEqual(seq.get_monomer_counts(), {
            monomer_1: 1,
            monomer_2: 3,
            monomer_3: 4,
        })

    def test_is_equal(self):
        """Sequences are equal iff they are element-wise equal; a plain list
        is never equal to a MonomerSequence."""
        seq_1 = core.MonomerSequence([core.Monomer(id='A'), core.Monomer(id='B')])
        seq_2 = core.MonomerSequence([core.Monomer(id='A'), core.Monomer(id='B')])
        seq_3 = core.MonomerSequence([core.Monomer(id='A'), core.Monomer(id='C')])
        self.assertTrue(seq_1.is_equal(seq_1))
        self.assertTrue(seq_1.is_equal(seq_2))
        self.assertFalse(seq_1.is_equal([]))
        self.assertFalse(seq_1.is_equal(seq_3))
class AtomTestCase(unittest.TestCase):
    """Tests for :class:`core.Atom`: property validation, equality, and
    dict round-tripping."""

    def test_molecule_setter(self):
        """``molecule`` must be the Monomer or Backbone class (or None)."""
        atom = core.Atom(core.Monomer, 'C')
        atom.molecule = None
        self.assertEqual(atom.molecule, None)
        atom.molecule = core.Monomer
        self.assertEqual(atom.molecule, core.Monomer)
        atom.molecule = core.Backbone
        self.assertEqual(atom.molecule, core.Backbone)
        with self.assertRaises(ValueError):
            atom.molecule = core.Atom

    def test_element_setter(self):
        """``element`` must be a string."""
        atom = core.Atom(core.Monomer, 'C')
        atom.element = 'C'
        self.assertEqual(atom.element, 'C')
        with self.assertRaises(ValueError):
            atom.element = 1

    def test_position_setter(self):
        """``position`` must be a non-negative integer (integral floats are
        coerced) or None."""
        atom = core.Atom(core.Monomer, 'C')
        atom.position = 2
        self.assertEqual(atom.position, 2)
        atom.position = 2.
        self.assertEqual(atom.position, 2)
        atom.position = None
        self.assertEqual(atom.position, None)
        with self.assertRaises(ValueError):
            atom.position = 'a'
        with self.assertRaises(ValueError):
            atom.position = 2.5
        with self.assertRaises(ValueError):
            atom.position = -1

    def test_charge_setter(self):
        """``charge`` must be an integer; unlike position, None is rejected."""
        atom = core.Atom(core.Monomer, 'C')
        atom.charge = 2
        self.assertEqual(atom.charge, 2)
        atom.charge = -3
        self.assertEqual(atom.charge, -3)
        with self.assertRaises(ValueError):
            atom.charge = None
        with self.assertRaises(ValueError):
            atom.charge = 'a'
        with self.assertRaises(ValueError):
            atom.charge = 2.5

    def test_monomer_setter(self):
        """``monomer`` (an index, not a Monomer object) must be a non-negative
        integer (integral floats are coerced) or None."""
        atom = core.Atom(core.Monomer, 'C')
        atom.monomer = 2
        self.assertEqual(atom.monomer, 2)
        atom.monomer = 2.
        self.assertEqual(atom.monomer, 2)
        atom.monomer = None
        self.assertEqual(atom.monomer, None)
        with self.assertRaises(ValueError):
            atom.monomer = 'a'
        with self.assertRaises(ValueError):
            atom.monomer = 2.5
        with self.assertRaises(ValueError):
            atom.monomer = -1

    def test_is_equal(self):
        """Atoms are equal iff molecule, element, position, and charge all match."""
        atom_1 = core.Atom(core.Monomer, 'C', position=2, charge=-3)
        self.assertTrue(atom_1.is_equal(atom_1))
        self.assertTrue(atom_1.is_equal(core.Atom(core.Monomer, 'C', position=2, charge=-3)))
        self.assertFalse(atom_1.is_equal({}))
        self.assertFalse(atom_1.is_equal(core.Atom(core.Monomer, 'H', position=2, charge=-3)))
        self.assertFalse(atom_1.is_equal(core.Atom(core.Monomer, 'C', position=3, charge=-3)))
        self.assertFalse(atom_1.is_equal(core.Atom(core.Monomer, 'C', position=2, charge=-2)))

    def test_to_from_dict(self):
        """Round-trip through to_dict/from_dict; None-valued optional fields
        (position, molecule, monomer) are omitted from the dict."""
        atom_1 = core.Atom(core.Monomer, 'C', position=None, charge=-3)
        atom_1_dict = atom_1.to_dict()
        self.assertEqual(atom_1_dict, {'molecule': 'Monomer', 'element': 'C', 'charge': -3})
        atom_2 = core.Atom(None, '').from_dict(atom_1_dict)
        self.assertTrue(atom_1.is_equal(atom_2))
        atom_1 = core.Atom(core.Monomer, 'C', position=2, charge=-3)
        atom_1_dict = atom_1.to_dict()
        self.assertEqual(atom_1_dict, {'molecule': 'Monomer', 'element': 'C', 'charge': -3, 'position': 2})
        atom_2 = core.Atom(None, '').from_dict(atom_1.to_dict())
        self.assertTrue(atom_1.is_equal(atom_2))
        atom_1 = core.Atom(core.Backbone, 'C', position=2, charge=-3)
        atom_1_dict = atom_1.to_dict()
        self.assertEqual(atom_1_dict, {'molecule': 'Backbone', 'element': 'C', 'charge': -3, 'position': 2})
        atom_2 = core.Atom(None, '').from_dict(atom_1.to_dict())
        self.assertTrue(atom_1.is_equal(atom_2))
        atom_1 = core.Atom(None, 'C', position=2, charge=-3)
        atom_1_dict = atom_1.to_dict()
        self.assertEqual(atom_1_dict, {'element': 'C', 'charge': -3, 'position': 2})
        atom_2 = core.Atom(None, '').from_dict(atom_1.to_dict())
        self.assertTrue(atom_1.is_equal(atom_2))
        atom_1 = core.Atom(None, 'C', position=2, charge=-3, monomer=5)
        atom_1_dict = atom_1.to_dict()
        self.assertEqual(atom_1_dict, {'element': 'C', 'charge': -3, 'position': 2, 'monomer': 5})
        atom_2 = core.Atom(None, '').from_dict(atom_1.to_dict())
        self.assertTrue(atom_1.is_equal(atom_2))
class AtomListTestCase(unittest.TestCase):
    """Tests for :class:`core.AtomList`: a list-like container that only
    accepts :class:`core.Atom` elements."""

    def test_init(self):
        """The constructor accepts an iterable of Atoms."""
        atom_1 = core.Atom(core.Monomer, 'C')
        atom_2 = core.Atom(core.Monomer, 'H')
        atom_list = core.AtomList([atom_1, atom_2])

    def test_append(self):
        """``append`` accepts Atoms and rejects other values."""
        atom_1 = core.Atom(core.Monomer, 'C')
        atom_list = core.AtomList()
        atom_list.append(atom_1)
        self.assertEqual(atom_list, core.AtomList([atom_1]))
        with self.assertRaises(ValueError):
            atom_list.append('C')

    def test_extend(self):
        """``extend`` accepts iterables of Atoms."""
        atom_1 = core.Atom(core.Monomer, 'C')
        atom_2 = core.Atom(core.Monomer, 'H')
        atom_list = core.AtomList()
        atom_list.extend([atom_1, atom_2])
        self.assertEqual(atom_list, core.AtomList([atom_1, atom_2]))

    def test_insert(self):
        """``insert`` places an Atom at the given index and rejects non-Atoms."""
        atom_1 = core.Atom(core.Monomer, 'C')
        atom_2 = core.Atom(core.Monomer, 'H')
        atom_3 = core.Atom(core.Monomer, 'O')
        atom_list = core.AtomList([atom_1, atom_2])
        atom_list.insert(1, atom_3)
        self.assertEqual(atom_list, core.AtomList([atom_1, atom_3, atom_2]))
        with self.assertRaises(ValueError):
            atom_list.insert(1, 'C')

    def test_set_item(self):
        """Index and slice assignment are validated like ``append``."""
        atom_1 = core.Atom(core.Monomer, 'C')
        atom_2 = core.Atom(core.Monomer, 'H')
        atom_3 = core.Atom(core.Monomer, 'O')
        atom_list = core.AtomList([atom_1, atom_2, atom_3])
        atom_list[0] = atom_3
        self.assertEqual(atom_list, core.AtomList([atom_3, atom_2, atom_3]))
        atom_list = core.AtomList([atom_1, atom_2, atom_3])
        atom_list[0:1] = [atom_3]
        self.assertEqual(atom_list, core.AtomList([atom_3, atom_2, atom_3]))
        atom_list = core.AtomList([atom_1, atom_2, atom_3])
        with self.assertRaises(ValueError):
            atom_list[0] = 'C'
        atom_list = core.AtomList([atom_1, atom_2, atom_3])
        with self.assertRaises(ValueError):
            atom_list[0:1] = ['C']

    def test_is_equal(self):
        """Lists are equal iff they are element-wise equal (including each
        atom's molecule class)."""
        atom_1 = core.Atom(core.Monomer, 'C')
        atom_2 = core.Atom(core.Monomer, 'H')
        atom_3 = core.Atom(core.Monomer, 'O')
        atom_list_1 = core.AtomList([core.Atom(core.Monomer, 'C'), core.Atom(core.Monomer, 'H'), core.Atom(core.Monomer, 'O')])
        atom_list_2 = core.AtomList([core.Atom(core.Monomer, 'C'), core.Atom(core.Monomer, 'H'), core.Atom(core.Monomer, 'O')])
        atom_list_3 = core.AtomList([core.Atom(core.Monomer, 'C'), core.Atom(core.Monomer, 'N'), core.Atom(core.Monomer, 'O')])
        atom_list_4 = core.AtomList([core.Atom(core.Backbone, 'C'), core.Atom(core.Monomer, 'H'), core.Atom(core.Monomer, 'O')])
        self.assertTrue(atom_list_1.is_equal(atom_list_1))
        self.assertTrue(atom_list_1.is_equal(atom_list_2))
        self.assertFalse(atom_list_1.is_equal({}))
        self.assertFalse(atom_list_1.is_equal(atom_list_3))
        self.assertFalse(atom_list_1.is_equal(atom_list_4))

    def test_to_from_list(self):
        """Round-trip through to_list/from_list preserves equality."""
        atom_list_1 = core.AtomList([core.Atom(core.Monomer, 'C'), core.Atom(core.Monomer, 'H'), core.Atom(core.Monomer, 'O')])
        atom_list_1_as_list = atom_list_1.to_list()
        self.assertEqual(atom_list_1_as_list, [
            {'molecule': 'Monomer', 'element': 'C'},
            {'molecule': 'Monomer', 'element': 'H'},
            {'molecule': 'Monomer', 'element': 'O'},
        ])
        atom_list_2 = core.AtomList().from_list(atom_list_1_as_list)
        self.assertTrue(atom_list_1.is_equal(atom_list_2))
class BackboneTestCase(unittest.TestCase):
    """Tests for :class:`core.Backbone`: structure/atom-list validation,
    format export, and formula/weight/charge computations."""

    def test_set_structure(self):
        """``structure`` accepts None or a valid SMILES string; an invalid
        string raises ``ValueError``."""
        backbone = core.Backbone()
        backbone.structure = None
        backbone.structure = dAMP_smiles
        with self.assertRaises(ValueError):
            backbone.structure = 'dAMP'

    def test_set_monomer_bond_atoms(self):
        """``monomer_bond_atoms`` accepts lists/AtomLists but not None."""
        backbone = core.Backbone()
        backbone.monomer_bond_atoms = []
        backbone.monomer_bond_atoms = core.AtomList()
        with self.assertRaises(ValueError):
            backbone.monomer_bond_atoms = None

    def test_set_monomer_displaced_atoms(self):
        """``monomer_displaced_atoms`` accepts lists/AtomLists but not None."""
        backbone = core.Backbone()
        backbone.monomer_displaced_atoms = []
        backbone.monomer_displaced_atoms = core.AtomList()
        with self.assertRaises(ValueError):
            backbone.monomer_displaced_atoms = None

    def test_export(self):
        """Export the structure as InChI/SMILES; no structure exports to None."""
        backbone = core.Backbone()
        backbone.structure = dAMP_smiles
        self.assertEqual(backbone.export('inchi'), dAMP_inchi)
        self.assertEqual(backbone.export('smiles'), dAMP_smiles)
        backbone.structure = None
        self.assertEqual(backbone.export('inchi'), None)
        self.assertEqual(backbone.export('smiles'), None)

    def test_get_formula(self):
        """The net formula is the structure's formula minus all displaced atoms;
        with no structure the result can be negative (here -H2)."""
        backbone = core.Backbone()
        backbone.structure = dAMP_smiles
        self.assertEqual(backbone.get_formula(), EmpiricalFormula('C10H12N5O6P'))
        backbone.backbone_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'C')])
        backbone.monomer_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'H'), core.Atom(core.Monomer, 'H')])
        self.assertEqual(backbone.get_formula(), EmpiricalFormula('C10H10N5O6P'))
        backbone.structure = None
        self.assertEqual(backbone.get_formula(), EmpiricalFormula('H2') * -1)

    def test_get_mol_wt(self):
        # exact-float comparison against the known molecular weight of dAMP
        backbone = core.Backbone()
        backbone.structure = dAMP_smiles
        self.assertEqual(backbone.get_mol_wt(), 329.208761998)

    def test_get_charge(self):
        """The net charge is the structure's charge minus displaced-atom charges."""
        backbone = core.Backbone()
        backbone.structure = dAMP_smiles
        self.assertEqual(backbone.get_charge(), -2)
        backbone.structure = None
        self.assertEqual(backbone.get_charge(), 0)
        backbone.backbone_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'C', charge=2)])
        backbone.monomer_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'H', charge=3)])
        self.assertEqual(backbone.get_charge(), -3)

    def test_is_equal(self):
        """Backbones are equal iff structure and both atom lists match."""
        backbone_1 = core.Backbone()
        backbone_2 = core.Backbone()
        backbone_3 = core.Backbone(structure=dAMP_smiles)
        backbone_4 = core.Backbone(monomer_bond_atoms=[core.Atom(core.Monomer, 'H')])
        backbone_5 = core.Backbone(monomer_displaced_atoms=[core.Atom(core.Monomer, 'H')])
        self.assertTrue(backbone_1.is_equal(backbone_1))
        self.assertTrue(backbone_1.is_equal(backbone_2))
        self.assertFalse(backbone_1.is_equal({}))
        self.assertFalse(backbone_1.is_equal(backbone_3))
        self.assertFalse(backbone_1.is_equal(backbone_4))
        self.assertFalse(backbone_1.is_equal(backbone_5))
class BondTestCase(unittest.TestCase):
    """Tests for :class:`core.Bond`: attribute validation, stoichiometry
    (formula/weight/charge), equality, string rendering, and setter errors.

    Fix: the two setter tests for the right-hand atom lists were misnamed
    ``test_set_bond_bond_atoms`` / ``test_set_bond_displaced_atoms``; they are
    renamed ``test_set_r_bond_atoms`` / ``test_set_r_displaced_atoms`` to
    mirror their ``l_`` counterparts (unittest discovery is unaffected).
    """

    def test_set_id(self):
        """``id`` accepts strings or None and rejects other types."""
        bond = core.Bond()
        bond.id = None
        bond.id = 'a'
        with self.assertRaises(ValueError):
            bond.id = 2

    def test_set_name(self):
        """``name`` accepts strings or None and rejects other types."""
        bond = core.Bond()
        bond.name = None
        bond.name = 'a'
        with self.assertRaises(ValueError):
            bond.name = 2

    def test_set_synonyms(self):
        """``synonyms`` accepts any iterable of strings but not None."""
        bond = core.Bond()
        bond.synonyms = set(['a', 'b'])
        bond.synonyms = ['a', 'b', 'c']
        bond.synonyms = ('a', 'b', 'c', 'd')
        with self.assertRaises(ValueError):
            bond.synonyms = None

    def test_set_l_bond_atoms(self):
        """``l_bond_atoms`` accepts lists/AtomLists but not None."""
        bond = core.Bond()
        bond.l_bond_atoms = []
        bond.l_bond_atoms = core.AtomList()
        with self.assertRaises(ValueError):
            bond.l_bond_atoms = None

    def test_set_r_bond_atoms(self):
        """``r_bond_atoms`` accepts lists/AtomLists but not None."""
        bond = core.Bond()
        bond.r_bond_atoms = []
        bond.r_bond_atoms = core.AtomList()
        with self.assertRaises(ValueError):
            bond.r_bond_atoms = None

    def test_set_l_displaced_atoms(self):
        """``l_displaced_atoms`` accepts lists/AtomLists but not None."""
        bond = core.Bond()
        bond.l_displaced_atoms = []
        bond.l_displaced_atoms = core.AtomList()
        with self.assertRaises(ValueError):
            bond.l_displaced_atoms = None

    def test_set_r_displaced_atoms(self):
        """``r_displaced_atoms`` accepts lists/AtomLists but not None."""
        bond = core.Bond()
        bond.r_displaced_atoms = []
        bond.r_displaced_atoms = core.AtomList()
        with self.assertRaises(ValueError):
            bond.r_displaced_atoms = None

    def test_set_order(self):
        """``order`` accepts BondOrder values but not None."""
        bond = core.Bond()
        bond.order = core.BondOrder.double
        with self.assertRaises(ValueError):
            bond.order = None

    def test_set_stereo(self):
        """``stereo`` accepts BondStereo values or None, but not other types."""
        bond = core.Bond()
        bond.stereo = None
        bond.stereo = core.BondStereo.down
        with self.assertRaises(ValueError):
            bond.stereo = 2

    def test_get_formula(self):
        """Displaced atoms subtract from the bond's net formula (here -CH2)."""
        bond = core.Bond()
        bond.l_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'C')])
        bond.r_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'H'), core.Atom(core.Monomer, 'H')])
        self.assertEqual(bond.get_formula(), EmpiricalFormula('CH2') * -1)

    def test_get_mol_wt(self):
        """A bond with no displaced atoms has zero net molecular weight."""
        bond = core.Bond()
        self.assertEqual(bond.get_mol_wt(), 0.)

    def test_get_charge(self):
        """Displaced-atom charges subtract from the bond's net charge."""
        bond = core.Bond()
        bond.l_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'C', charge=2)])
        bond.r_displaced_atoms = core.AtomList([core.Atom(core.Monomer, 'H', charge=3)])
        self.assertEqual(bond.get_charge(), -5)

    def test_is_equal(self):
        """Bonds are equal iff all atom lists, monomers, order, stereo, and
        comments match; note monomers are compared by identity, so two bonds
        holding distinct Monomer instances (bond_7 vs bond_8) differ from a
        default bond but are equal to each other."""
        bond_1 = core.Bond()
        bond_2 = core.Bond()
        bond_3 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')])
        bond_4 = core.Bond(r_bond_atoms=[core.Atom(core.Monomer, 'H')])
        bond_5 = core.Bond(l_displaced_atoms=[core.Atom(core.Monomer, 'H')])
        bond_6 = core.Bond(r_displaced_atoms=[core.Atom(core.Monomer, 'H')])
        bond_7 = core.Bond(l_monomer=core.Monomer())
        bond_8 = core.Bond(l_monomer=core.Monomer())
        bond_9 = core.Bond(r_monomer=core.Monomer())
        bond_10 = core.Bond(order=core.BondOrder.double)
        bond_11 = core.Bond(stereo=core.BondStereo.up)
        bond_12 = core.Bond(comments='a comment')
        self.assertTrue(bond_1.is_equal(bond_1))
        self.assertTrue(bond_1.is_equal(bond_2))
        self.assertFalse(bond_1.is_equal({}))
        self.assertFalse(bond_1.is_equal(bond_3))
        self.assertFalse(bond_1.is_equal(bond_4))
        self.assertFalse(bond_1.is_equal(bond_5))
        self.assertFalse(bond_1.is_equal(bond_6))
        self.assertFalse(bond_1.is_equal(bond_7))
        self.assertFalse(bond_1.is_equal(bond_8))
        self.assertTrue(bond_7.is_equal(bond_8))
        self.assertFalse(bond_1.is_equal(bond_9))
        self.assertFalse(bond_1.is_equal(bond_10))
        self.assertFalse(bond_1.is_equal(bond_11))
        self.assertFalse(bond_1.is_equal(bond_12))

    def test_str(self):
        """Each populated field renders as a bracketed segment; atom charges
        render with +/- suffixes."""
        bond = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')])
        self.assertEqual(str(bond), '[l-bond-atom: H]')
        bond = core.Bond(r_bond_atoms=[core.Atom(core.Monomer, 'H', position=2, charge=-3)])
        self.assertEqual(str(bond), '[r-bond-atom: H2-3]')
        bond = core.Bond(r_displaced_atoms=[core.Atom(core.Monomer, 'C', position=3, charge=4)])
        self.assertEqual(str(bond), '[r-displaced-atom: C3+4]')
        bond = core.Bond(order=core.BondOrder.triple)
        self.assertEqual(str(bond), '[order: "triple"]')
        bond = core.Bond(stereo=core.BondStereo.up)
        self.assertEqual(str(bond), '[stereo: "up"]')
        bond = core.Bond(comments='a comment')
        self.assertEqual(str(bond), '[comments: "a comment"]')

    def test_setter_errors(self):
        """Monomer and comment setters raise descriptive ValueErrors."""
        bond = core.Bond()
        bond.l_monomer = core.Monomer()
        with self.assertRaisesRegex(ValueError, 'must be an instance of `Monomer` or `None`'):
            bond.l_monomer = -2
        bond.r_monomer = core.Monomer()
        with self.assertRaisesRegex(ValueError, 'must be an instance of `Monomer` or `None`'):
            bond.r_monomer = -2
        bond.comments = 'a comment'
        with self.assertRaisesRegex(ValueError, 'must be a string or None'):
            bond.comments = 1
class OntoBondTestCase(unittest.TestCase):
    """Tests for :class:`core.OntoBond`: an ontology-defined bond that
    references its monomers by integer position rather than by object."""

    def test_setter_errors(self):
        """``type`` must be a Bond or None; ``l_monomer``/``r_monomer`` must
        be non-negative integer positions."""
        bond = core.OntoBond()
        bond.type = core.Bond()
        with self.assertRaisesRegex(ValueError, 'must be an instance of `Bond` or `None`'):
            bond.type = -2
        bond.l_monomer = 2
        with self.assertRaises(ValueError):
            bond.l_monomer = 'a'
        with self.assertRaises(ValueError):
            bond.l_monomer = -2
        bond.r_monomer = 2
        with self.assertRaises(ValueError):
            bond.r_monomer = 'a'
        with self.assertRaises(ValueError):
            bond.r_monomer = -2

    def test_is_equal(self):
        """Onto-bonds are equal iff type and both monomer positions match."""
        bond_1 = core.OntoBond()
        bond_2 = core.OntoBond()
        bond_3 = core.OntoBond(type=core.Bond())
        bond_4 = core.OntoBond(l_monomer=2)
        bond_5 = core.OntoBond(r_monomer=2)
        self.assertTrue(bond_1.is_equal(bond_1))
        self.assertTrue(bond_1.is_equal(bond_2))
        self.assertFalse(bond_1.is_equal('bond'))
        self.assertFalse(bond_1.is_equal(bond_3))
        self.assertFalse(bond_1.is_equal(bond_4))
        self.assertFalse(bond_1.is_equal(bond_5))
class BondSetTestCase(unittest.TestCase):
    """Tests for :class:`core.BondSet`: a set container restricted to
    :class:`core.BondBase` instances."""

    def test_add(self):
        """``add`` accepts bonds and rejects non-bond values."""
        bonds = core.BondSet()
        bond_1 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'O')])
        bond_2 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'N')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'P')])
        bond_3 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'S')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'Na')])
        bonds.add(bond_1)
        bonds.add(bond_2)
        self.assertEqual(len(bonds), 2)
        self.assertIn(bond_1, bonds)
        self.assertIn(bond_2, bonds)
        self.assertNotIn(bond_3, bonds)
        with self.assertRaisesRegex(ValueError, '`bond` must be an instance of `BondBase`'):
            bonds.add(None)

    def test_update(self):
        """``update`` adds every bond from another iterable."""
        bonds = core.BondSet()
        bond_1 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'O')])
        bond_2 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'N')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'P')])
        bond_3 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'S')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'Na')])
        bonds.update(set([bond_1, bond_2]))
        self.assertEqual(len(bonds), 2)
        self.assertIn(bond_1, bonds)
        self.assertIn(bond_2, bonds)
        self.assertNotIn(bond_3, bonds)

    def test_symmetric_difference_update(self):
        """The symmetric difference keeps bonds present in exactly one set
        (bond_1 is shared and drops out)."""
        bonds_1 = core.BondSet()
        bonds_2 = core.BondSet()
        bond_1 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'O')])
        bond_2 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'N')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'P')])
        bond_3 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'S')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'Na')])
        bonds_1.update(set([bond_1, bond_2]))
        bonds_2.update(set([bond_1, bond_3]))
        bonds_1.symmetric_difference_update(bonds_2)
        self.assertEqual(bonds_1, core.BondSet([bond_2, bond_3]))

    def test_is_equal(self):
        """Bond sets are equal iff they contain equal bonds; a plain set is
        never equal to a BondSet."""
        bonds_1 = core.BondSet()
        bonds_2 = core.BondSet()
        bonds_3 = core.BondSet()
        bond_1 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'O')])
        bond_2 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'N')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'P')])
        bond_3 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'S')],
                           r_bond_atoms=[core.Atom(core.Monomer, 'Na')])
        bonds_1.update(set([bond_1, bond_2]))
        bonds_2.update(set([bond_1, bond_2]))
        bonds_3.update(set([bond_1, bond_3]))
        self.assertTrue(bonds_1.is_equal(bonds_1))
        self.assertTrue(bonds_1.is_equal(bonds_2))
        self.assertTrue(bonds_2.is_equal(bonds_1))
        self.assertFalse(bonds_1.is_equal(bonds_3))
        self.assertFalse(bonds_3.is_equal(bonds_1))
        self.assertFalse(bonds_1.is_equal(set()))
class NickTestCase(unittest.TestCase):
    """Tests for :class:`core.Nick`: a strand break at an integer position."""

    def test_init(self):
        """The constructor stores the given position."""
        self.assertEqual(core.Nick(position=3).position, 3)

    def test_get_set_position(self):
        """The position setter accepts integers and rejects non-integral values."""
        nick = core.Nick()
        nick.position = 4
        self.assertEqual(nick.position, 4)
        with self.assertRaises(ValueError):
            nick.position = 4.1

    def test_is_equal(self):
        """Nicks are equal iff their positions match (both possibly unset)."""
        blank_a = core.Nick()
        blank_b = core.Nick()
        at_3_a = core.Nick(position=3)
        at_3_b = core.Nick(position=3)
        at_4 = core.Nick(position=4)
        self.assertTrue(blank_a.is_equal(blank_a))
        self.assertTrue(blank_a.is_equal(blank_b))
        self.assertFalse(blank_a.is_equal(at_3_a))
        self.assertTrue(at_3_a.is_equal(at_3_b))
        self.assertFalse(at_3_a.is_equal(at_4))
class NickSetTestCase(unittest.TestCase):
    """Tests for :class:`core.NickSet`: a set container restricted to
    :class:`core.Nick` instances."""

    def test_add(self):
        """``add`` accepts Nicks and rejects other values."""
        nick_set = core.NickSet()
        nick = core.Nick()
        nick_set.add(nick)
        self.assertIn(nick, nick_set)
        with self.assertRaisesRegex(ValueError, 'must be an instance of `Nick`'):
            nick_set.add(9)

    def test_update(self):
        """``update`` adds every nick from another iterable."""
        nick_set = core.NickSet()
        nicks = [core.Nick(position=3), core.Nick(position=5), core.Nick(position=7)]
        nick_set.update(nicks[0:2])
        self.assertIn(nicks[0], nick_set)
        self.assertIn(nicks[1], nick_set)
        self.assertNotIn(nicks[2], nick_set)

    def test_symmetric_difference_update(self):
        """The symmetric difference keeps nicks present in exactly one set
        (the shared nick_1 drops out)."""
        nick_1 = core.Nick(position=3)
        nick_2 = core.Nick(position=5)
        nick_3 = core.Nick(position=7)
        nicks_1 = core.NickSet([nick_1, nick_2])
        nicks_2 = core.NickSet([nick_1, nick_3])
        nicks_1.symmetric_difference_update(nicks_2)
        self.assertEqual(nicks_1, core.NickSet([nick_2, nick_3]))

    def test_is_equal(self):
        """Nick sets are equal iff they contain equal nicks; a plain set is
        never equal to a NickSet."""
        nick_1 = core.Nick(position=3)
        nick_2 = core.Nick(position=5)
        nick_3 = core.Nick(position=7)
        nicks_1 = core.NickSet([nick_1, nick_2])
        nicks_2 = core.NickSet([nick_1, nick_2])
        nicks_3 = core.NickSet([nick_1, nick_3])
        self.assertTrue(nicks_1.is_equal(nicks_1))
        self.assertTrue(nicks_1.is_equal(nicks_2))
        self.assertFalse(nicks_1.is_equal(nicks_3))
        self.assertFalse(nicks_1.is_equal(set()))
class BpFormTestCase(unittest.TestCase):
    def test_init(self):
        """A default BpForm has an empty sequence, empty alphabet, and a
        backbone/bond contributing no atoms and no charge."""
        bp_form = core.BpForm()
        self.assertEqual(bp_form.seq, core.MonomerSequence())
        self.assertEqual(bp_form.alphabet.monomers, {})
        self.assertEqual(bp_form.backbone.get_formula(), EmpiricalFormula())
        self.assertEqual(bp_form.backbone.get_charge(), 0)
        self.assertEqual(bp_form.bond.get_formula(), EmpiricalFormula())
        self.assertEqual(bp_form.bond.get_charge(), 0)
    def test_set_monomer_seq(self):
        """``seq`` coerces plain lists to MonomerSequence and rejects
        None and strings."""
        bp_form = core.BpForm()
        bp_form.seq = core.MonomerSequence()
        self.assertEqual(len(bp_form.seq), 0)
        bp_form.seq = [core.Monomer(), core.Monomer()]
        self.assertIsInstance(bp_form.seq, core.MonomerSequence)
        self.assertEqual(len(bp_form.seq), 2)
        with self.assertRaises(ValueError):
            bp_form.seq = None
        with self.assertRaises(ValueError):
            bp_form.seq = 'A'
    def test_set_alphabet(self):
        """``alphabet`` accepts an Alphabet instance and rejects other values."""
        bp_form = core.BpForm()
        bp_form.alphabet = dna.canonical_dna_alphabet
        # assumes the canonical DNA alphabet defines 6 monomers -- TODO confirm
        # against bpforms.alphabet.dna if the alphabet definition changes
        self.assertEqual(len(bp_form.alphabet.monomers), 6)
        with self.assertRaises(ValueError):
            bp_form.alphabet = None
        with self.assertRaises(ValueError):
            bp_form.alphabet = 'A'
    def test_set_backbone(self):
        """``backbone`` accepts a Backbone instance and rejects other values."""
        bp_form = core.BpForm()
        bp_form.backbone = core.Backbone()
        with self.assertRaises(ValueError):
            bp_form.backbone = None
        with self.assertRaises(ValueError):
            bp_form.backbone = '123'
    def test_set_bond(self):
        """``bond`` accepts a Bond instance and rejects other values."""
        bp_form = core.BpForm()
        bp_form.bond = core.Bond()
        with self.assertRaises(ValueError):
            bp_form.bond = None
        with self.assertRaises(ValueError):
            bp_form.bond = '123'
def test_set_circular(self):
bp_form = core.BpForm()
bp_form.circular = True
self.assertEqual(bp_form.circular, True)
bp_form.circular = False
self.assertEqual(bp_form.circular, False)
with self.assertRaises(ValueError):
bp_form.circular = None
    def test_set_crosslinks(self):
        """``crosslinks`` accepts a BondSet and rejects ``None``."""
        bp_form = core.BpForm()
        bp_form.crosslinks = core.BondSet()
        with self.assertRaises(ValueError):
            bp_form.crosslinks = None
    def test_get_set_nicks(self):
        """``nicks`` accepts a NickSet (stored as-is) and rejects ``None``."""
        bp_form = core.BpForm()
        nicks = core.NickSet()
        bp_form.nicks = nicks
        self.assertEqual(bp_form.nicks, nicks)
        with self.assertRaises(ValueError):
            bp_form.nicks = None
    def test_is_equal(self):
        """Forms are equal iff sequence, alphabet, backbone, bond, circularity,
        and nicks all match; each fixture below varies exactly one of these."""
        bp_form_1 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]))
        bp_form_2 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]))
        bp_form_3 = None
        bp_form_4 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), alphabet=dna.canonical_dna_alphabet)
        bp_form_5 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), backbone=core.Backbone(structure='O'))
        bp_form_6 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), bond=core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'C')]))
        bp_form_7 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), circular=True)
        bp_form_8 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]),
            nicks=core.NickSet([core.Nick(position=1)]))
        bp_form_9 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]),
            nicks=core.NickSet([core.Nick(position=1)]))
        bp_form_10 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]),
            nicks=core.NickSet([core.Nick(position=2)]))
        self.assertTrue(bp_form_1.is_equal(bp_form_1))
        self.assertTrue(bp_form_1.is_equal(bp_form_2))
        # comparison with None (or any non-BpForm) is simply unequal
        self.assertFalse(bp_form_1.is_equal(bp_form_3))
        self.assertFalse(bp_form_1.is_equal(bp_form_4))
        self.assertFalse(bp_form_1.is_equal(bp_form_5))
        self.assertFalse(bp_form_1.is_equal(bp_form_6))
        self.assertFalse(bp_form_1.is_equal(bp_form_7))
        self.assertFalse(bp_form_1.is_equal(bp_form_8))
        self.assertTrue(bp_form_8.is_equal(bp_form_9))
        self.assertFalse(bp_form_8.is_equal(bp_form_10))
    def test_diff(self):
        """``diff`` returns ``None`` for equal forms and a human-readable
        description of each difference otherwise (class, alphabet, backbone,
        bond, length, monomers, crosslinks, nicks, circularity)."""
        form_1 = dna.DnaForm()
        form_2 = dna.DnaForm()
        self.assertEqual(form_1.diff(form_1), None)
        self.assertEqual(form_1.diff(form_2), None)
        # different concrete classes
        form_2 = rna.RnaForm()
        self.assertIn('DnaForm != RnaForm', form_1.diff(form_2))
        # different alphabet / backbone / inter-monomer bond
        form_2 = dna.DnaForm()
        form_2.alphabet = rna.rna_alphabet
        form_2.backbone = core.Backbone(monomer_bond_atoms=[core.Atom(core.Monomer, 'C')])
        form_2.bond = core.Bond()
        self.assertIn('Forms have different alphabets', form_1.diff(form_2))
        self.assertIn('Forms have different backbones', form_1.diff(form_2))
        self.assertIn('Forms have different inter-monomer bonds', form_1.diff(form_2))
        # different lengths, then different monomers at the same position
        form_2 = dna.DnaForm().from_str('A')
        self.assertIn('Length 0 != 1', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('A')
        form_2 = dna.DnaForm().from_str('C')
        self.assertIn('Monomeric form 1', form_1.diff(form_2))
        # crosslinks
        form_2 = dna.DnaForm().from_str('A|x-link:[]')
        self.assertIn('Number of crosslinks 0 != 1', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 1C1]')
        form_2 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 2O2]')
        self.assertIn('not in self', form_1.diff(form_2))
        self.assertIn('not in other', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 1C1]')
        form_2 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 1C1]')
        self.assertEqual(form_1.diff(form_2), None)
        # nicks
        form_1 = dna.DnaForm().from_str('AA')
        form_2 = dna.DnaForm().from_str('AA')
        form_2.nicks.add(core.Nick(position=1))
        self.assertIn('Number of nicks 0 != 1', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('AAA')
        form_1.nicks.add(core.Nick(position=1))
        form_2 = dna.DnaForm().from_str('AAA')
        form_2.nicks.add(core.Nick(position=2))
        self.assertIn('not in self', form_1.diff(form_2))
        self.assertIn('not in other', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('AAA')
        form_2 = dna.DnaForm().from_str('AAA')
        form_1.nicks.add(core.Nick(position=1))
        form_2.nicks.add(core.Nick(position=1))
        self.assertEqual(form_1.diff(form_2), None)
        # circularity
        form_1 = dna.DnaForm(circular=False)
        form_2 = dna.DnaForm(circular=True)
        self.assertIn('Circularity False != True', form_1.diff(form_2))
    def test_getitem(self):
        """Indexing returns individual monomers; slicing returns a list."""
        monomer_1 = core.Monomer(id='A')
        monomer_2 = core.Monomer(id='B')
        monomer_3 = core.Monomer(id='C')
        bp_form = core.BpForm([monomer_1, monomer_2, monomer_3])
        self.assertEqual(bp_form[0], monomer_1)
        self.assertEqual(bp_form[1], monomer_2)
        self.assertEqual(bp_form[0:1], [monomer_1])
def test_setitem(self):
    """Assignment by index or slice should replace monomeric forms in place."""
    m_a = core.Monomer(id='A')
    m_b = core.Monomer(id='B')
    m_c = core.Monomer(id='C')
    form = core.BpForm([m_a, m_b, m_c])
    self.assertEqual(form[0], m_a)
    # replace a single position
    form[0] = m_b
    self.assertEqual(form[0], m_b)
    # replace a slice
    form[0:1] = [m_c]
    self.assertEqual(form[0], m_c)
def test_delitem(self):
    """Deleting by index should remove the monomeric form from the sequence."""
    m_a = core.Monomer(id='A')
    m_b = core.Monomer(id='B')
    m_c = core.Monomer(id='C')
    form = core.BpForm([m_a, m_b, m_c])
    del(form[1])
    self.assertTrue(form.is_equal(core.BpForm([m_a, m_c])))
def test_iter(self):
    """Iterating a form should yield its monomeric forms in order."""
    m_a = core.Monomer(id='A')
    m_b = core.Monomer(id='B')
    m_c = core.Monomer(id='C')
    form = core.BpForm([m_a, m_b, m_c])
    expected = [m_a, m_b, m_c]
    for monomer, expected_monomer in zip(form, expected):
        self.assertEqual(monomer, expected_monomer)
def test_reversed(self):
    """reversed() should yield the monomeric forms in reverse order."""
    m_a = core.Monomer(id='A')
    m_b = core.Monomer(id='B')
    m_c = core.Monomer(id='C')
    form = core.BpForm([m_a, m_b, m_c])
    expected = [m_c, m_b, m_a]
    for monomer, expected_monomer in zip(reversed(form), expected):
        self.assertEqual(monomer, expected_monomer)
def test_contains(self):
    """`in` should test membership of a monomeric form in the sequence."""
    m_a = core.Monomer(id='A')
    m_b = core.Monomer(id='B')
    m_c = core.Monomer(id='C')
    form = core.BpForm([m_a, m_b])
    self.assertIn(m_a, form)
    self.assertIn(m_b, form)
    # a monomeric form not in the sequence
    self.assertNotIn(m_c, form)
def test_len(self):
    """len() should report the number of monomeric forms."""
    self.assertEqual(len(core.BpForm()), 0)
    self.assertEqual(len(core.BpForm(seq=[core.Monomer(), core.Monomer()])), 2)
def test_get_monomer_counts(self):
    """get_monomer_counts() should tally the occurrences of each monomeric form."""
    m_a = core.Monomer(id='A')
    m_b = core.Monomer(id='B')
    m_c = core.Monomer(id='C')
    form = core.BpForm([m_a, m_b, m_a, m_a, m_a, m_b, m_b, m_c])
    self.assertEqual(form.get_monomer_counts(), {m_a: 4, m_b: 3, m_c: 1})
def test_get_formula_mol_wt_charge(self):
    """Formula, molecular weight, and charge of a form should sum over its
    monomeric forms, minus whatever each inter-monomer bond displaces."""
    monomer_A = core.Monomer(id='A', structure=dAMP_smiles)
    monomer_C = core.Monomer(id='C', structure=dCMP_smiles)

    # single-monomer forms: properties equal the monomer's own
    bp_form = core.BpForm([monomer_A])
    self.assertEqual(bp_form.get_formula(), monomer_A.get_formula())
    self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt())
    self.assertEqual(bp_form.get_charge(), monomer_A.get_charge())
    bp_form = core.BpForm([monomer_C])
    self.assertEqual(bp_form.get_formula(), monomer_C.get_formula())
    self.assertEqual(bp_form.get_mol_wt(), monomer_C.get_mol_wt())
    self.assertEqual(bp_form.get_charge(), monomer_C.get_charge())

    # dimer with a bond that displaces nothing: plain sums
    bp_form = core.BpForm([monomer_A, monomer_C])
    self.assertEqual(bp_form.get_formula(), monomer_A.get_formula() + monomer_C.get_formula())
    self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt() + monomer_C.get_mol_wt())
    self.assertEqual(bp_form.get_charge(), monomer_A.get_charge() + monomer_C.get_charge())

    # dimer whose bond displaces one H(charge -1): lose one H, gain +1 charge
    bp_form = core.BpForm([monomer_A, monomer_C],
                          bond=core.Bond(r_displaced_atoms=[core.Atom(core.Monomer, 'H', charge=-1, position=1)]))
    self.assertEqual(bp_form.get_formula(), monomer_A.get_formula() + monomer_C.get_formula() - EmpiricalFormula('H'))
    self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt() + monomer_C.get_mol_wt() -
                     EmpiricalFormula('H').get_molecular_weight())
    self.assertEqual(bp_form.get_charge(), monomer_A.get_charge() + monomer_C.get_charge() + 1)

    # pentamer: four bonds => lose four H, gain +4 charge
    bp_form = core.BpForm([monomer_A, monomer_A, monomer_C, monomer_C, monomer_C],
                          bond=core.Bond(r_displaced_atoms=[core.Atom(core.Monomer, 'H', charge=-1, position=1)]))
    self.assertEqual(bp_form.get_formula(), monomer_A.get_formula() * 2 + monomer_C.get_formula() * 3 - EmpiricalFormula('H') * 4)
    self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt() * 2 + monomer_C.get_mol_wt()
                     * 3 - EmpiricalFormula('H').get_molecular_weight() * 4)
    self.assertEqual(bp_form.get_charge(), monomer_A.get_charge() * 2 + monomer_C.get_charge() * 3 + 1 * 4)
def test_get_formula_charge_circular(self):
    """Circularizing a form should add one more inter-monomer bond
    (each bond here removes HO and adds +1 charge)."""
    monomer_A = dna.canonical_dna_alphabet.monomers.A
    monomer_C = dna.canonical_dna_alphabet.monomers.C

    # linear dimer: one bond => one HO removed
    dimer = dna.CanonicalDnaForm([monomer_A, monomer_C])
    self.assertEqual(clean_smiles(dimer.export('smiles')),
                     clean_smiles('Nc1c2ncn(c2ncn1)C1CC(OP(=O)(OCC2C(O)CC(n3c(=O)nc(N)cc3)O2)[O-])C(O1)COP(=O)([O-])[O-]'))
    self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                     + monomer_C.get_formula()
                     + dimer.backbone.get_formula() * 2
                     - EmpiricalFormula('HO') * 1)
    self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                     + monomer_C.get_charge()
                     + dimer.backbone.get_charge() * 2
                     + 1 * 1)

    # circular dimer: two bonds => two HO removed
    dimer.circular = True
    self.assertEqual(clean_smiles(dimer.export('smiles')),
                     clean_smiles('Nc1c2ncn(c2ncn1)C1CC2OP(=O)(OCC3C(OP(=O)(OCC2O1)[O-])CC(n1c(=O)nc(N)cc1)O3)[O-]'))
    self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                     + monomer_C.get_formula()
                     + dimer.backbone.get_formula() * 2
                     - EmpiricalFormula('HO') * 2)
    self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                     + monomer_C.get_charge()
                     + dimer.backbone.get_charge() * 2
                     + 1 * 2)
def test_get_formula_charge_crosslinks(self):
    """A crosslink should contribute to the formula and charge like any other
    bond: the displaced atoms are removed and their charge is subtracted."""
    monomer_A = dna.canonical_dna_alphabet.monomers.A
    monomer_C = dna.canonical_dna_alphabet.monomers.C

    # baseline: linear dimer without a crosslink
    dimer = dna.CanonicalDnaForm([monomer_A, monomer_C])
    self.assertEqual(clean_smiles(dimer.export('smiles')),
                     clean_smiles('Nc1c2ncn(c2ncn1)C1CC(OP(=O)(OCC2C(O)CC(n3c(=O)nc(N)cc3)O2)[O-])C(O1)COP(=O)([O-])[O-]'))
    self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                     + monomer_C.get_formula()
                     + dimer.backbone.get_formula() * 2
                     - EmpiricalFormula('HO') * 1)
    self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                     + monomer_C.get_charge()
                     + dimer.backbone.get_charge() * 2
                     + 1 * 1)

    # crosslink O(monomer 2) - P(monomer 1), displacing H and O(-1):
    # one more HO removed, +1 more charge
    crosslink = core.Bond(
        r_bond_atoms=[core.Atom(core.Monomer, monomer=2, element='O', position=1)],
        l_bond_atoms=[core.Atom(core.Monomer, monomer=1, element='P', position=9)],
        r_displaced_atoms=[core.Atom(core.Monomer, monomer=2, element='H', position=1)],
        l_displaced_atoms=[core.Atom(core.Monomer, monomer=1, element='O', position=12, charge=-1)]
    )
    dimer.crosslinks = core.BondSet([crosslink])
    self.assertEqual(clean_smiles(dimer.export('smiles')),
                     clean_smiles('Nc1ccn(c(=O)n1)C1OC2C(C1)OP(=O)([O-])OCC1C(OP(=O)(OC2)[O-])CC(O1)n1cnc2c1ncnc2N'))
    self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                     + monomer_C.get_formula()
                     + dimer.backbone.get_formula() * 2
                     - EmpiricalFormula('HO') * 2)
    self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                     + monomer_C.get_charge()
                     + dimer.backbone.get_charge() * 2
                     + 1 * 2)
def test_get_major_micro_species(self):
    """get_major_micro_species() should return the structure at the given pH;
    an empty form has no structure and should yield None."""
    monomers = dna.canonical_dna_alphabet.monomers
    form = dna.CanonicalDnaForm([monomers.A, monomers.C])
    structure = form.get_major_micro_species(7.4, major_tautomer=True)
    expected = 'Nc1nc(=O)n(cc1)C1CC(O)C(COP(=O)([O-])OC2CC(OC2COP(=O)([O-])[O-])n2cnc3c(N)ncnc23)O1'
    self.assertEqual(clean_smiles(OpenBabelUtils.export(structure, 'smiles')),
                     clean_smiles(expected))
    # an empty form has nothing to protonate
    self.assertIsNone(dna.DnaForm().get_major_micro_species(7.))
def test_str(self):
    """str() should use single-character alphabet codes where the monomeric
    form is in the alphabet, and an inline bracketed definition otherwise."""
    monomer_A = core.Monomer(id='A', structure=dAMP_smiles)
    monomer_C = core.Monomer(id='C', structure=dCMP_smiles)
    monomer_G = core.Monomer(id='G', structure=dGMP_smiles)

    # no alphabet: every monomeric form is rendered in full
    bp_form = core.BpForm([monomer_A, monomer_C, monomer_G, monomer_A])
    self.assertEqual(str(bp_form), '{}{}{}{}'.format(str(monomer_A), str(monomer_C), str(monomer_G), str(monomer_A)))

    # alphabet containing A and C: only G falls back to the full rendering
    bp_form = core.BpForm([monomer_A, monomer_C, monomer_G, monomer_A], alphabet=core.Alphabet(monomers={
        'A': monomer_A,
        'C': monomer_C,
    }))
    self.assertEqual(str(bp_form), '{}{}{}{}'.format('A', 'C', str(monomer_G), 'A'))

    # G's inline rendering uses its (canonicalized) SMILES
    dGMP_smiles_2 = 'OC1CC(OC1COP(=O)([O-])[O-])n1cnc2c1nc(N)[nH]c2=O'
    self.assertEqual(str(bp_form), '{}{}{}{}'.format('A', 'C', '[id: "{}" | structure: "{}"]'.format('G', dGMP_smiles_2), 'A'))
def test_from_str(self):
    """from_str() should parse alphabet codes, inline monomer definitions,
    multi-character codes in curly braces, and position annotations with
    alternative monomeric forms; malformed strings should raise lark errors."""
    # plain sequences of single-character alphabet codes
    self.assertTrue(dna.DnaForm().from_str('AAA').is_equal(dna.DnaForm([
        dna.dna_alphabet.monomers.A,
        dna.dna_alphabet.monomers.A,
        dna.dna_alphabet.monomers.A,
    ])))
    self.assertTrue(dna.DnaForm().from_str('ACTG').is_equal(dna.DnaForm([
        dna.dna_alphabet.monomers.A, dna.dna_alphabet.monomers.C,
        dna.dna_alphabet.monomers.T, dna.dna_alphabet.monomers.G,
    ])))

    # codes outside the alphabet are rejected
    with self.assertRaisesRegex(lark.exceptions.VisitError, 'not in alphabet'):
        dna.CanonicalDnaForm().from_str('EAA')

    # inline monomer definition exercising every supported attribute
    dna_form_1 = ('AA[id: "dI"'
                  + ' | name: "2\'-deoxyinosine"'
                  + ' | synonym: "2\'-deoxyinosine, 9-[(2R,4S,5R)-4-hydroxy-5-(hydroxymethyl)tetrahydrofuran-2-yl]-9H-purin-6-ol"'
                  + ' | identifier: "CHEBI:28997" @ "chebi"'
                  + ' | structure: "' + dIMP_smiles + '"'
                  + ' | backbone-bond-atom: C1-1'
                  + ' | backbone-bond-atom: D2-2'
                  + ' | backbone-displaced-atom: D2-2'
                  + ' | r-bond-atom: E3-3'
                  + ' | r-displaced-atom: F4-4'
                  + ' | l-bond-atom: G5-5'
                  + ' | l-displaced-atom: H6-6'
                  + ' | delta-mass: -2.5'
                  + ' | delta-charge: 3'
                  + ' | position: 3-5'
                  + ' | base-monomer: "A"'
                  + ' | comments: "A purine 2\'-deoxyribonucleoside that is inosine ..."]A')
    dna_form_2 = dna.DnaForm().from_str(dna_form_1)
    # atom specs such as "G5-5" parse into element, position, and charge
    self.assertIsInstance(dna_form_2.seq[2].l_bond_atoms[0].element, str)
    self.assertEqual(dna_form_2.seq[2].l_bond_atoms[0].element, 'G')
    self.assertEqual(dna_form_2.seq[2].l_bond_atoms[0].position, 5)
    self.assertEqual(dna_form_2.seq[2].l_bond_atoms[0].charge, -5)
    self.assertEqual(list(dna_form_2.seq[2].base_monomers)[0].id, 'adenine')
    self.assertIn(dna_form_2.seq[2].export('smiles'), [dIMP_smiles, 'OCC1OC(CC1O)n1cnc2c1ncnc2O'])

    # the parsed form equals the same form built programmatically
    dna_form_3 = dna.DnaForm([
        dna.dna_alphabet.monomers.A,
        dna.dna_alphabet.monomers.A,
        core.Monomer(
            id='dI',
            name="2'-deoxyinosine",
            synonyms=core.SynonymSet(
                ["2'-deoxyinosine, 9-[(2R,4S,5R)-4-hydroxy-5-(hydroxymethyl)tetrahydrofuran-2-yl]-9H-purin-6-ol"]),
            identifiers=core.IdentifierSet([core.Identifier('chebi', 'CHEBI:28997')]),
            structure=dIMP_smiles,
            backbone_bond_atoms=[
                core.Atom(core.Monomer, 'C', position=1, charge=-1),
                core.Atom(core.Monomer, 'D', position=2, charge=-2),
            ],
            backbone_displaced_atoms=[core.Atom(core.Monomer, 'D', position=2, charge=-2)],
            r_bond_atoms=[core.Atom(core.Monomer, 'E', position=3, charge=-3)],
            r_displaced_atoms=[core.Atom(core.Monomer, 'F', position=4, charge=-4)],
            l_bond_atoms=[core.Atom(core.Monomer, 'G', position=5, charge=-5)],
            l_displaced_atoms=[core.Atom(core.Monomer, 'H', position=6, charge=-6)],
            delta_mass=-2.5,
            delta_charge=3,
            start_position=3,
            end_position=5,
            base_monomers=[dna.dna_alphabet.monomers.A],
            comments="A purine 2'-deoxyribonucleoside that is inosine ...",
        ),
        dna.dna_alphabet.monomers.A,
    ])
    # round trip: str() regenerates the input (modulo SMILES canonicalization)
    self.assertEqual(str(dna_form_2), dna_form_1.replace(dIMP_smiles, 'OCC1OC(CC1O)n1cnc2c1ncnc2O'))
    self.assertTrue(dna_form_2.is_equal(dna_form_3))

    # open-ended position ranges: "3-" and "-5"
    self.assertTrue(dna.DnaForm().from_str(
        'AA[id: "dI"'
        ' | position: 3-]A').is_equal(dna.DnaForm([
            dna.dna_alphabet.monomers.A,
            dna.dna_alphabet.monomers.A,
            core.Monomer(
                id='dI',
                start_position=3,
                end_position=None,
            ),
            dna.dna_alphabet.monomers.A,
        ])))
    self.assertTrue(dna.DnaForm().from_str(
        'AA[id: "dI"'
        ' | position: -5]A').is_equal(dna.DnaForm([
            dna.dna_alphabet.monomers.A,
            dna.dna_alphabet.monomers.A,
            core.Monomer(
                id='dI',
                start_position=None,
                end_position=5,
            ),
            dna.dna_alphabet.monomers.A,
        ])))

    # multi-character codes must be wrapped in curly braces;
    # single characters parse greedily without them
    alphabet = core.Alphabet()
    alphabet.monomers['aA'] = core.Monomer()
    alphabet.monomers['Aa'] = core.Monomer()
    alphabet.monomers['A'] = core.Monomer()
    alphabet.monomers['AA'] = core.Monomer()
    alphabet.monomers['*'] = core.Monomer()
    alphabet.monomers['A A'] = core.Monomer()
    self.assertTrue(core.BpForm(alphabet=alphabet).from_str(
        'AAA{AA}AA{aA}{Aa}AA*{A A}').is_equal(core.BpForm([
            alphabet.monomers['A'], alphabet.monomers['A'], alphabet.monomers['A'],
            alphabet.monomers['AA'],
            alphabet.monomers['A'], alphabet.monomers['A'],
            alphabet.monomers['aA'],
            alphabet.monomers['Aa'],
            alphabet.monomers['A'], alphabet.monomers['A'],
            alphabet.monomers['*'],
            alphabet.monomers['A A'],
        ], alphabet=alphabet)))
    # braces round-trip through str()
    as_str = 'AAA{AA}AA{aA}{Aa}AA'
    form = core.BpForm(alphabet=alphabet).from_str(as_str)
    self.assertEqual(str(form), as_str)

    # malformed braces and unknown base monomers raise
    alphabet = core.Alphabet()
    alphabet.monomers['aA'] = core.Monomer()
    alphabet.monomers['Aa'] = core.Monomer()
    alphabet.monomers['A'] = core.Monomer()
    alphabet.monomers['AA'] = core.Monomer()
    with self.assertRaises(lark.exceptions.VisitError):
        core.BpForm(alphabet=alphabet).from_str('AAA{(AA}AA{aA}{Aa}AA')
    with self.assertRaises(lark.exceptions.UnexpectedCharacters):
        core.BpForm(alphabet=alphabet).from_str('AAA{AA}AA{aA}{[Aa}AA')
    with self.assertRaises(lark.exceptions.VisitError):
        core.BpForm(alphabet=alphabet).from_str('AAA[base-monomer: "C"]')

    # single-valued attributes may not be repeated
    with self.assertRaisesRegex(lark.exceptions.VisitError, 'cannot be repeated'):
        dna.DnaForm().from_str(
            'AA[id: "dI"'
            ' | name: "2\'-deoxyinosine"'
            ' | name: "2\'-deoxyinosine"]A')

    # two structure-only definitions of the same monomer parse equal
    dna_form_1 = dna.DnaForm().from_str('[structure: "' + dIMP_smiles + '"]')
    dna_form_2 = dna.DnaForm().from_str('[structure: "' + dIMP_smiles + '"]')
    self.assertTrue(dna_form_1.is_equal(dna_form_2))

    # position annotation with alternative monomeric forms
    form = dna.DnaForm().from_str(
        'AA[id: "dI"'
        ' | position: 3-5 [A|C | G| m2A]]A')
    self.assertTrue(form.is_equal(dna.DnaForm([
        dna.dna_alphabet.monomers.A,
        dna.dna_alphabet.monomers.A,
        core.Monomer(
            id='dI',
            start_position=3,
            end_position=5,
            monomers_position=[
                dna.dna_alphabet.monomers.A,
                dna.dna_alphabet.monomers.C,
                dna.dna_alphabet.monomers.G,
                dna.dna_alphabet.monomers.m2A,
            ]
        ),
        dna.dna_alphabet.monomers.A,
    ])))
    # str() normalizes the spacing around the alternatives
    self.assertEqual('AA[id: "dI" | position: 3-5 [A | C | G | m2A]]A', str(form))
    with self.assertRaisesRegex(lark.exceptions.VisitError, 'not in alphabet'):
        dna.DnaForm().from_str('A[id: "dI" | position: 3-5 [unknown]]')
def test_from_str_crosslinks(self):
    """from_str() should parse `x-link` annotations into Bond objects, both
    atom-by-atom ("user" crosslinks) and by ontology type."""
    # no annotation => empty crosslink set
    form = dna.DnaForm().from_str('AAA')
    self.assertEqual(form.crosslinks, core.BondSet())

    # two crosslinks, parsed into equivalent Bond objects
    form_str = ('AAA'
                '|x-link: [l-bond-atom: 1C1] '
                '| x-link: [r-displaced-atom: 5H3+1 '
                '| r-displaced-atom: 6H2+3 '
                '| r-bond-atom: 8P5-2]')
    form = dna.DnaForm().from_str(form_str)
    bond_1 = core.Bond(l_bond_atoms=[core.Atom(core.Monomer, monomer=1, element='C', position=1)])
    bond_2 = core.Bond(r_displaced_atoms=[
        core.Atom(core.Monomer, monomer=5, element='H', position=3, charge=1),
        core.Atom(core.Monomer, monomer=6, element='H', position=2, charge=3),
    ], r_bond_atoms=[core.Atom(core.Monomer, monomer=8, element='P', position=5, charge=-2)])
    bonds = core.BondSet([bond_1, bond_2])
    self.assertTrue(form.crosslinks.is_equal(bonds))

    # str() may emit the (unordered) crosslinks in either order
    form_str_1 = ('AAA'
                  ' | x-link: [l-bond-atom: 1C1]'
                  ' | x-link: [r-bond-atom: 8P5-2'
                  ' | r-displaced-atom: 5H3+1'
                  ' | r-displaced-atom: 6H2+3]')
    form_str_2 = ('AAA'
                  ' | x-link: [r-bond-atom: 8P5-2'
                  ' | r-displaced-atom: 5H3+1'
                  ' | r-displaced-atom: 6H2+3]'
                  ' | x-link: [l-bond-atom: 1C1]')
    self.assertIn(str(form), [form_str_1, form_str_2])

    # identical crosslink blocks may not be repeated
    xlink = (' | x-link: [r-bond-atom: 8P5-2'
             ' | r-displaced-atom: 5H3+1'
             ' | r-displaced-atom: 6H2+3]'
             ' | x-link: [l-bond-atom: 1C1]')
    form = dna.DnaForm().from_str('AAA' + xlink)
    with self.assertRaisesRegex(lark.exceptions.VisitError, 'be repeated'):
        form = dna.DnaForm().from_str('AAA' + xlink + xlink)

    # trailing whitespace/newlines are tolerated, with and without crosslinks
    dna.DnaForm().from_str('A\n').validate()
    dna.DnaForm().from_str('A \n').validate()
    dna.DnaForm().from_str('A\n ').validate()
    dna.DnaForm().from_str('A \n ').validate()
    dna.DnaForm().from_str('A ').validate()
    dna.DnaForm().from_str('A | x-link: []\n')
    dna.DnaForm().from_str('A | x-link: [] ')
    dna.DnaForm().from_str('A | x-link: [] \n')
    dna.DnaForm().from_str('A | x-link: []\n ')

    # crosslink comments are captured
    form_str = ('AAA'
                '| x-link: [r-displaced-atom: 5H3+1 '
                '| r-displaced-atom: 6H2+3 '
                '| r-bond-atom: 8P5-2'
                '| comments: "a comment"]')
    form = dna.DnaForm().from_str(form_str)
    self.assertEqual(list(form.crosslinks)[0].comments, 'a comment')

    # an ontology crosslink ("type"/"l"/"r") expands to the same structure as
    # the explicit atom-by-atom form
    user_form = protein.ProteinForm().from_str(
        'CAC'
        ' | x-link: ['
        ' l-bond-atom: 1S11'
        ' | r-bond-atom: 3S11'
        ' | l-displaced-atom: 1H11'
        ' | r-displaced-atom: 3H11'
        ']')
    onto_form_str = (
        'CAC'
        ' | x-link: ['
        'type: "disulfide" '
        '| l: 1 '
        '| r: 3'
        ']')
    onto_form = protein.ProteinForm().from_str(onto_form_str)
    self.assertEqual(onto_form.export('smiles'), user_form.export('smiles'))
    self.assertEqual(str(onto_form), onto_form_str)
    onto_form_2 = protein.ProteinForm().from_str(onto_form_str)
    self.assertTrue(onto_form_2.is_equal(onto_form))

    # unknown ontology types are rejected
    onto_form_str = (
        'CAC'
        ' | x-link: ['
        'type: "unknown" '
        '| l: 1 '
        '| r: 3'
        ']')
    with self.assertRaisesRegex(lark.exceptions.VisitError, 'not in ontology'):
        protein.ProteinForm().from_str(onto_form_str)
def test_from_str_nicks(self):
    """A `:` in a sequence string should parse as a nick at that position."""
    cases = [
        ('A:AA', [1]),
        ('AA:A', [2]),
        ('A:A:A', [1, 2]),
    ]
    for seq_str, positions in cases:
        parsed = dna.DnaForm().from_str(seq_str)
        expected = dna.DnaForm().from_str('AAA')
        for position in positions:
            expected.nicks.add(core.Nick(position=position))
        self.assertTrue(parsed.is_equal(expected))
def test__str__nicks(self):
    """str() should render each nick as a `:` at its position."""
    cases = [
        ([1], 'A:AA'),
        ([2], 'AA:A'),
        ([1, 2], 'A:A:A'),
    ]
    for positions, expected_str in cases:
        form = dna.DnaForm().from_str('AAA')
        for position in positions:
            form.nicks.add(core.Nick(position=position))
        self.assertEqual(str(form), expected_str)
def test_from_str_circular(self):
    """The `|circular` suffix should mark the parsed form as circular;
    from_str() of a linear string leaves the form linear."""
    # a plain sequence parses as linear
    self.assertFalse(dna.DnaForm().from_str('AAA').circular)
    # from_str of a linear string yields a linear form even if the
    # form object was constructed circular
    self.assertFalse(dna.DnaForm(circular=True).from_str('AAA').circular)
    # the suffix sets the flag and round-trips through str()
    circular_form = dna.DnaForm().from_str('AAA|circular')
    self.assertTrue(circular_form.circular)
    self.assertEqual(str(circular_form), 'AAA | circular')
def test_get_structure_with_null_monomer(self):
    """get_structure() should tolerate a monomeric form without a structure."""
    form = dna.DnaForm()
    structureless = core.Monomer()
    form.seq.append(structureless)
    self.assertIsNone(form.seq[0].structure)
    # should not raise despite the missing structure
    form.get_structure()
def test_get_structure_atom_map(self):
    """get_structure() should also return a map from (residue position,
    'monomer'/'backbone', monomer atom index) to atom indices in the
    assembled molecule, preserving atomic numbers."""
    form = dna.DnaForm().from_str('ACG')
    structure, atom_map = form.get_structure()
    # one entry per residue position
    self.assertEqual(sorted(atom_map.keys()), [1, 2, 3])
    self.assertEqual(sorted(atom_map[2].keys()), ['backbone', 'monomer'])
    self.assertEqual(sorted(atom_map[2]['monomer'].keys())[0], 1)
    self.assertEqual(sorted(atom_map[2]['monomer'].keys())[-1],
                     dna.dna_alphabet.monomers.C.structure.NumAtoms())
    # exactly one atom (index 12) of the C monomer is absent from the map --
    # presumably displaced by bonding; TODO confirm which bond displaces it
    self.assertEqual(len(atom_map[2]['monomer'].keys()),
                     dna.dna_alphabet.monomers.C.structure.NumAtoms() - 1)
    self.assertNotIn(12, atom_map[2]['monomer'])
    # every mapped atom keeps the atomic number of its source atom
    for i_atom in range(dna.dna_alphabet.monomers.C.structure.NumAtoms()):
        if i_atom + 1 in atom_map[2]['monomer']:
            atom = structure.GetAtom(atom_map[2]['monomer'][i_atom + 1])
            self.assertEqual(atom.GetAtomicNum(),
                             dna.dna_alphabet.monomers.C.structure.GetAtom(i_atom + 1).GetAtomicNum())
def test_get_structure_with_xlink_stereo(self):
    """Crosslink `order` and `stereo` annotations should parse into the Bond
    and be reflected in the exported structure."""
    # order + stereo parse into BondOrder / BondStereo enums
    form = protein.ProteinForm().from_str('CAC'
                                          ' | x-link: ['
                                          ' l-bond-atom: 1S11'
                                          ' | r-bond-atom: 3S11'
                                          ' | l-displaced-atom: 1H11'
                                          ' | r-displaced-atom: 3H11'
                                          ' | order: "double"'
                                          ' | stereo: "up"'
                                          ']'
                                          )
    self.assertEqual(list(form.crosslinks)[0].order, core.BondOrder.double)
    self.assertEqual(list(form.crosslinks)[0].stereo, core.BondStereo.up)

    # single/wedge: exported SMILES shows an ordinary S-S bond
    form = protein.ProteinForm().from_str('CAC'
                                          ' | x-link: ['
                                          ' l-bond-atom: 1S11'
                                          ' | r-bond-atom: 3S11'
                                          ' | l-displaced-atom: 1H11'
                                          ' | r-displaced-atom: 3H11'
                                          ' | order: "single"'
                                          ' | stereo: "wedge"'
                                          ']'
                                          )
    self.assertEqual(form.export('smiles'), 'C1(=O)[C@@H]([NH3+])CSSC[C@@H](C(=O)O)NC(=O)[C@H](C)N1')

    # double bond order: exported SMILES shows [S]=[S]
    form = protein.ProteinForm().from_str('CAC'
                                          ' | x-link: ['
                                          ' l-bond-atom: 1S11'
                                          ' | r-bond-atom: 3S11'
                                          ' | l-displaced-atom: 1H11'
                                          ' | r-displaced-atom: 3H11'
                                          ' | order: "double"'
                                          ']'
                                          )
    self.assertEqual(form.export('smiles'), 'C1(=O)[C@@H]([NH3+])C[S]=[S]C[C@@H](C(=O)O)NC(=O)[C@H](C)N1')
def test_get_structure_with_nick(self):
    """A nick should break the backbone bond at its position: here the
    disulfide-bridged ring opens and the form gains H3O and +1 charge."""
    form = protein.ProteinForm().from_str('CAC | x-link: [type: "disulfide" | l: 1 | r: 3]')
    # intact backbone: cyclic structure
    self.assertEqual(form.export('smiles'),
                     'C1(=O)[C@@H]([NH3+])CSSC[C@@H](C(=O)O)NC(=O)[C@H](C)N1')
    self.assertEqual(form.get_formula(),
                     EmpiricalFormula('C9H16N3O4S2'))
    self.assertEqual(form.get_charge(), 1)
    # nick after residue 1: the ring opens; the atoms displaced by the broken
    # bond (H3O, +1) are restored
    form.nicks.add(core.Nick(position=1))
    self.assertEqual(form.export('smiles'),
                     'OC(=O)[C@@H]([NH3+])CSSC[C@@H](C(=O)O)NC(=O)[C@H](C)[NH3+]')
    self.assertEqual(form.get_formula(),
                     EmpiricalFormula('C9H16N3O4S2') + EmpiricalFormula('H3O'))
    self.assertEqual(form.get_charge(), 1 + 1)
def test__bond_monomer_backbone(self):
    """Smoke test of the internal _bond_monomer_backbone() helper: given
    bonding-atom tuples and an atom map, it should run without raising.
    Each tuple appears to be (OBAtom, subunit index, molecule type, atom
    position, charge) -- TODO confirm against the helper's signature."""
    form = dna.CanonicalDnaForm()
    mol = openbabel.OBMol()
    conv = openbabel.OBConversion()
    conv.SetInFormat('smiles')
    conv.ReadString(mol, '[O-]N([O-])C1=C2N=CNC2=NC=N1')
    form._bond_monomer_backbone(mol, {
        'monomer': {
            'backbone_bond_atoms': [(mol.GetAtom(2), 1, core.Monomer, 2, 1), ],
            'backbone_displaced_atoms': [(mol.GetAtom(1), 1, core.Monomer, 1, -1)],
        },
        'backbone': {
            'monomer_bond_atoms': [(mol.GetAtom(8), 1, core.Monomer, 8, 1)],
            'monomer_displaced_atoms': [(mol.GetAtom(3), 1, core.Monomer, 3, -1)],
        }
    }, {
        # atom map: residue position -> role -> atom position -> OBAtom
        1: {
            'monomer': {
                1: mol.GetAtom(1),
                2: mol.GetAtom(2),
                3: mol.GetAtom(3),
                8: mol.GetAtom(8),
            },
        },
    })
def test__bond_subunits(self):
    """Smoke test of the internal _bond_subunits() helper: given right/left
    bonding-atom tuples and an atom map, it should run without raising.
    Tuple layout appears to match _bond_monomer_backbone() -- TODO confirm."""
    form = dna.CanonicalDnaForm()
    mol = openbabel.OBMol()
    conv = openbabel.OBConversion()
    conv.SetInFormat('smiles')
    conv.ReadString(mol, '[O-]N([O-])C1=C2N=CNC2=NC=N1')
    form._bond_subunits(mol, {
        'right': {
            'r_bond_atoms': [(mol.GetAtom(8), 1, core.Monomer, 8, 1)],
            'r_displaced_atoms': [(mol.GetAtom(3), 1, core.Monomer, 3, -1)],
        }
    },
        {
        'left': {
            'l_bond_atoms': [(mol.GetAtom(1), 1, core.Monomer, 1, 1), ],
            'l_displaced_atoms': [(mol.GetAtom(2), 1, core.Monomer, 2, -1)],
        }
    }, {
        # atom map: residue position -> role -> atom position -> OBAtom
        1: {
            'monomer': {
                1: mol.GetAtom(1),
                2: mol.GetAtom(2),
                3: mol.GetAtom(3),
                8: mol.GetAtom(8),
            },
        },
    })
def test_export(self):
    """export('inchi') should serialize the assembled structure; an empty
    form exports None."""
    # empty form: nothing to export
    form = dna.CanonicalDnaForm()
    self.assertEqual(form.export('inchi'), None)

    # single DNA nucleotide
    form = dna.CanonicalDnaForm().from_str('A')
    self.assertEqual(form.export('inchi'), ('InChI=1S'
                                            '/C10H14N5O6P'
                                            '/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5(16)6(21-7)2-20-22(17,18)19'
                                            '/h3-7,16H,1-2H2,(H2,11,12,13)(H2,17,18,19)'
                                            '/p-2'))
    # DNA dimer
    form = dna.CanonicalDnaForm().from_str('AA')
    self.assertEqual(form.export('inchi'), ('InChI=1S'
                                            '/C20H26N10O11P2'
                                            '/c21-17-15-19(25-5-23-17)29(7-27-15)13-1-9(31)11(39-13)3-38-43(35,36)'
                                            '41-10-2-14(40-12(10)4-37-42(32,33)34)30-8-28-16-18(22)24-6-26-20(16)30'
                                            '/h5-14,31H,1-4H2,(H,35,36)(H2,21,23,25)(H2,22,24,26)(H2,32,33,34)'
                                            '/p-3'))
    # RNA dimer
    form = rna.CanonicalRnaForm().from_str('AA')
    self.assertEqual(form.export('inchi'), ('InChI=1S'
                                            '/C20H26N10O13P2'
                                            '/c21-15-9-17(25-3-23-15)29(5-27-9)19-12(32)11(31)7(41-19)1-40-45'
                                            '(37,38)43-14-8(2-39-44(34,35)36)'
                                            '42-20(13(14)33)30-6-28-10-16(22)24-4-26-18(10)30'
                                            '/h3-8,11-14,19-20,31-33H,1-2H2,(H,37,38)(H2,21,23,25)(H2,22,24,26)(H2,34,35,36)'
                                            '/p-3'))
    # protein dimer
    form = protein.CanonicalProteinForm().from_str('AA')
    self.assertEqual(form.export('inchi'),
                     'InChI=1S/C6H12N2O3/c1-3(7)5(9)8-4(2)6(10)11/h3-4H,7H2,1-2H3,(H,8,9)(H,10,11)/p+1/t3-,4?/m0/s1')

    # empty form again: still None
    form = dna.CanonicalDnaForm()
    self.assertEqual(form.export('inchi'), None)
def test_circular_export(self):
    """export('inchi') of circular forms should serialize the cyclized
    structure; an empty circular form exports None."""
    form = dna.CanonicalDnaForm(circular=True)
    self.assertEqual(form.export('inchi'), None)
    # circular DNA monomer
    form = dna.CanonicalDnaForm(circular=True).from_str('A|circular')
    self.assertEqual(form.export('inchi'), ('InChI=1S/C10H12N5O5P'
                                            '/c11-9-8-10(13-3-12-9)15(4-14-8)7-1-5-6(19-7)2-18-21(16,17)20-5'
                                            '/h3-7H,1-2H2,(H,16,17)(H2,11,12,13)'
                                            '/p-1'))
    # circular DNA dimer
    form = dna.CanonicalDnaForm(circular=True).from_str('AA|circular')
    self.assertEqual(form.export('inchi'), ('InChI=1S/C20H24N10O10P2'
                                            '/c21-17-15-19(25-5-23-17)29(7-27-15)13-1-9-11(37-13)3-35-42(33,34)'
                                            '40-10-2-14(38-12(10)4-36-41(31,32)39-9)30-8-28-16-18(22)24-6-26-20(16)30'
                                            '/h5-14H,1-4H2,(H,31,32)(H,33,34)(H2,21,23,25)(H2,22,24,26)/p-2'))
    # circular RNA dimer
    form = rna.CanonicalRnaForm(circular=True).from_str('AA|circular')
    self.assertEqual(form.export('inchi'), ('InChI=1S/C20H24N10O12P2'
                                            '/c21-15-9-17(25-3-23-15)29(5-27-9)19-11(31)13-7(39-19)'
                                            '1-37-43(33,34)42-14-8(2-38-44(35,36)41-13)40-20'
                                            '(12(14)32)30-6-28-10-16(22)24-4-26-18(10)30'
                                            '/h3-8,11-14,19-20,31-32H,1-2H2,(H,33,34)(H,35,36)(H2,21,23,25)(H2,22,24,26)/p-2'))
def test_get_fasta(self):
    """get_canonical_seq() should map each monomeric form to the code of its
    root base monomer, and use the default FASTA code when the roots are
    ambiguous."""
    alphabet = core.Alphabet()
    alphabet.monomers.A = core.Monomer()
    alphabet.monomers.C = core.Monomer()
    alphabet.monomers.G = core.Monomer()
    alphabet.monomers.T = core.Monomer()
    # chain of modified forms, each based on the previous
    alphabet.monomers.m2A = core.Monomer(base_monomers=[alphabet.monomers.A])
    alphabet.monomers.m22A = core.Monomer(base_monomers=[alphabet.monomers.m2A])
    alphabet.monomers.m222A = core.Monomer(base_monomers=[alphabet.monomers.m22A])
    alphabet.monomers.m2222A = core.Monomer(base_monomers=[alphabet.monomers.A, alphabet.monomers.m222A])
    # m2222C has two distinct roots (A and C) => ambiguous
    alphabet.monomers.m2222C = core.Monomer(base_monomers=[alphabet.monomers.C, alphabet.monomers.m222A])
    # root resolution follows the base-monomer chains
    self.assertEqual(alphabet.monomers.A.get_root_monomers(), set([alphabet.monomers.A]))
    self.assertEqual(alphabet.monomers.C.get_root_monomers(), set([alphabet.monomers.C]))
    self.assertEqual(alphabet.monomers.G.get_root_monomers(), set([alphabet.monomers.G]))
    self.assertEqual(alphabet.monomers.T.get_root_monomers(), set([alphabet.monomers.T]))
    self.assertEqual(alphabet.monomers.m2A.get_root_monomers(), set([alphabet.monomers.A]))
    self.assertEqual(alphabet.monomers.m22A.get_root_monomers(), set([alphabet.monomers.A]))
    self.assertEqual(alphabet.monomers.m2222A.get_root_monomers(), set([alphabet.monomers.A]))
    self.assertEqual(alphabet.monomers.m2222C.get_root_monomers(), set([alphabet.monomers.A, alphabet.monomers.C]))
    # ambiguous roots render as the default code '?'
    bpform = core.BpForm(alphabet=alphabet, seq=[
        alphabet.monomers.A, alphabet.monomers.C, alphabet.monomers.G, alphabet.monomers.T,
        alphabet.monomers.m2A, alphabet.monomers.m22A, alphabet.monomers.m222A,
        alphabet.monomers.m2222A, alphabet.monomers.m2222C,
    ])
    self.assertEqual(bpform.get_canonical_seq(), 'ACGTAAAA?')

    # subclasses can override the default code
    class CustomBpForm(core.BpForm):
        DEFAULT_FASTA_CODE = 'X'
    bpform = CustomBpForm(alphabet=alphabet, seq=[
        alphabet.monomers.A, alphabet.monomers.C, alphabet.monomers.G, alphabet.monomers.T,
        alphabet.monomers.m2A, alphabet.monomers.m22A, alphabet.monomers.m222A,
        alphabet.monomers.m2222A, alphabet.monomers.m2222C,
    ])
    self.assertEqual(bpform.get_canonical_seq(), 'ACGTAAAAX')
def test_validate(self):
    """validate() should return [] for well-formed forms and a non-empty
    error list for inconsistent bond-atom definitions and positions."""
    # well-formed sequence
    form = dna.DnaForm()
    form.from_str('ACGT')
    self.assertEqual(form.validate(), [])

    # backbone bond atom without a position => invalid
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.backbone.monomer_bond_atoms.append(core.Atom(core.Monomer, 'C', None))
    self.assertNotEqual(form.validate(), [])

    # extra inter-monomer bond atom without a position => invalid
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.bond.l_bond_atoms.append(core.Atom(core.Monomer, 'C', None))
    self.assertNotEqual(form.validate(), [])

    # clearing the position of the existing bond atom is still valid
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.bond.l_bond_atoms[0].position = None
    self.assertEqual(form.validate(), [])

    # monomer bond atom whose parent is the backbone => invalid
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.seq[0].backbone_bond_atoms.append(core.Atom(core.Backbone, 'C', None))
    self.assertNotEqual(form.validate(), [])
    form.seq[0].backbone_bond_atoms = []

    # right bond atom whose parent is the backbone => invalid
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.seq[0].r_bond_atoms.append(core.Atom(core.Backbone, 'C', 1))
    self.assertNotEqual(form.validate(), [])
    form.seq[0].r_bond_atoms.pop()

    # removing all backbone bond atoms is valid ...
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.backbone.monomer_bond_atoms = []
    self.assertEqual(form.validate(), [])

    # ... but removing either side of the inter-monomer bond is not
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.bond.l_bond_atoms = []
    self.assertNotEqual(form.validate(), [])
    form = dna.DnaForm()
    form.from_str('ACGT')
    form.bond.r_bond_atoms = []
    self.assertNotEqual(form.validate(), [])

    # single structure-only monomer is valid
    form = dna.DnaForm()
    form.from_str('[structure: "OC(=O)[C@@H]([NH3+])CS"]')
    self.assertEqual(form.validate(), [])

    # inconsistent position ranges => invalid
    form = dna.DnaForm()
    form.from_str('[structure: "OC(=O)[C@@H]([NH3+])CS" | position: 2-1]')
    self.assertNotEqual(form.validate(), [])
    form = dna.DnaForm()
    form.from_str('[structure: "OC(=O)[C@@H]([NH3+])CS" | position: 1-2]')
    self.assertNotEqual(form.validate(), [])
    form = dna.DnaForm()
    form.from_str('[structure: "OC(=O)[C@@H]([NH3+])CS" | position: 2-1]A')
    self.assertNotEqual(form.validate(), [])
def test_validate_circular(self):
    """validate() of circular forms should require the terminal monomers to
    define both sides of the closing bond (structure, r-/l-bond atoms)."""
    form = protein.ProteinForm()
    form.from_str('CCC')
    self.assertEqual(form.validate(), [])
    form = protein.ProteinForm()
    form.from_str('CCC|circular')
    self.assertEqual(form.validate(), [])
    # custom terminal monomer with complete bond definitions is valid ...
    form = protein.ProteinForm()
    form.from_str('CC[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-bond-atom: C2'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']|circular')
    self.assertEqual(form.validate(), [])
    # ... at either end of the sequence
    form = protein.ProteinForm()
    form.from_str('[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-bond-atom: C2'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']CC|circular')
    self.assertEqual(form.validate(), [])
    # no atom for the right bond
    form = protein.ProteinForm()
    form.from_str('CC[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']|circular')
    self.assertNotEqual(form.validate(), [])
    # no atom for the left bond
    form = protein.ProteinForm()
    form.from_str('[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-bond-atom: C2'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']CC|circular')
    self.assertNotEqual(form.validate(), [])
    # no structure defined
    form = protein.ProteinForm()
    form.from_str('[id: "C2"'
                  ' | r-bond-atom: C2'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']CC|circular')
    self.assertNotEqual(form.validate(), [])
    # incorrect element (C1)
    form = protein.ProteinForm()
    form.from_str('[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-bond-atom: C1'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']CC|circular')
    self.assertNotEqual(form.validate(), [])
def test_validate_crosslinks(self):
    """validate() should check crosslink atom parents, duplicate bond atoms,
    and overlap between crosslinks."""
    # well-formed crosslink
    form_str = ('AAA '
                ' | x-link: [l-bond-atom: 1P9'
                ' | r-bond-atom: 3O1'
                ' | l-displaced-atom: 1O12-1'
                ' | r-displaced-atom: 3H1]')
    form = dna.DnaForm().from_str(form_str)
    self.assertEqual(form.validate(), [])
    # invalid atom parent
    list(form.crosslinks)[0].l_bond_atoms[0].molecule = core.Backbone
    self.assertNotEqual(form.validate(), [])
    # unmatched bond atoms
    form = dna.DnaForm().from_str('AAA')
    form.bond.l_bond_atoms.append(core.Atom(core.Backbone, element='O', position=1))
    self.assertNotEqual(form.validate(), [])
    # no backbone bond atoms at all is still valid
    form = dna.DnaForm().from_str('AAA')
    form.backbone.monomer_bond_atoms.clear()
    self.assertEqual(form.validate(), [])
    # duplicate l-bond-atom in a monomer definition => invalid
    form = protein.ProteinForm()
    form.from_str('CC[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-bond-atom: C2'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']')
    self.assertNotEqual(form.validate(), [])
    # a repeated displaced atom is allowed
    form = protein.ProteinForm()
    form.from_str('CC[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-bond-atom: C2'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']')
    self.assertEqual(form.validate(), [])
    # circular form whose terminal monomer lacks an r-bond-atom => invalid
    form = protein.ProteinForm()
    form.from_str('CC[id: "C2"'
                  ' | structure: "OC(=O)[C@@H]([NH3+])CS"'
                  ' | r-displaced-atom: O1'
                  ' | r-displaced-atom: H1'
                  ' | r-displaced-atom: H1'
                  ' | l-bond-atom: N6-1'
                  ' | l-displaced-atom: H6+1'
                  ' | l-displaced-atom: H6'
                  ']|circular')
    self.assertNotEqual(form.validate(), [])
    # duplicate r-bond-atom inside an x-link => invalid
    form_str = ('AAA '
                ' | x-link: [l-bond-atom: 1C5'
                ' | r-bond-atom: 3C5'
                ' | r-bond-atom: 3C5'
                ' | l-displaced-atom: 1H5'
                ' | r-displaced-atom: 3H5]')
    form = dna.DnaForm().from_str(form_str)
    self.assertNotEqual(form.validate(), [])
    # adding a second crosslink over the same atoms programmatically => invalid
    crosslink = (' | x-link: [l-bond-atom: 1P9'
                 ' | r-bond-atom: 3O1'
                 ' | l-displaced-atom: 1O12-1'
                 ' | r-displaced-atom: 3H1]')
    form = dna.DnaForm().from_str('AAA ' + crosslink)
    self.assertEqual(form.validate(), [])
    form.crosslinks.add(core.Bond(
        l_bond_atoms=[core.Atom(core.Monomer, monomer=1, element='P', position=9)],
        r_bond_atoms=[core.Atom(core.Monomer, monomer=3, element='O', position=1)],
        l_displaced_atoms=[core.Atom(core.Monomer, monomer=1, element='O', position=12)],
        r_displaced_atoms=[core.Atom(core.Monomer, monomer=3, element='H', position=1)],
    ))
    self.assertNotEqual(form.validate(), [])
def test_validate_nicks(self):
form = dna.DnaForm().from_str('AAA')
form.nicks.add(core.Nick(position=1))
self.assertEqual(form.validate(), [])
form = dna.DnaForm().from_str('AAA')
form.nicks.add(core.Nick(position=4))
self.assertNotEqual(form.validate(), [])
form = dna.DnaForm().from_str('AAA')
form.nicks.add(core.Nick(position=2))
form.nicks.add(core.Nick(position=2))
self.assertNotEqual(form.validate(), [])
form = dna.DnaForm()
form.seq.append(core.Monomer(
structure='OC1CC(OC1COP(=O)([O-])[O-])n1cnc2c1ncnc2N',
#r_bond_atoms=[core.Atom(core.Monomer, 'O', position=1)],
l_bond_atoms=[core.Atom(core.Monomer, 'P', position=9)],
r_displaced_atoms=[core.Atom(core.Monomer, 'H', position=1)],
l_displaced_atoms=[core.Atom(core.Monomer, 'O', position=12, charge=-1)],
))
form.seq.append(core.Monomer(
structure='OC1CC(OC1COP(=O)([O-])[O-])n1cnc2c1ncnc2N',
r_bond_atoms=[core.Atom(core.Monomer, 'O', position=1)],
#l_bond_atoms=[core.Atom(core.Monomer, 'P', position=9)],
r_displaced_atoms=[core.Atom(core.Monomer, 'H', position=1)],
l_displaced_atoms=[core.Atom(core.Monomer, 'O', position=12, charge=-1)],
))
self.assertNotEqual(form.validate(), [])
form.nicks.add(core.Nick(position=1))
self.assertEqual(form.validate(), [])
def test_get_image(self):
form_str = ('AAA '
' | x-link: [l-bond-atom: 1P9'
' | r-bond-atom: 3O1'
' | l-displaced-atom: 1O12-1'
' | r-displaced-atom: 3H1]')
form = dna.DnaForm().from_str(form_str)
assert form.validate() == []
img = form.get_image(image_format='svg', width=800, height=600)
# with open('test1.svg', 'w') as file:
# file.write(img)
form_str = ('AAA | circular')
form = dna.DnaForm().from_str(form_str)
assert form.validate() == []
img = form.get_image(image_format='svg', width=800, height=600)
# with open('test2.svg', 'w') as file:
# file.write(img)
    def test_get_genomic_image(self):
        """Render genomic-style (linear map) images for protein, DNA, and RNA forms.

        NOTE(review): only checks that an SVG string is produced (or that no
        exception is raised) -- the rendered content itself is not verified.
        """
        # Protein form with five crosslinks between cysteine sulfur atoms at
        # assorted sequence positions.
        form = protein.ProteinForm().from_str(
            ('ACRGCRGAARGCHILCA{SEL}RC' * 30) + (
                ' | x-link: ['
                ' l-bond-atom: 2S11'
                ' | r-bond-atom: 5S11'
                ' | l-displaced-atom: 2H11'
                ' | r-displaced-atom: 5H11'
                ']'
                ' | x-link: ['
                ' l-bond-atom: 40S11'
                ' | r-bond-atom: 80S11'
                ' | l-displaced-atom: 40H11'
                ' | r-displaced-atom: 80H11'
                ']'
                ' | x-link: ['
                ' l-bond-atom: 60S11'
                ' | r-bond-atom: 100S11'
                ' | l-displaced-atom: 60H11'
                ' | r-displaced-atom: 100H11'
                ']'
                ' | x-link: ['
                ' l-bond-atom: 20S11'
                ' | r-bond-atom: 160S11'
                ' | l-displaced-atom: 20H11'
                ' | r-displaced-atom: 160H11'
                ']'
                ' | x-link: ['
                ' l-bond-atom: 140S11'
                ' | r-bond-atom: 220S11'
                ' | l-displaced-atom: 140H11'
                ' | r-displaced-atom: 220H11'
                ']'
            ))
        # Sequence feature annotations overlaid on the rendered map.
        seq_features = [{
            'label': 'Processed',
            'color': '#cccccc',
            'positions': [[1, 30], [50, 85]],
        }]
        svg = form.get_genomic_image(
            seq_features=seq_features)
        self.assertIsInstance(svg, str)
        # with open(os.path.join('.', 'test.svg'), 'w') as file:
        #     file.write(svg)

        # DNA form padded with monomers labeled in different ways (id, name,
        # synonym, external identifier, unlabeled) to exercise label rendering.
        form = dna.DnaForm().from_str('AAAAA' * 30)
        form.seq.append(core.Monomer(id='id'))
        form.seq.append(core.Monomer(name='name'))
        form.seq.append(core.Monomer(synonyms=['syn']))
        form.seq.append(core.Monomer(identifiers=[core.Identifier(ns='ns', id='ns-id')]))
        form.seq.append(core.Monomer())
        form.get_genomic_image()

        form = rna.RnaForm().from_str('AAAAA' * 30)
        form.get_genomic_image()

        # Forms of unsupported alphabet types should raise.
        with self.assertRaisesRegex(ValueError, 'must be an instance'):
            protein.CanonicalProteinForm().get_genomic_image()
class AlphabetTestCase(unittest.TestCase):
    """Tests for :obj:`core.Alphabet`."""

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_set_monomers(self):
        """The monomers attribute accepts dict-like values but not None."""
        alphabet = core.Alphabet()
        alphabet.monomers = core.MonomerDict()
        alphabet.monomers = {}
        with self.assertRaises(ValueError):
            alphabet.monomers = None

    def test_getitem(self):
        """Monomers can be read via attribute access on the monomer dict."""
        self.assertEqual(dna.canonical_dna_alphabet.monomers.A.export('inchi'), dAMP_inchi)

    def test_setitem(self):
        """Monomer codes must not contain brackets or boundary/inner whitespace."""
        alphabet = core.Alphabet()

        for code in ['A', 'aA', 'Aa', '*', '* *']:
            alphabet.monomers[code] = core.Monomer(structure=dAMP_smiles)

        for code in ['{aa', 'A]', '* ', ' *', '\n ', ' \n', '*\n*', '*\t*', ' ', '\n ']:
            with self.assertRaises(ValueError):
                alphabet.monomers[code] = core.Monomer(structure=dAMP_smiles)

    def test_get_monomer_code(self):
        """Codes resolve for known monomers; unknown monomers raise."""
        alphabet = dna.canonical_dna_alphabet
        self.assertEqual(alphabet.get_monomer_code(alphabet.monomers.A), 'A')
        with self.assertRaises(ValueError):
            alphabet.get_monomer_code(core.Monomer())

    def test_get_major_micro_species(self):
        """Protonation of all monomers at a given pH runs without error."""
        alphabet = core.Alphabet(monomers={
            'A': core.Monomer(structure=dAMP_smiles),
            'C': core.Monomer(structure=dCMP_smiles),
        })
        alphabet.get_major_micro_species(7.)

    def _clone_canonical(self, mapping):
        """Build an alphabet with canonical DNA metadata and the given
        (code, canonical code) monomer mapping."""
        source = dna.canonical_dna_alphabet
        return core.Alphabet(id=source.id,
                             name=source.name,
                             description=source.description,
                             monomers={code: getattr(source.monomers, attr)
                                       for code, attr in mapping})

    def test_is_equal(self):
        """Equality requires identical metadata and identical code -> monomer maps."""
        self.assertTrue(dna.canonical_dna_alphabet.is_equal(dna.canonical_dna_alphabet))
        self.assertFalse(dna.canonical_dna_alphabet.is_equal(rna.rna_alphabet))
        self.assertFalse(dna.canonical_dna_alphabet.is_equal(None))
        self.assertFalse(dna.canonical_dna_alphabet.is_equal(core.Alphabet()))

        full = [('A', 'A'), ('C', 'C'), ('G', 'G'), ('T', 'T')]
        # Same metadata and same monomers: equal.
        self.assertTrue(self._clone_canonical(full).is_equal(self._clone_canonical(full)))
        # Missing a monomer: not equal.
        self.assertFalse(self._clone_canonical(full).is_equal(
            self._clone_canonical([('A', 'A'), ('C', 'C'), ('G', 'G')])))
        # Same monomers mapped to swapped codes: not equal.
        self.assertFalse(self._clone_canonical(full).is_equal(
            self._clone_canonical([('C', 'A'), ('A', 'C'), ('G', 'G'), ('T', 'T')])))

    def test_to_from_yaml(self):
        """An alphabet round-trips through its YAML representation."""
        alphabet = core.Alphabet(id='test',
                                 name='Test',
                                 description='Test description',
                                 monomers={
                                     'A': dna.canonical_dna_alphabet.monomers.A,
                                     'C': dna.canonical_dna_alphabet.monomers.C,
                                     'G': dna.canonical_dna_alphabet.monomers.G,
                                     'T': dna.canonical_dna_alphabet.monomers.T,
                                 })
        path = os.path.join(self.tmp_dir, 'alphabet.yml')
        alphabet.to_yaml(path)
        self.assertTrue(core.Alphabet().from_yaml(path).is_equal(alphabet))

    def test_to_from_yaml_without_name(self):
        """Round-tripping also works when optional metadata is omitted."""
        alphabet = core.Alphabet(id='test',
                                 monomers={
                                     'A': dna.canonical_dna_alphabet.monomers.A,
                                     'C': dna.canonical_dna_alphabet.monomers.C,
                                     'G': dna.canonical_dna_alphabet.monomers.G,
                                     'T': dna.canonical_dna_alphabet.monomers.T,
                                 })
        path = os.path.join(self.tmp_dir, 'alphabet.yml')
        alphabet.to_yaml(path)
        self.assertTrue(core.Alphabet().from_yaml(path).is_equal(alphabet))
class AlphabetBuilderTestCase(unittest.TestCase):
    """Tests for the :obj:`core.AlphabetBuilder` base class."""

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test(self):
        # Minimal concrete builder whose build step returns an empty alphabet.
        class AlphabetBuilder(core.AlphabetBuilder):
            def build(self, ph=None, major_tautomer=False, dearomatize=False):
                return core.Alphabet()

        # run() returns the built alphabet ...
        self.assertIsInstance(AlphabetBuilder().run(), core.Alphabet)

        # ... and optionally serializes it to the given path.
        path = os.path.join(self.tmp_dir, 'alphabet.yml')
        AlphabetBuilder().run(ph=7.4, major_tautomer=True, path=path)
        self.assertTrue(os.path.isfile(path))
class BpFormFeatureTestCase(unittest.TestCase):
    """Tests for the bidirectional BpFormFeature <-> BpForm association."""

    def test(self):
        # Adding a feature to a form's feature set sets the back-reference;
        # removing it clears the back-reference.
        bp_form = core.BpForm()
        self.assertEqual(len(bp_form.features), 0)
        feature = core.BpFormFeature(None, start_position=1, end_position=2)
        self.assertEqual(feature.start_position, 1)
        self.assertEqual(feature.end_position, 2)
        bp_form.features.add(feature)
        self.assertEqual(len(bp_form.features), 1)
        self.assertIn(feature, bp_form.features)
        self.assertEqual(feature.form, bp_form)
        bp_form.features.remove(feature)
        self.assertEqual(len(bp_form.features), 0)
        self.assertEqual(feature.form, None)

        # Setting the back-reference directly updates the form's feature set.
        bp_form = core.BpForm()
        feature = core.BpFormFeature(None, start_position=1, end_position=2)
        feature.form = bp_form
        self.assertEqual(feature.form, bp_form)
        self.assertEqual(len(bp_form.features), 1)
        self.assertIn(feature, bp_form.features)
        feature.form = None
        self.assertEqual(feature.form, None)
        self.assertEqual(len(bp_form.features), 0)

        # Passing the form to the constructor registers the feature immediately.
        bp_form = core.BpForm()
        self.assertEqual(len(bp_form.features), 0)
        feature = core.BpFormFeature(bp_form, start_position=1, end_position=2)
        self.assertEqual(len(bp_form.features), 1)
        self.assertIn(feature, bp_form.features)

        # Invalid assignments are rejected.
        with self.assertRaises(ValueError):
            feature.form = -1
        with self.assertRaises(ValueError):
            feature.start_position = -1
        with self.assertRaises(ValueError):
            feature.end_position = -1
        with self.assertRaises(ValueError):
            bp_form.features = None
        with self.assertRaises(ValueError):
            bp_form.features = core.BpFormFeatureSet(bp_form)
        with self.assertRaises(ValueError):
            bp_form.features.form = None
        with self.assertRaises(ValueError):
            bp_form.features.form = bp_form
        with self.assertRaises(ValueError):
            bp_form.features.add(None)

        # Bulk set operations keep the association consistent.
        feature_a = core.BpFormFeature(None, 2, 3)
        feature_b = core.BpFormFeature(None, 3, 4)
        bp_form.features.update(set([feature_a]))
        bp_form.features.symmetric_difference_update([feature_a, feature_b])
def clean_smiles(smi):
    """Canonicalize a SMILES string with Open Babel.

    The string is round-tripped (read SMILES, write canonical SMILES) twice
    so that the returned representation is stable.
    """
    converter = openbabel.OBConversion()
    converter.SetInFormat('smi')
    converter.SetOutFormat('smi')
    converter.SetOptions('c', converter.OUTOPTIONS)  # 'c': canonical output
    for _ in range(2):
        mol = openbabel.OBMol()
        converter.ReadString(mol, smi)
        # Open Babel appends a tab-separated title; keep only the SMILES part.
        smi = converter.WriteString(mol).partition('\t')[0]
    return smi
| StarcoderdataPython |
1772725 | <reponame>kamoliddeenov/translitobot
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandHelp
from loader import dp, db
from utils.misc.msg_dict import texts
@dp.message_handler(CommandHelp())
async def bot_help(message: types.Message):
    """Reply to /help with the help text in the user's saved language."""
    user = await db.select_user(telegram_id=message.from_user.id)
    # NOTE(review): index 4 of the user record appears to hold the language
    # code -- confirm against the db schema.
    language = user[4]
    if language == "uzbek":
        await message.answer(texts["help_uz"])
    elif language == "english":
        await message.answer(texts["help_en"])
| StarcoderdataPython |
1751552 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# BUG FIX: the original script referenced these exception classes in its
# ``except`` clauses without importing them, so any triggered handler would
# raise NameError instead of being skipped.
from selenium.common.exceptions import ElementNotVisibleException, NoSuchElementException

# Launch Chrome in incognito mode.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
driver = webdriver.Chrome(chrome_options=chrome_options)

driver.get('https://www.instagram.com/accounts/login/')

# Login credentials and the account whose followers will be followed.
driver.email = 'life_explorer_4'
driver.password = '<PASSWORD>'
driver.username = 'natgeotravellerindia'

time.sleep(10)

# Fill in the login form.  NOTE(review): if the lookup fails, the inputs stay
# unbound and the send_keys calls below raise; kept best-effort to preserve
# the original control flow.
try:
    email_input = driver.find_element_by_xpath("//input[@name = 'username']")
    password_input = driver.find_element_by_xpath("//input[@name = 'password']")
except (NoSuchElementException, ElementNotVisibleException):
    pass
email_input.send_keys(driver.email)
password_input.send_keys(driver.password)
password_input.send_keys(Keys.ENTER)
time.sleep(10)

# Dismiss the "save login info" pop-up.
driver.find_element_by_xpath("//button[text() = 'Not Now']").click()
time.sleep(10)

# Open the followers dialog of the target account.
driver.get('https://www.instagram.com/' + driver.username + '/')
time.sleep(5)
driver.get('https://www.instagram.com/' + driver.username + '/followers/')
driver.find_element_by_xpath("//a[@class='-nal3 ']").click()
time.sleep(5)

# Scrollable container of the followers dialog.
followers_box = driver.find_element_by_xpath("//div[@class='isgrP']")

# Pre-load follower entries by scrolling the dialog.
# BUG FIX: the original looped ``for j in (1,100)`` and ``for k in (1,10)``,
# iterating over a two-element tuple (exactly two passes each) instead of
# the intended ``range``.
for _ in range(1, 100):
    driver.execute_script(
        'arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;',
        followers_box)
    time.sleep(10)
    for _ in range(1, 10):
        driver.execute_script(
            'arguments[0].scrollDown = arguments[0].scrollDown + arguments[0].offsetHeight;',
            followers_box)
        time.sleep(10)

# Scroll through the list 500 times, clicking every "Follow" button found.
scroll = 0
while scroll < 500:  # scroll 500 times
    # BUG FIX: ``for i in (0,4)`` iterated twice; the intent was four passes.
    for _ in range(0, 4):
        try:
            driver.execute_script(
                'arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;',
                followers_box)
            driver.find_element_by_xpath("//button[text() = 'Follow']").click()
            time.sleep(10)
        except (NoSuchElementException, ElementNotVisibleException):
            pass
    time.sleep(8)
    scroll += 1
    driver.execute_script(
        'arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;',
        followers_box)
    time.sleep(2)
4829676 | # Python example to check if a class is
# subclass of another
class Base(object):
    """Empty base class for the issubclass/isinstance demonstration."""
    pass


class Derived(Base):
    """Empty subclass of Base."""
    pass


# Driver code
print(issubclass(Derived, Base))    # True: Derived inherits from Base
print(issubclass(Base, Derived))    # False: the relation is not symmetric

derived_obj = Derived()
base_obj = Base()

# base_obj is not an instance of Derived
print(isinstance(base_obj, Derived))
# But derived_obj is an instance of Base
print(isinstance(derived_obj, Base))
| StarcoderdataPython |
3223554 | <filename>03-graph-algorithms/1_graph_decomposition/toposort_dfs.py
import sys
class Graph:
    """Directed graph supporting topological sort via depth-first search."""

    def __init__(self, n, edges):
        self._vertices_count = n
        self._build_adjacency_list(edges)

    # Time Complexity: O(|E|)
    # Space Complexity: O(1) extra beyond the adjacency list itself
    def _build_adjacency_list(self, edges):
        """Convert 1-indexed (tail, head) edge pairs to a 0-indexed adjacency list."""
        self._adjacency_list = [[] for _ in range(self._vertices_count)]
        for tail, head in edges:
            self._adjacency_list[tail - 1].append(head - 1)

    def explore(self, v, visited, cycle, order):
        """DFS from v, appending vertices to order in post-order.

        ``cycle[u]`` is True while u is on the current DFS stack; if a back
        edge is found, the method returns early leaving ``cycle[v]`` set so
        the caller can detect the cycle.
        """
        visited[v] = True
        cycle[v] = True
        for neighbor in self._adjacency_list[v]:
            if cycle[neighbor]:
                # Back edge to a vertex on the stack: cycle detected.
                return
            if visited[neighbor]:
                continue
            self.explore(neighbor, visited, cycle, order)
            if cycle[neighbor]:
                # Propagate a cycle found deeper in the recursion.
                return
        order.append(v)
        cycle[v] = False

    def toposort_dfs(self):
        """Return a topological order (0-indexed), or [] if the graph has a cycle."""
        count = self._vertices_count
        visited = [False] * count
        cycle = [False] * count
        order = []
        for v in range(count):
            if not visited[v]:
                self.explore(v, visited, cycle, order)
                if cycle[v]:
                    return []
        return order[::-1]
if __name__ == '__main__':
    # Read all of stdin: vertex count n, edge count m, then m edge pairs.
    values = list(map(int, sys.stdin.read().split()))
    n, m = values[0], values[1]
    edge_values = values[2:2 + 2 * m]
    edges = list(zip(edge_values[0::2], edge_values[1::2]))

    graph = Graph(n, edges)
    # Print the 1-indexed topological order (nothing when a cycle exists).
    for vertex in graph.toposort_dfs():
        print(vertex + 1, end=' ')
| StarcoderdataPython |
3289800 | <filename>scripts/turtlebot.py<gh_stars>10-100
from math import pi, sqrt, atan2, cos, sin
import numpy as np
import matplotlib.pyplot as plt
import rospy
import tf
from geometry_msgs.msg import Twist, Pose2D
from nav_msgs.msg import Odometry
from obstacle_detector.msg import Obstacles, CircleObstacle, SegmentObstacle
from vector2d import Vector2D
class TurtleBot:
    """Motion planner and low-level controller for a TurtleBot on a hex map.

    Paths between hexagon centers are composed of circular arcs of fixed
    ``turning_radius`` joined by tangent transitions (Dubins-style).
    Reference trajectories are tracked through a hand-point feedback
    controller, and obstacle detections are accumulated onto the hex map.
    """

    def __init__(self, hexmap, turning_radius):
        self.hexmap = hexmap
        self.turning_radius = turning_radius
        # motion planning parameters
        self.sample_period = 0.1  # controller sampling period [s]
        self.rate = rospy.Rate(1/self.sample_period)
        self.linear_vel = 1.0  # cruise linear speed [m/s]
        self.angular_vel = self.linear_vel / self.turning_radius  # arc angular speed [rad/s]
        self.remaining_time = 0  # within [0, sample_period]
        self.smooth_start_update_interval = 0.03  # second
        self.turning_clockwise = False  # direction of the current arc
        self.turning_clockwise_buffer = False  # direction chosen for the next arc
        # send velocity commands
        self.handpoint_offset = 0.2  # meter
        self.controller_tune_K = 0.3  # proportional tracking gain
        self.vel = Twist()
        self.vel_pub = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)
        # odometry feedback
        self.logging_counter = 0
        self.trajectory = list()  # logged robot poses [x, y, theta]
        self.trajectory_hp = list()  # logged hand-point poses [x, y, theta]
        self.trajectory_cmd = list()  # logged commanded reference points [x, y]
        self.pose = Pose2D()
        self.pose_hp = Pose2D()
        self.odom_sub = rospy.Subscriber("odom", Odometry, self.odom_callback)
        # perception  # TODO: probabilistic update
        self.obstacle_list_candidate = dict()  # hex cell -> detection hit count
        self.obstacle_threshold = rospy.get_param("/hdcp_planner/obstacle_threshold", 100)
        self.valid_sensing_range = self.hexmap.radius * 6
        self.obstacle_sub = rospy.Subscriber("raw_obstacles", Obstacles, self.obstacle_callback)

    def straight_line_trajectory_planning(self, start_point, end_point):
        """Track the straight segment from start_point to end_point.

        Emits reference points once per sample period; the leftover fraction
        of the last sample is stored in ``self.remaining_time`` so the next
        segment starts in phase.
        """
        direction_vector = end_point - start_point
        angle = direction_vector.angle
        total_distance = abs(direction_vector)
        local_time = self.remaining_time  # remaining time is within [0, self.sample_period]
        while not rospy.is_shutdown():
            current_distance = self.linear_vel * local_time
            x_ref = start_point[0] + current_distance * cos(angle)
            y_ref = start_point[1] + current_distance * sin(angle)
            vx_ref = self.linear_vel * cos(angle)
            vy_ref = self.linear_vel * sin(angle)
            self.tracking_controller(x_ref, y_ref, vx_ref, vy_ref)
            local_time += self.sample_period
            remaining_distance = total_distance - self.linear_vel * local_time
            if remaining_distance < 0:
                # carry the overshoot over to the next path segment
                self.remaining_time = - remaining_distance/self.linear_vel
                break
        if self.remaining_time > self.sample_period or self.remaining_time < 0:
            # should not happen; dump the planning state for debugging
            rospy.logwarn("line: remaining_time = " + str(self.remaining_time))
            rospy.loginfo("local_time = " + str(local_time))
            rospy.loginfo("start_point = " + str(start_point) + "; end_point = " + str(end_point))
            rospy.loginfo("total_distance = " + str(total_distance) + "; remaining_distance" + str(remaining_distance))

    def circle_trajectory_planning(self, start_point, end_point, center):
        """Track the arc (radius = turning_radius) from start_point to end_point.

        The turn direction is given by ``self.turning_clockwise``.
        """
        start_angle = atan2(start_point[1]-center[1], start_point[0]-center[0])
        angle_difference = self.get_angle_difference(start_point, end_point, center)
        local_time = self.remaining_time  # remaining time is within [0, self.sample_period]
        while not rospy.is_shutdown():
            if self.turning_clockwise:
                current_angle = start_angle - self.angular_vel * local_time  # from x to x_dot: take derivative wrt local_time
                x_ref = center[0] + self.turning_radius * cos(current_angle)  # x = cx + r*cos(a-vt)
                y_ref = center[1] + self.turning_radius * sin(current_angle)  # y = cy + r*sin(a-vt)
                vx_ref = self.linear_vel * sin(current_angle)  # x_dot = rv*sin(a-vt) # lin_vel = r*ang_vel
                vy_ref = - self.linear_vel * cos(current_angle)  # y_dot = -rv*cos(a-vt)
            else:
                current_angle = start_angle + self.angular_vel * local_time  # from x to x_dot: take derivative wrt local_time
                x_ref = center[0] + self.turning_radius * cos(current_angle)  # x = cx + r*cos(a+vt)
                y_ref = center[1] + self.turning_radius * sin(current_angle)  # y = cy + r*sin(a+vt)
                vx_ref = - self.linear_vel * sin(current_angle)  # x_dot = -rv*sin(a+vt) # lin_vel = r*ang_vel
                vy_ref = self.linear_vel * cos(current_angle)  # y_dot = rv*cos(a+vt)
            self.tracking_controller(x_ref, y_ref, vx_ref, vy_ref)
            local_time += self.sample_period
            remaining_angle = angle_difference - self.angular_vel * local_time
            if remaining_angle < 0:
                # carry the angular overshoot over to the next path segment
                self.remaining_time = - remaining_angle/self.angular_vel
                break
        if self.remaining_time > self.sample_period or self.remaining_time < 0:
            # should not happen; dump the planning state for debugging
            rospy.logwarn("circle: remaining_time = " + str(self.remaining_time))
            rospy.loginfo("local_time = " + str(local_time) + "; center = " + str(center))
            rospy.loginfo("start_point = " + str(start_point) + "; end_point = " + str(end_point))
            rospy.loginfo("angle_difference = " + str(angle_difference) + "; remaining_angle" + str(remaining_angle))

    def initial_circle_trajectory_planning(self):
        """Drive one full CCW circle from standstill, ramping velocity up smoothly."""
        angle_difference = 2*pi
        factor = 0
        current_angle = 0
        while not rospy.is_shutdown():
            # ramp the velocity scale from 0 to 1 over successive samples
            factor = factor + self.smooth_start_update_interval
            linear_vel = self.linear_vel * min(factor, 1)
            angular_vel = self.angular_vel * min(factor, 1)
            current_angle = current_angle + angular_vel * self.sample_period
            x_ref = self.turning_radius * cos(current_angle)
            y_ref = self.turning_radius * sin(current_angle)
            vx_ref = - linear_vel * sin(current_angle)
            vy_ref = linear_vel * cos(current_angle)
            self.tracking_controller(x_ref, y_ref, vx_ref, vy_ref)
            remaining_angle = angle_difference - current_angle
            if remaining_angle < 0:
                self.remaining_time = - remaining_angle/self.angular_vel
                break

    def get_angle_difference(self, start_point, end_point, center):
        """Angle swept from start_point to end_point around center,
        in the direction given by ``self.turning_clockwise``; result in (0, 2pi]."""
        # compute CCW angle difference between two points on circumference
        start_angle = atan2(start_point[1]-center[1], start_point[0]-center[0])
        end_angle = atan2(end_point[1]-center[1], end_point[0]-center[0])
        angle_difference = end_angle - start_angle
        if angle_difference <= 0:  # make sure value is within (0, 2pi]
            angle_difference += 2*pi
        # switch to CW angle difference if needed
        if self.turning_clockwise and angle_difference != 2*pi:  # 2*pi means we will turn full circle
            angle_difference = 2*pi - angle_difference
        return angle_difference

    def find_tangent_points(self, current_hex, next_hex, init_point):
        """Pick the tangent transition (outer keeps the turn direction, inner
        flips it) that is reached first from init_point along the current arc.

        Returns (departure point on the current circle, arrival point on the
        next circle) and records the next turn direction in
        ``self.turning_clockwise_buffer``.
        """
        current_hex_center = self.hexmap.cube_to_cat(current_hex)
        outer_start, outer_end = self.outer_tangent_points(current_hex, next_hex)
        inner_start, inner_end = self.inner_tangent_points(current_hex, next_hex)
        outer_diff = self.get_angle_difference(init_point, outer_start, current_hex_center)
        inner_diff = self.get_angle_difference(init_point, inner_start, current_hex_center)
        if inner_diff < outer_diff:
            # inner tangent reverses the turn direction on the next circle
            self.turning_clockwise_buffer = not self.turning_clockwise
            return inner_start, inner_end
        else:
            self.turning_clockwise_buffer = self.turning_clockwise
            return outer_start, outer_end

    def inner_tangent_points(self, current_hex, target_hex):
        """Closed-form inner (crossing) tangent points between the two turning
        circles centered on the given hexes."""
        current_center = self.hexmap.cube_to_cat(current_hex)
        target_center = self.hexmap.cube_to_cat(target_hex)
        ai = current_center[0]
        bi = current_center[1]
        aj = target_center[0]
        bj = target_center[1]
        w = (aj-ai)**2 + (bj-bi)**2  # squared center distance
        rt = self.turning_radius
        rt2 = rt**2
        if self.turning_clockwise:
            xi = ai + (2*rt2*(aj-ai) - rt*(bj-bi)*sqrt(w-4*rt2))/w  # minus sign for CW
            yi = bi + (2*rt2*(bj-bi) - rt*(ai-aj)*sqrt(w-4*rt2))/w
            xj = aj + (2*rt2*(ai-aj) - rt*(bi-bj)*sqrt(w-4*rt2))/w
            yj = bj + (2*rt2*(bi-bj) - rt*(aj-ai)*sqrt(w-4*rt2))/w
        else:
            xi = ai + (2*rt2*(aj-ai) + rt*(bj-bi)*sqrt(w-4*rt2))/w  # plus sign for CCW
            yi = bi + (2*rt2*(bj-bi) + rt*(ai-aj)*sqrt(w-4*rt2))/w
            xj = aj + (2*rt2*(ai-aj) + rt*(bi-bj)*sqrt(w-4*rt2))/w
            yj = bj + (2*rt2*(bi-bj) + rt*(aj-ai)*sqrt(w-4*rt2))/w
        return Vector2D(xi, yi), Vector2D(xj, yj)  # start_point, end_point

    def outer_tangent_points(self, current_hex, target_hex):
        """Closed-form outer (non-crossing) tangent points between the two
        turning circles centered on the given hexes."""
        current_center = self.hexmap.cube_to_cat(current_hex)
        target_center = self.hexmap.cube_to_cat(target_hex)
        ai = current_center[0]
        bi = current_center[1]
        aj = target_center[0]
        bj = target_center[1]
        w = (aj-ai)**2 + (bj-bi)**2  # squared center distance
        rt = self.turning_radius
        if self.turning_clockwise:
            xi = ai + rt*(bi-bj)/sqrt(w)  # plus sign for CW
            yi = bi + rt*(aj-ai)/sqrt(w)
            xj = aj + rt*(bi-bj)/sqrt(w)
            yj = bj + rt*(aj-ai)/sqrt(w)
        else:
            xi = ai - rt*(bi-bj)/sqrt(w)  # minus sign for CCW
            yi = bi - rt*(aj-ai)/sqrt(w)
            xj = aj - rt*(bi-bj)/sqrt(w)
            yj = bj - rt*(aj-ai)/sqrt(w)
        return Vector2D(xi, yi), Vector2D(xj, yj)  # start_point, end_point

    def tracking_controller(self, x_ref, y_ref, vx_ref, vy_ref):
        '''
        Proportional hand-point tracking controller:
        vx = xh_d_dot - K * (xh - xh_d) => xh --> xh_d
        vy = yh_d_dot - K * (yh - yh_d) => yh --> yh_d
        '''
        self.trajectory_cmd.append([x_ref, y_ref])
        K = self.controller_tune_K  # controller parameter
        ux = vx_ref - K * (self.pose_hp.x - x_ref)
        uy = vy_ref - K * (self.pose_hp.y - y_ref)
        vel_hp = [ux, uy]
        self.pub_vel_hp(vel_hp)

    def pub_vel_hp(self, vel_hp):
        '''
        Map a Cartesian hand-point velocity [x, y] to unicycle commands (v, w)
        and publish them (blocks until the next sample via rate.sleep).

        matrix transform
        [ v ]     1   [ L*cos0  L*sin0 ]   [ x ]
        [   ] =  --- *[                ] * [   ]
        [ w ]     L   [ -sin0    cos0  ]   [ y ]
        '''
        x = vel_hp[0]
        y = vel_hp[1]
        theta = self.pose_hp.theta
        v = x*cos(theta) + y*sin(theta)
        w = (x*(-sin(theta)) + y*cos(theta))/self.handpoint_offset
        rospy.logdebug("vel: theta=" + str(theta) + "; x=" + str(x) +
                       "; y=" + str(y) + "; v=" + str(v) + "; w=" + str(w))
        self.vel.linear.x = v
        self.vel.angular.z = w
        self.vel_pub.publish(self.vel)
        self.rate.sleep()

    def odom_callback(self, msg):
        """Cache the latest robot pose and the offset hand-point pose from odometry."""
        # get (x, y, theta) specification from odometry topic
        quarternion = [msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
                       msg.pose.pose.orientation.z, msg.pose.pose.orientation.w]
        (_, _, yaw) = tf.transformations.euler_from_quaternion(quarternion)
        self.pose.theta = yaw
        self.pose.x = msg.pose.pose.position.x
        self.pose.y = msg.pose.pose.position.y
        # hand point sits handpoint_offset ahead of the robot along its heading
        self.pose_hp.theta = yaw
        self.pose_hp.x = self.pose.x + self.handpoint_offset * cos(yaw)
        self.pose_hp.y = self.pose.y + self.handpoint_offset * sin(yaw)
        # reduce the number of saved messages to 1/10
        self.logging_counter += 1
        if self.logging_counter == 10:
            self.logging_counter = 0
            self.trajectory.append([self.pose.x, self.pose.y, self.pose.theta])
            self.trajectory_hp.append([self.pose_hp.x, self.pose_hp.y, self.pose_hp.theta])
            rospy.logdebug("odom: x=" + str(self.pose.x) +
                           "; y=" + str(self.pose.y) + "; theta=" + str(yaw))
            rospy.logdebug("odom_hp: x_hp=" + str(self.pose_hp.x) +
                           "; y_hp=" + str(self.pose_hp.y) + "; theta=" + str(yaw))

    def valid_sensing(self, point):
        """True if point lies within the trusted sensing range of the robot."""
        return abs(point - Vector2D(self.pose.x, self.pose.y)) < self.valid_sensing_range

    def obstacle_callback(self, msg):
        """Sample points on detected obstacles and vote their hex cells as obstacles.

        A cell is added to the hex map as an obstacle once its hit count
        exceeds ``obstacle_threshold`` and it has not been explored yet.
        """
        # sampling points on the obstacles
        points = list()
        for circle in msg.circles:
            center = Vector2D(circle.center.x, circle.center.y)
            points.append(center)
            r = circle.true_radius
            if r > 0:
                # sample the circumference every 0.3 rad
                for theta in np.arange(0, 2*pi, 0.3):
                    radius = Vector2D(r*cos(theta), r*sin(theta))
                    points.append(center + radius)
        for segment in msg.segments:
            line = Vector2D(segment.last_point.x - segment.first_point.x,
                            segment.last_point.y - segment.first_point.y)
            interval = 0
            # sample along the segment every 0.2 m
            while abs(line) - interval > 0:
                cx = segment.first_point.x + interval * cos(line.angle)
                cy = segment.first_point.y + interval * sin(line.angle)
                points.append(Vector2D(cx, cy))
                interval += 0.2
        for p in points:
            if self.valid_sensing(p):
                p_hex = self.hexmap.cat_to_cube(p)
                if p_hex not in self.obstacle_list_candidate:
                    self.obstacle_list_candidate[p_hex] = 1
                else:
                    self.obstacle_list_candidate[p_hex] += 1
        for candidate, times in self.obstacle_list_candidate.items():  # TODO: probabilistic update
            if times > self.obstacle_threshold and not self.hexmap.is_explored(candidate):
                self.hexmap.add_obstacle(candidate)

    def save_and_plot_trajectory(self, directory, debugging_mode=False):
        """Save the logged trajectories as CSV under directory and plot them.

        In debugging mode the hand-point and commanded trajectories are
        plotted alongside the robot trajectory.
        """
        if not isinstance(directory, str):
            raise TypeError("please specify the directory using string type")
        trajectory = np.array(self.trajectory)
        trajectory_hp = np.array(self.trajectory_hp)
        trajectory_cmd = np.array(self.trajectory_cmd)
        np.savetxt(directory + "/trajectory.csv", trajectory, fmt='%f', delimiter=',')
        np.savetxt(directory + "/trajectory_hp.csv", trajectory_hp, fmt='%f', delimiter=',')
        np.savetxt(directory + "/trajectory_cmd.csv", trajectory_cmd, fmt='%f', delimiter=',')
        if debugging_mode:
            plt.plot(trajectory[:, 0], trajectory[:, 1])
            plt.plot(trajectory_hp[:, 0], trajectory_hp[:, 1])
            plt.plot(trajectory_cmd[:, 0], trajectory_cmd[:, 1])
        else:
            plt.plot(trajectory[:, 0], trajectory[:, 1])

    @staticmethod
    def load_and_plot_trajectory(directory, debugging_mode=False):
        """Load previously saved trajectory CSVs from directory and plot them."""
        if not isinstance(directory, str):
            raise TypeError("please specify the directory using string type")
        if debugging_mode:
            trajectory = np.loadtxt(directory + "/trajectory.csv", delimiter=',')
            trajectory_hp = np.loadtxt(directory + "/trajectory_hp.csv", delimiter=',')
            trajectory_cmd = np.loadtxt(directory + "/trajectory_cmd.csv", delimiter=',')
            plt.plot(trajectory[:, 0], trajectory[:, 1])
            plt.plot(trajectory_hp[:, 0], trajectory_hp[:, 1])
            plt.plot(trajectory_cmd[:, 0], trajectory_cmd[:, 1])
        else:
            trajectory = np.loadtxt(directory + "/trajectory.csv", delimiter=',')
            plt.plot(trajectory[:, 0], trajectory[:, 1])
| StarcoderdataPython |
74539 | <filename>app.py<gh_stars>0
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
## Spanish verb -> Mapudungun conjugation data
# Load the verb table (columns: esp, mapu) and sort it for the dropdown.
verbos=pd.read_csv('verbs.csv',header=0,sep=',')
verbos = verbos.sort_values(['esp', 'mapu'], ascending=[1, 0])
# Parallel lists of Spanish infinitives and Mapudungun verb bases.
verbos_esp=[verbo for verbo in verbos.esp]
verbos_mapu=[verbo for verbo in verbos.mapu]
#verbos={esp:mapu for (esp,mapu) in zip(verbos_esp,verbos_mapu)}
# Personal pronouns by number (singular/dual/plural) and person.
# NOTE(review): appears unused in the visible code (the callback builds its
# own `conjugacion` dict) -- confirm before removing.
personas={'singular':{'primera':'iñche','segunda':'eymi','tercera':'fey'},'dual':{'primera':'iñcu','segunda':'eymu','tercera':'feyegu'},'plural':{'primera':'iñciñ','segunda':'eymvn','tercera':'feyegvn'}}
# Verb-final consonants that select the consonant-final ending paradigm.
consonantes=['n','w']
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# WSGI server handle for deployment (e.g. gunicorn).
server = app.server
# Two-column UI: verb selector plus radio groups for number, person,
# polarity, and tense, a submit button, and the translation output area.
# (UI strings are intentionally in Spanish.)
app.layout = html.Div([
    html.Label('Elige un verbo de la siguiente lista!',style={'color': 'black', 'fontSize': 18, 'font-weight': 'bold'}),
    # Verb selector over the Spanish infinitives, defaulting to the first.
    dcc.Dropdown(id='input-1-state',
        options=[{'label':key,'value':key} for key in verbos_esp],
        value=verbos_esp[0]
        ),
    html.Label('¿Cuántas personas realizan la acción?: elige el "número"', style={'color': 'black', 'fontSize': 18, 'font-weight': 'bold'}),
    # Grammatical number: singular / dual / plural.
    dcc.RadioItems(id='input-2-state',
        options=[
            {'label': 'singular', 'value': 'singular'},
            {'label': 'dual', 'value': 'dual'},
            {'label': 'plural', 'value': 'plural'}
        ],
        value='singular'
        ),
    html.Label('¿Quiénes participan?: elige la "persona"', style={'color': 'black', 'fontSize': 18, 'font-weight': 'bold'}),
    # Grammatical person: first / second / third.
    dcc.RadioItems(id='input-3-state',
        options=[
            {'label': 'primera', 'value': 'primera'},
            {'label': 'segunda', 'value': 'segunda'},
            {'label': 'tercera', 'value': 'tercera'}
        ],
        value='tercera'
        ),
    html.Label('¿Es afirmativo o negativo?: elige la "polaridad"', style={'color': 'black', 'fontSize': 18, 'font-weight': 'bold'}),
    # Polarity: affirmative / negative.
    dcc.RadioItems(id='input-4-state',
        options=[
            {'label': 'positiva', 'value': 'positiva'},
            {'label': 'negativa', 'value': 'negativa'}
        ],
        value='positiva'
        ),
    html.Label('¿Cuándo ocurrió la acción?: elige el "tiempo"', style={'color': 'black', 'fontSize': 18, 'font-weight': 'bold'}),
    # Tense: non-future / future.
    dcc.RadioItems(id='input-5-state',
        options=[
            {'label': 'no-futuro', 'value': 'no-futuro'},
            {'label': 'futuro', 'value': 'futuro'}
        ],
        value='no-futuro'
        ),
    # Triggers the conjugation callback; output is rendered below it.
    html.Button(id='submit-button', n_clicks=0, children='mapuzugun mew!'),
    html.Div(id='output-state'),
    html.H3('Si quieres cooperar con esta iniciativa escríbeme a <EMAIL>',style={'fontSize': 14})
], style={'columnCount': 2})
@app.callback(Output('output-state', 'children'),
              [Input('submit-button', 'n_clicks')],
              [State('input-1-state', 'value'),
               State('input-2-state', 'value'),
               State('input-3-state', 'value'),
               State('input-4-state', 'value'),
               State('input-5-state', 'value')])
def verb_to_mapudungun(n_clicks, verb_esp, numero, persona, polaridad, tiempo):
    """Conjugate a Spanish verb into mapudungun and render the result.

    Builds the conjugated form from: the verb base (looked up in the
    module-level verbos_esp -> verbos_mapu mapping), the tense/negation
    markers, and a person/number suffix whose shape depends on the final
    phoneme of the base. Also builds a morpheme-by-morpheme breakdown
    ("expansion") for display.

    :param n_clicks: button click counter (only triggers the callback).
    :param verb_esp: Spanish infinitive chosen from the dropdown.
    :param numero: 'singular' | 'dual' | 'plural'.
    :param persona: 'primera' | 'segunda' | 'tercera'.
    :param polaridad: 'positiva' | 'negativa'.
    :param tiempo: 'no-futuro' | 'futuro'.
    :return: dash ``html.P`` with the conjugation and its morphology.

    Refactor: the original encoded the person/number suffixes in 18 nearly
    identical if/elif branches; they now live in data tables keyed by the
    phoneme class of the base's last letter, producing identical output.
    """
    verbos = {esp: mapu for (esp, mapu) in zip(verbos_esp, verbos_mapu)}
    base = verbos[verb_esp]
    conjugacion = {'singular': {'primera': 'iñce (yo)', 'segunda': 'eymi (tú)', 'tercera': 'fey (ella/él)'},
                   'dual': {'primera': 'iñcu (nosotras/nosotros dos)', 'segunda': 'eymu (ustedes dos)', 'tercera': 'feyegu (ellas/ellos dos)'},
                   'plural': {'primera': 'iñciñ (nosotras/nosotros)', 'segunda': 'eymvn (ustedes)', 'tercera': 'feyegvn (ellas/ellos)'}}
    expansion = base + ' ' + '(base)'
    # Polarity and tense markers are appended before the person/number suffix.
    if polaridad == 'positiva':
        if tiempo == 'futuro':
            traduccion = conjugacion[numero][persona] + ' ' + base + 'a'
            expansion += ' + a (futuro)'
        else:
            traduccion = conjugacion[numero][persona] + ' ' + base
    elif polaridad == 'negativa':
        if tiempo == 'futuro':
            traduccion = conjugacion[numero][persona] + ' ' + base + 'la' + 'ya'
            expansion += ' + la (negación) + ya (futuro)'
        else:
            traduccion = conjugacion[numero][persona] + ' ' + base + 'la'
            expansion += ' + la (negación)'
    # Person/number endings, keyed by the phoneme class of the *base*'s last
    # letter (as in the original: the class is decided on base[-1], not on the
    # stem-plus-markers). An empty suffix means a zero morpheme (3SG, i-class).
    sufijos = {
        'consonante': {('singular', 'primera'): 'vn', ('singular', 'segunda'): 'imi', ('singular', 'tercera'): 'i',
                       ('dual', 'primera'): 'iyu', ('dual', 'segunda'): 'imu', ('dual', 'tercera'): 'ingu',
                       ('plural', 'primera'): 'iyiñ', ('plural', 'segunda'): 'imvn', ('plural', 'tercera'): 'igvn'},
        'i': {('singular', 'primera'): 'n', ('singular', 'segunda'): 'mi', ('singular', 'tercera'): '',
              ('dual', 'primera'): 'yu', ('dual', 'segunda'): 'mu', ('dual', 'tercera'): 'gu',
              ('plural', 'primera'): 'iñ', ('plural', 'segunda'): 'mvn', ('plural', 'tercera'): 'gvn'},
        'otro': {('singular', 'primera'): 'n', ('singular', 'segunda'): 'ymi', ('singular', 'tercera'): 'y',
                 ('dual', 'primera'): 'yu', ('dual', 'segunda'): 'ymu', ('dual', 'tercera'): 'ygu',
                 ('plural', 'primera'): 'yiñ', ('plural', 'segunda'): 'ymvn', ('plural', 'tercera'): 'ygvn'},
    }
    if base[-1] in consonantes:
        clase = 'consonante'
    elif base[-1] == 'i':
        clase = 'i'
    else:
        clase = 'otro'
    sufijo = sufijos[clase][(numero, persona)]
    traduccion += sufijo
    # Interlinear gloss such as (3SG) or (1DUAL); a zero suffix is shown as '0'.
    glosa = '(%s%s)' % ({'primera': '1', 'segunda': '2', 'tercera': '3'}[persona],
                        {'singular': 'SG', 'dual': 'DUAL', 'plural': 'PL'}[numero])
    expansion += ' + ' + (sufijo if sufijo else '0') + ' ' + glosa
    return (html.P(['En mapuzugun, el verbo "{}" conjugado en "{}" persona "{}" en polaridad "{}" y tiempo "{}" se dice'.format(verb_esp, persona, numero, polaridad, tiempo) + ' ' + '"' + traduccion + '"',
                    html.Br(),
                    html.Strong('Morfología :) ' + traduccion.replace(conjugacion[numero][persona] + ' ', '') + ' = ' + expansion, style={'color': '#8B008B', 'fontSize': 14})]))
if __name__ == '__main__':
    # Launch the Dash development server (debug mode: hot reload, tracebacks).
    app.run_server(debug=True)
| StarcoderdataPython |
114199 | <reponame>istrategylabs/django-flashbriefing<filename>tests/test_models.py
import datetime
import pytest
from flashbriefing.models import Feed, Item, ItemType
@pytest.mark.django_db
def test_item_type_audio():
    """An Item created with audio_content set classifies as ItemType.AUDIO."""
    feed = Feed.objects.create(title='FEED')
    # NOTE(review): datetime.utcnow() is naive (and deprecated in 3.12);
    # confirm the model accepts naive datetimes before switching to aware ones.
    item = Item.objects.create(
        feed=feed, title='ITEM', audio_content='/audio.mp3',
        published_date=datetime.datetime.utcnow())
    assert item.item_type == ItemType.AUDIO
@pytest.mark.django_db
def test_item_type_text():
    """An Item without audio_content classifies as ItemType.TEXT."""
    feed = Feed.objects.create(title='FEED')
    item = Item.objects.create(
        feed=feed, title='ITEM', published_date=datetime.datetime.utcnow())
    assert item.item_type == ItemType.TEXT
| StarcoderdataPython |
96595 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import cirq
def assert_consistent_parameters(val: Any):
    """Assert that ``val`` implements cirq's parameter protocols consistently.

    Checks that ``parameter_names`` and ``parameter_symbols`` agree, that a
    non-parameterized value reports no parameters, and that — when every
    parameter can be resolved to the number 0 — the resolved object reports
    no remaining parameters.
    """
    names = cirq.parameter_names(val)
    symbols = cirq.parameter_symbols(val)
    # The symbol set must be exactly the name set, just wrapped in sympy symbols.
    assert {symbol.name for symbol in symbols} == names
    if not cirq.is_parameterized(val):
        assert not names
        assert not symbols
    else:
        # Try to resolve all parameters with numbers. This may fail if some of
        # the parameters want different types. But if resolution succeeds, the
        # object should report that it has no more parameters to resolve.
        try:
            resolved = cirq.resolve_parameters(val, {name: 0 for name in names})
        except Exception:
            # Deliberately tolerated: some parameters reject numeric values.
            pass
        else:
            assert not cirq.parameter_names(resolved)
            assert not cirq.parameter_symbols(resolved)
| StarcoderdataPython |
# A reworked time class; str() yields the standard timestamp string moviepy uses for clipping videos
class Tick:
    """A simple hour/minute/second/millisecond timestamp.

    Parses SRT-style strings ("HH:MM:SS,mmm"); ``str()`` renders the
    "H:M:S.ms" form that moviepy accepts when clipping videos.
    """

    @property
    def hour(self):
        """Hour component."""
        return self.__hour

    @hour.setter
    def hour(self, value: int):
        self.__hour = value

    @property
    def min(self):
        """Minute component."""
        return self.__min

    @min.setter
    def min(self, value: int):
        self.__min = value

    @property
    def sec(self):
        """Second component."""
        return self.__sec

    @sec.setter
    def sec(self, value: int):
        self.__sec = value

    @property
    def min_sec(self):
        """Millisecond component."""
        return self.__min_sec

    @min_sec.setter
    def min_sec(self, value: int):
        self.__min_sec = value

    def __init__(self, tick_string: str = None):
        if tick_string is None:
            # No input: zero timestamp.
            self.__hour = self.__min = self.__sec = self.__min_sec = 0
        else:
            self.__parse(tick_string)

    def __parse(self, tick_string: str):
        """Split "HH:MM:SS,mmm" into the four integer components."""
        colon_parts = tick_string.split(':')
        second_parts = colon_parts[2].split(',')
        self.__hour = int(colon_parts[0])
        self.__min = int(colon_parts[1])
        self.__sec = int(second_parts[0])
        self.__min_sec = int(second_parts[1])

    def __format_millis(self, millis: int):
        """Render the millisecond field, trimming 3-digit values to two digits."""
        return str(millis // 10) if millis > 99 else str(millis)

    def __str__(self):
        return '{}:{}:{}.{}'.format(self.__hour, self.__min, self.__sec,
                                    self.__format_millis(self.__min_sec))

    def __add__(self, other):
        # Component-wise sum; fields are added independently with no carry
        # between milliseconds/seconds/minutes/hours.
        total = Tick()
        for field in ('hour', 'min', 'sec', 'min_sec'):
            setattr(total, field, getattr(self, field) + getattr(other, field))
        return total
| StarcoderdataPython |
4809673 | """
Censor 2 ----> CENSOR 1
Designed to be run by the evaluator.
TCP Censor that synchronizes on first SYN only, works 100% of the time, sends 5 RSTs to client.
"""
import layers.packet
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import IP, TCP
from censors.censor import Censor
class Censor2(Censor):
    """TCP censor that synchronizes its TCB on the first SYN only.

    Tracks a single TCP flow: the expected sequence number (``self.tcb``)
    is set from each SYN and advanced by the payload length of every
    in-order segment. When an in-order segment contains a forbidden
    keyword, the censor reacts by sending 5 RSTs to the client.
    """
    def __init__(self, environment_id, forbidden, log_dir, log_level, port, queue_num):
        Censor.__init__(self, environment_id, log_dir, log_level, port, queue_num)
        # Keywords whose presence in an in-order payload triggers censorship.
        self.forbidden = forbidden
        # Expected next sequence number of the tracked flow (0 = not synced).
        self.tcb = 0
        # Not used by this censor; presumably kept for interface parity with
        # other censors — TODO confirm.
        self.drop_all_from = None

    def check_censor(self, packet):
        """
        Check if the censor should run against this packet. Returns true or false.
        """
        try:
            self.logger.debug("Inbound packet to censor: %s", layers.packet.Packet._str_packet(packet))
            # Only censor TCP packets for now
            if "TCP" not in packet:
                return False
            # A SYN (re)synchronizes the TCB and is never itself censored.
            if packet["TCP"].sprintf('%TCP.flags%') == "S":
                self.tcb = packet["TCP"].seq + 1
                self.logger.debug("Synchronizing TCB on packet: %s", layers.packet.Packet._str_packet(packet))
                return False
            # Only in-order segments are inspected; anything else is ignored.
            if packet["TCP"].seq == self.tcb:
                self.tcb += len(self.get_payload(packet))
            else:
                self.logger.debug("Ignoring packet: %s", layers.packet.Packet._str_packet(packet))
                return False
            for keyword in self.forbidden:
                if keyword in self.get_payload(packet):
                    self.logger.debug("Packet triggered censor: %s", layers.packet.Packet._str_packet(packet))
                    return True
            return False
        except Exception:
            # A malformed packet must not crash the censor; treat it as clean.
            self.logger.exception("Censor 2 exception caught.")
            return False

    def censor(self, scapy_packet):
        """
        Send 5 resets to the client.
        """
        # Forge a RST from the server back to the client: src/dst and
        # sport/dport swapped, ack advanced past the offending payload.
        rst = IP(src=scapy_packet[IP].dst, dst=scapy_packet[IP].src)/TCP(dport=scapy_packet[TCP].sport, sport=scapy_packet[TCP].dport, ack=scapy_packet[TCP].seq+len(str(scapy_packet[TCP].payload)), seq=scapy_packet[TCP].ack, flags="R")
        for i in range(0, 5):
            self.mysend(rst)
| StarcoderdataPython |
183598 | <reponame>UCSD-E4E/vmman-mangrove
#!/usr/bin/python3
import docker
import argparse
import configparser
import json
import time
from tabulate import tabulate
import sys
class DockerAutomation():
    """Static helpers to start/stop/list the Docker containers named in the
    module-level ``config["ids"]`` mapping, reporting their ngrok VNC URL."""

    @staticmethod
    def _tunnel_url(container):
        """Return the public URL of the first ngrok tunnel inside *container*.

        BUG FIX: the original exec'd ``curl ... | jq -r .tunnels[0].public_url``
        — but ``exec_run`` does not invoke a shell, so the pipe never ran, and
        even if it had, jq's raw-URL output would not have been valid input for
        the subsequent ``json.loads(...)["tunnels"][0]["public_url"]`` lookup.
        Fetch the raw tunnel JSON and parse it in Python instead.
        """
        output = container.exec_run("curl -s localhost:4040/api/tunnels")
        return json.loads(output.output)["tunnels"][0]["public_url"]

    @staticmethod
    def start_container(container_ind, client):
        """Start (or reuse) the container mapped to *container_ind* and print
        the ngrok URL for VNC access."""
        print(config["ids"][container_ind])
        container = client.containers.get(config["ids"][container_ind])
        if container.attrs["State"]["Running"]:
            print("Docker container {} with ID {} already running, getting VNC URL".format(container_ind, container.attrs["Config"]["Hostname"]))
            url = DockerAutomation._tunnel_url(container)
            print("URL for VNC access is: {}".format(url))
        else:
            print("Starting Docker container {} with ID {}, getting VNC URL".format(container_ind, container.attrs["Config"]["Hostname"]))
            container.start()
            # NOTE(review): stream=True returns a lazy generator; detach=True
            # would be the more reliable way to keep ngrok running — confirm.
            container.exec_run("ngrok http 80", stdout=False, stream=True)
            time.sleep(5)  # give ngrok a moment to establish the tunnel
            url = DockerAutomation._tunnel_url(container)
            print("NOTE: This url will only last for the current session, do not save this url or use it after you have stopped your container")
            print("URL for VNC access is: {}".format(url))

    @staticmethod
    def stop_container(container_ind, client):
        """Stop the container mapped to *container_ind* if it is running."""
        container = client.containers.get(config["ids"][container_ind])
        if container.attrs["State"]["Running"]:
            print("Stopping container {} with ID {}".format(container_ind, container.attrs["Config"]["Hostname"]))
            container.stop()
        else:
            print("Docker container {} with ID {} is not running".format(container_ind, container.attrs["Config"]["Hostname"]))

    @staticmethod
    def list_container(client):
        """Print a table of every configured container: #, id, name, status, image."""
        table_list = [["#", "ID", "Name", "Status", "Image"]]
        for key in config["ids"]:  # renamed from `id` to avoid shadowing the builtin
            container = client.containers.get(config["ids"][key])
            row = [key, container.short_id, container.name, container.status, container.image.tags[0]]
            table_list.append(row)
        print(tabulate(table_list))
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read("/home/container_ids")
parser = argparse.ArgumentParser(description='Start given docker containers, and give url for novnc.')
parser.add_argument('-start',
help='Starts the given docker container and gives url for novnc', nargs=1, default= None)
parser.add_argument('-stop',
help='Stops the given docker container', nargs=1, default= None)
parser.add_argument('-list',
help='Lists all docker VMs and their current state', action='store_true')
args = parser.parse_args()
client = docker.from_env()
if len(sys.argv)==1:
# display help message when no args are passed.
print("Please enter a valid id for a docker VM")
parser.print_help()
sys.exit(1)
if args.start != None:
assert args.start[0] in config["ids"], "ID not in container config file"
DockerAutomation.start_container(args.start[0], client)
elif args.stop != None:
assert args.stop[0] in config["ids"], "ID not in container config file"
DockerAutomation.stop_container(args.stop[0], client)
elif args.list == True:
DockerAutomation.list_container(client)
| StarcoderdataPython |
3218024 | <reponame>aawarner/BLT-ASIC<gh_stars>1-10
"""Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import subprocess
import ctypes
import sys
import ssl
import csv
import os
import logging
from json import load, dump, dumps, loads
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from time import time, sleep, ctime
from collections import Counter as ct
from ordered_set import OrderedSet
from re import search, findall
from requests import request as rq
from requests import exceptions as ex
from multiprocessing.dummy import Pool as ThreadPool
from flask import Flask, render_template, request, redirect, session, send_file, flash
import cisco_info
def grab_oauth_ccw_order(username, password):
    """Obtain a CCW Order API OAuth2 token via the password grant.

    Reads the client id/secret from ccw_order_cred.json, exchanges them
    together with the user's credentials for an access token, creates the
    user's profile directory tree on first login, and caches the token with
    a timestamp under profiles/<username>/ccw_order_oauth.json.

    :return: the access token string, or None when the credential file is
        missing or the SSO response contains no token (bad credentials).
    """
    try:
        with open("ccw_order_cred.json", "r") as f:
            password_creds = load(f)
        cred_tuple = (
            password_creds["client_id"],
            password_creds["client_secret"],
            username,
            password,
        )
        url = "https://cloudsso.cisco.com/as/token.oauth2"
        payload = (
            "client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s"
            % cred_tuple
        )
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Accept": "application/json",
            "cache-control": "no-cache",
        }
    except FileNotFoundError:
        print("ccw_order_cred.json is missing...correct this problem")
        return None
    try:
        response = rq("POST", url, data=payload, headers=headers)
        ccwo_access_token = response.json()["access_token"]
        # First login: create the per-user profile directory tree.
        if "profiles" not in os.listdir():
            os.mkdir("profiles")
        if username not in os.listdir("profiles"):
            os.mkdir("profiles//%s" % username)
            os.mkdir("profiles//%s//reports" % username)
        # Cache the token alongside its issue timestamp for later age checks.
        with open("profiles//%s//ccw_order_oauth.json" % username, "w") as f:
            dump({"ts": int(time()), "access_token": ccwo_access_token}, f)
        return ccwo_access_token
    except KeyError:
        # No "access_token" key in the SSO response — treat as a failed login.
        return None
def grab_oauth_ccwr(username):
    """Obtain a CCW-R API OAuth2 token via the client-credentials grant.

    Reads the client id/secret from ccwr_client.json, requests a token from
    Cisco SSO, caches it with a timestamp under the user's profile directory,
    and returns the access token.

    SECURITY FIX: the original printed the client credentials, the request
    headers and the bearer token to stdout; those debug prints leaked
    secrets into console logs and have been removed.
    """
    url = "https://cloudsso.cisco.com/as/token.oauth2"
    with open("ccwr_client.json", "r") as f:
        client_creds = load(f)
    payload = "client_id=%s&client_secret=%s&grant_type=client_credentials" % (
        client_creds["client_id"],
        client_creds["client_secret"],
    )
    # NOTE(review): the fixed Content-Length/Host headers mirror the original
    # request; requests normally computes these itself — confirm they are needed.
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept": "*/*",
        "Cache-Control": "no-cache",
        "Host": "cloudsso.cisco.com",
        "Accept-Encoding": "gzip, deflate",
        "Content-Length": "103",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    response = rq("POST", url, data=payload, headers=headers)
    ccwr_access_token = response.json()["access_token"]
    # Cache the token alongside its issue timestamp for later age checks.
    with open("profiles//%s//ccwr_oauth.json" % username, "w") as f:
        dump({"ts": int(time()), "access_token": ccwr_access_token}, f)
    return ccwr_access_token
def create_3K_lic_rpt(ccwr_full_list, output_file_list, smart_account):
    """Write a CSV report of Catalyst 3850/3650 ("3K") licensing.

    Parses rows exported from CCW-R (list of lists; row[0] is the SKU,
    row[3] the parent serial), derives the CSSM license SKUs implied by the
    hardware part numbers, counts devices and licenses, and writes the same
    report to every path in *output_file_list*.

    :param ccwr_full_list: CCW-R rows; the header row is expected in rows 0-2.
    :param output_file_list: file paths the CSV report is written to.
    :param smart_account: Smart Account name printed in the report banner.
    """
    # Find Header Row based on Serial keyword.
    hr = lambda s: search(".*[Ss][Ee][Rr][Ii][Aa].*", s)
    # Find any SKU containing 3K nomenclature.
    is_3x50 = lambda s: search("3[68]50", s)
    # Find any top level traditional hardware SKU that also contains license level.
    non_C1_3x50 = lambda s: search("WS-C3[68]50.*-[SEL]", s)
    # Find individual 3K on-box license SKUs.
    lic_C1_3x50 = lambda s: search("C3[68]50-[24][48]-[SL]-[ES]", s)
    # Find any C1 SKU that is less than 24 ports. These have license level
    # as part of the top-level part.
    non_24_48_port_C1 = lambda s: search("C1-WS.*-12.*-[ES]", s)
    # print(dumps(ccwr_full_list[0:5],indent=4))
    # Find CCW-R header row to place into a list
    header = [i for i in ccwr_full_list[0:3] if hr(str(i))]
    # Parse CCW-R lines with any 3x50 SKUs into a list of rows
    dev_3x50 = [i for i in ccwr_full_list if is_3x50(i[0])]
    # Parse CCW-R lines for traditional top-level SKU rows
    non_C1_dev = [i for i in dev_3x50 if non_C1_3x50(i[0])]
    #need to create a list of parent serials for non Cisco ONE devices
    non_C1_dev_parent_serials =[i[3] for i in non_C1_dev]
    # print(dumps(non_C1_dev,indent=4))
    ###Parse CCW-R lines for individual on-box SW upgrade licensing rows
    upg_lics = [i for i in dev_3x50 if lic_C1_3x50(i[0])]
    #need to eliminate duplicate license for newer orders that contain both SW SKU and reflect entitlement at top level.
    upg_lics = [i for i in upg_lics if i[3] not in non_C1_dev_parent_serials]
    # print(dumps(upg_lics,indent=4))
    # Parse C1 SKUs for 3Ks less than 12 ports b/c SW licenses appear in top-level
    non_24_48_port = [i for i in dev_3x50 if non_24_48_port_C1(i[0])]
    # print(dumps(non_24_48_port,indent=4))
    # Concatenate all parsed lists
    parsed_ccwr_rows_list = header + non_C1_dev + upg_lics + non_24_48_port
    # print(dumps(parsed_ccwr_rows_list,indent=4))
    # Perform count of elements in concatenated list and place in dict
    devdict = dict(ct([i[0] for i in parsed_ccwr_rows_list][1:]))
    # Extract top-level SKUs and convert to list of actual licensing SKU that appear in CSSM.
    # The slices below rebuild license SKUs from the fixed positions of the
    # model/port fields inside WS-C3xxx / C1-WS part numbers.
    C3x50 = [
        i[0][3:11] + "-" + i[0][-1]
        for i in parsed_ccwr_rows_list
        if i[0].startswith("WS-C3")
    ]
    C3x50 = C3x50 + [
        i[0][:12].replace(i[0][:5], "C" + "-" + i[0][-1])
        for i in parsed_ccwr_rows_list
        if i[0].startswith("C1-WS")
    ]
    # Map the trailing license-level letter (E/S/L) to its CSSM suffix.
    C3x50_E = [i.replace(i[-2:], "-S-E") for i in C3x50 if i.endswith("E")]
    C3x50_S = [i.replace(i[-2:], "-L-S") for i in C3x50 if i.endswith("S")]
    C3x50_L = [i.replace(i[-2:], "-L-L") for i in C3x50 if i.endswith("L")]
    # Extract top-level upgrade license SKUs and convert to list
    upg_lics_indiv = [i[0] for i in upg_lics]
    # Concatenate license lists
    total_upg_lics = C3x50_E + C3x50_S + C3x50_L + upg_lics_indiv
    # Perform count of elements in concatenated list and place in dict
    licdict = dict(ct(total_upg_lics))
    # Create output file
    for i in output_file_list:
        with open(i, "w") as f:
            f.write("Top-Level Device OR License,-----,Count\n")
            for i in devdict:
                f.write(i + ",-----," + str(devdict[i]) + "\n")
            f.write(4 * "\n")
            f.write(
                "LICENSES to be deposited in %s\n\n" % smart_account
                + "License,-----,Count\n"
            )
            for i in licdict:
                f.write(i + ",-----," + str(licdict[i]) + "\n")
            f.write(4 * "\n")
            f.write("Full License/Device Breakout from CCW-R\n\n")
            for i in parsed_ccwr_rows_list:
                for j in i:
                    f.write(j)
                    f.write(",")
                f.write("\n")
def ccwo_order_status(username, so_num):
    """Query CCW for the status of a single order.

    Loads the cached CCW Order bearer token for *username* and calls the
    checkOrderStatus endpoint. Web-order numbers (starting with "8") are
    sent as a DocumentReference; anything else as a SalesOrderReference.

    :return: ``{so_num: status_string}`` parsed from the response.

    NOTE(review): unlike the search helpers, this does not refresh a stale
    token, and a failed token load leaves ``ccwo_access_token`` unbound —
    confirm callers handle the resulting exception.
    """
    try:
        with open("profiles//%s//ccw_order_oauth.json" % username, "r") as f:
            jl = load(f)
        ccwo_access_token = jl["access_token"]
    except Exception as e:
        print(e)
    url = "https://api.cisco.com/commerce/ORDER/v2/sync/checkOrderStatus"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Request-ID": "Type: Integer",
        "Accept-Language": "en_us",
        "Authorization": "Bearer %s" % ccwo_access_token,
        "cache-control": "no-cache",
        "Host": "api.cisco.com",
        "Accept-Encoding": "gzip, deflate",
        "Connection": "keep-alive",
    }
    # Web orders (8xxxxxxx) are referenced via DocumentReference.
    if so_num.startswith("8"):
        payload = dumps(
            {
                "GetPurchaseOrder": {
                    "value": {
                        "DataArea": {
                            "PurchaseOrder": [
                                {
                                    "PurchaseOrderHeader": {
                                        "ID": {"value": ""},
                                        "DocumentReference": [
                                            {"ID": {"value": so_num}}
                                        ],
                                        "SalesOrderReference": [{"ID": {"value": ""}}],
                                        "Description": [
                                            {"value": "Yes", "typeCode": "details"}
                                        ],
                                    }
                                }
                            ]
                        },
                        "ApplicationArea": {
                            "CreationDateTime": "datetime",
                            "BODID": {"value": "BoDID-test", "schemeVersionID": "V1"},
                        },
                    }
                }
            }
        )
    else:
        # Everything else is referenced via SalesOrderReference.
        payload = dumps(
            {
                "GetPurchaseOrder": {
                    "value": {
                        "DataArea": {
                            "PurchaseOrder": [
                                {
                                    "PurchaseOrderHeader": {
                                        "ID": {"value": ""},
                                        "DocumentReference": [{"ID": {"value": ""}}],
                                        "SalesOrderReference": [
                                            {"ID": {"value": so_num}}
                                        ],
                                        "Description": [
                                            {"value": "Yes", "typeCode": "details"}
                                        ],
                                    }
                                }
                            ]
                        },
                        "ApplicationArea": {
                            "CreationDateTime": "datetime",
                            "BODID": {"value": "BoDID-test", "schemeVersionID": "V1"},
                        },
                    }
                }
            }
        )
    response = rq("POST", url, data=payload, headers=headers)
    result = response.json()
    # Dig the human-readable status string out of the deeply nested response.
    result = \
        result["ShowPurchaseOrder"]["value"]["DataArea"]["PurchaseOrder"][0]["PurchaseOrderHeader"]["Extension"][4]["Name"][
            0]["value"]
    result = {so_num: result}
    return result
def ccwr_search_request(username, searchType="serialNumbers", search_list=[]):
    """Search CCW-R line items (POST /search/lines) with local rate limiting.

    Loads (and refreshes when older than 3500 s) the cached CCW-R token,
    then pages through results 1000 records at a time. A shared
    counter_dict.json tracks pages consumed in the current hour; once the
    counter exceeds 298 pages the search is deferred instead: the
    search_list is dumped into jobs/<username>.<ts> and the string 'over'
    is returned in place of a response dict.

    :return: tuple of (response dict | 'over' | None, access token).

    NOTE(review): ``search_list=[]`` is a mutable default (never mutated
    here, but fragile), and after the 403 retry ``ccwr_response`` is never
    assigned, so the paging code below raises and (None, token) is
    returned — confirm both are intended.
    """
    try:
        with open("profiles//%s//ccwr_oauth.json" % username, "r") as f:
            jl = load(f)
        print("ccwr oauth2 token age: " + str(int(time()) - jl["ts"]) + " seconds.")
        if (time() - jl["ts"]) > 3500:
            ccwr_access_token = grab_oauth_ccwr(username)
        else:
            ccwr_access_token = jl["access_token"]
    except:
        # No cached token (or unreadable cache): fetch a fresh one.
        ccwr_access_token = grab_oauth_ccwr(username)
    url = "https://api.cisco.com/ccw/renewals/api/v1.0/search/lines"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Request-ID": "Type: Integer",
        "Accept-Language": "en_us",
        "Authorization": "Bearer %s" % ccwr_access_token,
        "Cache-Control": "no-cache",
        "Host": "api.cisco.com",
        "Accept-Encoding": "gzip, deflate",
        "Content-Length": "113",
        "Connection": "keep-alive",
        "cache-control": "no-cache",
    }
    offset = 0
    payload = dumps(
        {
            searchType: search_list,
            "limit": 1000,
            "offset": offset,
            "configurations": True,
        }
    )
    # Hourly paging budget: reset the counter when the wall-clock hour or
    # date recorded in counter_dict.json has changed.
    with open('counter_dict.json','r') as f:
        counter_dict=load(f)
    current_hr=int(ctime().split()[3].split(':')[0])
    current_dt=ctime().split()[1:3]
    if (current_hr != counter_dict['current_hr']) or (current_dt != counter_dict['current_dt']):
        with open('counter_dict.json','w') as f:
            counter_dict['current_hr']=current_hr
            counter_dict['page_counter']=0
            counter_dict['over']=False
            counter_dict['current_dt']=current_dt
            dump(counter_dict,f)
    if counter_dict['over']==False :
        response = rq("POST", url, data=payload, headers=headers)
        if response.status_code == 403:
            # Token rejected: refresh and retry once.
            ccwr_access_token = grab_oauth_ccwr()
            headers["Authorization"] = "Bearer %s" % ccwr_access_token
            response = rq("POST", url, data=payload, headers=headers)
        else:
            ccwr_response = response.json()
            try:
                # Charge this search's page count against the hourly budget.
                counter_dict['page_counter']=counter_dict['page_counter']+(int(ccwr_response["totalRecords"]/1000)+1)
            except:
                pass
            with open('counter_dict.json','w') as f:
                if counter_dict['page_counter'] > 298:
                    # Budget exhausted: persist the search as a deferred job.
                    counter_dict['over']=True
                    ts=str(int(time()))
                    with open('jobs/%s.%s'%(username,ts),'w') as job:
                        dump(search_list,job)
                    print('\n\n\nDo some scheduling, the paging counter just went over!\n\n\n')
                    dump(counter_dict,f)
                    ccwr_response='over'
                    return ccwr_response, ccwr_access_token
                dump(counter_dict,f)
    if counter_dict['over']==True:
        # Budget already exhausted before this call: defer and bail out.
        ccwr_response='over'
        ts=str(int(time()))
        if search_list != []:
            with open('jobs/%s.%s'%(username,ts),'w') as job:
                dump(search_list,job)
        return ccwr_response, ccwr_access_token
    else:
        response=None
    try:
        # Fetch any remaining pages (1000 records per request) and merge them.
        if int(ccwr_response["totalRecords"]) > 1000:
            additional_requests = int(int(ccwr_response["totalRecords"]) / 1000)
            for i in range(additional_requests):
                offset += 1000
                payload = dumps(
                    {
                        searchType: search_list,
                        "limit": 1000,
                        "offset": offset,
                        "configurations": True,
                    }
                )
                addtl_response = rq("POST", url, data=payload, headers=headers)
                addtl_response = addtl_response.json()
                ccwr_response["instances"] = (
                    ccwr_response["instances"] + addtl_response["instances"]
                )
        return ccwr_response, ccwr_access_token
    except:
        return None, ccwr_access_token
def ccwr_create_table(username, ccwr_response, sa_list):
    """Flatten a CCW-R search response into CSV rows and write them to disk.

    Builds one 8-column row per instance (product number/description,
    serials, instance and SO numbers, customer and Smart Account names),
    writes the table to the user's current-output CSV and a timestamped
    report copy, and returns the rows. On any parse failure the current
    output file is overwritten with "No Data" and an empty list returned.
    """
    # with open('contract_response.json','r') as f:
    # jsl=load(f)
    ccwr_full_list = [
        [
            "Product Number",
            "Product Description",
            "Serial Number",
            "Parent Serial Number",
            "Instance Number",
            "Sales Order Number",
            "End Customer Name",
            "Smart Account Name",
        ]
    ]
    ccwr_out_ts=str(time())
    raw_ccwr_output_csv = "profiles/%s/current_raw_CCWR_output.csv" % username
    raw_ccwr_output_csv_report = "profiles/%s/reports/CCWR_raw_output-%s.csv" % (username,ccwr_out_ts)
    ccwr_out_filelist=[raw_ccwr_output_csv,raw_ccwr_output_csv_report]
    # print(dumps(ccwr_response, sort_keys=True, indent=4))
    try:
        for i in ccwr_response["instances"]:
            l = []
            # Each field is fetched defensively; a missing key becomes "".
            try:
                l.append(i["product"]["number"])
            except:
                l.append("")
            try:
                # Commas are replaced so the value survives the CSV output.
                l.append(i["product"]["description"].lstrip("^").replace(",", ";"))
            except:
                l.append("")
            try:
                l.append(i["serialNumber"])
            except:
                l.append("")
            try:
                l.append(i["parentSerialNumber"])
            except:
                l.append("")
            try:
                l.append(i["instanceNumber"])
            except:
                l.append("")
            try:
                l.append(i["salesOrderNumber"])
            except:
                l.append("")
            try:
                l.append(i["endCustomer"]["name"])
            except:
                l.append("")
            try:
                # Match the row's SO number (l[5]) against the SO->Smart Account
                # mapping dicts in sa_list and insert the account name.
                # NOTE(review): the inner `for i in sa_list` shadows the outer
                # instance variable `i`, and the else-branch appends padding on
                # every non-matching key; both are masked by the l[:8] trim
                # below — confirm this is the intended behavior.
                if not sa_list:
                    l.append("")
                else:
                    for i in sa_list:
                        for k, v in i.items():
                            if k == l[5]:
                                l.insert(7, v)
                            else:
                                l.append("")
            except:
                l.append("")
            # Trim any padding so every row has exactly 8 columns.
            l = l[:8]
            ccwr_full_list.append(l)
        for i in ccwr_out_filelist:
            with open(i, "w") as f:
                for i in ccwr_full_list:
                    for j in i:
                        f.write(j + ",")
                    f.write("\n")
        return ccwr_full_list
    except:
        # Unexpected response shape: record "No Data" and return empty.
        with open(raw_ccwr_output_csv, "w") as f:
            f.write("No Data")
        ccwr_temp_list = []
        return ccwr_temp_list
def file_SN_search(inventory_file):
    """Extract serial numbers from a CSV export by locating its header row.

    Heuristics (unchanged from the original):
      * the table width is the widest comma-split row among the first 5 lines;
      * the header row is the first of those lines that both matches that
        width and mentions "seria" or "sn" (case-insensitive);
      * every header column mentioning serial/SN contributes its values from
        all rows below the header.

    Fixes over the original: the one-off search lambdas are a named helper
    (PEP 8 E731) and the stdout debug prints were removed.

    :param inventory_file: path of the CSV file to scan.
    :return: list of serial-number strings (column order, duplicates kept).
    """
    def _mentions_serial(text):
        """True when *text* contains 'seria' or 'sn' in any letter case."""
        return search(".*[Ss][Ee][Rr][Ii][Aa].*", text) or search(".*[Ss][Nn].*", text)

    with open(inventory_file) as f:
        rows = f.readlines()
    # Assume the table width is the maximum comma-split width of the first 5 rows.
    col_width = max(len(r.split(",")) for r in rows[0:5])
    # Candidate header rows: any of the first 5 rows mentioning serial/SN.
    candidate_indices = [rows.index(r) for r in rows[0:5] if _mentions_serial(r)]
    # Keep only the first candidate whose width matches the table width.
    # NOTE(review): raises IndexError when no header qualifies — callers wrap
    # this call in try/except.
    sn_row_index = [i for i in candidate_indices if len(rows[i].split(",")) == col_width][0]
    header = rows[sn_row_index].split(",")
    # Column indices whose header cell mentions serial/SN.
    sn_col_idx_list = [header.index(h) for h in header if _mentions_serial(h)]
    # Collect those columns from every data row below the header.
    return [r.split(",")[j] for j in sn_col_idx_list for r in rows[(sn_row_index + 1):]]
def file_SN_search_reg(inventory_file):
    """Regex fallback: pull every token shaped like a Cisco serial number
    (three uppercase letters followed by eight alphanumerics) from the file."""
    with open(inventory_file) as handle:
        contents = handle.read()
    return findall(r"[A-Z]{3}[A-Za-z0-9]{8}", contents)
def ccwo_search_request(username, so_list=[]):
    """Fetch serial numbers for each sales order via CCW getSerialNumbers.

    Pages through the API response for every SO number, collecting serial
    numbers and per-line [SO, line, part, qty] rows; orders that raise are
    recorded in an error list and skipped.

    :param so_list: sales-order numbers to query.
    :return: (serial-number list, failed-SO list, sorted line matrix
        prefixed with a header row).

    NOTE(review): a missing token cache file leaves ``ccwo_access_token``
    unbound (the except only prints), so building the headers would raise
    NameError — confirm callers always log in first.
    """
    try:
        with open("profiles//%s//ccw_order_oauth.json" % username, "r") as f:
            jl = load(f)
        print(
            "ccw order oauth2 token age: " + str(int(time()) - jl["ts"]) + " seconds."
        )
        ccwo_access_token = jl["access_token"]
    except FileNotFoundError as e:
        print(e)
        pass
    url = "https://api.cisco.com/commerce/ORDER/sync/getSerialNumbers"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Request-ID": "Type: Integer",
        "Accept-Language": "en_us",
        "Authorization": "Bearer %s" % ccwo_access_token,
        "cache-control": "no-cache",
        "Host": "api.cisco.com",
        "Accept-Encoding": "gzip, deflate",
        "Connection": "keep-alive",
    }
    ccwo_sn_list = []
    ccwo_error_so_list = []
    so_data_matrix_header = [
        ["Sales Order Number", "Line Number", "Part Number", "Quantity"]
    ]
    so_data_matrix = []
    for so_num in so_list:
        try:
            pageNumber = 1
            payload = dumps(
                {
                    "serialNumberRequest": {
                        "salesOrderNumber": so_num,
                        "pageNumber": str(pageNumber),
                    }
                }
            )
            response = rq("POST", url, data=payload, headers=headers)
            rj = response.json()
            # One [SO, line, part, qty] row per line item on page 1.
            so_data_matrix = so_data_matrix + [
                [so_num, i["lineNumber"], i["partNumber"], i["quantity"]]
                for i in rj["serialNumberResponse"]["serialDetails"]["lines"]
            ]
            # print(dumps(rj, sort_keys=True, indent=4))
            total_pages = int(
                rj["serialNumberResponse"]["responseHeader"]["totalPages"]
            )
            if total_pages > 1:
                # Fetch the remaining pages and merge their lines into rj.
                pageNumber = 2
                for i in range(total_pages - 1):
                    payload = dumps(
                        {
                            "serialNumberRequest": {
                                "salesOrderNumber": so_num,
                                "pageNumber": str(pageNumber),
                            }
                        }
                    )
                    addtl_page = rq("POST", url, data=payload, headers=headers)
                    addtl_page = addtl_page.json()
                    so_data_matrix = so_data_matrix + [
                        [so_num, i["lineNumber"], i["partNumber"], i["quantity"]]
                        for i in addtl_page["serialNumberResponse"]["serialDetails"][
                            "lines"
                        ]
                    ]
                    pageNumber += 1
                    # print(addtl_page)
                    rj["serialNumberResponse"]["serialDetails"]["lines"] = (
                        rj["serialNumberResponse"]["serialDetails"]["lines"]
                        + addtl_page["serialNumberResponse"]["serialDetails"]["lines"]
                    )
            # Pull every serial number out of the (merged) line items.
            sn_parse1 = [
                i for i in rj["serialNumberResponse"]["serialDetails"]["lines"]
            ]
            sn_parse2 = [i["serialNumbers"] for i in sn_parse1]
            # print(dumps(sn_parse2, indent=4))
            print(
                rj["serialNumberResponse"]["responseHeader"]["totalPages"]
                + " is the total number of pages in the CCWO response for SO#%s"
                % so_num
            )
            for i in sn_parse2:
                for j in i:
                    ccwo_sn_list.append(str(j["serialNumber"]))
        except Exception:
            # Any failure for this SO is recorded and the loop continues.
            print(str(so_num) + " SO# could not be retrieved")
            ###logger.debug(so_num + " SO# could not be retrieved")
            ccwo_error_so_list.append(so_num)
            # print(str(so_num)+' cannot be retrieved from ccw.')
            continue
    print(str(len(ccwo_sn_list)) + " is the length of the ccwo sn list.")
    # print(ccwo_sn_list)
    ##logger.debug(ccwo_sn_list)
    so_data_matrix.sort()
    so_data_matrix = so_data_matrix_header + so_data_matrix
    return ccwo_sn_list, ccwo_error_so_list, so_data_matrix
app = Flask(__name__)
# SECURITY: hard-coded session signing key; as its value says, replace it
# (e.g. load from the environment) before any persistent deployment.
app.secret_key = "CHANGE_THIS_KEY_IF_RUNNING_PERSISTENTLY"
# Sessions expire after 10 minutes of inactivity.
app.permanent_session_lifetime = timedelta(minutes=10)
# Only CSV uploads are accepted for serial-number files.
ALLOWED_EXTENSIONS = {"csv"}
# TLS context kept for reference; currently disabled.
#CONTEXT = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#CONTEXT.load_cert_chain("blt.cisco.com-60405.cer", "blt.key")
@app.route("/login", methods=["POST"])
def do_admin_login():
"""Login function"""
try:
username = request.form["username"]
password = request.form["password"]
sadomain = request.form["SADomain"]
session["username"] = username
session["sadomain"] = sadomain
ccwo_access_token = grab_oauth_ccw_order(username, password)
if ccwo_access_token:
session["logged_in"] = True
else:
flash("Invalid Credentials...Try again")
return render_template("login.html")
return redirect("/")
except ex.ConnectionError:
username = request.form["username"]
sadomain = request.form["SADomain"]
session["username"] = username
session["sadomain"] = sadomain
session["logged_in"] = True
if username not in os.listdir("profiles"):
os.mkdir("profiles//%s" % username)
os.mkdir("profiles//%s//reports" % username)
with open("profiles//%s//ccwr_oauth.json" % username, "w+") as f:
f.write("Working Offline")
return redirect("/")
@app.route("/logout")
def logout():
"""Function to log out of application"""
session["logged_in"] = False
return redirect("/")
"""Need to be able to clear the cookie/session data for each data source."""
@app.route("/clearcsv")
def clearcsv():
"""Function to clear SN file names"""
try:
session.pop("csv_sn_file_name_list")
return redirect("/serialnumber")
except:
return redirect("/serialnumber")
@app.route("/clearwebsn")
def clearwebsn():
"""Function to clear webform SN input"""
print('clearing webform SN input.')
##logger.debug("cleared webform data in /clearwebsn")
try:
session.pop("webform_sn_qty")
except:
pass
try:
session.pop("raw_sn_input_list")
except:
pass
return redirect("/serialnumber")
@app.route("/clearso")
def clearso():
    """Remove sales-order search state from the session, then go home.

    Bug fix: the original called ``session.pop("ccwo_sn_list_name", "so_list")``,
    which treats ``"so_list"`` as the *default return value* — the
    ``"so_list"`` key was never removed, so the SO list survived a "clear".
    Both keys are now removed explicitly.
    """
    session.pop("ccwo_sn_list_name", None)
    session.pop("so_list", None)
    return redirect("/")
@app.route("/clearsn")
def clearsn():
    """Remove all serial-number input (webform list, uploaded-CSV list and
    webform count) from the session, then return to the main page.
    """
    # pop() with a default never raises; replaces three bare try/except
    # blocks that silently swallowed every exception type.
    session.pop("raw_sn_input_list", None)
    session.pop("csv_sn_file_name_list", None)
    session.pop("webform_sn_qty", None)
    return redirect("/")
@app.route("/clearscan")
def clearscan():
    """Remove network-scan results from the session, then return home."""
    # pop() with a default never raises; replaces a bare try/except.
    session.pop("scan_sn_list_name", None)
    return redirect("/")
@app.route("/")
def webroot():
    """Main page: aggregate serial numbers gathered from every data source.

    Collects SNs from (1) the network scan, (2) sales-order searches,
    (3) webform input and (4) uploaded CSV files, de-duplicates the union,
    persists it to the user's profile as the CCW-R search input, and renders
    a per-source summary.  Each source is read best-effort: a missing
    session key or unreadable file simply yields its "No ... Data" message.
    """
    if not session.get("logged_in"):
        return render_template("login.html")
    username = session["username"]
    # --- Source 1: network-scan results (CSV written by /scan) ---
    try:
        scan_sn_list_name = session["scan_sn_list_name"]
        scan_sn_list = file_SN_search(scan_sn_list_name)
        len_scan_sn_list = str(len(scan_sn_list))
        scan_data = "Network devices scanned: %s" % (len_scan_sn_list)
    except:
        scan_data = "No scan SN Data"
        scan_sn_list = []
    # --- Source 2: serial numbers returned by sales-order searches ---
    try:
        ccwo_sn_list_name = session["ccwo_sn_list_name"]
        # print(ccwo_sn_list_name)
        ##logger.debug(ccwo_sn_list_name, extra=username)
        # ccwo_sn_list=session['ccwo_sn_list']
        with open(ccwo_sn_list_name, "r") as f:
            ccwo_sn_list = load(f)
        # print(ccwo_sn_list)
        ##logger.debug(ccwo_sn_list, extra=username)
        # Strip the surrounding [ ] from the list's repr for display.
        formatted_so_list = str(session["so_list"]).rstrip("]").lstrip("[")
        len_ccwo_sn_list = str(len(ccwo_sn_list))
        so_data = "SOs %s were searched, returning %s serial numbers." % (
            formatted_so_list,
            len_ccwo_sn_list,
        )
    except:
        ccwo_sn_list = []
        so_data = "No SO Data"
    # --- Source 3: serial numbers typed into the webform ---
    try:
        raw_sn_input_list = session["raw_sn_input_list"]
        len_raw_sn_input_list = str(len(raw_sn_input_list))
        sn_data_web = "Webform input SN count: %s" % len_raw_sn_input_list
    except:
        sn_data_web = "No Webform SN Data"
        raw_sn_input_list = []
    # --- Source 4: serial numbers from uploaded CSV files ---
    try:
        csv_sn_file_name_list = session["csv_sn_file_name_list"]
        print('webroot csv sn file name list is: %s'%csv_sn_file_name_list)
        csv_sn_list=[]
        for i in csv_sn_file_name_list:
            try:
                j = file_SN_search(i)
                csv_sn_list=csv_sn_list+j
            except IndexError:
                # Fall back to the regex-based parser when the column-based
                # one fails on this file's layout.
                j = file_SN_search_reg(i)
                csv_sn_list = csv_sn_list + j
        len_csv_sn_list = str(len(csv_sn_list))
        sn_data_csv = "CSV input SN count: %s" % len_csv_sn_list
    except:
        sn_data_csv = "No File SN Data"
        csv_sn_list = []
    # Union of all sources, de-duplicated (order is not preserved).
    ccwr_search_list = csv_sn_list + raw_sn_input_list + ccwo_sn_list + scan_sn_list
    ccwr_search_list = list(set(ccwr_search_list))
    # Persist the combined input for the subsequent /ccwrresults request.
    with open("profiles/%s/current_CCWR_input_SNs.json" % username, "w") as f:
        dump(ccwr_search_list, f)
    data_total = str(len(ccwr_search_list))
    return render_template(
        "index.html",
        so_data=so_data,
        sn_data_web=sn_data_web,
        sn_data_csv=sn_data_csv,
        data_total=data_total,
        scan_data=scan_data,
    )
@app.route("/ccwrresults", methods=["GET"])
def ccwrresults_get():
    """Run the CCW-R entitlement search and render the final results page.

    Reads the aggregated serial-number list written by the main page, queries
    the CCW-R API, enriches the rows with smart-account data fetched per sales
    order (in parallel), writes the Cat3K license report to disk, and builds a
    pre-filled ``mailto:`` link for the licensing team.
    """
    start_time = datetime.now()
    # Discard any queued flash messages from previous pages.
    session.pop("_flashes", [])
    if not session.get("logged_in"):
        return render_template("login.html")
    try:
        username = session["username"]
        sadomain = session["sadomain"]
        with open("profiles/%s/current_CCWR_input_SNs.json" % username, "r") as f:
            ccwr_search_list = load(f)
        ccwr_response, ccwr_access_token = ccwr_search_request(
            username, "serialNumbers", ccwr_search_list
        )
        # Sentinel meaning the API usage limit was exceeded.
        if ccwr_response=='over':
            return render_template(
                "apiover.html"
            )
        so_for_sa_list = []
        elapsed_time = datetime.now() - start_time
        print("CCW-R API Time:")
        print(elapsed_time)
        start_time = datetime.now()
        for i in ccwr_response["instances"]:
            so_for_sa_list.append(i["salesOrderNumber"])
        # Keep only SO numbers with leading digits 1/8/9 — presumably the
        # order-number ranges the CCW-Order API accepts; TODO confirm.
        so_for_sa_list = [x for x in so_for_sa_list if x.startswith("1") or x.startswith("8") or x.startswith("9")]
        # Order-preserving de-duplication (list side effect in a comprehension).
        so_for_sa_list_dedup = []
        [so_for_sa_list_dedup.append(x) for x in so_for_sa_list if x not in so_for_sa_list_dedup]
        threads = []
        sa_list = []
        # Fan out one CCW-Order status request per unique sales order.
        with ThreadPoolExecutor(max_workers=50) as executor:
            for so_num in so_for_sa_list_dedup:
                threads.append(
                    executor.submit(
                        ccwo_order_status, username, so_num
                    )
                )
            for task in as_completed(threads):
                try:
                    result = task.result()
                    sa_list.append(result)
                except:
                    # Best-effort: a single failed SO lookup is skipped.
                    continue
        elapsed_time = datetime.now() - start_time
        print("CCWO API Time:")
        print(elapsed_time)
        ccwr_temp_list = ccwr_create_table(username, ccwr_response, sa_list)
        # NOTE(review): the local name "os" shadows the os module for the rest
        # of this function — rename if os.* is ever needed below this point.
        # OrderedSet of JSON strings = order-preserving row de-duplication.
        os=OrderedSet([dumps(i) for i in ccwr_temp_list])
        ccwr_full_list=[loads(i) for i in os]
        output_file = "profiles/%s/CAT3K_License_Report.csv" % username
        reportstamp=str(int(time()))
        report_output_file ="profiles/%s/reports/CAT3K_License_Report.%s.csv" % (username,reportstamp)
        output_file_list=[output_file,report_output_file]
        create_3K_lic_rpt(ccwr_full_list, output_file_list, sadomain)
        # print(ccwr_full_list)
        ##logger.debug(ccwr_full_list)
        # No smart-account domain known: email body asks the user to fill it in.
        # (%0D%0A is a URL-encoded CRLF inside the mailto: body.)
        if sadomain == "":
            body = (
                "#### Attach either the CCWR Raw Data or Cat3K License Report to this email####"
                "<--Delete before sending %0D%0A"
                + "%0D%0A"
                + "#### Please make sure to include the smart account and virtual account "
                "for license deposit####<--Delete before sending%0D%0A"
                + "%0D%0A"
                + "Cisco Licensing Team,%0D%0A"
                + "%0D%0A"
                + "Please convert traditional licensing for products in the attached "
                "spreadsheet to smart licensing. "
                + "The Smart Account for this action is <Insert your SA here>. "
                + "The Virtual Accouont for this action is <Insert your VA here>."
            )
            mailto = "<EMAIL>"
            subject = "Smart Licensing Entitlement Request"
            email = "mailto:{mailto}?subject={subject}&body={body}".format(
                mailto=mailto, subject=subject, body=body
            )
            return render_template(
                "ccwrresults.html", ccwr_full_list=ccwr_full_list, email=email
            )
        # Smart-account domain known: pre-fill it into the email body.
        body = (
            "#### Attach either the CCWR Raw Data or Cat3K License Report to this email####"
            "<--Delete before sending %0D%0A"
            + "%0D%0A"
            + "#### Please verify the virtual account in {sadomain} for license deposit####"
            "<--Delete before sending%0D%0A".format(
                sadomain=sadomain)
            + "%0D%0A"
            + "Cisco Licensing Team,%0D%0A"
            + "%0D%0A"
            + "Please convert traditional licensing for products in the attached "
            "spreadsheet to smart licensing. "
            + "The Smart Account for this action is {sadomain}.".format(
                sadomain=sadomain)
            + "The Virtual Accouont for this action is <Insert your VA here>."
        )
        mailto = "<EMAIL>"
        subject = "Smart Licensing Entitlement Request"
        email = "mailto:{mailto}?subject={subject}&body={body}".format(
            mailto=mailto, subject=subject, body=body
        )
        elapsed_time = datetime.now() - start_time
        return render_template(
            "ccwrresults.html", ccwr_full_list=ccwr_full_list, email=email
        )
    except FileNotFoundError:
        flash(
            "No data submitted. Gather entitlement data via one of the above methods."
        )
        return render_template("index.html")
    except ex.ConnectionError:
        flash(
            "Connection to Cisco.com can not be established. Check network connectivity."
        )
        return render_template("index.html")
    except TypeError:
        flash(
            "No data submitted. Gather entitlement data via one of the above methods."
        )
        return render_template("index.html")
@app.route("/download3Krpt", methods=["GET"])
def download3Krpt_get():
    """Serve the current user's generated Cat3K license report as a download."""
    report_path = "profiles/%s/CAT3K_License_Report.csv" % session["username"]
    return send_file(report_path, as_attachment=True)
@app.route("/downloadccwrraw", methods=["GET"])
def downloadccwrraw_get():
    """Serve the current user's raw CCW-R output CSV as a download."""
    raw_path = "profiles/%s/current_raw_CCWR_output.csv" % session["username"]
    return send_file(raw_path, as_attachment=True)
@app.route("/downloadbltuserguide", methods=["GET"])
def downloadbltuserguide():
    """Serve the bundled BLT user guide PDF as a download."""
    return send_file("docs/BLT-User-Guide.pdf", as_attachment=True)
@app.route("/downloadbltinstallguide", methods=["GET"])
def downloadbltinstallguide():
    """Serve the bundled BLT install guide PDF as a download."""
    return send_file("docs/BLT-Install-Guide.pdf", as_attachment=True)
@app.route("/serialnumber", methods=["GET", "POST"])
def serialnumber():
    """Serial-number input page: accept webform SNs and/or a CSV upload.

    POST appends comma-separated webform serial numbers to the session list
    and/or saves an uploaded CSV into the user's profile folder, recording
    its path in the session.  GET just re-renders the page with the current
    counts.  All session reads are best-effort.
    """
    if not session.get("logged_in"):
        return render_template("login.html")
    username = session["username"]
    if request.method == "GET":
        # NOTE(review): these two locals are read but never used before the
        # identical reads at the bottom of the function — looks like dead code.
        try:
            csv_sn_file_name_list=session["csv_sn_file_name_list"]
        except:
            pass
        try:
            webform_sn_qty=session["webform_sn_qty"]
        except:
            pass
    if request.method == "POST":
        # --- Webform serial numbers (comma separated) ---
        try:
            searchsn = str(request.form["searchsn"])
            if searchsn != '':
                form_sn_input_list = searchsn.split(",")
            else:
                form_sn_input_list=[]
            try:
                raw_sn_input_list=session["raw_sn_input_list"]
                print(raw_sn_input_list)
                ##logger.debug(raw_sn_input_list)
            except:
                raw_sn_input_list=[]
            # Append new entries to whatever was already collected.
            raw_sn_input_list=raw_sn_input_list+form_sn_input_list
            webform_sn_qty=len(raw_sn_input_list)
            session["raw_sn_input_list"] = raw_sn_input_list
            session["webform_sn_qty"] = webform_sn_qty
        except:
            pass
        # --- Uploaded CSV file ---
        try:
            snfile = request.files["csvfile"]
            UPLOAD_FOLDER = "profiles/%s/" % username
            app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
            # NOTE(review): snfile.filename is user-controlled; consider
            # werkzeug.utils.secure_filename before joining paths.
            snfile.save(os.path.join(app.config["UPLOAD_FOLDER"], snfile.filename))
            csv_sn_file_name = UPLOAD_FOLDER + snfile.filename
            try:
                csv_sn_file_name_list=session["csv_sn_file_name_list"]
                print(csv_sn_file_name_list)
                ##logger.debug(csv_sn_file_name_list)
            except Exception as E:
                print('Exception: %s'%E)
                ##logger.debug('Exception: %s'%E)
                csv_sn_file_name_list=[]
                pass
            csv_sn_file_name_list.append(csv_sn_file_name)
            print('csv file name list is: %s'%csv_sn_file_name_list)
            session["csv_sn_file_name_list"] = csv_sn_file_name_list
        except:
            pass
    # Re-read the (possibly updated) session state for rendering, with defaults.
    try:
        csv_sn_file_name_list=session["csv_sn_file_name_list"]
    except:
        csv_sn_file_name_list=[]
        pass
    try:
        webform_sn_qty=session["webform_sn_qty"]
    except:
        webform_sn_qty=0
        pass
    return render_template("serialnumber.html", csv_sn_file_name_list=csv_sn_file_name_list, webform_sn_qty=webform_sn_qty)
@app.route("/salesorder", methods=["GET", "POST"])
def salesorder():
    """Sales-order search page.

    GET renders the search form.  POST takes a comma-separated list of SO
    numbers, verifies the cached CCW-Order OAuth token is still fresh
    (re-login if older than ~58 minutes), runs the search, persists the
    returned serial numbers to the user's profile, and renders the results.
    """
    if not session.get("logged_in"):
        return render_template("login.html")
    if request.method == "GET":
        return render_template("salesorder.html")
    try:
        if request.method == "POST":
            username = session["username"]
            searchso = str(request.form["searchso"])
            # print(searchso)
            #logger.debug("searchso")
            so_list = searchso.split(",")
            session["so_list"] = so_list
            with open("profiles//%s//ccw_order_oauth.json" % username, "r") as f:
                jl = load(f)
            # print('ccw order oauth2 token age: '+str(int(time())-jl['ts'])+' seconds.')
            # Token older than 3500 s (just under the 1 h OAuth lifetime):
            # force a fresh login instead of using an expired token.
            if (time() - jl["ts"]) > 3500:
                session["logged_in"] = False
                return redirect("/")
            ccwo_sn_list, ccwo_error_so_list, so_data_matrix = ccwo_search_request(
                username, so_list
            )
            # SOs that did not come back in the error list succeeded.
            ccwo_success_so = [i for i in so_list if i not in ccwo_error_so_list]
            ccwo_sn_list_name = "profiles/%s/current_SO_search.json" % username
            session["ccwo_sn_list_name"] = ccwo_sn_list_name
            with open(ccwo_sn_list_name, "w") as f:
                dump(ccwo_sn_list, f)
            # sleep(1)
            return render_template(
                "soresults.html",
                so_data_matrix=so_data_matrix,
                ccwo_success_so=ccwo_success_so,
                ccwo_error_so_list=ccwo_error_so_list,
            )
    except FileNotFoundError:
        flash(
            "Connection to Cisco.com can not be established. Check network connectivity."
        )
        return render_template("salesorder.html")
@app.route("/scan", methods=["GET"])
def scan():
    """Run the on-prem network scan against the devices in global SWLIST.

    Writes a CSV header, fans the per-device collection out over a thread
    pool, then moves the finished ``device_info.csv`` into the user's
    profile folder and records its path in the session.
    Precondition: SWLIST must have been populated by /scanupload.
    """
    if not session.get("logged_in"):
        return render_template("login.html")
    title = ["Product ID", "Serial Number", "License Entitlement"]
    # Create/overwrite the output file with just the header row.
    with open("device_info.csv", "w+") as devFile:
        devwriter = csv.writer(devFile, lineterminator="\n")
        devwriter.writerow(title)
    # Define start time
    start_time = datetime.now()
    # Define the number of threads
    num_threads = int(10)
    pool = ThreadPool(num_threads)
    # Start threads
    # Each getDevInfo call appends its device's rows to device_info.csv.
    pool.starmap(cisco_info.getDevInfo, SWLIST)
    pool.close()
    pool.join()
    username = session["username"]
    UPLOAD_FOLDER = "profiles/%s/" % username
    # Move the scan output into the user's profile (replace any previous run).
    if os.path.exists(UPLOAD_FOLDER + "device_info.csv"):
        os.remove(UPLOAD_FOLDER + "device_info.csv")
    os.rename("device_info.csv", UPLOAD_FOLDER + "device_info.csv")
    scan_sn_list_name = "profiles/%s/device_info.csv" % username
    session["scan_sn_list_name"] = scan_sn_list_name
    print(session["scan_sn_list_name"])
    print("\nReview collected information in device_info.csv")
    print("\nElapsed time: " + str(datetime.now() - start_time))
    return redirect("/")
@app.route("/scanupload", methods=["GET", "POST"])
def scanupload():
    """Accept a CSV of device login details for the on-prem network scan.

    POST saves the uploaded file, parses it into the module-global SWLIST
    consumed by /scan, and shows either the scan-confirmation page or an
    error if the CSV could not be parsed.  GET renders the upload form.
    """
    if not session.get("logged_in"):
        return render_template("login.html")
    username = session["username"]
    UPLOAD_FOLDER = "profiles/%s/" % username
    app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
    if request.method == "POST":
        f = request.files["csvfile"]
        # NOTE(review): f.filename is user-controlled; consider
        # werkzeug.utils.secure_filename before joining paths.
        f.save(os.path.join(app.config["UPLOAD_FOLDER"], f.filename))
        data = cisco_info.getLoginInfo(UPLOAD_FOLDER + f.filename)
        # SWLIST is module-global so /scan can read it on a later request.
        global SWLIST
        SWLIST = cisco_info.convertLoginDict(data)
        if not SWLIST:
            flash("CSV formatted incorrectly...Try again")
            return render_template("scanupload.html")
        return render_template("netscan.html", csvinfo=SWLIST)
    return render_template("scanupload.html")
@app.route("/help", methods=["GET"])
def helppage():
    """Render the static help page."""
    return render_template("help.html")
@app.route("/Reports", methods=["GET"])
def reportpage():
    """List the logged-in user's saved reports, split into spooled and regular."""
    if not session.get("logged_in"):
        return render_template("login.html")
    username = session["username"]
    filenames = os.listdir('profiles/%s/reports' % username)
    # "spooled" in the filename marks reports still being generated.
    spooledreport_list = sorted(name for name in filenames if 'spooled' in name)
    report_list = sorted(name for name in filenames if 'spooled' not in name)
    return render_template("Reports.html", report_list=report_list, spooledreport_list=spooledreport_list)
@app.route("/downloadreports/<filename>", methods=["GET"])
def downloadreport(filename):
    """Serve one of the logged-in user's saved reports as a download."""
    if not session.get("logged_in"):
        return render_template("login.html")
    # NOTE(review): *filename* comes from the URL; consider
    # werkzeug.utils.secure_filename to prevent path traversal.
    report_path = "profiles/%s/reports/%s" % (session["username"], filename)
    return send_file(report_path, as_attachment=True)
@app.route("/deletereports/<filename>", methods=["GET"])
def deletereport(filename):
    """Delete one saved report, then re-render the report listing."""
    if not session.get("logged_in"):
        return render_template("login.html")
    username = session["username"]
    # NOTE(review): *filename* comes from the URL; consider
    # werkzeug.utils.secure_filename to prevent path traversal.
    os.remove("profiles/%s/reports/%s" % (username, filename))
    filenames = os.listdir('profiles/%s/reports' % username)
    spooledreport_list = sorted(name for name in filenames if 'spooled' in name)
    report_list = sorted(name for name in filenames if 'spooled' not in name)
    return render_template("Reports.html", report_list=report_list, spooledreport_list=spooledreport_list)
@app.route("/teamsupport", methods=["GET"])
def teamsupport():
    """Redirect the user to the external support space."""
    return redirect("https://eurl.io/#BkZG3vfeU")
if __name__ == "__main__":
    # Development entry point: Flask's built-in server with an "adhoc"
    # (self-signed, generated at startup) TLS certificate.
    # NOTE(review): debug=True enables the Werkzeug debugger — do not expose
    # this publicly in production.
    if sys.platform == "win32":
        # Windows dev box: bind to loopback only.
        app.run(host="127.0.0.1", debug=True, ssl_context="adhoc")
    else:
        # Elsewhere: listen on all interfaces.
        app.run(host="0.0.0.0", debug=True, ssl_context="adhoc")
| StarcoderdataPython |
1775529 | <gh_stars>0
# @date 2018-08-23
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# HTTPS+WS connector for bitmex.com
import time
import json
import requests
from datetime import datetime, timedelta
from common.utils import UTC
from .apikeyauthwithexpires import APIKeyAuthWithExpires
from .ws import BitMEXWebsocket
import logging
logger = logging.getLogger('siis.connector.bitmex')
class Connector(object):
    """HTTPS + WebSocket connector for the BitMEX API.

    Wraps a ``requests`` session for the REST endpoints (with retry,
    rate-limit and error handling) and a ``BitMEXWebsocket`` for streaming
    data.  Source indentation was reconstructed during review; nesting of a
    few branches is flagged with NOTE(review) comments below.
    """
    # Max retried HTTP errors for trades / candles history paging loops.
    TRADES_HISTORY_MAX_RETRY = 3
    CANDLES_HISTORY_MAX_RETRY = 3
    # Map timeframe in seconds -> BitMEX "binSize" string.
    TIMEFRAME_TO_BIN_SIZE = {
        60: '1m',
        5*60: '5m',
        60*60: '1h',
        24*60*60: '1d'
    }
    # bin size reverse map (str: double)
    BIN_SIZE_TO_TIMEFRAME = {v: k for k, v in TIMEFRAME_TO_BIN_SIZE.items()}
    # Bin sizes supported by the bucketed-trade endpoint.
    BIN_SIZE = ('1m', '5m', '1h', '1d')

    def __init__(self, service, api_key, api_secret, symbols, host="www.bitmex.com", callback=None):
        """Store credentials and configuration; no network I/O happens here.

        :param service: unused here — kept for interface compatibility.
        :param symbols: iterable of instrument symbols to follow ('*' = all).
        :param callback: forwarded to the websocket for message delivery.
        """
        self._protocol = "https://"
        self._host = host or "www.bitmex.com"
        self._base_url = "/api/v1/"
        self._timeout = 7
        self._retries = 0  # initialize counter
        self._watched_symbols = symbols or set()  # followed instruments
        self._all_instruments = []  # availables listed instruments
        # always XBTUSD as needed for others pairs or computing
        if 'XBTUSD' not in self._watched_symbols:
            self._watched_symbols.add('XBTUSD')
        self.__api_key = api_key
        self.__api_secret = api_secret
        self._callback = callback
        self._session = None
        self._ws = None

    def connect(self, use_ws=True):
        """Create the HTTPS session, list active instruments and (optionally)
        open the authenticated websocket subscribed to the watched symbols.
        """
        # Prepare HTTPS session
        if self._session is None:
            self._session = requests.Session()
            # These headers are always sent
            self._session.headers.update({'user-agent': 'siis-' + '1.0'})
            self._session.headers.update({'content-type': 'application/json'})
            self._session.headers.update({'accept': 'application/json'})
        # list all instruments
        endpoint = "/instrument/active"
        result = self.request(path=endpoint, verb='GET')
        self._all_instruments = []
        if isinstance(result, list):
            for instrument in result:
                # 'typ' FFCCSX/FFWCSX = futures / perpetual contract classes.
                if instrument['typ'] in ('FFCCSX', 'FFWCSX'):
                    self._all_instruments.append(instrument['symbol'])
        if use_ws:
            if self._ws is None:
                self._ws = BitMEXWebsocket(self.__api_key, self.__api_secret, self._callback)
            if self._ws is not None and not self._ws.connected:
                self._ws = BitMEXWebsocket(self.__api_key, self.__api_secret, self._callback)
                # only subscribe to available instruments
                symbols = []
                if '*' in self._watched_symbols:
                    # follow any
                    # NOTE(review): this branch leaves `symbols` empty while
                    # updating _watched_symbols — confirm ws.connect() with an
                    # empty list behaves as "subscribe all".
                    self._watched_symbols = self._all_instruments
                else:
                    # follow only listed symbols
                    for symbol in self._watched_symbols:
                        if symbol in self._all_instruments:
                            symbols.append(symbol)
                        else:
                            logger.warning('- BitMex instrument %s is not available.' % (symbol,))
                    self._watched_symbols = symbols
                self._ws.connect("wss://" + self._host, symbols, should_auth=True)

    def disconnect(self):
        """Close the websocket (if open) and drop the HTTPS session."""
        if self._ws:
            if self._ws.connected:
                self._ws.exit()
            self._ws = None
        if self._session:
            self._session = None

    @property
    def authenticated(self) -> bool:
        # Authenticated as soon as an API key was supplied.
        return self.__api_key is not None

    def request(self, path, query=None, postdict=None, verb=None, timeout=None, max_retries=None):
        """Send one signed REST request and return the decoded JSON body.

        Handles 401 (fatal), 404, 429 (sleep until rate-limit reset then
        retry), 503 (retry), 400 duplicate-clOrdID recovery and insufficient
        balance; timeouts and connection errors are retried up to
        *max_retries* via the nested ``retry()`` helper.
        """
        url = self._protocol + self._host + self._base_url + path
        if timeout is None:
            timeout = self._timeout
        # default to POST if data is attached, GET otherwise
        if not verb:
            verb = 'POST' if postdict else 'GET'
        # by default don't retry POST or PUT. Retrying GET/DELETE is okay because they are idempotent.
        # in the future we could allow retrying PUT, so long as 'leavesQty' is not used (not idempotent),
        # or you could change the clOrdID (set {"clOrdID": "new", "origClOrdID": "old"}) so that an amend
        # can't erroneously be applied twice.
        if max_retries is None:
            max_retries = 0 if verb in ['POST', 'PUT'] else 3
        # auth: API Key/Secret
        auth = APIKeyAuthWithExpires(self.__api_key, self.__api_secret)

        def retry():
            # Bump the shared counter and re-issue the identical request.
            self._retries += 1
            if self._retries > max_retries:
                raise Exception("Max retries on %s (%s) hit, raising." % (path, json.dumps(postdict or '')))
            return self.request(path, query, postdict, verb, timeout, max_retries)

        # Make the request
        response = None
        try:
            # logger.debug("Sending req to %s: %s" % (url, json.dumps(postdict or query or '')))
            req = requests.Request(verb, url, json=postdict, auth=auth, params=query)
            prepped = self._session.prepare_request(req)
            response = self._session.send(prepped, timeout=timeout)
            # Make non-200s throw
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            if response is None:
                raise e
            # 401 - Auth error. This is fatal.
            if response.status_code == 401:
                logger.error("API Key or Secret incorrect, please check and restart.")
                logger.error("Error: " + response.text, True)
                if postdict:
                    # fatal error...
                    return False
            # 404, can be thrown if order canceled or does not exist.
            elif response.status_code == 404:
                if verb == 'DELETE':
                    # logger.error("Order not found: %s" % postdict['orderID'])
                    # return
                    # NOTE(review): with the two lines above commented out,
                    # this 404 handling only fires for DELETE requests;
                    # other 404s fall through to the generic handler below.
                    logger.error("Unable to contact the BitMEX API (404). ")
                    logger.error("Request: %s \n %s" % (url, json.dumps(postdict)))
                    raise e
            # 429, ratelimit; cancel orders & wait until X-RateLimit-Reset
            elif response.status_code == 429:
                # logger.error("Ratelimited on current request (contact <EMAIL> to raise your limits). ")
                # logger.error("Request: %s \n %s" % (url, json.dumps(postdict)))
                # Figure out how long we need to wait.
                ratelimit_reset = response.headers['X-RateLimit-Reset']
                to_sleep = int(ratelimit_reset) - int(time.time()) + 1.0  # add 1.0 more second be we still have issues
                reset_str = datetime.fromtimestamp(int(ratelimit_reset)).strftime('%X')
                # Prefer the server-provided Retry-After when present.
                to_sleep = float(response.headers.get('Retry-After', to_sleep))
                # We're ratelimited, and we may be waiting for a long time. Cancel orders.
                # logger.warning("Canceling all known orders in the meantime.")
                # for o in self.open_orders():
                #     if 'orderID' in o:
                #         self.cancel(o['orderID'])
                logger.error("Sleeping from %s for %d seconds." % (reset_str, to_sleep))
                time.sleep(to_sleep)
                # Retry the request
                return retry()
            # 503 - BitMEX temporary downtime, likely due to a deploy. Try again
            elif response.status_code == 503:
                logger.warning("Unable to contact the BitMEX API (503), retrying.")
                logger.warning("Request: %s \n %s" % (url, json.dumps(postdict)))
                time.sleep(5)
                return retry()
            elif response.status_code == 400:
                error = response.json()['error']
                message = error['message'].lower() if error else ''
                # Duplicate clOrdID: that's fine, probably a deploy, go get the order(s) and return it
                if 'duplicate clordid' in message:
                    orders = postdict['orders'] if 'orders' in postdict else postdict
                    IDs = json.dumps({'clOrdID': [order['clOrdID'] for order in orders]})
                    order_results = self.request('/order', query={'filter': IDs}, verb='GET')
                    for i, order in enumerate(order_results):
                        # Verify the existing order matches what we tried to POST.
                        if (
                                order['orderQty'] != abs(postdict['orderQty']) or
                                order['side'] != ('Buy' if postdict['orderQty'] > 0 else 'Sell') or
                                order['price'] != postdict['price'] or
                                order['symbol'] != postdict['symbol']):
                            raise Exception('Attempted to recover from duplicate clOrdID, but order returned from API ' +
                                            'did not match POST.\nPOST data: %s\nReturned order: %s' % (json.dumps(orders[i]), json.dumps(order)))
                    # All good
                    return order_results
                elif 'insufficient available balance' in message:
                    logger.error('BitMex Account out of funds. The message: %s' % error['message'])
                    raise Exception('BitMex Insufficient Funds')
            # If we haven't returned or re-raised yet, we get here.
            logger.error("BitMex unhandled Error: %s: %s" % (e, response.text))
            logger.error("Endpoint was: %s %s: %s" % (verb, path, json.dumps(postdict)))
            raise e
        except requests.exceptions.Timeout as e:
            # Timeout, re-run this request (retry immediately)
            logger.warning("Timed out on request: %s (%s), retrying..." % (path, json.dumps(postdict or '')))
            return retry()
        except requests.exceptions.ConnectionError as e:
            # NOTE(review): the "%s" below has no argument and will be logged
            # literally.
            logger.warning("Unable to contact the BitMEX API (%s). Please check the URL. Retrying. ")
            logger.warning("Request: %s %s \n %s" % (e, url, json.dumps(postdict)))
            time.sleep(2)
            return retry()
        # Reset retry counter on success
        self._retries = 0
        return response.json()

    @property
    def ws(self):
        # The underlying BitMEXWebsocket instance (or None).
        return self._ws

    @property
    def connected(self) -> bool:
        # True once connect() created the HTTPS session.
        return self._session is not None

    @property
    def ws_connected(self):
        # True when the websocket exists and reports itself connected.
        return self._ws is not None and self._ws.connected

    @property
    def watched_instruments(self):
        # Symbols this connector follows (resolved against the exchange list).
        return self._watched_symbols

    @property
    def all_instruments(self):
        # All active instruments listed by the exchange at connect() time.
        return self._all_instruments

    def get_historical_trades(self, symbol, from_date, to_date=None, limit=None):
        """Yield historical trades for *symbol* between *from_date* and
        *to_date* as tuples ``(ms_timestamp, bid, ask, last, volume,
        direction)``, paging through the REST endpoint.

        Note: this is a generator (it yields); the trailing ``return trades``
        always returns the untouched empty list.
        """
        trades = []
        endpoint = "trade"  # quote"
        params = {
            'symbol': symbol,
            'reverse': 'false',
            'count': limit or 500,  # or max limit
            'start': 0
        }
        if to_date:
            params['endTime'] = self._format_datetime(to_date)
        start = 0
        last_datetime = from_date
        last_trade_id = ""
        retry_count = 0  # in case of request http error (timeout, expired timestamp)
        while 1:
            if last_datetime:
                params['startTime'] = self._format_datetime(last_datetime)
                params['start'] = start  # offset if timestamp are same
            results = []
            try:
                results = self.request(path=endpoint, query=params, verb='GET')
            except requests.exceptions.HTTPError as e:
                retry_count += 1
                if retry_count > Connector.TRADES_HISTORY_MAX_RETRY:
                    raise e
            for c in results:
                if not c['timestamp']:
                    continue
                dt = self._parse_datetime(c['timestamp']).replace(tzinfo=UTC())
                if to_date and dt > to_date:
                    break
                if dt < last_datetime:
                    # Already seen on the previous page: skip and advance offset.
                    start += 1
                    continue
                if last_trade_id == c['trdMatchID']:
                    # could be in case of the last trade of the prev query is the first of the current query
                    continue
                # increase offset when similar timestamp, else reset
                if dt == last_datetime:
                    start += 1
                else:
                    start = 0
                # PlusTick,MinusTick,ZeroPlusTick,ZeroMinusTick
                direction = 0
                if c['tickDirection'] in ('PlusTick', 'ZeroPlusTick'):
                    direction = 1
                elif c['tickDirection'] in ('MinusTick', 'ZeroMinusTick'):
                    direction = -1
                yield (int(dt.timestamp()*1000),  # integer ms
                       c['price'], c['price'],  # bid, ask
                       c['price'],  # last
                       c['size'],  # volume
                       direction)
                last_datetime = dt
                last_trade_id = c['trdMatchID']
            if (to_date and last_datetime > to_date) or len(results) < 500:
                break
            time.sleep(1.0)  # don't excess API usage limit
        return trades

    def get_historical_candles(self, symbol, bin_size, from_date, to_date=None, limit=None, partial=False):
        """
        Time interval [1m,5m,1h,1d].

        Yield bucketed candles for *symbol* as tuples ``(ms_open_time, open,
        high, low, close, spread, volume)``.  BitMEX timestamps buckets at
        close time, so the bucket duration is subtracted to get open time.
        """
        candles = []
        endpoint = "trade/bucketed"  # "quote/bucketed"
        if bin_size not in self.BIN_SIZE:
            raise ValueError("BitMex does not support bin size %s !" % bin_size)
        params = {
            'binSize': bin_size,
            'symbol': symbol,
            'reverse': 'false',
            'count': limit or 750,  # or max limit
            # 'start': 0
        }
        if partial:
            params['partial'] = True
        # because bitmex works in close time but we are in open time
        # delta = self.BIN_SIZE_TO_TIMEFRAME[bin_size]
        # have issue using delta in seconds why...
        delta_time = timedelta(seconds=0)
        if bin_size == '1m':
            delta_time = timedelta(minutes=1)
        elif bin_size == '5m':
            delta_time = timedelta(minutes=5)
        elif bin_size == '1h':
            delta_time = timedelta(hours=1)
        elif bin_size == '1d':
            delta_time = timedelta(days=1)
        if to_date:
            # params['endTime'] = self._format_datetime(to_date + timedelta(seconds=delta))
            params['endTime'] = self._format_datetime(to_date + delta_time)
        # last_datetime = from_date + timedelta(seconds=delta)
        last_datetime = from_date + delta_time
        ot = from_date  # init
        retry_count = 0
        while 1:
            results = []
            if last_datetime:
                params['startTime'] = self._format_datetime(last_datetime)
            try:
                results = self.request(path=endpoint, query=params, verb='GET')
            except requests.exceptions.HTTPError as e:
                retry_count += 1
                if retry_count > Connector.CANDLES_HISTORY_MAX_RETRY:
                    raise e
            for c in results:
                dt = self._parse_datetime(c['timestamp']).replace(tzinfo=UTC())
                # its close time, want open time
                ot = dt - delta_time  # timedelta(seconds=delta)
                if to_date and ot > to_date:
                    break
                yield (int(ot.timestamp()*1000),  # integer ms
                       c['open'], c['high'], c['low'], c['close'],
                       0.0,  # spread
                       c['volume'])
                last_datetime = dt
            if (to_date and ot > to_date) or len(results) < 750:
                break
            time.sleep(1.0)  # don't excess API usage limit

    def _format_datetime(self, dt):
        # Format as the UTC string form the BitMEX REST API accepts.
        return dt.strftime('%Y-%m-%d %H:%M:%S+00:00')

    def _parse_datetime(self, dt):
        # Parse the ISO-8601 Zulu timestamps returned by the API (naive UTC).
        return datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S.%fZ')

    def get_order_book_l2(self, symbol, depth):
        """
        Get current order book.
        @return A tuple with two arrays of dict : (buys, sells)
            Each entry contains the id of the order, size and price.
        """
        orders = []
        endpoint = "orderBook/L2"
        params = {
            'symbol': symbol,
            'depth': depth or 0,
        }
        results = self.request(path=endpoint, query=params, verb='GET')
        buys = []
        sells = []
        for data in results:
            if data['side'] == 'Buy':
                buys.append({
                    'id': str(data['id']),
                    'size': data['size'],
                    'price': data['price']
                })
            elif data['side'] == 'Sell':
                sells.append({
                    'id': str(data['id']),
                    'size': data['size'],
                    'price': data['price']
                })
        return buys, sells
| StarcoderdataPython |
3348357 | import torch
from torch.distributions import Beta, Normal, TransformedDistribution
from torch.distributions.transforms import AffineTransform
class TransformedDistributionEx(TransformedDistribution):
    """TransformedDistribution that can compute entropy through affine maps."""

    def entropy(self):
        """Return the entropy of the distribution, batched over batch_shape.

        Only ``AffineTransform`` is supported: an affine map
        ``y = loc + scale * x`` shifts differential entropy by
        ``log |scale|`` (summed over event dimensions).

        Returns:
            Tensor of shape batch_shape.

        Raises:
            NotImplementedError: if any transform is not an AffineTransform.
        """
        total = self.base_dist.entropy()
        for transform in self.transforms:
            if not isinstance(transform, AffineTransform):
                raise NotImplementedError
            correction = torch.log(torch.abs(transform.scale))
            if transform.event_dim > 0:
                correction = correction.sum(dim=list(range(-transform.event_dim, 0)))
            total = total + correction
        return total
| StarcoderdataPython |
188313 | from .generated import access_ui
from .generated import data_sheet_ui
from .generated import form_item_ui
from .generated import form_ui
from .generated import item_boolean_checkboxes_ui
from .generated import item_choice_radio_ui
from .generated import item_datetime_ui
from .generated import item_single_line_ui
from .generated import item_text_multi_line_ui
from .generated import login_ui
from .generated import progress_bar_ui
from .generated import resources
from .generated import section_ui
from .generated import selector_ui
from .generated import text_editor_ui
from .generated import workbook_ui
from .generated import workbook_creation_ui
from .generated import sheet_creation_ui
# from .generated import dynamic_vlist_ui
# from .generated import dynamic_vlist_item_ui
from .generated import form_editor_widget_ui
from .generated import form_editor_file_ui
from .generated import form_editor_sheet_ui
from .generated import form_edit_ui
from .generated import section_edit_ui
from .generated import item_edit_ui
from .generated import choice_edit_ui
from .generated import variable_edit_ui
from .generated import section_transition_edit_ui
from . import widgets
from . import main_qss
| StarcoderdataPython |
1775647 | <filename>framework/lib/dlju/maven_junit.py
import capture_junit as cj
import re
import os
tests_begin = """-------------------------------------------------------
T E S T S
-------------------------------------------------------"""
def findBeginningOfTests(output):
    """Find the index just past Surefire's "T E S T S" banner.

    Parameters
    ----------
    output : list of str
        Lines of the build output (already stripped of log decorations).

    Returns
    -------
    int
        Index in `output` of the first line after the three-line banner,
        or -1 if the banner never occurs.
    """
    # Stripped form of the banner; inlined so this function no longer depends
    # on the module-level constant, and the (loop-invariant) splitting and
    # stripping the original redid per line is gone.
    banner = [
        "-------------------------------------------------------",
        "T E S T S",
        "-------------------------------------------------------",
    ]
    for i in range(len(output) - len(banner) + 1):
        if output[i:i + len(banner)] == banner:
            return i + len(banner)
    return -1
def findSurefireCommand(output):
    """Find the line where the maven surefire plugin forks its command.

    Fix: the original printed *every* line of the build output while
    scanning (a leftover debug `print`), spamming stdout; that is removed.

    Parameters
    ----------
    output : list of str
        The lines of the build output.

    Returns
    -------
    str or None
        The first line starting with "Forking command line" (the java
        invocation of the surefire fork), or None if not found.
    """
    for line in output:
        if line.startswith("Forking command line"):
            return line
    return None
def createCommands(javaCommand, options, lines, ranTests):
    """Build one ``java ... <JUnitRunner> <testClass>`` command per ran test.

    Fix: four leftover debug ``print`` calls removed; the two-step
    split-then-index list passes are collapsed into single comprehensions.

    Parameters
    ----------
    javaCommand : str
        The `java` invocation.
    options : list of str
        The command line options for `java`.
    lines : list of str
        Lines containing "tc=<class>" test-class entries and
        "classPathUrl=<path>" classpath entries.
    ranTests : list of str
        Names of the tests that were actually run.

    Returns
    -------
    list of str
        A command string for each test class that was run.
    """
    # "tc=<class>" lines name candidate test classes; keep only those that ran.
    testClasses = [l.split("=")[1] for l in lines if l.startswith("tc")]
    testClasses = [tc for tc in testClasses if tc in ranTests]
    # "classPathUrl=<path>" lines are joined into a single -classpath option.
    classPaths = [l.split("=")[1] for l in lines if l.startswith("classPathUrl")]
    classPath = "-classpath {}".format(":".join(classPaths))
    junitVersion = cj.getJUnitVersion([classPath])
    junitClass = cj.getJunitTestRunnerClass(junitVersion)
    if junitClass is None:
        print("Error: could not find JUnit version for classpath {}".format(classPath))
        exit(1)
    return ["{} {} {} {} {}".format(javaCommand, " ".join(options), classPath, junitClass, tc)
            for tc in testClasses]
def findRunTests(lines):
    """Collect the names of the test classes Maven actually ran.

    Scans for "Running <class>" lines and stops at the "Results" summary
    section; only well-formed two-token "Running" lines contribute.

    Parameters
    ----------
    lines : list of str
        The lines of the build output file.

    Returns
    -------
    list of str
        Names of the tests that were run, in order of appearance.
    """
    executed = []
    for line in lines:
        # The summary section terminates the per-class listing.
        if line.startswith("Results"):
            break
        if not line.startswith("Running"):
            continue
        parts = line.split(" ")
        if len(parts) == 2:
            executed.append(parts[1])
    return executed
def isArgumentlessJavaOption(line):
    """Determine whether *line* starts with a JVM option that takes no
    separate argument (so the next token is NOT that option's value).

    Parameters
    ----------
    line : str
        A token of the forked java command line.

    Returns
    -------
    bool
        True if the token is an argument-less option prefix.
    """
    argumentless = ("agentlib",
                    "agentpath",
                    "disableassertions",
                    "D",
                    "da",
                    "enableassertions",
                    "ea",
                    "enablesystemassertions",
                    "esa",
                    "disablesystemassertions",
                    "dsa",
                    "javaagent",
                    "jre-restrict-search",
                    "no-jre-restrict-search",
                    "showversion",
                    "splash",
                    "verbose",
                    "version",
                    "X")
    # str.startswith accepts a tuple of prefixes: one C-level scan instead
    # of an explicit loop over the option names.
    return line.startswith(tuple("-" + flag for flag in argumentless))
def getSurefireDir(tmps):
    """Return the first entry of *tmps* that is an existing directory, or None."""
    return next((candidate for candidate in tmps if os.path.isdir(candidate)), None)
if __name__ == '__main__':
    # Parse a Maven build log, recover the Surefire fork command, and emit
    # one standalone JUnit command per executed test class.
    args = cj.parse_args(outputSuffix="_maven")
    output = cj.getOutput(args.logs)
    print(len(output))
    # Normalize: strip trailing whitespace, drop "[...]" log-level tags,
    # then strip the remaining padding.
    output = [o.rstrip() for o in output]
    output = [re.sub(r"\[[^\]]+\]", "", o) for o in output]
    output = [o.strip() for o in output]
    beginIndex = findBeginningOfTests(output)
    print(beginIndex)
    surefire = findSurefireCommand(output[beginIndex:])
    ran = findRunTests(output[beginIndex:])
    if surefire is not None:
        # The fork line typically looks like "cd <dir> && <java cmd>";
        # only the part after "&&" is the actual java invocation.
        if surefire.find("&&") >= 0:
            splits = surefire.split("&&")
            if len(splits) >= 2:
                secondPart = splits[1]
                commandParts = secondPart.split(" ")
                i = 0
                javaCommand = ""
                options = []
                tmpFiles = []
                print(commandParts)
                # Hand-rolled token scanner: options that take a value
                # consume the following token as well (hence the inner
                # "i += 1" advances).
                while i < len(commandParts):
                    command = commandParts[i]
                    print(f"Command: {command}")
                    if command.find("bin/java") >= 0:
                        javaCommand = command
                    elif command.find("-jar") >= 0:
                        # Skip the jar path that follows "-jar".
                        i += 1
                    elif command.startswith("-"):
                        print(f"An option: {command}")
                        # print("Skipping {}".format(command))
                        if isArgumentlessJavaOption(command) or command.find("=") > -1:
                            options.append(command)
                        else:
                            # Option with a separate value token: keep both.
                            options.append("{} {}".format(command, commandParts[i + 1]))
                            i += 1
                    elif len(command) > 0:
                        # Bare tokens are Surefire temp-file arguments.
                        tmpFiles.append(command)
                    i += 1
                    pass
                tmpFiles = [tmp for tmp in tmpFiles if tmp.find("surefire") > -1]
                print("tmpFiles: {}".format(tmpFiles))
                # One of the surefire paths is the temp directory; the rest
                # are property files inside it.
                surefireDir = getSurefireDir(tmpFiles)
                tmpFiles = [t for t in tmpFiles if t != surefireDir]
                for tmp in tmpFiles:
                    lines = []
                    with open(os.path.join(surefireDir, tmp), "r") as f:
                        lines = f.readlines()
                    lines = [l.strip() for l in lines]
                    if any(l.startswith("tc") for l in lines):
                        # This is one that we want
                        newCommands = createCommands(javaCommand, options, lines, ran)
                        with open(args.output, "w") as f:
                            f.write("\n".join(newCommands))
                            f.flush()
                        break
                    pass
                pass
            pass
        pass
    else:
        print("Surefire is none")
| StarcoderdataPython |
1600533 | import multiprocessing as mp
mp.set_start_method('spawn', force=True)
import argparse
import os
import time
import yaml
import numpy
import logging
from easydict import EasyDict
import pprint
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import linklink as link
import sys
sys.path.append('./models')
from models import model_entry
from scheduler import get_scheduler
from memcached_dataset import McDataset
from utils import create_logger, AverageMeter, accuracy, save_checkpoint, load_state, DistributedGivenIterationSampler, simple_group_split, DistributedSampler, param_group_no_wd
from distributed_utils import dist_init, reduce_gradients, DistModule
from loss import LabelSmoothCELoss
from optim import optim_entry, FP16SGD, FusedFP16SGD
#model_names = sorted(name for name in models.__dict__
# if name.islower() and not name.startswith("__")
# and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--config', default='cfgs/config_res50.yaml')
parser.add_argument('--load-path', default='', type=str)
parser.add_argument('--recover', action='store_true')
parser.add_argument('-e', '--evaluate', action='store_true')
parser.add_argument('--sync', action='store_true')
parser.add_argument('--fake', action='store_true')
parser.add_argument('--fuse-prob', action='store_true')
parser.add_argument('--fusion-list', nargs='+', help='multi model fusion list')
class ColorAugmentation(object):
    """AlexNet-style PCA color augmentation.

    Adds a small random linear combination of the RGB eigenvectors (scaled
    by the eigenvalues) to a CHW image tensor. The defaults are the ImageNet
    RGB principal components.
    """

    def __init__(self, eig_vec=None, eig_val=None):
        # BUG FIX: the original tested ``eig_vec == None`` / ``eig_val == None``.
        # With a Tensor argument ``==`` is an elementwise comparison, producing
        # a multi-element bool tensor that raises inside ``if``; identity
        # comparison with ``is None`` is the correct (and intended) check.
        if eig_vec is None:
            eig_vec = torch.Tensor([
                [ 0.4009,  0.7192, -0.5675],
                [-0.8140, -0.0045, -0.5808],
                [ 0.4203, -0.6948, -0.5836],
            ])
        if eig_val is None:
            eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])
        self.eig_val = eig_val  # 1*3
        self.eig_vec = eig_vec  # 3*3

    def __call__(self, tensor):
        """Return *tensor* (3, H, W) perturbed along the color eigenvectors."""
        assert tensor.size(0) == 3
        # Random per-component strength, N(0, 1) scaled by 0.1.
        alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1
        quatity = torch.mm(self.eig_val * alpha, self.eig_vec)
        tensor = tensor + quatity.view(3, 1, 1)
        return tensor
def main():
    """Entry point: parse the YAML config, initialize the distributed
    context, build the (optionally fp16, sync-BN) model, optimizer, data
    loaders and LR scheduler, then either evaluate or train.
    """
    global args, config, best_prec1
    args = parser.parse_args()

    with open(args.config) as f:
        config = yaml.load(f)  # NOTE(review): unsafe for untrusted YAML; yaml.safe_load preferred

    config = EasyDict(config['common'])
    config.save_path = os.path.dirname(args.config)

    rank, world_size = dist_init()

    # create model
    # Sync-BN groups: ranks are partitioned into groups of bn_group_size
    # that share batch-norm statistics.
    bn_group_size = config.model.kwargs.bn_group_size
    bn_var_mode = config.model.kwargs.get('bn_var_mode', 'L2')
    if bn_group_size == 1:
        bn_group = None
    else:
        assert world_size % bn_group_size == 0
        bn_group = simple_group_split(world_size, rank, world_size // bn_group_size)

    config.model.kwargs.bn_group = bn_group
    config.model.kwargs.bn_var_mode = (link.syncbnVarMode_t.L1
                                       if bn_var_mode == 'L1'
                                       else link.syncbnVarMode_t.L2)
    model = model_entry(config.model)
    if rank == 0:
        print(model)

    model.cuda()

    # fp16 is implied by the optimizer type chosen in the config.
    if config.optimizer.type == 'FP16SGD' or config.optimizer.type == 'FusedFP16SGD':
        args.fp16 = True
    else:
        args.fp16 = False

    if args.fp16:
        # if you have modules that must use fp32 parameters, and need fp32 input
        # try use link.fp16.register_float_module(your_module)
        # if you only need fp32 parameters set cast_args=False when call this
        # function, then call link.fp16.init() before call model.half()
        if config.optimizer.get('fp16_normal_bn', False):
            print('using normal bn for fp16')
            link.fp16.register_float_module(link.nn.SyncBatchNorm2d, cast_args=False)
            link.fp16.register_float_module(torch.nn.BatchNorm2d, cast_args=False)
        link.fp16.init()
        model.half()

    model = DistModule(model, args.sync)

    # create optimizer
    opt_config = config.optimizer
    opt_config.kwargs.lr = config.lr_scheduler.base_lr
    if config.get('no_wd', False):
        # Exclude bias/BN parameters from weight decay.
        param_group, type2num = param_group_no_wd(model)
        opt_config.kwargs.params = param_group
    else:
        opt_config.kwargs.params = model.parameters()

    optimizer = optim_entry(opt_config)

    # optionally resume from a checkpoint
    last_iter = -1
    best_prec1 = 0
    if args.load_path:
        if args.recover:
            best_prec1, last_iter = load_state(args.load_path, model, optimizer=optimizer)
        else:
            load_state(args.load_path, model)

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # augmentation
    aug = [transforms.RandomResizedCrop(config.augmentation.input_size),
           transforms.RandomHorizontalFlip()]

    for k in config.augmentation.keys():
        assert k in ['input_size', 'test_resize', 'rotation', 'colorjitter', 'colorold']

    rotation = config.augmentation.get('rotation', 0)
    colorjitter = config.augmentation.get('colorjitter', None)
    colorold = config.augmentation.get('colorold', False)

    if rotation > 0:
        aug.append(transforms.RandomRotation(rotation))

    if colorjitter is not None:
        aug.append(transforms.ColorJitter(*colorjitter))

    aug.append(transforms.ToTensor())

    if colorold:
        # Legacy PCA color augmentation applied after ToTensor.
        aug.append(ColorAugmentation())

    aug.append(normalize)

    # train
    train_dataset = McDataset(
        config.train_root,
        config.train_source,
        transforms.Compose(aug),
        fake=args.fake)

    # val
    val_dataset = McDataset(
        config.val_root,
        config.val_source,
        transforms.Compose([
            transforms.Resize(config.augmentation.test_resize),
            transforms.CenterCrop(config.augmentation.input_size),
            transforms.ToTensor(),
            normalize,
        ]),
        args.fake)

    # Sampler that deals iterations (not epochs) and resumes at last_iter.
    train_sampler = DistributedGivenIterationSampler(train_dataset, config.lr_scheduler.max_iter, config.batch_size,
                                                     last_iter=last_iter)
    val_sampler = DistributedSampler(val_dataset, round_up=False)

    train_loader = DataLoader(
        train_dataset, batch_size=config.batch_size, shuffle=False,
        num_workers=config.workers, pin_memory=True, sampler=train_sampler)

    val_loader = DataLoader(
        val_dataset, batch_size=config.batch_size, shuffle=False,
        num_workers=config.workers, pin_memory=True, sampler=val_sampler)

    # The scheduler steps the wrapped fp32 optimizer when fp16 is in use.
    config.lr_scheduler['optimizer'] = optimizer.optimizer if isinstance(optimizer, FP16SGD) else optimizer
    config.lr_scheduler['last_iter'] = last_iter
    lr_scheduler = get_scheduler(config.lr_scheduler)

    if rank == 0:
        tb_logger = SummaryWriter(config.save_path+'/events')
        logger = create_logger('global_logger', config.save_path+'/log.txt')
        logger.info('args: {}'.format(pprint.pformat(args)))
        logger.info('config: {}'.format(pprint.pformat(config)))
    else:
        tb_logger = None

    if args.evaluate:
        if args.fusion_list is not None:
            validate(val_loader, model, fusion_list=args.fusion_list, fuse_prob=args.fuse_prob)
        else:
            validate(val_loader, model)
        link.finalize()
        return

    train(train_loader, val_loader, model, optimizer, lr_scheduler, last_iter+1, tb_logger)

    link.finalize()
def train(train_loader, val_loader, model, optimizer, lr_scheduler, start_iter, tb_logger):
    """Iteration-based training loop with distributed all-reduce of the
    loss/accuracy metrics, periodic logging, validation and checkpointing.

    The loader's sampler yields exactly max_iter batches, so a single pass
    over ``train_loader`` is the whole run (resumed from ``start_iter``).
    """
    global best_prec1

    batch_time = AverageMeter(config.print_freq)
    fw_time = AverageMeter(config.print_freq)
    bp_time = AverageMeter(config.print_freq)
    sy_time = AverageMeter(config.print_freq)
    step_time = AverageMeter(config.print_freq)
    data_time = AverageMeter(config.print_freq)
    losses = AverageMeter(config.print_freq)
    top1 = AverageMeter(config.print_freq)
    top5 = AverageMeter(config.print_freq)

    # switch to train mode
    model.train()

    world_size = link.get_world_size()
    rank = link.get_rank()

    logger = logging.getLogger('global_logger')

    end = time.time()

    label_smooth = config.get('label_smooth', 0.0)
    if label_smooth > 0:
        logger.info('using label_smooth: {}'.format(label_smooth))
        criterion = LabelSmoothCELoss(label_smooth, 1000)
    else:
        criterion = nn.CrossEntropyLoss()

    for i, (input, target) in enumerate(train_loader):
        curr_step = start_iter + i
        lr_scheduler.step(curr_step)
        current_lr = lr_scheduler.get_lr()[0]

        # measure data loading time
        data_time.update(time.time() - end)

        # transfer input to gpu
        target = target.cuda()
        input = input.cuda() if not args.fp16 else input.cuda().half()

        # forward
        output = model(input)
        # Loss is pre-divided by world_size because gradients are summed
        # (not averaged) across ranks in reduce_gradients/allreduce.
        loss = criterion(output, target) / world_size

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))

        reduced_loss = loss.clone()
        reduced_prec1 = prec1.clone() / world_size
        reduced_prec5 = prec5.clone() / world_size

        link.allreduce(reduced_loss)
        link.allreduce(reduced_prec1)
        link.allreduce(reduced_prec5)

        losses.update(reduced_loss.item())
        top1.update(reduced_prec1.item())
        top5.update(reduced_prec5.item())

        # backward
        optimizer.zero_grad()

        if isinstance(optimizer, FusedFP16SGD):
            optimizer.backward(loss)
            reduce_gradients(model, args.sync)
            optimizer.step()
        elif isinstance(optimizer, FP16SGD):
            def closure():
                # backward
                optimizer.backward(loss, False)
                # sync gradients
                reduce_gradients(model, args.sync)
                # check overflow, convert to fp32 grads, downscale
                optimizer.update_master_grads()
                return loss
            optimizer.step(closure)
        else:
            loss.backward()
            reduce_gradients(model, args.sync)
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)

        if curr_step % config.print_freq == 0 and rank == 0:
            tb_logger.add_scalar('loss_train', losses.avg, curr_step)
            tb_logger.add_scalar('acc1_train', top1.avg, curr_step)
            tb_logger.add_scalar('acc5_train', top5.avg, curr_step)
            tb_logger.add_scalar('lr', current_lr, curr_step)
            logger.info('Iter: [{0}/{1}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                        'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
                        'LR {lr:.4f}'.format(
                            curr_step, len(train_loader), batch_time=batch_time,
                            data_time=data_time, loss=losses, top1=top1, top5=top5, lr=current_lr))

        if curr_step > 0 and curr_step % config.val_freq == 0:
            val_loss, prec1, prec5 = validate(val_loader, model)

            # tb_logger is None on non-zero ranks.
            if not tb_logger is None:
                tb_logger.add_scalar('loss_val', val_loss, curr_step)
                tb_logger.add_scalar('acc1_val', prec1, curr_step)
                tb_logger.add_scalar('acc5_val', prec5, curr_step)

            if rank == 0:
                # remember best prec@1 and save checkpoint
                is_best = prec1 > best_prec1
                best_prec1 = max(prec1, best_prec1)
                save_checkpoint({
                    'step': curr_step,
                    'arch': config.model.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer' : optimizer.state_dict(),
                }, is_best, config.save_path+'/ckpt')

        end = time.time()
def validate(val_loader, model, fusion_list=None, fuse_prob=False):
    """Evaluate ``model`` (or an ensemble loaded from ``fusion_list``) on the
    validation loader and return ``(loss, top1, top5)`` averaged across all
    distributed ranks.

    When ``fusion_list`` is given, one model per checkpoint path is built and
    their outputs (optionally softmax-ed when ``fuse_prob``) are averaged.
    """
    batch_time = AverageMeter(0)
    losses = AverageMeter(0)
    top1 = AverageMeter(0)
    top5 = AverageMeter(0)

    # switch to evaluate mode
    if fusion_list is not None:
        model_list = []
        for i in range(len(fusion_list)):
            model_list.append(model_entry(config.model))
            model_list[i].cuda()
            model_list[i] = DistModule(model_list[i], args.sync)
            load_state(fusion_list[i], model_list[i])
            model_list[i].eval()
        if fuse_prob:
            softmax = nn.Softmax(dim=1)
    else:
        model.eval()

    rank = link.get_rank()
    world_size = link.get_world_size()

    logger = logging.getLogger('global_logger')

    criterion = nn.CrossEntropyLoss()

    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            input = input.cuda() if not args.fp16 else input.half().cuda()
            target = target.cuda()
            # compute output
            if fusion_list is not None:
                output_list = []
                for model_idx in range(len(fusion_list)):
                    output = model_list[model_idx](input)
                    if fuse_prob:
                        output = softmax(output)
                    output_list.append(output)
                output = torch.stack(output_list, 0)
                output = torch.mean(output, 0)
            else:
                output = model(input)

            # measure accuracy and record loss
            loss = criterion(output, target) #/ world_size ## loss should not be scaled here, it's reduced later!
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))

            num = input.size(0)
            losses.update(loss.item(), num)
            top1.update(prec1.item(), num)
            top5.update(prec5.item(), num)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % config.print_freq == 0 and rank == 0:
                logger.info('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time))

    # gather final results
    # Sample-count weighting: allreduce sums, then divide by the global count.
    total_num = torch.Tensor([losses.count])
    loss_sum = torch.Tensor([losses.avg*losses.count])
    top1_sum = torch.Tensor([top1.avg*top1.count])
    top5_sum = torch.Tensor([top5.avg*top5.count])
    link.allreduce(total_num)
    link.allreduce(loss_sum)
    link.allreduce(top1_sum)
    link.allreduce(top5_sum)
    final_loss = loss_sum.item()/total_num.item()
    final_top1 = top1_sum.item()/total_num.item()
    final_top5 = top5_sum.item()/total_num.item()

    if rank == 0:
        logger.info(' * Prec@1 {:.3f}\tPrec@5 {:.3f}\tLoss {:.3f}\ttotal_num={}'.format(final_top1, final_top5, final_loss, total_num.item()))

    # Restore training mode before returning to the training loop.
    model.train()

    return final_loss, final_top1, final_top5
# Entry point: run the distributed training/evaluation driver.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
128814 | <filename>models/Connection.py
from genie.testbed import load
from prettyprinter import pprint
class PYATSConnection:
    """Thin wrapper around a pyATS/Genie testbed: looks devices up by their
    ssh IP address and retrieves their learned running configuration."""

    def __init__(self):
        # The testbed inventory is loaded once per connection object.
        self.__testbed = load("./res/testbeds/testbed.yaml")

    def __find_device_by_ip(self, ip):
        """Return the testbed device whose ssh connection IP equals *ip*,
        or None when no device matches."""
        for device in self.__testbed.devices.values():
            if ip == str(device.connections['ssh']['ip']):
                return device
        return None

    def get_device_running_config(self, ip):
        """Connect to the device at *ip* and return its learned config
        (``device.learn('config')``); return None when the IP is unknown.
        """
        device = self.__find_device_by_ip(ip)
        if device is None:
            return None
        # BUG FIX: removed leftover debug output (``pprint(type(device))``)
        # that polluted stdout on every call.
        device.connect(init_config_commands=[])
        return device.learn('config')
| StarcoderdataPython |
3279567 | #coding:utf-8
import argparse
import os
import numpy as np
import paddlehub as hub
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.optimizer import AdamOptimizer
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=1, help="Number of epoches for fine-tuning.")
parser.add_argument("--batch_size", type=int, default=16, help="Total examples' number in batch for training.")
parser.add_argument("--log_interval", type=int, default=10, help="log interval.")
parser.add_argument("--save_interval", type=int, default=10, help="save interval.")
parser.add_argument("--checkpoint_dir", type=str, default="paddlehub_finetune_ckpt_dygraph", help="Path to save log data.")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
# yapf: enable.
class TransformerClassifier(fluid.dygraph.Layer):
    """Sentence classifier head: transformer pooled output -> dropout ->
    fully-connected layer -> softmax probabilities."""

    def __init__(self, num_classes, transformer):
        super(TransformerClassifier, self).__init__()
        self.num_classes = num_classes
        self.transformer = transformer
        # 768 is the hidden size of the ERNIE/BERT-base pooled output.
        self.fc = Linear(input_dim=768, output_dim=num_classes)

    def forward(self, input_ids, position_ids, segment_ids, input_mask):
        """Return class probabilities of shape [batch, num_classes]."""
        result = self.transformer(input_ids, position_ids, segment_ids,
                                  input_mask)
        # "upscale_in_train" keeps inference untouched and rescales at train time.
        cls_feats = fluid.layers.dropout(
            result['pooled_output'],
            dropout_prob=0.1,
            dropout_implementation="upscale_in_train")
        cls_feats = fluid.layers.reshape(cls_feats, shape=[-1, 768])
        pred = self.fc(cls_feats)
        return fluid.layers.softmax(pred)
def finetune(args):
    """Fine-tune an ERNIE classifier on ChnSentiCorp in Paddle dygraph mode.

    Loads the pretrained module and matching tokenizer, optionally resumes
    from a saved state dict, then runs a simple epoch/batch training loop
    with periodic logging (every ``log_interval`` batches) and checkpoint
    saving (every ``save_interval`` batches).
    """
    module = hub.Module(name="ernie", max_seq_len=args.max_seq_len)
    # Use the appropriate tokenizer to preprocess the data set
    # For ernie_tiny, it will do word segmentation to get subword. More details: https://www.jiqizhixin.com/articles/2019-11-06-9
    if module.name == "ernie_tiny":
        tokenizer = hub.ErnieTinyTokenizer(
            vocab_file=module.get_vocab_path(),
            spm_path=module.get_spm_path(),
            word_dict_path=module.get_word_dict_path(),
        )
    else:
        tokenizer = hub.BertTokenizer(vocab_file=module.get_vocab_path())

    dataset = hub.dataset.ChnSentiCorp(
        tokenizer=tokenizer, max_seq_len=args.max_seq_len)

    with fluid.dygraph.guard():
        tc = TransformerClassifier(
            num_classes=dataset.num_labels, transformer=module)
        adam = AdamOptimizer(learning_rate=1e-5, parameter_list=tc.parameters())

        # Resume from a previous checkpoint when one exists.
        state_dict_path = os.path.join(args.checkpoint_dir,
                                       'dygraph_state_dict')
        if os.path.exists(state_dict_path + '.pdparams'):
            state_dict, _ = fluid.load_dygraph(state_dict_path)
            tc.load_dict(state_dict)

        loss_sum = acc_sum = cnt = 0
        for epoch in range(args.num_epoch):
            for batch_id, data in enumerate(
                    dataset.batch_records_generator(
                        phase="train",
                        batch_size=args.batch_size,
                        shuffle=True,
                        pad_to_batch_max_seq_len=False)):
                batch_size = len(data["input_ids"])
                # Reshape each field to [batch, seq_len, 1] as the dygraph
                # transformer expects.
                input_ids = np.array(data["input_ids"]).astype(
                    np.int64).reshape([batch_size, -1, 1])
                position_ids = np.array(data["position_ids"]).astype(
                    np.int64).reshape([batch_size, -1, 1])
                segment_ids = np.array(data["segment_ids"]).astype(
                    np.int64).reshape([batch_size, -1, 1])
                input_mask = np.array(data["input_mask"]).astype(
                    np.float32).reshape([batch_size, -1, 1])
                labels = np.array(data["label"]).astype(np.int64).reshape(
                    [batch_size, 1])

                pred = tc(input_ids, position_ids, segment_ids, input_mask)

                acc = fluid.layers.accuracy(pred, to_variable(labels))
                loss = fluid.layers.cross_entropy(pred, to_variable(labels))
                avg_loss = fluid.layers.mean(loss)

                avg_loss.backward()
                adam.minimize(avg_loss)

                # Running sums for windowed logging; reset after each report.
                loss_sum += avg_loss.numpy() * labels.shape[0]
                acc_sum += acc.numpy() * labels.shape[0]
                cnt += labels.shape[0]
                if batch_id % args.log_interval == 0:
                    print('epoch {}: loss {}, acc {}'.format(
                        epoch, loss_sum / cnt, acc_sum / cnt))
                    loss_sum = acc_sum = cnt = 0

                if batch_id % args.save_interval == 0:
                    state_dict = tc.state_dict()
                    fluid.save_dygraph(state_dict, state_dict_path)
if __name__ == "__main__":
args = parser.parse_args()
finetune(args)
| StarcoderdataPython |
127711 | <reponame>jgayfer/Spirit<gh_stars>10-100
from discord.ext import commands
import discord
import asyncio
from cogs.utils import constants
from cogs.utils.message_manager import MessageManager
class Core:
    """Core functionality required for the bot to function"""

    def __init__(self, bot):
        self.bot = bot

    async def on_ready(self):
        # Runs when the bot has connected: show the startup banner and
        # reconcile guilds added/removed while the bot was offline.
        self.display_startup_info()
        self.add_remove_offline_guilds()

    async def on_member_remove(self, user):
        """Remove user from database when they leave the guild"""
        # Only delete the user when they are no longer a member of ANY
        # guild the bot can see.
        member_ids = []
        for member in self.bot.get_all_members():
            member_ids.append(member.id)
        if user.id not in member_ids:
            self.bot.db.remove_user(user.id)

    async def on_command_error(self, ctx, error):
        """Command error handler"""
        manager = MessageManager(ctx)

        # Benign errors are swallowed silently; user-facing problems get a
        # reply; anything unexpected is re-raised so it is not hidden.
        if isinstance(error, commands.CommandNotFound):
            pass

        elif isinstance(error, commands.MissingRequiredArgument):
            pass

        elif isinstance(error, commands.NotOwner):
            pass

        elif isinstance(error, commands.NoPrivateMessage):
            await manager.send_message("You can't use that command in a private message")

        elif isinstance(error, commands.CheckFailure):
            await manager.send_message("You don't have the required permissions to do that")

        elif isinstance(error, commands.CommandOnCooldown):
            await manager.send_message(error)

        # Non Discord.py errors
        elif isinstance(error, commands.CommandInvokeError):
            if isinstance(error.original, discord.errors.Forbidden):
                pass
            elif isinstance(error.original, asyncio.TimeoutError):
                await manager.send_private_message("I'm not sure where you went. We can try this again later.")
            else:
                raise error

        else:
            raise error

        await manager.clean_messages()

    def add_remove_offline_guilds(self):
        """Add/remove guilds that may have added/removed the bot while it was offline"""
        to_delete = []
        results = self.bot.db.get_guilds()
        for row in results:
            guild_id = row.get('guild_id')
            guild = self.bot.get_guild(guild_id)
            if not guild:
                to_delete.append(guild_id)
        for guild_id in to_delete:
            self.bot.db.remove_guild(guild_id)

    def display_startup_info(self):
        # Console banner shown once on connect.
        print('Spirit v{}'.format(constants.VERSION))
        print('Username: {}'.format(self.bot.user.name))
        print('------')
| StarcoderdataPython |
168591 | <gh_stars>100-1000
class Solution:
    def matrixScore(self, A):
        """Greedy LC-861: flip rows so every leading bit is 1 (that term is
        ``rows * 2**(cols-1)``), then for each later column count how many
        bits differ from the row's leading bit and take the better of
        flipping or keeping that column.

        :type A: List[List[int]]
        :rtype: int
        """
        if not A or not A[0]:
            return 0
        rows, cols = len(A), len(A[0])
        total = rows * 2 ** (cols - 1)
        for col in range(1, cols):
            differing = sum(row[0] ^ row[col] for row in A)
            total += max(differing, rows - differing) * 2 ** (cols - col - 1)
        return total
class Solution:
    def matrixScore(self, A):
        """Same greedy as above, but folding the leading column into the
        main loop (for col 0 the mismatch count is 0, so the max term is
        simply the row count)."""
        if not A or not A[0]:
            return 0
        height, width = len(A), len(A[0])
        score = 0
        for col in range(width):
            mismatches = sum(row[0] ^ row[col] for row in A)
            score += max(mismatches, height - mismatches) * 2 ** (width - col - 1)
        return score
class Solution:
    def matrixScore(self, A):
        """Greedy via bit shifts: the leading column contributes the full
        ``height << (width-1)``; each later column counts agreement with
        column 0 and keeps the majority side.

        :type A: List[List[int]]
        :rtype: int
        """
        height = len(A)
        width = len(A[0])
        result = height << (width - 1)
        for col in range(1, width):
            same = sum(row[col] == row[0] for row in A)
            result += max(same, height - same) << (width - 1 - col)
        return result
class Solution:
    def matrixScore(self, A: List[List[int]]) -> int:
        """Greedy with explicit in-place flips (mutates ``A``): first flip
        every row whose leading bit is 0, then walk the remaining columns."""
        for r in range(len(A)):
            if A[r][0] == 0:
                self.flip_row(A, r)
        return self.dfs(A, 1)

    def dfs(self, a, j):
        """From column ``j`` on, flip any column where ones are the
        minority, then sum the rows interpreted as binary numbers."""
        if j == len(a[0]):
            total = 0
            for row in a:
                total += int("".join(str(bit) for bit in row), 2)
            return total
        ones = sum(1 for r in range(len(a)) if a[r][j])
        if ones < (len(a) + 1) // 2:
            self.flip_col(a, j)
        return self.dfs(a, j + 1)

    def flip_row(self, a, i):
        """Invert every bit of row ``i`` in place."""
        for c in range(len(a[0])):
            a[i][c] = 0 if a[i][c] else 1

    def flip_col(self, a, j):
        """Invert every bit of column ``j`` in place."""
        for r in range(len(a)):
            a[r][j] = 0 if a[r][j] else 1
1659071 | <reponame>sys-bio/rrplugins
from teplugins import *
# Python 2 demo script: generate test data with the tel_test_model plugin,
# then feed it to the chi-square plugin and report the (reduced) chi-square.
try:
    modelPlugin = Plugin("tel_test_model")
    noisePlugin = Plugin("tel_add_noise")
    chiSquarePlugin = Plugin("tel_chisquare")

    #Generate internal test data
    modelPlugin.execute()
    modelData = modelPlugin.TestData
    expData = modelPlugin.TestDataWithNoise

    chiSquarePlugin.ExperimentalData = expData
    chiSquarePlugin.ModelData = modelData
    chiSquarePlugin.NrOfModelParameters = 1

    chiSquarePlugin.execute()

    chi = chiSquarePlugin.ChiSquare
    reduced_chi = chiSquarePlugin.ReducedChiSquare

    # Python 2 syntax: ``print`` statement and backtick repr().
    print 'ChiSquare is: ' + `chi`
    print 'Reduced ChiSquare is: ' + `reduced_chi`

except Exception as e:
    print 'Problem: ' + `e`
3256301 | <gh_stars>1-10
import requests
import sys
class app:
    """Minimal wrapper around a Discord API request used to probe a token."""

    def __init__(self, token):
        self.token = token
        # The raw token string is sent as the Authorization header.
        self.headers = {'Authorization': token}

    def execute(self):
        # Hits a guild-members endpoint (guild id 0); presumably the HTTP
        # status of the response indicates whether the token is accepted --
        # verify against the Discord API documentation.
        return requests.get('https://discord.com/api/v6/guilds/0/members', headers=self.headers)
def main():
    """Prompt for a Discord token, probe the API with it and report the result.

    BUG FIX: the original discarded the return value of ``cred.execute()``,
    so the script produced no output at all; print the HTTP status code so
    the user actually learns the outcome of the probe.
    """
    print("Paste your token :")
    token = input()
    cred = app(token)
    response = cred.execute()
    print("HTTP status: {}".format(response.status_code))


if __name__ == '__main__':
    main()
3294898 | import sys,os
from . import config
def main(args):
    """Run an MS-GF+ peptide search and convert the result to TSV.

    Python 2 script. ``args`` must provide: MS_fin (input .mzML), MS_db
    (FASTA database), outdir (output .mzid path), java_path and MSGF_path.
    """
    MS_fin=args.MS_fin#LCL.mzML/Subject3_rep3_021213_Fx100mM.mzML
    MS_db=args.MS_db #~/flashscratch/LCL/all_jp/tmp/proteome_ref_merged.fa
    outdir=args.outdir
    # Search: 5 ppm tolerance, target-decoy (-tda 1), peptide length 7-13.
    cmd1= args.java_path+' -Xmx8g -jar '+args.MSGF_path+' -s '+MS_fin+' -d '+MS_db+' -e 0 -tda 1 -maxLength 13 -minLength 7 -inst 1 -t 5ppm -o '+outdir
    print cmd1
    os.system(cmd1)
    # Convert the .mzid search output into a TSV report.
    cmd2= args.java_path+' -Xmx8g -cp '+args.MSGF_path+' edu.ucsd.msjava.ui.MzIDToTsv -i '+outdir
    print cmd2
    os.system(cmd2)
if __name__ == '__main__':
    # BUG FIX: ``main`` requires an argparse-style namespace but was invoked
    # with no arguments, which raises TypeError immediately. Build the
    # namespace from the command line, matching the attributes main() reads.
    import argparse
    parser = argparse.ArgumentParser(description='Run MS-GF+ on a mzML file')
    parser.add_argument('MS_fin', help='input spectra file (.mzML)')
    parser.add_argument('MS_db', help='protein database (FASTA)')
    parser.add_argument('outdir', help='output .mzid path for MS-GF+')
    parser.add_argument('--java_path', default='java', help='java executable')
    parser.add_argument('--MSGF_path', default='MSGFPlus.jar', help='path to MSGFPlus.jar')
    main(parser.parse_args())
47847 | <gh_stars>0
from libra_client.lbrtypes.event import EventHandle
from libra_client.canoser import Struct, Uint64
from libra_client.move_core_types.move_resource import MoveResource
class CredentialResource(Struct, MoveResource):
    """Layout of the on-chain ``DualAttestation::Credential`` Move resource."""
    MODULE_NAME = "DualAttestation"
    STRUCT_NAME = "Credential"
    # Field order and types must match the Move struct exactly, since Struct
    # deserializes positionally.
    _fields = [
        ("human_name", str),
        ("base_url", str),
        ("compliance_public_key", bytes),
        ("expiration_date", Uint64),  # presumably a chain timestamp -- verify against the Move source
        ("compliance_key_rotation_events", EventHandle),
        ("base_url_rotation_events", EventHandle),
    ]
24722 | from .validator import (
And,
Attr,
Chain,
Const,
Contains,
ExcMax,
ExcMin,
Float,
Macro,
Max,
Min,
MultipleOf,
Not,
Or,
Pattern,
Proto,
Type,
Validator,
Xor,
)
__all__ = [
"And",
"Attr",
"Chain",
"Const",
"Validator",
"Contains",
"ExcMax",
"ExcMin",
"Float",
"Macro",
"Max",
"Min",
"MultipleOf",
"Not",
"Or",
"Pattern",
"Proto",
"Type",
"Xor",
]
| StarcoderdataPython |
1621467 | <filename>experiments/project_generators/histogram_per_query_generate_500_dbpedia_cleaned_full_nindices_1.py
import json
base_raw = """
{
"name": "histogram-generate-per-query-%d-500-dbpedia-cleaned-full-nindices-1",
"description": "Generate histogram/counters for index and table usage, repetition will be 5 times (generate the stats per query), using dbpedia database, cleaned of crashing queries, run all queries",
"github_url": "https://github.com/pjotrscholtze/trident.git",
"github_checkout": "master",
"script": [
"#!/bin/bash -e",
"#SBATCH -t 15:00 -N 1 -n 8 --mem=64000M",
"#SBATCH -p longq",
"#SBATCH --output=$PROJECT_PATH/slurm_%j.out",
"du -h -d0 $DATABASE_PATH/dbpedia-times-details-nindices-1",
"__REPLACED_BELOW__"
]
}
"""
# Instantiate the template once per query chunk (0..9127) and write the
# whole list of project definitions as a single pretty-printed JSON file.
res = []
for chunk_idx in range(9128):
    project = json.loads(base_raw)
    project["name"] = project["name"] % chunk_idx
    project["script"][5] = "$BUILD_CACHE_PATH/trident/trident benchmark -i $DATABASE_PATH/dbpedia-times-details-nindices-1 --query_type query_native --query_file $BUILD_CACHE_PATH/trident/experiments/queries_full_small_chunks/query_chunk_%d.sparql --results_file $PROJECT_PATH/res.json.lines --histogram_mode generate --histogram_file $PROJECT_PATH/temp.json --repetitions 1" % chunk_idx
    res.append(project)

with open("projects/histogram-generate-per-query-500-dbpedia-cleaned-full-nindices-1.json", "w") as f:
    f.write(json.dumps(res, indent=2))
3293677 | <reponame>nicorellius/pdxpixel
import logging
from django.shortcuts import render, HttpResponseRedirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.views.generic.base import View
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from core.receivers import on_user_logged_out
from core.util import get_timestamp
from .forms import LoginForm
from .models import UserProfile
logger = logging.getLogger(__name__)
class LoginView(View):
    """Render the login form (GET) and authenticate the user (POST),
    redirecting staff to the admin and everyone else to the blog."""
    form_class = LoginForm
    template_name = 'registration/login.html'
    success_url = '/blog/'
    success_url_staff = '/admin/'
    context_object_name = 'form'

    def get(self, request):
        """Show an empty login form."""
        form = self.form_class()
        return render(request, self.template_name, {
            'form': form
        })

    def post(self, request):
        """Validate credentials; on success log in and redirect, otherwise
        re-render the form with an error message."""
        error = ''
        form = self.form_class(request.POST)

        if form.is_valid():
            cd = form.cleaned_data
            user = authenticate(username=cd['username'],
                                password=cd['password'])
            if user is not None:
                if user.is_active:
                    login(request, user)
                    messages.add_message(
                        request,
                        messages.INFO,
                        'User {0} successfully logged in.'.format(user)
                    )
                    logger.info(
                        '[{0}] POST user logged in: {1}'.format(
                            get_timestamp(), request.user)
                    )
                    # TODO -- why is this not used rather than above message.
                    # messages.success(request, 'logged in successfully')
                    if user.is_staff:
                        return HttpResponseRedirect(self.success_url_staff)
                    return HttpResponseRedirect(self.success_url)
                else:
                    error = "User is valid, but not active. " \
                            "Contact the administrator to enable your account."
            else:
                error = "Incorrect username or password. Please try again."

        return render(request, self.template_name, {
            'form': form,
            'error': error,
        })
class LogoutView(View):
    """Log the user out and render the logged-out page."""
    template_name = 'registration/logged_out.html'

    def get(self, request):
        logger.info('[{0}] {1}'.format(
            get_timestamp(), on_user_logged_out)
        )
        # BUG FIX: django.contrib.auth.logout() returns None; the original
        # captured that None and passed it to render() as the request
        # object. Log out first, then render with the real request.
        logout(request)
        return render(request, self.template_name)
class ProfileView(View):
    """Show the profile page of the authenticated user."""
    model = UserProfile
    template_name = 'accounts/profile.html'

    @method_decorator(login_required)
    def get(self, request):
        """Render the user's profile; 404 when no UserProfile row exists."""
        if request.user.username:
            profile = get_object_or_404(self.model, user=request.user)
            data_dict = {
                'profile': profile,
            }
            return render(request, self.template_name, data_dict)
        # NOTE(review): when ``request.user.username`` is empty this method
        # falls through and returns None (an invalid HTTP response) --
        # confirm whether that branch is reachable behind @login_required.
| StarcoderdataPython |
3316849 | <gh_stars>0
from sys import argv
from os.path import exists
# Copy one file to another, asking for confirmation first.
# Usage: python script.py <from_file> <to_file>
script, from_file, to_file = argv

print(f"Copying from{from_file} to {to_file}")

# BUG FIX: both files were opened without context managers and only closed
# at the end of the script, so an exception (or the user aborting at the
# prompt) leaked the handles. ``with`` guarantees closure on every path.
with open(from_file) as in_file:
    indata = in_file.read()

print(f"The input file is {len(indata)} bytes long")

print(f"Does the output file exist?{exists(to_file)}")
print("Ready,hit RETURN to continue, CTRL-C to abort")
input()

with open(to_file, 'w') as out_file:
    out_file.write(indata)

print("Alright,all done")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.