content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from flask import Blueprint
from .views import *
import os, sys
import yaml
app_name = __name__
directory = app_name[:app_name.rfind('.', 0, app_name.rfind('/'))]
exec(
'{0} = Blueprint("{0}", __name__)'.format(directory)
)
urls_source_path = ''
if not os.path.isfile(os.path.join(os.path.dirname(__file__), 'urls.yml')):
if not os.path.isfile(os.path.join(os.path.dirname(__file__), 'urls.yaml')):
raise FileNotFoundError("'urls' yaml configuration file not exist in current directory.")
else: urls_source_path = os.path.join(os.path.dirname(__file__), 'urls.yaml')
else: urls_source_path = os.path.join(os.path.dirname(__file__), 'urls.yml')
with open(urls_source_path) as source:
urls_setting = yaml.load(source, Loader=yaml.FullLoader)
for endpoint, details in urls_setting.items():
if details.get('options') == None:
exec(
"{0}.add_url_rule('{rule}', '{endpoint}', {view})".format(directory, rule=details['rule'], endpoint=endpoint, view=details['view'])
)
else:
exec(
"{0}.add_url_rule('{rule}', '{endpoint}', {view}, **{options})".format(directory, rule=details['rule'], endpoint=endpoint, view=details['view'], options=details['options'])
)
| [
6738,
42903,
1330,
39932,
198,
6738,
764,
33571,
1330,
1635,
198,
11748,
28686,
11,
25064,
198,
11748,
331,
43695,
198,
198,
1324,
62,
3672,
796,
11593,
3672,
834,
198,
198,
34945,
796,
598,
62,
3672,
58,
25,
1324,
62,
3672,
13,
81,
... | 2.344322 | 546 |
'''
Run bilingual lexicon induction in a given language to and from English and
get mean of mean cosine similarities as performance metric. Feel free to use
custom input vector files or our defaults. To use our defaults, make sure you
have run either translationftdatagen.py or translationmusedatagen.py first,
depending on which model you'd like to use for your embeddings, so that you
have input vector files that conform to our naming conventions.
'''
# imports
import fasttext
import io
import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
import numpy as np
from sklearn.linear_model import LinearRegression
import argparse
# load a file "sheet" of input vectors
# cosine similarity between prediction vectors and ground-truth vectors
# processing command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--target_train_to_file",
help = "name of file with target language train vectors for given language to English",
default = "data/spanish_to_english_spanish_translation_ft_vecs_train.txt")
parser.add_argument("--english_train_to_file",
help = "name of file with English train vectors for given language to English",
default = "data/spanish_to_english_spanish_translation_ft_vecs_train.txt")
parser.add_argument("--target_train_from_file",
help = "name of file with target language train vectors for given language from English",
default = "data/spanish_from_english_spanish_translation_ft_vecs_train.txt")
parser.add_argument("--english_train_from_file",
help = "name of file with English train vectors for given language from English",
default = "data/spanish_from_english_spanish_translation_ft_vecs_train.txt")
parser.add_argument("--target_test_to_file",
help = "name of file with target language test vectors for given language to English",
default = "data/spanish_to_english_spanish_translation_ft_vecs_test.txt")
parser.add_argument("--english_test_to_file",
help = "name of file with English test vectors for given language to English",
default = "data/spanish_to_english_spanish_translation_ft_vecs_test.txt")
parser.add_argument("--target_test_from_file",
help = "name of file with target language test vectors for given language from English",
default = "data/spanish_from_english_spanish_translation_ft_vecs_test.txt")
parser.add_argument("--english_test_from_file",
help = "name of file with English test vectors for given language from English",
default = "data/spanish_from_english_spanish_translation_ft_vecs_test.txt")
parser.add_argument("--train_size",
help = "number of entries in training data",
default = "5000")
parser.add_argument("--test_size",
help = "number of entries in testing data",
default = "1500")
parser.add_argument("--language",
help = "name of language that the model/words correspond to, "
+ "will be used to name output files",
default = "spanish")
parser.add_argument("--model_name",
help = "name of embedding model, "
+ "will be used to name output files",
default = "ft")
args = parser.parse_args()
# load data files
from_train_x = load(args.english_train_from_file, int(args.train_size))
from_train_y = load(args.target_train_from_file, int(args.train_size))
from_test_x = load(args.english_test_from_file, int(args.test_size))
from_test_y = load(args.target_test_from_file, int(args.test_size))
to_train_x = load(args.target_train_to_file, int(args.train_size))
to_train_y = load(args.english_train_to_file, int(args.train_size))
to_test_x = load(args.target_test_to_file, int(args.test_size))
to_test_y = load(args.english_test_to_file, int(args.test_size))
# train and run model from English
from_model = LinearRegression()
from_model.fit(from_train_x, from_train_y)
from_predictions = from_model.predict(from_test_x)
from_csim = csim(from_predictions, from_test_y)
to_model = LinearRegression()
to_model.fit(to_train_x, to_train_y)
to_predictions = to_model.predict(to_test_x)
to_csim = csim(to_predictions, to_test_y)
with open(args.language + "_" + args.model_name + ".txt", "w") as o:
o.write("From: " + str(from_csim) + "\n")
o.write("To: " + str(to_csim) + "\n")
| [
7061,
6,
198,
10987,
48116,
31191,
4749,
28471,
287,
257,
1813,
3303,
284,
290,
422,
3594,
290,
198,
1136,
1612,
286,
1612,
8615,
500,
20594,
355,
2854,
18663,
13,
18571,
1479,
284,
779,
198,
23144,
5128,
15879,
3696,
393,
674,
26235,
... | 3.051116 | 1,389 |
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""models module tests."""
import tests.appenginesdk
import mox
import stubout
from google.appengine.ext import testbed
from google.apputils import app
from google.apputils import basetest
from simian.mac.models import base as models
class ModelsModuleTest(mox.MoxTestBase):
"""Test module level portions of models."""
def testPut(self):
"""Test put()."""
mock_func = self.mox.CreateMockAnything()
b = _BaseModel()
if type(__builtins__) is dict:
# __builtins__ is a dict under setuptools + python. ???
self.mox.StubOutWithMock(models.db.Model, 'put')
models.db.Model.put().AndReturn(None)
else:
self.mox.StubOutWithMock(__builtins__, 'super')
__builtins__.super(models.BaseModel, b).AndReturn(mock_func)
mock_func.put().AndReturn(None)
self.mox.ReplayAll()
b.put()
self.mox.VerifyAll()
def testBaseModelDeleteMemcacheWrap(self):
"""Test BaseModel.DeleteMemcacheWrap()."""
self.mox.StubOutWithMock(models, 'memcache', True)
memcache_key = 'mwg_BaseModel_key'
models.memcache.delete(memcache_key).AndReturn(None)
prop_name = 'foo_name'
memcache_key_with_prop_name = 'mwgpn_BaseModel_key_%s' % prop_name
models.memcache.delete(memcache_key_with_prop_name).AndReturn(None)
self.mox.ReplayAll()
models.BaseModel.DeleteMemcacheWrap('key')
models.BaseModel.DeleteMemcacheWrap('key', prop_name=prop_name)
self.mox.VerifyAll()
def testBaseModelResetMemcacheWrap(self):
"""Test BaseModel.ResetMemcacheWrap()."""
self.mox.StubOutWithMock(models.BaseModel, 'DeleteMemcacheWrap', True)
self.mox.StubOutWithMock(models.BaseModel, 'MemcacheWrappedGet', True)
key_name = 'mwg_BaseModel_key'
prop_name = 'foo_name'
models.BaseModel.DeleteMemcacheWrap(
key_name, prop_name=prop_name).AndReturn(None)
models.BaseModel.MemcacheWrappedGet(
key_name, prop_name=prop_name, memcache_secs=10).AndReturn(None)
self.mox.ReplayAll()
models.BaseModel.ResetMemcacheWrap(
key_name, prop_name=prop_name, memcache_secs=10)
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGet(self):
"""Test BaseModel.MemcacheWrappedGet() when not cached."""
key_name = 'foo_key_name'
memcache_key_name = 'mwg_%s_%s' % (models.BaseModel.kind(), key_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_to_protobuf', True)
mock_entity = self.mox.CreateMockAnything()
models.db.model_to_protobuf(mock_entity).AndReturn(mock_entity) # cheat
mock_entity.SerializeToString().AndReturn('serialized')
models.memcache.get(memcache_key_name).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(mock_entity)
models.memcache.set(
memcache_key_name, 'serialized', models.MEMCACHE_SECS).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
mock_entity, models.BaseModel.MemcacheWrappedGet(key_name))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWhenMemcacheSetFail(self):
"""Test BaseModel.MemcacheWrappedGet() when not cached."""
key_name = 'foo_key_name'
memcache_key_name = 'mwg_%s_%s' % (models.BaseModel.kind(), key_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_to_protobuf', True)
mock_entity = self.mox.CreateMockAnything()
models.db.model_to_protobuf(mock_entity).AndReturn(mock_entity) # cheat
mock_entity.SerializeToString().AndReturn('serialized')
models.memcache.get(memcache_key_name).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(mock_entity)
models.memcache.set(
memcache_key_name, 'serialized',
models.MEMCACHE_SECS).AndRaise(ValueError)
self.mox.ReplayAll()
self.assertEqual(
mock_entity, models.BaseModel.MemcacheWrappedGet(key_name))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWhenCached(self):
"""Test BaseModel.MemcacheWrappedGet() when cached."""
key_name = 'foo_key_name'
memcache_key_name = 'mwg_%s_%s' % (models.BaseModel.kind(), key_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_from_protobuf', True)
mock_entity = self.mox.CreateMockAnything()
models.memcache.get(memcache_key_name).AndReturn('serialized')
models.db.model_from_protobuf('serialized').AndReturn(mock_entity)
self.mox.ReplayAll()
self.assertEqual(
mock_entity, models.BaseModel.MemcacheWrappedGet(key_name))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWhenCachedBadSerialization(self):
"""Test BaseModel.MemcacheWrappedGet() when cached."""
key_name = 'foo_key_name'
memcache_key = 'mwg_%s_%s' % (models.BaseModel.kind(), key_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_from_protobuf', True)
models.memcache.get(memcache_key).AndReturn('serialized')
models.db.model_from_protobuf('serialized').AndRaise(
ProtocolBufferDecodeError)
models.memcache.delete(memcache_key).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
None, models.BaseModel.MemcacheWrappedGet(key_name, retry=True))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWhenCachedBadSerializationUnexpected(self):
"""Test BaseModel.MemcacheWrappedGet() when cached."""
key_name = 'foo_key_name'
memcache_key = 'mwg_%s_%s' % (models.BaseModel.kind(), key_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_from_protobuf', True)
models.memcache.get(memcache_key).AndReturn('serialized')
models.db.model_from_protobuf('serialized').AndRaise(Exception)
models.memcache.delete(memcache_key).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
None, models.BaseModel.MemcacheWrappedGet(key_name, retry=True))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWhenCachedPropName(self):
"""Test BaseModel.MemcacheWrappedGet() when cached."""
key_name = 'foo_key_name'
prop_name = 'prop'
memcache_key = 'mwgpn_%s_%s_%s' % (
models.BaseModel.kind(), key_name, prop_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_from_protobuf', True)
models.memcache.get(memcache_key).AndReturn('value')
self.mox.ReplayAll()
self.assertEqual(
'value', models.BaseModel.MemcacheWrappedGet(key_name, prop_name))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetNoEntity(self):
"""Test BaseModel.MemcacheWrappedGet() when entity does not exist."""
key_name = 'foo_key_name'
memcache_key = 'mwg_%s_%s' % (models.BaseModel.kind(), key_name)
self.mox.StubOutWithMock(models, 'memcache', self.mox.CreateMockAnything())
self.mox.StubOutWithMock(
models.BaseModel, 'get_by_key_name', self.mox.CreateMockAnything())
models.memcache.get(memcache_key).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
None, models.BaseModel.MemcacheWrappedGet(key_name))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWithPropName(self):
"""Test BaseModel.MemcacheWrappedGet() for particular property."""
value = 'good value'
prop_name = 'blah_value'
key_name = 'foo_key_name'
memcache_key = 'mwgpn_%s_%s_%s' % (
models.BaseModel.kind(), key_name, prop_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
self.mox.StubOutWithMock(models.db, 'model_to_protobuf', True)
mock_entity = self.mox.CreateMockAnything()
setattr(mock_entity, prop_name, value)
models.memcache.get(memcache_key).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(mock_entity)
models.memcache.set(
memcache_key, value, models.MEMCACHE_SECS).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
value, models.BaseModel.MemcacheWrappedGet(key_name, prop_name))
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetWithNonExistentPropName(self):
"""Test BaseModel.MemcacheWrappedGet() for non-existent property."""
prop_name = 'bad_prop'
key_name = 'foo_key_name'
memcache_key = 'mwgpn_%s_%s_%s' % (
models.BaseModel.kind(), key_name, prop_name)
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name', True)
mock_entity = object()
self.assertFalse(hasattr(mock_entity, prop_name))
models.memcache.get(memcache_key).AndReturn(None)
models.BaseModel.get_by_key_name(key_name).AndReturn(mock_entity)
self.mox.ReplayAll()
self.assertEqual(
None, models.BaseModel.MemcacheWrappedGet(key_name, prop_name))
self.mox.VerifyAll()
def testMemcacheWrappedSet(self):
"""Test BaseModel.MemcacheWrappedSet()."""
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'kind')
self.mox.StubOutWithMock(models.BaseModel, 'get_or_insert')
self.mox.StubOutWithMock(models.db, 'model_to_protobuf', True)
mock_entity = self.mox.CreateMockAnything()
memcache_entity_key = 'mwg_kind_key'
memcache_key = 'mwgpn_kind_key_prop'
models.BaseModel.kind().AndReturn('kind')
models.BaseModel.kind().AndReturn('kind')
models.BaseModel.get_or_insert('key').AndReturn(mock_entity)
mock_entity.put()
models.db.model_to_protobuf(mock_entity).AndReturn(mock_entity) # cheat
mock_entity.SerializeToString().AndReturn('serialized')
models.memcache.set(
memcache_key, 'value', models.MEMCACHE_SECS).AndReturn(None)
models.memcache.set(
memcache_entity_key, 'serialized',
models.MEMCACHE_SECS).AndReturn(None)
self.mox.ReplayAll()
models.BaseModel.MemcacheWrappedSet('key', 'prop', 'value')
self.mox.VerifyAll()
def testMemcacheWrappedDeleteWhenKeyName(self):
"""Test BaseModel.MemcacheWrappedDelete() when key_name supplied."""
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'kind')
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name')
entity = self.mox.CreateMockAnything()
memcache_key = 'mwg_kind_key'
models.BaseModel.get_by_key_name('key').AndReturn(entity)
entity.delete().AndReturn(None)
models.BaseModel.kind().AndReturn('kind')
models.memcache.delete(memcache_key).AndReturn(None)
self.mox.ReplayAll()
models.BaseModel.MemcacheWrappedDelete(key_name='key')
self.mox.VerifyAll()
def testMemcacheWrappedDeleteWhenEntity(self):
"""Test BaseModel.MemcacheWrappedDelete() when entity supplied."""
self.mox.StubOutWithMock(models, 'memcache', True)
self.mox.StubOutWithMock(models.BaseModel, 'kind')
self.mox.StubOutWithMock(models.BaseModel, 'get_by_key_name')
entity = self.mox.CreateMockAnything()
entity.key().AndReturn(entity)
entity.name().AndReturn('key')
memcache_key = 'mwg_kind_key'
entity.delete().AndReturn(None)
models.BaseModel.kind().AndReturn('kind')
models.memcache.delete(memcache_key).AndReturn(None)
self.mox.ReplayAll()
models.BaseModel.MemcacheWrappedDelete(entity=entity)
self.mox.VerifyAll()
def testBaseModelMemcacheWrappedGetAllFilter(self):
"""Test BaseModel.MemcacheWrappedGetAllFilter()."""
filters = (('foo =', 'bar'), ('one =', 1))
filter_str = '_foo =,bar_|_one =,1_'
memcache_key = 'mwgaf_%s%s' % (models.BaseModel.kind(), filter_str)
entities = ['the', 'entities']
self.mox.StubOutWithMock(models, 'memcache', self.mox.CreateMockAnything())
self.mox.StubOutWithMock(models.BaseModel, 'all')
mock_query = self.mox.CreateMockAnything()
models.memcache.get(memcache_key).AndReturn(None)
models.BaseModel.all().AndReturn(mock_query)
for filt, value in filters:
mock_query.filter(filt, value).AndReturn(mock_query)
mock_query.fetch(1000).AndReturn(entities)
models.memcache.set(
memcache_key, entities, models.MEMCACHE_SECS).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(
entities, models.BaseModel.MemcacheWrappedGetAllFilter(filters))
self.mox.VerifyAll()
def testPackageAliasResolvePackageName(self):
"""Test PackageAlias.ResolvePackageName() classmethod."""
pkg_alias = 'unknown'
pkg_name = 'foopkg'
mock_entity = self.mox.CreateMockAnything()
mock_entity.enabled = True
mock_entity.munki_pkg_name = pkg_name
self.mox.StubOutWithMock(models.PackageAlias, 'MemcacheWrappedGet')
models.PackageAlias.MemcacheWrappedGet(pkg_alias).AndReturn(mock_entity)
self.mox.ReplayAll()
self.assertEqual(
pkg_name, models.PackageAlias.ResolvePackageName(pkg_alias))
self.mox.VerifyAll()
def testPackageAliasResolvePackageNameDisabled(self):
"""Test PackageAlias.ResolvePackageName() classmethod."""
pkg_alias = 'unknown'
pkg_name = 'foopkg'
mock_entity = self.mox.CreateMockAnything()
mock_entity.enabled = False
mock_entity.munki_pkg_name = pkg_name
self.mox.StubOutWithMock(models.PackageAlias, 'MemcacheWrappedGet')
models.PackageAlias.MemcacheWrappedGet(pkg_alias).AndReturn(mock_entity)
self.mox.ReplayAll()
self.assertEqual(
None, models.PackageAlias.ResolvePackageName(pkg_alias))
self.mox.VerifyAll()
def testPackageAliasResolvePackageNameEmptyPkgName(self):
"""Test PackageAlias.ResolvePackageName() classmethod."""
pkg_alias = 'unknown'
pkg_name = ''
mock_entity = self.mox.CreateMockAnything()
mock_entity.enabled = True
mock_entity.munki_pkg_name = pkg_name
self.mox.StubOutWithMock(models.PackageAlias, 'MemcacheWrappedGet')
models.PackageAlias.MemcacheWrappedGet(pkg_alias).AndReturn(mock_entity)
self.mox.ReplayAll()
self.assertEqual(
None, models.PackageAlias.ResolvePackageName(pkg_alias))
self.mox.VerifyAll()
def testPackageAliasResolvePackageName_AliasNotFound(self):
"""Test PackageAlias.ResolvePackageName() classmethod."""
pkg_alias = 'unknown'
self.mox.StubOutWithMock(models.PackageAlias, 'MemcacheWrappedGet')
models.PackageAlias.MemcacheWrappedGet(pkg_alias).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(None, models.PackageAlias.ResolvePackageName(pkg_alias))
self.mox.VerifyAll()
class BaseManifestModificationTest(mox.MoxTestBase):
"""BaseManifestModification class test."""
def testGenerateInstance(self):
"""TODO(user): Should be completed."""
def testResetModMemcache(self):
"""Test ResetModMemcache()."""
target = 'target'
mod_type_invalid = 'UNKNOWN'
mod_type = models.MANIFEST_MOD_MODELS.keys()[0]
mod_type_cls = models.MANIFEST_MOD_MODELS[mod_type]
self.mox.StubOutWithMock(mod_type_cls, 'DeleteMemcacheWrappedGetAllFilter')
mod_type_cls.DeleteMemcacheWrappedGetAllFilter(
(('%s =' % mod_type, target),)).AndReturn(None)
self.mox.ReplayAll()
self.assertTrue(mod_type_invalid not in models.MANIFEST_MOD_MODELS)
self.assertRaises(
ValueError, models.BaseManifestModification.ResetModMemcache,
mod_type_invalid, target)
models.BaseManifestModification.ResetModMemcache(mod_type, target)
self.mox.VerifyAll()
class KeyValueCacheTest(mox.MoxTestBase):
"""Test KeyValueCache class."""
def testIpInListWhenEmptyIp(self):
"""Tests IpInList() with empty IP values."""
self.assertEqual(False, self.cls.IpInList(self.key, ''))
self.assertEqual(False, self.cls.IpInList(self.key, None))
def testIpInListWhenIpNotInEmptyList(self):
"""Tests IpInList() with an IP that will not match an empty list."""
self.mox.StubOutWithMock(models.util, 'Deserialize')
self.mox.StubOutWithMock(self.cls, 'MemcacheWrappedGet')
ip = '1.2.3.4'
deserialized = []
self.cls.MemcacheWrappedGet(
self.key, 'text_value').AndReturn('serialized')
models.util.Deserialize('serialized').AndReturn(deserialized)
self.mox.ReplayAll()
self.assertFalse(self.cls.IpInList(self.key, ip))
self.mox.VerifyAll()
def testIpInListWhenPropertyValueIsEmpty(self):
"""Tests IpInList() with null/empty property text_value for list."""
self.mox.StubOutWithMock(models.util, 'Deserialize')
self.mox.StubOutWithMock(self.cls, 'MemcacheWrappedGet')
ip = '1.2.3.4'
self.cls.MemcacheWrappedGet(self.key, 'text_value').AndReturn('')
self.mox.ReplayAll()
self.assertFalse(self.cls.IpInList(self.key, ip))
self.mox.VerifyAll()
def testIpInListWhenIpNotInList(self):
"""Tests IpInList() with an IP not in the lists."""
self.mox.StubOutWithMock(models.util, 'Deserialize')
self.mox.StubOutWithMock(self.cls, 'MemcacheWrappedGet')
ip = '1.2.3.4'
deserialized = ['192.168.0.0/16']
self.cls.MemcacheWrappedGet(
self.key, 'text_value').AndReturn('serialized')
models.util.Deserialize('serialized').AndReturn(deserialized)
self.mox.ReplayAll()
self.assertFalse(self.cls.IpInList(self.key, ip))
self.mox.VerifyAll()
def testIpInListWhenTrue(self):
"""Tests IpInList() with an IP that is found in the list."""
self.mox.StubOutWithMock(models.util, 'Deserialize')
self.mox.StubOutWithMock(self.cls, 'MemcacheWrappedGet')
ip = '1.2.3.4'
deserialized = ['192.168.0.0/16', '1.0.0.0/8']
self.cls.MemcacheWrappedGet(
self.key, 'text_value').AndReturn('serialized')
models.util.Deserialize('serialized').AndReturn(deserialized)
self.mox.ReplayAll()
self.assertTrue(self.cls.IpInList(self.key, ip))
self.mox.VerifyAll()
def testIpInListWhenIpv6(self):
"""Tests IpInList() with an IPv6 IP."""
ip = '2620:0:1003:1007:216:36ff:feee:f090'
self.mox.ReplayAll()
self.assertFalse(self.cls.IpInList(self.key, ip))
self.mox.VerifyAll()
if __name__ == '__main__':
app.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2864,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743... | 2.468215 | 7,834 |
import scipy.signal
import numpy as np
| [
11748,
629,
541,
88,
13,
12683,
282,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 2.733333 | 15 |
import socket
import sys
deviceAmount = 4
gateway = ("localhost", 10000)
server = ("localhost",8000)
# creazione socket UDP per i device.
deviceInterface = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
deviceInterface.bind(gateway)
# creazione sockect TPC per server.
serverInterface = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connessione con server.
try:
print("Connessione con server...")
serverInterface.connect(server)
print("Connessione con server stabilita.")
except Exception as e:
print("Errore durante connessione con server.")
print("Err: "+e)
sys.exit(1)
# sendMeasurements invia le letture raccolte al server.
if __name__ == "__main__":
main()
| [
11748,
17802,
201,
198,
11748,
25064,
201,
198,
201,
198,
25202,
31264,
796,
604,
201,
198,
10494,
1014,
796,
5855,
36750,
1600,
33028,
8,
201,
198,
15388,
796,
5855,
36750,
1600,
33942,
8,
201,
198,
201,
198,
2,
1126,
1031,
7935,
178... | 2.72119 | 269 |
import yarl
from collections import defaultdict
def shorten(text, width, placeholder='...'):
"""
>>> shorten('123456789', width=8)
'12345...'
>>> shorten('123456789', width=9)
'123456789'
"""
if not text:
return text
if len(text) <= width:
return text
return text[: max(0, width - len(placeholder))] + placeholder
def reverse_url(url):
"""
convert url to reversed url
"""
url = yarl.URL(url)
host = '.'.join(reversed(url.host.split('.')))
result = f'{host}!{url.port}!{url.scheme}{url.raw_path_qs}'
if url.raw_fragment:
result += '#' + url.raw_fragment
return result
def forward_url(url):
"""
convert reversed url to normal url
"""
try:
host, port, other = url.split('!', 2)
scheme, extra = other.split('/', 1)
except ValueError as ex:
raise ValueError(f'invalid reverse url: {ex}') from None
colon_port = ''
if port == '443' and scheme == 'https':
colon_port = ''
elif port == '80' and scheme == 'http':
colon_port = ''
else:
colon_port = ':' + port
host = '.'.join(reversed(host.split('.')))
result = f'{scheme}://{host}{colon_port}/{extra}'
return result
class DuplicateFeedDetector:
"""
A stream detector to find duplicate feeds,
assume push feeds by the order of reverse url
>>> det = DuplicateFeedDetector()
>>> feeds = [
... (11, 'http://a.example.com/feed.xml'),
... (12, 'https://b.example.com/feed.xml'),
... (21, 'http://rss.anyant.com/changelog.atom'),
... (22, 'https://rss.anyant.com/changelog.atom'),
... (23, 'https://rss.anyant.com/changelog.atom?version=1.0.0'),
... (24, 'https://rss.anyant.com/changelog.atom?'),
... (31, 'http://blog.guyskk.com/feed.xml'),
... (32, 'https://blog.guyskk.com/feed.xml'),
... ]
>>> for feed_id, url in feeds:
... det.push(feed_id, reverse_url(url))
>>> checkpoint = reverse_url('http://blog.guyskk.com/feed.xml')
>>> assert det.checkpoint == checkpoint, det.checkpoint
>>> got = det.poll()
>>> assert got == [(22, 21, 24)], got
>>> det.flush()
>>> got = det.poll()
>>> assert got == [(32, 31)], got
>>> assert det.checkpoint is None, det.checkpoint
"""
@property
| [
11748,
331,
7063,
198,
6738,
17268,
1330,
4277,
11600,
628,
198,
4299,
45381,
7,
5239,
11,
9647,
11,
46076,
11639,
986,
6,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13163,
45381,
10786,
10163,
2231,
3134,
4531,
3256,
9647,
... | 2.354354 | 999 |
from flask import Blueprint
professor_blueprint = Blueprint(
'professor',
__name__,
template_folder='templates',
static_folder='static'
)
from . import views
| [
6738,
42903,
1330,
39932,
198,
198,
5577,
5987,
62,
17585,
4798,
796,
39932,
7,
198,
220,
220,
220,
705,
5577,
5987,
3256,
198,
220,
220,
220,
11593,
3672,
834,
11,
198,
220,
220,
220,
11055,
62,
43551,
11639,
11498,
17041,
3256,
198,... | 2.933333 | 60 |
import random
import operator
from functools import reduce
import numpy as np
from ..numpy.types import NumpyVectorType
from metagraph import concrete_algorithm, NodeID
from .types import PythonNodeSetType, PythonNodeMapType
from typing import Tuple, Iterable, Any, Callable, Optional
@concrete_algorithm("util.nodeset.choose_random")
@concrete_algorithm("util.nodemap.sort")
@concrete_algorithm("util.nodemap.select")
@concrete_algorithm("util.nodemap.filter")
@concrete_algorithm("util.nodemap.apply")
@concrete_algorithm("util.nodemap.reduce")
| [
11748,
4738,
198,
11748,
10088,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11485,
77,
32152,
13,
19199,
1330,
399,
32152,
38469,
6030,
198,
6738,
1138,
6111,
1330,
10017,
62,
282,
42289,
11,
... | 3.038043 | 184 |
''' Copyright [2020] Hahn-Schickard-Gesellschaft für angewandte Forschung e.V., Daniel Konegen + Marcus Rueb
Copyright [2021] Karlsruhe Institute of Technology, Daniel Konegen
SPDX-License-Identifier: Apache-2.0
============================================================================================================'''
import time
import os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Loading_images(QThread):
"""Loading screen thread.
Attributes:
Loadpng: The image which is represented on the "LoadWindow"
loading_img: The different images representing the loadingscreen
"""
def run(self):
"""Activates the thread
Changes the image of the loading screen every 0.75 seconds.
"""
while(self.isRunning()):
if self.loading_img < 15:
self.loading_img += 1
else:
self.loading_img = 1
time.sleep(0.75)
self.Loadpng.setPixmap(QPixmap(os.path.join('Images','GUI_loading_images', 'GUI_load_' + str(self.loading_img) + '.png')))
def stop_thread(self):
"""Ends the thread
"""
self.terminate() | [
7061,
6,
15069,
685,
42334,
60,
367,
15386,
12,
14874,
624,
446,
12,
38,
274,
19187,
11693,
701,
277,
25151,
3550,
413,
392,
660,
27325,
354,
2150,
304,
13,
53,
1539,
7806,
509,
505,
5235,
1343,
17068,
45363,
65,
198,
220,
220,
220,... | 2.309187 | 566 |
from pickle import TRUE
from django.db import models | [
6738,
2298,
293,
1330,
26751,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981
] | 4 | 13 |
from binary_tree.tree_from_preorder_inorder import tree_from_preorder_inorder
from binary_tree.tree_node import TreeNode
from test_helpers.test_helpers import get_binary_tree_values
| [
6738,
13934,
62,
21048,
13,
21048,
62,
6738,
62,
3866,
2875,
62,
259,
2875,
1330,
5509,
62,
6738,
62,
3866,
2875,
62,
259,
2875,
198,
6738,
13934,
62,
21048,
13,
21048,
62,
17440,
1330,
12200,
19667,
198,
6738,
1332,
62,
16794,
364,
... | 3.267857 | 56 |
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
__author__ = "DEAP Team"
__version__ = "1.2"
__revision__ = "1.2.2"
| [
2,
220,
220,
220,
770,
2393,
318,
636,
286,
5550,
2969,
13,
198,
2,
198,
2,
220,
220,
220,
5550,
2969,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
220,
220,
220,
340,
739,
262,
2846,
286,
262,
... | 3.131687 | 243 |
import numpy as np
import matplotlib.pyplot as plt
# plt.style.use("dark_background")
times1 = np.load('trained/timesteps_1.npy')
rewards1 = np.load('trained/avg_ep_rews_1.npy')
times2 = np.load('trained/timesteps_2.npy')
rewards2 = np.load('trained/avg_ep_rews_2.npy')
times3 = np.load('trained/timesteps_3.npy')
rewards3 = np.load('trained/avg_ep_rews_3.npy')
rewards_arr = np.vstack((rewards1, rewards2, rewards3))
#
fig, ax0 = plt.subplots(figsize=(20, 8), tight_layout = True)
ax0.plot(times1, np.mean(rewards_arr, axis=0), c="indianred")
ax0.fill_between(times1, np.amax(rewards_arr, axis=0), np.amin(rewards_arr, axis=0), color="indianred", alpha=0.5)
ax0.set_title("RL Training Over Time")
ax0.set_xlabel("Timestep")
ax0.set_ylabel("Average Episode Reward")
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
2,
458,
83,
13,
7635,
13,
1904,
7203,
21953,
62,
25249,
4943,
628,
198,
22355,
16,
796,
45941,
13,
2220,
10786,
35311,
14,
16514,
395,
... | 2.322581 | 341 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from itertools import chain
from django.template.loader import get_template
from django.utils.translation import ugettext as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from monitoring import api
from monitoring.alarmdefs import constants
| [
2,
15069,
2211,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
15069,
2177,
376,
52,
41,
2043,
12564,
40880,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
153... | 3.862348 | 247 |
#!/usr/bin/env python3
import sys
import os
import struct
import collections
import json
from asfcut import ASFFileHeader, ASFChunkHeader, ASF_CF_STEREO
if __name__ == '__main__':
exit(main()) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
2878,
198,
11748,
17268,
198,
11748,
33918,
198,
6738,
355,
69,
8968,
1330,
7054,
5777,
576,
39681,
11,
7054,
37,
1925,
2954,
39681,... | 2.910448 | 67 |
"""API module"""
from . import core, types, constants
__all__ = ['core', 'constants', 'types']
| [
37811,
17614,
8265,
37811,
198,
6738,
764,
1330,
4755,
11,
3858,
11,
38491,
198,
198,
834,
439,
834,
796,
37250,
7295,
3256,
705,
9979,
1187,
3256,
705,
19199,
20520,
198
] | 3.2 | 30 |
# coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CouponbookApi(object):
    """Client bindings for the BillForward ``/coupon-books`` REST endpoints.

    Each operation is exposed as a pair of methods: a convenience wrapper
    (e.g. ``create_coupon_book``) that returns only the response data, and a
    ``*_with_http_info`` variant that validates keyword arguments, builds the
    request, and issues it through ``self.api_client``.
    # NOTE(review): no __init__ is visible in this chunk — presumably
    # ``api_client`` is injected elsewhere; confirm before relying on it.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
def create_coupon_book(self, coupon_book, **kwargs):
"""
Create a coupon-book.
{\"nickname\":\"Create a new coupon book\",\"request\":\"createCouponBookRequest.html\",\"response\":\"createCouponBookResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_coupon_book(coupon_book, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CouponBook coupon_book: The coupon-book object to be created. (required)
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_coupon_book_with_http_info(coupon_book, **kwargs)
else:
(data) = self.create_coupon_book_with_http_info(coupon_book, **kwargs)
return data
def create_coupon_book_with_http_info(self, coupon_book, **kwargs):
"""
Create a coupon-book.
{\"nickname\":\"Create a new coupon book\",\"request\":\"createCouponBookRequest.html\",\"response\":\"createCouponBookResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_coupon_book_with_http_info(coupon_book, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CouponBook coupon_book: The coupon-book object to be created. (required)
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_book']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_coupon_book" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_book' is set
if ('coupon_book' not in params) or (params['coupon_book'] is None):
raise ValueError("Missing the required parameter `coupon_book` when calling `create_coupon_book`")
resource_path = '/coupon-books'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'coupon_book' in params:
body_params = params['coupon_book']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete_coupon_book(self, coupon_book_id, **kwargs):
"""
Retire a coupon-book, specified by the coupon-book-ID parameter.
{\"nickname\":\"Delete coupon book\",\"response\":\"deleteCouponBookByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_coupon_book(coupon_book_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_book_id: ID of the coupon-book. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_coupon_book_with_http_info(coupon_book_id, **kwargs)
else:
(data) = self.delete_coupon_book_with_http_info(coupon_book_id, **kwargs)
return data
def delete_coupon_book_with_http_info(self, coupon_book_id, **kwargs):
"""
Retire a coupon-book, specified by the coupon-book-ID parameter.
{\"nickname\":\"Delete coupon book\",\"response\":\"deleteCouponBookByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_coupon_book_with_http_info(coupon_book_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_book_id: ID of the coupon-book. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_book_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_coupon_book" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_book_id' is set
if ('coupon_book_id' not in params) or (params['coupon_book_id'] is None):
raise ValueError("Missing the required parameter `coupon_book_id` when calling `delete_coupon_book`")
resource_path = '/coupon-books/{coupon-book-ID}'.replace('{format}', 'json')
path_params = {}
if 'coupon_book_id' in params:
path_params['coupon-book-ID'] = params['coupon_book_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_attachable_coupon_books(self, attachableness, has_code, **kwargs):
"""
Returns a collection of attachable coupon-books. An attachable coupon-book has at least one remaining use, and is not deleted. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get all attachable coupon books\",\"response\":\"getCouponBookAllAttachable.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_attachable_coupon_books(attachableness, has_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool attachableness: The attachableness of the coupon-book. (required)
:param bool has_code: Whether the coupon-books have book codes or not. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-book to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_attachable_coupon_books_with_http_info(attachableness, has_code, **kwargs)
else:
(data) = self.get_all_attachable_coupon_books_with_http_info(attachableness, has_code, **kwargs)
return data
def get_all_attachable_coupon_books_with_http_info(self, attachableness, has_code, **kwargs):
"""
Returns a collection of attachable coupon-books. An attachable coupon-book has at least one remaining use, and is not deleted. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get all attachable coupon books\",\"response\":\"getCouponBookAllAttachable.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_attachable_coupon_books_with_http_info(attachableness, has_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool attachableness: The attachableness of the coupon-book. (required)
:param bool has_code: Whether the coupon-books have book codes or not. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-book to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['attachableness', 'has_code', 'organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_attachable_coupon_books" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'attachableness' is set
if ('attachableness' not in params) or (params['attachableness'] is None):
raise ValueError("Missing the required parameter `attachableness` when calling `get_all_attachable_coupon_books`")
# verify the required parameter 'has_code' is set
if ('has_code' not in params) or (params['has_code'] is None):
raise ValueError("Missing the required parameter `has_code` when calling `get_all_attachable_coupon_books`")
resource_path = '/coupon-books/attachable/{attachableness}/{has_code}'.replace('{format}', 'json')
path_params = {}
if 'attachableness' in params:
path_params['attachableness'] = params['attachableness']
if 'has_code' in params:
path_params['has_code'] = params['has_code']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_coupon_books(self, **kwargs):
"""
Returns a collection of coupon-books. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get all coupon books\",\"response\":\"getCouponBookAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_coupon_books(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-books to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired coupon-books should be returned.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_coupon_books_with_http_info(**kwargs)
else:
(data) = self.get_all_coupon_books_with_http_info(**kwargs)
return data
def get_all_coupon_books_with_http_info(self, **kwargs):
"""
Returns a collection of coupon-books. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get all coupon books\",\"response\":\"getCouponBookAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_coupon_books_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-books to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired coupon-books should be returned.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_coupon_books" % key
)
params[key] = val
del params['kwargs']
resource_path = '/coupon-books'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_coupon_book_by_book_code(self, book_code, **kwargs):
"""
Returns a single coupon-book, specified by the book-code parameter.
{\"nickname\":\"Retrieve by book code\",\"response\":\"getCouponBookByBookCode.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_book_code(book_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str book_code: The unique coupon-book-code. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_coupon_book_by_book_code_with_http_info(book_code, **kwargs)
else:
(data) = self.get_coupon_book_by_book_code_with_http_info(book_code, **kwargs)
return data
def get_coupon_book_by_book_code_with_http_info(self, book_code, **kwargs):
"""
Returns a single coupon-book, specified by the book-code parameter.
{\"nickname\":\"Retrieve by book code\",\"response\":\"getCouponBookByBookCode.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_book_code_with_http_info(book_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str book_code: The unique coupon-book-code. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['book_code', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupon_book_by_book_code" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'book_code' is set
if ('book_code' not in params) or (params['book_code'] is None):
raise ValueError("Missing the required parameter `book_code` when calling `get_coupon_book_by_book_code`")
resource_path = '/coupon-books/book-code/{book-code}'.replace('{format}', 'json')
path_params = {}
if 'book_code' in params:
path_params['book-code'] = params['book_code']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_coupon_book_by_coupon_book_definition_id(self, coupon_book_definition_id, **kwargs):
"""
Returns a collection of coupon-books, specified by coupon-book-definition-ID parameter. By default 10 coupon-books are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by coupon book definition\",\"response\":\"getCouponBookByCouponBookDefinitionID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_coupon_book_definition_id(coupon_book_definition_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_book_definition_id: The string coupon-book-definition-ID of the coupon-book. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-book to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired coupon-books should be returned.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_coupon_book_by_coupon_book_definition_id_with_http_info(coupon_book_definition_id, **kwargs)
else:
(data) = self.get_coupon_book_by_coupon_book_definition_id_with_http_info(coupon_book_definition_id, **kwargs)
return data
def get_coupon_book_by_coupon_book_definition_id_with_http_info(self, coupon_book_definition_id, **kwargs):
"""
Returns a collection of coupon-books, specified by coupon-book-definition-ID parameter. By default 10 coupon-books are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by coupon book definition\",\"response\":\"getCouponBookByCouponBookDefinitionID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_coupon_book_definition_id_with_http_info(coupon_book_definition_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_book_definition_id: The string coupon-book-definition-ID of the coupon-book. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first coupon-book to return.
:param int records: The maximum number of coupon-books to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired coupon-books should be returned.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_book_definition_id', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupon_book_by_coupon_book_definition_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_book_definition_id' is set
if ('coupon_book_definition_id' not in params) or (params['coupon_book_definition_id'] is None):
raise ValueError("Missing the required parameter `coupon_book_definition_id` when calling `get_coupon_book_by_coupon_book_definition_id`")
resource_path = '/coupon-books/coupon-book-definition/{coupon-book-definition-ID}'.replace('{format}', 'json')
path_params = {}
if 'coupon_book_definition_id' in params:
path_params['coupon-book-definition-ID'] = params['coupon_book_definition_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_coupon_book_by_coupon_code(self, coupon_code, **kwargs):
"""
Returns a single coupon-book, specified by the coupon-code parameter.
{\"nickname\":\"Retrieve by coupon code\",\"response\":\"getCouponBookByBookCode.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_coupon_code(coupon_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_code: The unique coupon-code. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_coupon_book_by_coupon_code_with_http_info(coupon_code, **kwargs)
else:
(data) = self.get_coupon_book_by_coupon_code_with_http_info(coupon_code, **kwargs)
return data
def get_coupon_book_by_coupon_code_with_http_info(self, coupon_code, **kwargs):
"""
Returns a single coupon-book, specified by the coupon-code parameter.
{\"nickname\":\"Retrieve by coupon code\",\"response\":\"getCouponBookByBookCode.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_coupon_code_with_http_info(coupon_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_code: The unique coupon-code. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_code', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupon_book_by_coupon_code" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_code' is set
if ('coupon_code' not in params) or (params['coupon_code'] is None):
raise ValueError("Missing the required parameter `coupon_code` when calling `get_coupon_book_by_coupon_code`")
resource_path = '/coupon-books/coupon/{coupon-code}'.replace('{format}', 'json')
path_params = {}
if 'coupon_code' in params:
path_params['coupon-code'] = params['coupon_code']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_coupon_book_by_id(self, coupon_book_id, **kwargs):
"""
Returns a single coupon-book, specified by the coupon-book-ID parameter.
{\"nickname\":\"Retrieve an existing coupon book\",\"response\":\"getCouponBookByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_id(coupon_book_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_book_id: ID of the coupon-book. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_coupon_book_by_id_with_http_info(coupon_book_id, **kwargs)
else:
(data) = self.get_coupon_book_by_id_with_http_info(coupon_book_id, **kwargs)
return data
def get_coupon_book_by_id_with_http_info(self, coupon_book_id, **kwargs):
"""
Returns a single coupon-book, specified by the coupon-book-ID parameter.
{\"nickname\":\"Retrieve an existing coupon book\",\"response\":\"getCouponBookByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coupon_book_by_id_with_http_info(coupon_book_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str coupon_book_id: ID of the coupon-book. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_book_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupon_book_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_book_id' is set
if ('coupon_book_id' not in params) or (params['coupon_book_id'] is None):
raise ValueError("Missing the required parameter `coupon_book_id` when calling `get_coupon_book_by_id`")
resource_path = '/coupon-books/{coupon-book-ID}'.replace('{format}', 'json')
path_params = {}
if 'coupon_book_id' in params:
path_params['coupon-book-ID'] = params['coupon_book_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def update_coupon_book(self, coupon_book, **kwargs):
"""
Update a coupon-book.
{\"nickname\":\"Update a coupon book\",\"request\":\"updateCouponBookRequest.html\",\"response\":\"updateCouponBookResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_coupon_book(coupon_book, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CouponBook coupon_book: The coupon-book object to be updated. (required)
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_coupon_book_with_http_info(coupon_book, **kwargs)
else:
(data) = self.update_coupon_book_with_http_info(coupon_book, **kwargs)
return data
def update_coupon_book_with_http_info(self, coupon_book, **kwargs):
"""
Update a coupon-book.
{\"nickname\":\"Update a coupon book\",\"request\":\"updateCouponBookRequest.html\",\"response\":\"updateCouponBookResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_coupon_book_with_http_info(coupon_book, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CouponBook coupon_book: The coupon-book object to be updated. (required)
:return: CouponBookPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_book']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_coupon_book" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_book' is set
if ('coupon_book' not in params) or (params['coupon_book'] is None):
raise ValueError("Missing the required parameter `coupon_book` when calling `update_coupon_book`")
resource_path = '/coupon-books'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'coupon_book' in params:
body_params = params['coupon_book']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponBookPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
3941,
39746,
30617,
7824,
628,
198,
220,
220,
220,
4946,
17614,
1020,
2196,
25,
352,
13,
15,
13,
15,
198,
220,
220,
220,
220,
198,
220,
220,
220,
2980,
515,
41... | 2.233816 | 22,013 |
from copy import deepcopy
| [
6738,
4866,
1330,
2769,
30073,
198
] | 4.333333 | 6 |
from machine import Pin, PWM
import time
led1 = Pin(1, Pin.OUT)
led2 = Pin(2, Pin.OUT)
led3 = Pin(3, Pin.OUT)
led4 = Pin(4, Pin.OUT)
pwm = PWM(Pin(28))
# do re mi fa sao la si
L_tones = [262, 294, 330, 349, 392, 440, 494 ]
M_tones = [523, 587, 659, 698, 784, 880, 988 ]
H_tones = [1046, 1175, 1318, 1397, 1568, 1760, 1967]
# durations = [125, 62, 187, 94, 250, 125 ]
durations = [125,62,62,125,125]
twotigers = [262, 294, 330, 262, 262, 294, 330, 262, 330, 349, 392, 330, 349, 392, 392,
440, 392, 349, 330,262, 392,440, 392, 349, 330, 262, 262, 392, 262, 262, 392, 262 ]
while True:
for f in range(len(twotigers)):
for d in range(len(durations)):
play_atone(twotigers[f], durations[d])
led1.toggle()
time.sleep(0.01)
led2.toggle()
time.sleep(0.01)
led3.toggle()
time.sleep(0.01)
led4.toggle()
| [
6738,
4572,
1330,
13727,
11,
350,
22117,
198,
11748,
640,
198,
198,
992,
16,
796,
13727,
7,
16,
11,
13727,
13,
12425,
8,
198,
992,
17,
796,
13727,
7,
17,
11,
13727,
13,
12425,
8,
198,
992,
18,
796,
13727,
7,
18,
11,
13727,
13,
... | 1.893281 | 506 |
import nltk
from geograpy3.extraction import Extractor
from geograpy3.places import PlaceContext
# download all the basic nltk toolkit
| [
11748,
299,
2528,
74,
198,
198,
6738,
4903,
7113,
88,
18,
13,
2302,
7861,
1330,
29677,
273,
198,
6738,
4903,
7113,
88,
18,
13,
23625,
1330,
8474,
21947,
198,
198,
2,
4321,
477,
262,
4096,
299,
2528,
74,
2891,
15813,
628
] | 3.365854 | 41 |
# module name: featureshare
# main program: samplespecificdbgenerator
__author__ = "Anthony Cesnik & Michael Knippen"
__date__ = "$Mar 31, 2016 8:00:00 PM$"
from lxml import etree as et
import refparse
import numpy as np
HTML_NS = "http://uniprot.org/uniprot"
XSI_NS = "http://www.w3.org/2001/XMLSchema-instance"
NAMESPACE_MAP = {None : HTML_NS, "xsi" : XSI_NS}
UP = '{'+HTML_NS+'}'
| [
2,
8265,
1438,
25,
3033,
43466,
198,
2,
1388,
1430,
25,
8405,
431,
7790,
9945,
8612,
1352,
198,
198,
834,
9800,
834,
796,
366,
32697,
42518,
17187,
1222,
3899,
6102,
3974,
268,
1,
198,
834,
4475,
834,
796,
17971,
7676,
3261,
11,
158... | 2.490323 | 155 |
if __name__ == "__main__":
sol = Solution()
sol.generateMatrix(2)
| [
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
1540,
796,
28186,
3419,
198,
220,
1540,
13,
8612,
378,
46912,
7,
17,
8,
628,
628
] | 2.551724 | 29 |
import datetime
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from ..models import OccurrenceSeriesFactory, OccurrenceFactory
| [
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
... | 3.76 | 75 |
import pytest
from meiga import Error, Failure, Success
@pytest.mark.unit
@pytest.mark.unit
| [
11748,
12972,
9288,
198,
198,
6738,
502,
13827,
1330,
13047,
11,
25743,
11,
16282,
628,
198,
31,
9078,
9288,
13,
4102,
13,
20850,
628,
198,
31,
9078,
9288,
13,
4102,
13,
20850,
198
] | 2.939394 | 33 |
from markdownio import span
| [
6738,
1317,
2902,
952,
1330,
11506,
628,
628,
198
] | 3.555556 | 9 |
print("hello Github")
print("红色文件: 代表未添加到版本控制中的文件,"
"绿色文件: 代表添加到版本控制中的文件,"
"蓝色文件: 代表已在版本控制中的文件发生了修改")
print("欢迎来到实力至上主义的教室")
print("海参呢????") | [
198,
4798,
7203,
31373,
38994,
4943,
198,
198,
4798,
7203,
163,
118,
95,
164,
231,
110,
23877,
229,
20015,
114,
25,
5099,
222,
47987,
26193,
101,
17312,
103,
162,
115,
119,
27950,
254,
26344,
108,
48304,
17312,
105,
162,
236,
100,
263... | 0.755981 | 209 |
from functools import wraps
from concurrent.futures import ThreadPoolExecutor
from . import util
from .mp import cpu_count
class _ExecutorMixin:
""" A Mixin that provides asynchronous functionality.
This mixin provides methods that allow a class to run
blocking methods via asyncio in a ThreadPoolExecutor.
It also provides methods that attempt to keep the object
picklable despite having a non-picklable ThreadPoolExecutor
as part of its state.
"""
pool_workers = cpu_count()
@init_executor
def run_in_executor(self, callback, *args, loop=None, **kwargs):
""" Wraps run_in_executor so we can support kwargs.
BaseEventLoop.run_in_executor does not support kwargs, so
we wrap our callback in a lambda if kwargs are provided.
"""
return util.run_in_executor(
self._executor, callback, *args, loop=loop, **kwargs
)
@init_executor
def run_in_thread(self, callback, *args, **kwargs):
""" Runs a method in an executor thread.
This is used when a method must be run in a thread (e.g.
to that a lock is released in the same thread it was
acquired), but should be run in a blocking way.
"""
fut = self._executor.submit(callback, *args, **kwargs)
return fut.result()
class CoroBuilder(type):
""" Metaclass for adding coroutines to a class.
This metaclass has two main roles:
1) Make _ExecutorMixin a parent of the given class
2) For every function name listed in the class attribute "coroutines",
add a new instance method to the class called "coro_<func_name>",
which is a coroutine that calls func_name in a ThreadPoolExecutor.
Each wrapper class that uses this metaclass can define three class
attributes that will influence the behavior of the metaclass:
coroutines - A list of methods that should get coroutine versions
in the wrapper. For example:
coroutines = ['acquire', 'wait']
Will mean the class gets coro_acquire and coro_wait methods.
delegate - The class object that is being wrapped. This object will
be instantiated when the wrapper class is instantiated, and
will be set to the `_obj` attribute of the instance.
pool_workers - The number of workers in the ThreadPoolExecutor internally
used by the wrapper class. This defaults to cpu_count(),
but for classes that need to acquire locks, it should
always be set to 1.
"""
def __init__(cls, name, bases, dct):
""" Properly initialize a coroutine wrapper class.
Sets pool_workers and delegate on the class, and also
adds an __init__ method to it that instantiates the
delegate with the proper context.
"""
super().__init__(name, bases, dct)
pool_workers = dct.get("pool_workers")
delegate = dct.get("delegate")
old_init = dct.get("__init__")
# Search bases for values we care about, if we didn't
# find them on the current class.
for b in bases:
b_dct = b.__dict__
if not pool_workers:
pool_workers = b_dct.get("pool_workers")
if not delegate:
delegate = b_dct.get("delegate")
if not old_init:
old_init = b_dct.get("__init__")
cls.delegate = delegate
# If we found a value for pool_workers, set it. If not,
# ExecutorMixin sets a default that will be used.
if pool_workers:
cls.pool_workers = pool_workers
# Here's the __init__ we want every wrapper class to use.
# It just instantiates the delegate mp object using the
# correct context.
@wraps(old_init)
cls.__init__ = init_func
@staticmethod
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
198,
198,
6738,
764,
1330,
7736,
198,
6738,
764,
3149,
1330,
42804,
62,
9127,
628,
198,
198,
4871,
4808,
23002,
38409,
35608,
259... | 2.582454 | 1,516 |
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
AbstractUser,
)
from django.contrib.auth.models import PermissionsMixin
from main_app.choices import *
from django.utils.translation import ugettext_lazy as _
import datetime
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
357,
198,
220,
220,
220,
27741,
14881,
12982,
11,
198,
220,
220,
220,
7308,
12982,
13511,
11,
198,
220,
220,
220,
27741,
129... | 3.039216 | 102 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
11748,
42625,
14208,
13,
26791,
13,
... | 2.956522 | 46 |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Access control package.
"""
from zope.deferredimport import deprecated
deprecated(
"The functionality of AccessControl.User has moved to"
" AccessControl.users. Please import from there."
" This backward compatibility shim will be removed in AccessControl"
" version 6.",
BasicUser='AccessControl.users:BasicUser',
NullUnrestrictedUser='AccessControl.users:NullUnrestrictedUser',
SimpleUser='AccessControl.users:SimpleUser',
SpecialUser='AccessControl.users:SpecialUser',
Super='AccessControl.users:UnrestrictedUser',
UnrestrictedUser='AccessControl.users:UnrestrictedUser',
User='AccessControl.users:User',
_remote_user_mode='AccessControl.users:_remote_user_mode',
absattr='AccessControl.users:absattr',
addr_match='AccessControl.users:addr_match',
domainSpecMatch='AccessControl.users:domainSpecMatch',
emergency_user='AccessControl.users:emergency_user',
host_match='AccessControl.users:host_match',
nobody='AccessControl.users:nobody',
readUserAccessFile='AccessControl.users:readUserAccessFile',
reqattr='AccessControl.users:reqattr',
rolejoin='AccessControl.users:rolejoin',
system='AccessControl.users:system',
)
deprecated(
"The standard Zope user folder implementation has moved to"
" OFS.userfolder. Please depend on Zope and import from "
" OFS.userfolder or use the new minimal "
" user folder classes from AccessControl.userfolder."
" This backward compatibility shim will be removed in AccessControl"
" version 6.",
BasicUserFolder='OFS.userfolder:BasicUserFolder',
manage_addUserFolder='OFS.userfolder:manage_addUserFolder',
UserFolder='OFS.userfolder:UserFolder',
)
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
6244,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789,
11,
198,
2,
10628,
362,
13,
... | 3.443787 | 676 |
"""
LC 444
Given a sequence originalSeq and an array of sequences,
write a method to find if originalSeq can be uniquely reconstructed from the array of sequences.
Unique reconstruction means that we need to find if originalSeq is the only sequence
such that all sequences in the array are subsequences of it.
Example 1:
Input: originalSeq: [1, 2, 3, 4], seqs: [[1, 2], [2, 3], [3, 4]]
Output: true
Explanation: The sequences [1, 2], [2, 3], and [3, 4] can uniquely reconstruct
[1, 2, 3, 4], in other words, all the given sequences uniquely define the order of numbers
in the 'originalSeq'.
Example 2:
Input: originalSeq: [1, 2, 3, 4], seqs: [[1, 2], [2, 3], [2, 4]]
Output: false
Explanation: The sequences [1, 2], [2, 3], and [2, 4] cannot uniquely reconstruct
[1, 2, 3, 4]. There are two possible sequences we can construct from the given sequences:
1) [1, 2, 3, 4]
2) [1, 2, 4, 3]
Example 3:
Input: originalSeq: [3, 1, 4, 2, 5], seqs: [[3, 1, 5], [1, 4, 2, 5]]
Output: true
Explanation: The sequences [3, 1, 5] and [1, 4, 2, 5] can uniquely reconstruct
[3, 1, 4, 2, 5].
"""
from collections import defaultdict
main()
"""
Time O(V+E)
Space O(V+E)
"""
| [
37811,
198,
5639,
45095,
198,
15056,
257,
8379,
2656,
4653,
80,
290,
281,
7177,
286,
16311,
11,
198,
13564,
257,
2446,
284,
1064,
611,
2656,
4653,
80,
460,
307,
24139,
49594,
422,
262,
7177,
286,
16311,
13,
198,
40257,
25056,
1724,
32... | 2.885856 | 403 |
import socket
HOST = "79.114.19.165" # Standard loopback interface address (localhost)
PORT = 5003 # Port to listen on (non-privileged ports are > 1023)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# s.bind((HOST, PORT))
# s.listen()
s.connect((HOST,PORT))
# conn, addr = s.accept()
with open("received", "wb") as dest:
while True:
data = s.recv(104857600)
dest.write(data)
if not data:
print("Client disconected!")
break
print("file recived")
| [
11748,
17802,
201,
198,
201,
198,
39,
10892,
796,
366,
3720,
13,
16562,
13,
1129,
13,
20986,
1,
220,
1303,
8997,
9052,
1891,
7071,
2209,
357,
36750,
8,
201,
198,
15490,
796,
5323,
18,
220,
1303,
4347,
284,
6004,
319,
357,
13159,
12,... | 2.062937 | 286 |
from apps.school import app as school
from apps.cities import app as cities
from apps.utils import app as utils
from apps.doctors import app as doctors
import settings
import pandas as pd
| [
6738,
6725,
13,
14347,
1330,
598,
355,
1524,
198,
6738,
6725,
13,
66,
871,
1330,
598,
355,
4736,
198,
6738,
6725,
13,
26791,
1330,
598,
355,
3384,
4487,
198,
6738,
6725,
13,
4598,
5217,
1330,
598,
355,
7519,
198,
11748,
6460,
198,
1... | 3.311475 | 61 |
'''
Script for processing the ZAP Google Group stats
'''
import csv
import glob
import json
import os
import utils
| [
7061,
6,
198,
7391,
329,
7587,
262,
1168,
2969,
3012,
4912,
9756,
198,
7061,
6,
198,
11748,
269,
21370,
198,
11748,
15095,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
3384,
4487,
628
] | 3.515152 | 33 |
import json
import pytest
from labelbox.data.annotation_types.geometry.polygon import Polygon
from labelbox.data.annotation_types.geometry.line import Line
from labelbox.data.annotation_types.geometry.point import Point
from labelbox.data.annotation_types.geometry.rectangle import Rectangle
from labelbox.data.serialization.labelbox_v1.converter import LBV1Converter
from labelbox.schema.bulk_import_request import Bbox
@pytest.mark.parametrize(
"file_path", ['tests/data/assets/labelbox_v1/tiled_image_export.json'])
def test_image(file_path):
"""Tests against both Simple and non-Simple tiled image export data.
index-0 is non-Simple, index-1 is Simple
"""
with open(file_path, 'r') as f:
payload = json.load(f)
collection = LBV1Converter.deserialize(payload)
collection_as_list = collection.as_list()
assert len(collection_as_list) == 2
non_simple_annotations = collection_as_list[0].annotations
assert len(non_simple_annotations) == 6
expected_shapes = [Polygon, Point, Point, Point, Line, Rectangle]
for idx in range(len(non_simple_annotations)):
assert isinstance(non_simple_annotations[idx].value,
expected_shapes[idx])
assert non_simple_annotations[-1].value.start.x == -99.36567524971268
assert non_simple_annotations[-1].value.start.y == 19.34717117508651
assert non_simple_annotations[-1].value.end.x == -99.3649886680726
assert non_simple_annotations[-1].value.end.y == 19.41999425190506
simple_annotations = collection_as_list[1].annotations
assert len(simple_annotations) == 8
expected_shapes = [
Polygon, Point, Point, Point, Point, Point, Line, Rectangle
]
for idx in range(len(simple_annotations)):
assert isinstance(simple_annotations[idx].value,
expected_shapes[idx])
| [
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
6738,
6167,
3524,
13,
7890,
13,
1236,
14221,
62,
19199,
13,
469,
15748,
13,
35428,
14520,
1330,
12280,
14520,
198,
6738,
6167,
3524,
13,
7890,
13,
1236,
14221,
62,
19199,
13,
469,
15748,
... | 2.442356 | 798 |
from adobe_analytics.session import OmnitureSession
import json
from warnings import warn
from copy import deepcopy
class Cursor:
"""
A cursor for handling GET requests, including an iterator
for handling large responses (>1k for REST, >50k for BULK)
"""
| [
6738,
512,
5910,
62,
38200,
14094,
13,
29891,
1330,
31816,
8089,
36044,
201,
198,
201,
198,
11748,
33918,
201,
198,
6738,
14601,
1330,
9828,
201,
198,
6738,
4866,
1330,
2769,
30073,
201,
198,
201,
198,
4871,
327,
21471,
25,
201,
198,
... | 3.075269 | 93 |
"""main.py
Contains the main function to run the program.
"""
if '__name__' == '__main__':
main()
| [
37811,
12417,
13,
9078,
198,
198,
4264,
1299,
262,
1388,
2163,
284,
1057,
262,
1430,
13,
198,
37811,
198,
198,
361,
705,
834,
3672,
834,
6,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.666667 | 39 |
#!/bin/bash
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import argparse
import importlib
import os
import sys
import time
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import yaml
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
args = parser.parse_args()
TEST_DB_URL = ft_utils.get_functest_config('results.test_db_url')
logger = ft_logger.Logger("sdnvpn-run-tests").getLogger()
REPO_PATH = os.environ['repos_dir'] + '/sdnvpn/'
if __name__ == '__main__':
main()
| [
2,
48443,
8800,
14,
41757,
198,
2,
198,
2,
15069,
357,
66,
8,
1853,
1439,
2489,
10395,
198,
2,
770,
1430,
290,
262,
19249,
5696,
198,
2,
389,
925,
1695,
739,
262,
2846,
286,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
543... | 2.738994 | 318 |
import os
import logging
from google.cloud import bigquery
from tfx.types.experimental.simple_artifacts import Dataset
from tfx.types.experimental.simple_artifacts import Model as BQModel
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact, Parameter
@component
| [
198,
11748,
28686,
198,
11748,
18931,
198,
198,
6738,
23645,
13,
17721,
1330,
1263,
22766,
198,
198,
6738,
256,
21373,
13,
19199,
13,
23100,
9134,
13,
36439,
62,
50179,
1330,
16092,
292,
316,
198,
6738,
256,
21373,
13,
19199,
13,
23100,... | 3.471698 | 106 |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from collections import OrderedDict
from contextlib import closing
import cx_Oracle
import jaydebeapi as jdb
import jpype
from datadog_checks.checks import AgentCheck
from datadog_checks.config import is_affirmative
from . import queries
EVENT_TYPE = SOURCE_TYPE_NAME = 'oracle'
| [
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
2864,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
357,
3826,
38559,
24290,
8,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738... | 3.186992 | 123 |
import numpy as np
import random
import copy
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
4866,
628,
628,
628
] | 3.571429 | 14 |
#euclidean algo for gcd
x=input("enter two no. for gcd")
a,b=x.split()
a=int(a)
b=int(b)
r1=r2=r=0
if(a>b):
r1=a
r2=b
elif(a<b):
r1=b
r2=a
else:
r1=a
#print(r1,r2)
while(r2>0):
q=r1//r2
r=r1-q*r2
r1=r2
r2=r
#print(r1,r2)
print("gcd of",a,b,"is ",r1)
| [
2,
12496,
565,
485,
272,
435,
2188,
329,
308,
10210,
198,
87,
28,
15414,
7203,
9255,
734,
645,
13,
329,
308,
10210,
4943,
198,
64,
11,
65,
28,
87,
13,
35312,
3419,
198,
64,
28,
600,
7,
64,
8,
198,
65,
28,
600,
7,
65,
8,
198,... | 1.420765 | 183 |
# Evaluating MNIST with Random Forest Classifiers
# Principal Component Analysis
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, classification_report
from mlxtend.plotting import plot_confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import time
RSEED = 85
mnist = fetch_mldata('MNIST original')
print(mnist)
# To plot classification reports
# Split the data into train, test sets
split = 60000
X_train, X_test = mnist["data"][:split], mnist["data"][split:]
y_train, y_test = mnist["target"][:split], mnist["target"][split:]
# Shuffle the training indices for cross validation
shuffle = np.random.permutation(split)
X_train, y_train = X_train[shuffle], y_train[shuffle]
### RANDOM FOREST on full set ###
start = time.clock()
forest_clf = RandomForestClassifier(
bootstrap = True,
n_estimators=20,
max_features='sqrt',
random_state=RSEED
)
forest_clf.fit(X_train, y_train)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X_test.astype(np.float64))
print(cross_val_score(
forest_clf,
X_scaled, y_test,
cv=10,
scoring="accuracy"
))
y_pred = cross_val_predict(
forest_clf,
X_scaled, y_test,
cv=10
)
print(classification_report(y_test, y_pred))
plot_cr(classification_report(y_test, y_pred))
f1score_a = f1_score(y_test, y_pred, average='macro')
stop = time.clock()
time1 = stop - start
cm = confusion_matrix(y_test, y_pred)
plt.matshow(cm, cmap=plt.cm.gray)
plt.show()
fig, ax = plot_confusion_matrix(conf_mat=cm, figsize=(10, 10))
plt.show()
rws = cm.sum(axis=1, keepdims=True)
norm = cm / rws
np.fill_diagonal(norm, 0)
plt.matshow(norm, cmap=plt.cm.gray)
plt.show()
### PRINCIPAL COMPONENT ANALYSIS ###
start = time.clock()
pca_start = time.clock()
pca = PCA(n_components = 0.95)
X_pca = pca.fit_transform(X_train)
X_test_pca = pca.fit_transform(X_test)
pca_stop = time.clock()
pca_time = pca_stop - pca_start
forest_clf2 = RandomForestClassifier(
bootstrap = True,
n_estimators=20,
max_features='sqrt',
random_state=RSEED
)
forest_clf2.fit(X_pca, y_train)
X_scaled = scaler.fit_transform(X_test_pca.astype(np.float64))
print(cross_val_score(
forest_clf2,
X_scaled, y_test,
cv=10,
scoring="accuracy"
))
y_pred = cross_val_predict(
forest_clf2,
X_scaled, y_test,
cv=10
)
print(classification_report(y_test, y_pred))
plot_cr(classification_report(y_test, y_pred))
f1score_b = f1_score(y_test, y_pred, average='macro')
stop = time.clock()
time2 = stop - start
print("RF took:", round(time1, 2), "secs")
print("RF with PCA took:", round(time2, 2), "secs")
print("PCA component identification took:", round(pca_time, 2), "secs")
print("PCA increased time:", round((time2 - time1) / time1 * 100, 1), "%")
print("Main set has", len(X_train[0]),
"variables and F1 score of:", round(f1score_a * 100, 1))
print("PCA set has", len(X_pca[0]),
"variables and F1 score of:", round(f1score_b * 100, 1))
cm = confusion_matrix(y_test, y_pred)
plt.matshow(cm, cmap=plt.cm.gray)
plt.show()
fig, ax = plot_confusion_matrix(conf_mat=cm, figsize=(10, 10))
plt.show()
rws = cm.sum(axis=1, keepdims=True)
norm = cm / rws
np.fill_diagonal(norm, 0)
plt.matshow(norm, cmap=plt.cm.gray)
plt.show()
| [
2,
26439,
11927,
29060,
8808,
351,
14534,
9115,
5016,
13350,
198,
2,
32641,
35100,
14691,
198,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
21207,
62,
76,
335,
1045,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
... | 2.460722 | 1,413 |
from datetime import datetime
from django.db import models
from django.db.transaction import commit_on_success
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from wt_articles.models import TranslatedArticle, TranslatedSentence
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
# Review workflow states -- presumably for the translated-article review
# flow (wt_articles models are imported above); confirm against the views
# that consume them.
PENDING = 0
IN_PROGRESS = 1
FINISHED = 2
# Django-style choices tuple pairing each state with its display label.
REVIEW_STATUSES = (
    (PENDING, 'Pending'),
    (IN_PROGRESS, 'In Progress'),
    (FINISHED, 'Finished'),
)
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
7645,
2673,
1330,
4589,
62,
261,
62,
13138,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,... | 3.020202 | 198 |
# Local imports
from gmprocess.metrics.imc.imc import IMC
class Channels(IMC):
    """IMC subclass for the "channels" intensity measurement component.

    Channels needs neither a rotation nor a combination step, so both are
    mapped to their null implementations.
    """

    # Class-level because the set of unsupported IMTs never varies per
    # instance and can be queried without instantiating first.
    _invalid_imts = ['FAS', 'ARIAS']

    def __init__(self, imc, imt, percentile=None, period=None):
        """
        Args:
            imc (string): Intensity measurement component.
            imt (string): Intensity measurement type.
            percentile (float): Accepted for interface compatibility only;
                not used by Channels.
            period (float): Accepted for interface compatibility only; not
                used by Channels.
        """
        # percentile/period are deliberately forwarded as None: neither
        # applies to the channels component.
        super().__init__(imc, imt, percentile=None, period=None)
        self._steps = dict(
            Rotation='null_rotation',
            Combination2='null_combination',
        )
| [
2,
10714,
17944,
198,
6738,
308,
76,
14681,
13,
4164,
10466,
13,
320,
66,
13,
320,
66,
1330,
8959,
34,
628,
198,
4871,
609,
8961,
7,
3955,
34,
2599,
198,
220,
220,
220,
37227,
9487,
16215,
4831,
290,
12515,
545,
912,
11,
329,
9619... | 2.440476 | 420 |
from dash import Dash, dcc, html, Input, Output, State
import dash_bootstrap_components as dbc
| [
6738,
14470,
1330,
16189,
11,
288,
535,
11,
27711,
11,
23412,
11,
25235,
11,
1812,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
628
] | 3.428571 | 28 |
from enum import Enum
from typing import List
from ...util.callapi import call_api
__author__ = 'Tonio Fincke (Brockmann Consult GmbH)'

# Base address of the local REST service; assumes the server listens on
# port 9090 -- TODO confirm against the deployment configuration.
URL_BASE = "http://localhost:9090/"
# Template for the "clear" endpoint; '{}' is filled in by callers (the
# argument's meaning is not visible in this chunk).
CLEAR_URL = URL_BASE + "multiply/api/clear/{}"
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
7343,
198,
6738,
2644,
22602,
13,
13345,
15042,
1330,
869,
62,
15042,
198,
198,
834,
9800,
834,
796,
705,
35416,
952,
376,
1939,
365,
357,
33,
10823,
9038,
21651,
402,
2022,
39,
33... | 2.731707 | 82 |
# Placeholder Discord bot credential -- replace with a real token before
# running, and never commit a live token to version control.
DISCORD_BOT_TOKEN = "YOUR_DISCORD_API_TOKEN"
26288,
34,
12532,
62,
33,
2394,
62,
10468,
43959,
796,
366,
56,
11698,
62,
26288,
34,
12532,
62,
17614,
62,
10468,
43959,
1
] | 1.913043 | 23 |
import os
from setuptools import find_packages, setup
# Distribution metadata for the mock-ssh-server package.
# NOTE(review): read_long_description() and read_requirements() are not
# visible in this chunk -- presumably defined earlier in the file (reading
# the README and requirements.txt); confirm.
setup(
    name="mock-ssh-server",
    version="0.9.1",
    description="Mock SSH server for testing purposes",
    long_description=read_long_description(),
    url="https://github.com/carletes/mock-ssh-server",
    author="Carlos Valiente",
    author_email="carlos@pepelabs.net",
    license="MIT",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Testing",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    package_dir={
        "mockssh": "mockssh",
    },
    packages=find_packages(),
    # Bundled fixtures: SSH key pairs shipped inside the package.
    package_data={
        "mockssh": [
            "sample-user-key",
            "sample-user-key.pub",
            "server-key",
            "server-key.pub",
        ]
    },
    install_requires=read_requirements(),
    zip_safe=False,
)
| [
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
628,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
76,
735,
12,
45824,
12,
15388,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
24,
13,
16,
16... | 2.39212 | 533 |
from django.contrib import admin
from investor_management import models
# Expose the investor-management models in the Django admin site.
for model in (
    models.InvestorOrganization,
    models.InvestorUser,
    models.InvestorOrgSMEProjectInvestments,
):
    admin.site.register(model)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
15811,
62,
27604,
1330,
4981,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
19070,
273,
26121,
1634,
8,
198,
28482,
13,
15654,
13,
... | 3.666667 | 69 |
# coding: utf-8
"""
Harmony Connect
An easy to use API that helps you access the Factom blockchain. # noqa: E501
OpenAPI spec version: 1.0.17
Contact: harmony-support@factom.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import harmony_connect_client
from harmony_connect_client.api.info_api import InfoApi # noqa: E501
from harmony_connect_client.rest import ApiException
class TestInfoApi(unittest.TestCase):
    """Unit-test stubs for the generated InfoApi client."""

    def test_get_api_info(self):
        """Stub for InfoApi.get_api_info (API Info).  # noqa: E501

        Intentionally empty until real assertions are written.
        """
        # TODO: exercise InfoApi.get_api_info against a mocked transport.


if __name__ == '__main__':
    unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
35088,
8113,
628,
220,
220,
220,
1052,
2562,
284,
779,
7824,
326,
5419,
345,
1895,
262,
19020,
296,
11779,
13,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
2... | 2.677903 | 267 |
from matplotlib import pyplot as plt
import rasterio
import gdal
import numpy as np
import isce
import isceobj
import isceobj.Image.IntImage as IntImage
import isceobj.Image.SlcImage as SLC
from isceobj.Image import createImage
from datetime import datetime as dt
import argparse
import glob
import os
import shutil
# Entry-point call -- main() is defined earlier in this file (outside this
# chunk); note it runs unconditionally, even on import.
main()
| [
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
374,
1603,
952,
198,
11748,
308,
31748,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
318,
344,
198,
11748,
318,
344,
26801,
198,
11748,
318,
344,
26801,
13,
5159,... | 3.203883 | 103 |
from scipy import spatial
# Demo invocation -- sentence_similarity is defined elsewhere in this file
# (not visible here); presumably returns a cosine-style similarity score
# built on scipy.spatial.  TODO confirm.
result = sentence_similarity(
    "The field was full of lush, green grass. The horses grazed peacefully.",
    " The young children played with kites."
)
print(result)
6738,
629,
541,
88,
1330,
21739,
628,
198,
198,
20274,
796,
6827,
62,
38610,
414,
7,
198,
220,
220,
220,
366,
464,
2214,
373,
1336,
286,
37408,
11,
4077,
8701,
13,
383,
14260,
32703,
276,
30996,
33283,
198,
220,
220,
220,
366,
383,
... | 3.45614 | 57 |
from lxml import etree as ET
from lxml.builder import E
class Response:
    """BXML ``<Response>`` root element.

    :Example:

        from bandwidth.voice.bxml import Response
        response = Response(E.Call({'from': '+1234567890', 'to': '+1234567891'}), E.Hangup())
    """

    def __init__(self, *response_verbs):
        """Wrap the given BXML verbs in an XML ``<Response>`` document.

        :type response_verbs: list
        :param response_verbs: one or several BXML verb elements

        :Example:

            response = bandwidth.catapult.bxml.Response(E.Hangup())
            response = bandwidth.catapult.bxml.Response(E.PlayAudio("Thank you"), E.Hangup())
        """
        self.response = E.xml(E.Response(*response_verbs))

    def to_xml(self):
        """Serialize this response to its XML text representation.

        :rtype str
        :returns XML text

        :Example:

            xml = response.to_xml()
        """
        return ET.tostring(self.response)

    def __str__(self):
        """Implicit string conversion: the XML text decoded as UTF-8.

        :rtype str
        :returns XML text

        :Example:

            xml = str(response)
        """
        serialized = self.to_xml()
        return serialized.decode('utf-8')
| [
6738,
300,
19875,
1330,
2123,
631,
355,
12152,
198,
6738,
300,
19875,
13,
38272,
1330,
412,
628,
198,
4871,
18261,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
347,
55,
5805,
18261,
5002,
628,
220,
220,
220,
1058,
16281,
25,
1... | 2.292469 | 571 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import json
import torch
from collections import defaultdict
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from maskrcnn_benchmark.data.transforms import build_transforms
from maskrcnn_benchmark.data.build import build_dataset
from maskrcnn_benchmark.data.collate_batch import BatchCollator
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from pycocotools import mask as maskUtils
import numpy as np
import pdb
import cv2
import math
def annToMask(boxlist):
    """
    Convert annotation which can be polygons, uncompressed RLE to RLE.
    :return: binary mask (numpy 2D array)

    NOTE(review): despite the docstring, this returns a *list of COCO RLE
    dicts*, not a decoded binary mask -- update one or the other.
    """
    # NOTE(review): BoxList.size is conventionally (width, height); the
    # unpacking below reads it as (h, w) -- confirm for non-square images.
    h,w = boxlist.size
    masks = boxlist.get_field('masks')
    labels = boxlist.get_field('labels')
    # pdb.set_trace()
    # Indices of the segments whose label equals 1 (first foreground
    # class).  NOTE(review): the [:,0] indexing assumes 'labels' is a
    # torch tensor (2-D nonzero result); a plain numpy array would raise
    # here -- confirm what get_field returns.
    index = list(np.nonzero(labels==1)[:,0])
    polygons = []
    for i, seg in enumerate(masks.polygons):
        if i in index:
            polygons.append(seg)
    # Encode each selected polygon group as one merged RLE.
    RLES = []
    for segm in polygons:
        rles = maskUtils.frPyObjects([p.numpy() for p in segm.polygons], h, w )
        rle = maskUtils.merge(rles)
        RLES.append(rle)
    return RLES
if __name__ == "__main__":
    # main() is defined earlier in this file (outside this chunk).
    main()
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
81,
37811,
198,
26416,
3047,
4226,
329,
9485,
15884,
354,
198,
37811,
198,
198,
2,
5345,
510,
2183,
2858,
878,
3016,
1997,
2073,
318,
17392,
... | 2.832543 | 633 |
import glob
import os
import shutil
# Preserve the photometry catalog products (*.mag.*, *.psf.*) by copying
# them to the output directory before cleaning up ('outdir' is defined
# earlier in this file, outside this chunk).
for pattern in ('*.mag.*', '*.psf.*'):
    for path in glob.glob(pattern):
        shutil.copy(path, outdir)

# Intermediate files produced by the reduction pipeline, removed in the
# same order as the original hand-written cleanup.
_SCRATCH_PATTERNS = (
    '*.mag.*',
    '*.psf.*',
    '*.als.*',
    '*.arj.*',
    '*.pst.*',
    '*.psg.*',
    '*pix*fits',
    '*_psf_stars.txt',
    '*.sub.*',
    'refcoords.txt',
    'imagelist.txt',
    'comlist.txt',
    'shifts.txt',
    'shifted_*',
    'coords',
    'geomap*.txt',
    'template0.fits',
    '*_ped.fits',
    '*_aligned_tmpl.fits',
    '*_ped_conv.fits',
)
for pattern in _SCRATCH_PATTERNS:
    for path in glob.glob(pattern):
        os.remove(path)
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
1640,
1312,
287,
15095,
13,
4743,
672,
10786,
24620,
19726,
15885,
6,
2599,
198,
220,
220,
220,
4423,
346,
13,
30073,
7,
72,
11,
448,
15908,
8,
198,
198,
1640,
1312,
... | 2.00335 | 597 |
from setuptools import setup
# Distribution metadata for the djangoevents package; note the deliberately
# narrow pin on the eventsourcing dependency (>=1.2,<1.3).
setup(
    name='djangoevents',
    version='0.9.3',
    url='https://github.com/ApplauseOSS/djangoevents',
    license='MIT',
    description='Building blocks for building Event Sourcing Django applications.',
    author='Applause',
    author_email='eng@applause.com',
    zip_safe=False,
    packages=[
        'djangoevents',
        'djangoevents.migrations',
        'djangoevents.tests.settings',
    ],
    include_package_data=True,
    install_requires=[
        'eventsourcing>=1.2,<1.3',
        'django',
    ],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
28241,
14208,
31534,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
24,
13,
18,
3256,
198,
220,
220,
220,
19016,
11639,
5450,
1378,
12567,
13,
... | 2.380342 | 234 |
from core.neural_network import NeuralNetwork
from functions import *
| [
6738,
4755,
13,
710,
1523,
62,
27349,
1330,
47986,
26245,
198,
6738,
5499,
1330,
1635,
198
] | 4.375 | 16 |
from typing import Dict
from boa3.model.builtin.interop.nativecontract import StdLibMethod
from boa3.model.variable import Variable
| [
6738,
19720,
1330,
360,
713,
198,
198,
6738,
1489,
64,
18,
13,
19849,
13,
18780,
259,
13,
3849,
404,
13,
30191,
28484,
1330,
520,
67,
25835,
17410,
198,
6738,
1489,
64,
18,
13,
19849,
13,
45286,
1330,
35748,
628
] | 3.435897 | 39 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from convNd import convNd
import time
import matplotlib.pyplot as plt
# Make cuDNN deterministic so repeated timing runs are comparable.
torch.backends.cudnn.deterministic = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Define 4D tensor to test on
# NOTE(review): the tensor below actually has five spatial dimensions
# (shape 1 x C x 5 x 5 x 5 x 5 x 5) -- the comment above looks stale.
inChans = 1
outChans = 1
x = torch.ones(1, inChans, 5, 5, 5, 5, 5).to(device)
ks = 3
padding = 0
# Only zeros allowed by pytorch
padding_mode = 'zeros'
stride = 2
weight = 1  # constant used to initialise every kernel weight
bias = 0  # constant used to initialise every bias term
groups = 1

# ConvNd where d = 3
# NOTE(review): the num_dims argument passed below is 5, not 3 -- the
# comment above looks stale.
conv = convNd(inChans, outChans, 5, ks, stride, padding, use_bias=True,
    padding_mode=padding_mode, groups=groups,
    kernel_initializer=lambda x: torch.nn.init.constant_(x, weight),
    bias_initializer=lambda x: torch.nn.init.constant_(x, bias)).to(device)
# Transposed convolution
convT = convNd(inChans, outChans, 5, ks, stride, padding,
    groups=groups, is_transposed=True,
    kernel_initializer=lambda x: torch.nn.init.constant_(x, weight),
    bias_initializer=lambda x: torch.nn.init.constant_(x, bias)).to(device)

# Run timers to compare with torch implementations
# NOTE(review): torch.cuda.Event requires a CUDA device; the fallback to
# "cpu" above would make the timing section below fail on CPU-only hosts.
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
print(' ')

# Convolve with ConvNd
torch.cuda.synchronize()
start.record()
out = conv(x)
end.record()
torch.cuda.synchronize()
print("ConvNd time: " + str(start.elapsed_time(end)))
print(out.shape)

# Convolve with ConvTransposeNd
torch.cuda.synchronize()
start.record()
outT = convT(x)
end.record()
torch.cuda.synchronize()
print("ConvTransposeNd time: " + str(start.elapsed_time(end)))
print(outT.shape)
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
3063,
45,
67,
1330,
3063,
45,
67,
198,
11748,
640,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
... | 2.639456 | 588 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import setuptools
# The long description is read from the repository-level README one
# directory up; dependency pins come from the sibling requirements.txt.
with open("../README.md", "r") as f:
    long_description = f.read()

with open("requirements.txt") as f:
    requirements = f.read().splitlines()

setuptools.setup(
    author="DeepSeismic Maintainers",
    author_email="deepseismic@microsoft.com",
    classifiers=[
        "Development Status :: 1 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development",
    ],
    description="DeepSeismic",
    install_requires=requirements,
    license="MIT",
    long_description=long_description,
    long_description_content_type="text/markdown",
    name="deepseismic_interpretation",
    packages=setuptools.find_packages(include=["deepseismic_interpretation", "deepseismic_interpretation.*"]),
    platforms="any",
    python_requires=">=3.6",
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
    url="https://github.com/microsoft/deepseismic",
    version="0.1.0",
    zip_safe=False,
)
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
198,
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
40720,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
25,
198,
220,
220,
220,
... | 2.738589 | 482 |
import json
import pika
from py_zipkin.zipkin import zipkin_client_span
from py_zipkin.request_helpers import create_http_headers | [
11748,
33918,
198,
11748,
279,
9232,
198,
6738,
12972,
62,
13344,
5116,
13,
13344,
5116,
1330,
19974,
5116,
62,
16366,
62,
12626,
198,
6738,
12972,
62,
13344,
5116,
13,
25927,
62,
16794,
364,
1330,
2251,
62,
4023,
62,
50145
] | 3.307692 | 39 |
#!/usr/bin/env python3
"""chesstc - Calculate expected chess game time.

usage: chesstc <base> <increment>

This calculates the expected duration of a chess game (minutes:seconds) for a
given base and increment; e.g. 5+0 should take approx 10 minutes.

Assumes 40 moves per player.  Each player's clock starts with <base> minutes
and gains <increment> seconds per move, so the whole game lasts roughly twice
the base plus all increments; each player accounts for half of the duration.
"""
from argparse import ArgumentParser

MOVES = 40  # assumed number of moves per player
PLAYERS = 2


def expected_duration(base, increment, moves=MOVES, players=PLAYERS):
    """Return the expected game length as a ``(minutes, seconds)`` tuple.

    :param base: starting clock time per player, in minutes
    :param increment: seconds added to a player's clock per move
    :param moves: assumed number of moves per player (default 40)
    :param players: number of players (default 2)
    """
    # Every player's clock contributes: 'base' minutes each, plus one
    # increment per move per player.  The original expression counted the
    # base only once, contradicting the module docstring's "5+0 should
    # take approx 10 minutes".
    total_sec = base * 60 * players + increment * moves * players
    return divmod(total_sec, 60)


def main():
    """Parse the command line and print the expected game duration."""
    parser = ArgumentParser(description="Get expected time of a chess match")
    parser.add_argument("base", help="Number of minutes to start", type=int)
    parser.add_argument("increment", help="Seconds per move", type=int)
    args = parser.parse_args()
    minute, remsec = expected_duration(args.base, args.increment)
    # Zero-pad the seconds so e.g. 605 s renders as 10:05, not 10:5.
    print("{}:{:02d}".format(minute, remsec))


if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
2052,
301,
66,
532,
27131,
378,
2938,
19780,
983,
640,
13,
198,
198,
26060,
25,
269,
956,
301,
66,
1279,
8692,
29,
1279,
24988,
434,
29,
198,
198,
1212,
43707,
262,
2938,
... | 3.199203 | 251 |
''' Email Delivery Subsystem
'''
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now, localtime
# from django.utils.encoding import force_text
from django.utils.encoding import smart_str
from django import template
from django.core import serializers
from django.dispatch import dispatcher
from django.contrib.contenttypes.models import ContentType
from django.core.mail import EmailMultiAlternatives
import pydoc
import uuid
import os
from datetime import timedelta
# from email import Charset, message_from_string
from email import message_from_string
# from email.mime.text import MIMEText
import time
import utils
class BaseModel(models.Model):
    '''Base Model

    Common audit timestamps inherited by the concrete models below.
    '''
    created_at = models.DateTimeField(_(u'Created At'), auto_now_add=True, )
    updated_at = models.DateTimeField(_(u'Updated At'), auto_now=True, )
# NOTE(review): orphaned decorator -- the method it decorated is missing
# from this copy of the file; as written it (nonsensically) decorates the
# MailAddress class below.  Recover the original classmethod.
@classmethod
class MailAddress(BaseModel):
    ''' Mail Address
    '''
    email = models.EmailField(
        _('Email Address'),
        help_text=_('Email Address Help'), max_length=50)
    # Number of delivery failures recorded for this address.
    bounced = models.IntegerField(
        _('Bounced Count'),
        help_text=_('Bounced Count Help'), default=0)
    enabled = models.BooleanField(
        _('Enabled Address'), help_text=_('Enabled Address Help'),
        default=True)
    # NOTE(review): '@property' directly above an assignment is a
    # SyntaxError -- the decorated method body has been lost from this
    # copy of the file.
    @property
    bounced_signal = dispatcher.Signal(providing_args=["instance", ])
class Postbox(BaseModel):
    ''' Mail Forwarding Definition
    '''
    server = models.ForeignKey(
        Server, verbose_name=_('Sending Server'),)
    address = models.EmailField(
        _('Postbox Address'), help_text=_('Postbox Address Help'),
        max_length=50)
    # Where mail for this postbox is forwarded; SET_DEFAULT clears it to
    # None if the target MailAddress is deleted.
    forward = models.ForeignKey(
        MailAddress, verbose_name=_('Forward Address'),
        help_text=_('Forward Address Help'),
        null=True, blank=True, default=None, on_delete=models.SET_DEFAULT)
    # Soft-delete marker (presumably filtered elsewhere -- not visible in
    # this chunk).
    deleted = models.BooleanField(
        _('Is Deleted'), help_text=_('Is Deleted Help'), default=False, )
    task = models.TextField(
        _('Postbox Task'), help_text=_('Postbox Task Help'),
        null=True, blank=True, default=None)
    blacklist = models.TextField(
        _('Black List Pattern'),
        help_text=_('Black List Pattern Help'),
        null=True, blank=True, default=None)
# NOTE(review): another orphaned '@property' -- as written it would turn
# the Relay class below into a property object.  The decorated method is
# missing from this copy of the file.
@property
class Relay(BaseModel):
    ''' Relay Entries for Postbox
    '''
    sender = models.ForeignKey(
        MailAddress,
        verbose_name=_('Original Sender Address'),
        help_text=_('Original Sender Address Help'))
    postbox = models.ForeignKey(
        Postbox,
        verbose_name=_('Original Recipient Postbox'),
        help_text=_('Original Recipient Postbox Help'))
    is_spammer = models.BooleanField(
        _('Is Spammer'), default=False)
    '''`Postbox` owner can check this `sender` is a spammer.'''
    objects = RelayQuerySet.as_manager()
class MailStatus(models.Model):
    '''Mail Status

    Delivery state machine plus the "sleep window" scheduling fields;
    mixed into :class:`Mail` below.
    '''
    # State values are spaced by 10 to leave room for intermediate states.
    STATUS_DISABLED = 0
    STATUS_QUEUED = 10
    STATUS_SENDING = 20
    STATUS_SENT = 30
    STATUS = (
        (STATUS_DISABLED, _('Disabled Mail'), ),
        (STATUS_QUEUED, _('Queued Mail'), ),
        (STATUS_SENDING, _('Sending Mail'), ),
        (STATUS_SENT, _('Sent Mail'), ),
    )
    status = models.IntegerField(
        _('Mail Status'), help_text=_('Mail Status Help'),
        default=STATUS_DISABLED, choices=STATUS)
    # Earliest moment the mail may be sent; None means "no constraint".
    due_at = models.DateTimeField(
        _('Due At'), help_text=_('Due At'),
        null=True, blank=True, default=None)
    sent_at = models.DateTimeField(
        _('Sent At'), help_text=_('Sent At Help'),
        null=True, blank=True, default=None)
    # Boundaries of the do-not-send window handled by delay(); either
    # being unset disables the window logic.
    sleep_from = models.TimeField(
        _('Sleep From'), help_text=_('Sleep From Help'),
        null=True, blank=True, default=None)
    sleep_to = models.TimeField(
        _('Sleep To'), help_text=_('Sleep To Help'),
        null=True, blank=True, default=None)
    def update_due_at(self, days=0):
        '''Update due_at with `sleep_to`

        Sets due_at to the current local date plus *days*, at the
        `sleep_to` wall-clock time, and persists the row.
        '''
        self.due_at = localtime(now()) + timedelta(days=days)
        # WARN:microsecond is trunctad by MySQL 5.6+
        self.due_at = self.due_at.replace(
            hour=self.sleep_to.hour,
            minute=self.sleep_to.minute,
            second=self.sleep_to.second,
            microsecond=self.sleep_to.microsecond,)
        self.save()
    def delay(self, dt=None):
        '''Mail sending process is delayed until `sleep_to`

        Returns True (after rescheduling due_at) when *dt* -- defaulting
        to the current local time -- falls inside the sleep window; the
        window may wrap midnight (sleep_from > sleep_to).
        '''
        dt = dt or localtime(now()).time()
        # NOTE(review): truthiness is used below, so a boundary of exactly
        # midnight behaved like "unset" on Python < 3.5 -- confirm the
        # supported interpreter versions.
        if any([
                not self.sleep_from, not self.sleep_to, not dt]
                ):
            return False
        # Window contained within one day.
        if all([
                self.sleep_from <= self.sleep_to,
                self.sleep_from <= dt,
                dt <= self.sleep_to,
                ]):
            # MUST today
            self.update_due_at()
            return True
        # Window wraps midnight.
        if all([
                self.sleep_from > self.sleep_to,
                ]):
            if self.sleep_from <= dt:
                # Tomorrow
                self.update_due_at(1)
                return True
            elif dt <= self.sleep_to:
                # Today
                self.update_due_at()
                return True
        return False
    def is_active(self, dt=None):
        '''Is active mail or not

        Active means: queued, past (or without) its due time, and not yet
        sent.
        '''
        dt = dt or now()
        return all([
            self.status == self.STATUS_QUEUED,
            self.due_at is None or self.due_at <= dt,
            self.sent_at is None])
class Mail(BaseMail, MailStatus):
    '''Mail Delivery Definition

    NOTE(review): BaseMail and MailQuerySet are not visible in this chunk
    -- presumably defined/imported elsewhere in the original file, along
    with the 'sender' attribute used by send_mail() below.
    '''
    name = models.CharField(
        _('Mail Name'), help_text=_('Mail Name Help'), max_length=50,
        null=True, default=None, blank=True)
    # Free-form context data; exact serialization format is not visible
    # in this chunk -- TODO confirm.
    ctx = models.TextField(
        _('Context Data'), help_text=_('Context Data Help'),
        default=None, null=True, blank=True)
    objects = MailQuerySet.as_manager()
    def add_recipient(self, email):
        '''Add a recipient email address to this Mail.

        - email is registered as a :ref:`emailqueue.models.MailAddress`
          for bounce management.
        '''
        to, created = MailAddress.objects.get_or_create(email=email)
        recipient, created = Recipient.objects.get_or_create(
            mail=self, to=to,)
        return recipient
    def send_mail(self, *args, **kwargs):
        '''Send Mail

        Delegates to the sending server's handler when one is configured;
        silently does nothing otherwise.
        '''
        if self.sender.server and self.sender.server.handler:
            self.sender.server.handler.send_mail(self, *args, **kwargs)
class Recipient(BaseModel):
    '''Recipients for a Mail
    '''
    mail = models.ForeignKey(
        Mail, verbose_name=_('Mail'), help_text=_('Mail Help'))
    to = models.ForeignKey(
        MailAddress, verbose_name=_('Recipient Address'),
        help_text=_('Recipient Address Help'))
    # Per-recipient return path (generated in save() below from the mail
    # and recipient ids -- presumably VERP-style; confirm).
    return_path = models.EmailField(
        _('Return Path'), help_text=_('Return Path Help'), max_length=50,
        null=True, default=None, blank=True)
    # NOTE(review): 'Reipient' typo below is a runtime/translatable string
    # and is deliberately left untouched here; fix separately with its
    # translations.
    sent_at = models.DateTimeField(
        _('Sent At to Reipient'), help_text=_('Sent At to Recipient Help'),
        null=True, blank=True, default=None)
    # Bounce that this recipient generated, if any; cleared if the
    # Message row is deleted (SET_DEFAULT -> None).
    error_message = models.ForeignKey(
        'Message', verbose_name=_('Bounced Error Message'),
        help_text=_('Bounced Error Message Help'),
        related_name='bounced_recipient',
        null=True, blank=True, default=None, on_delete=models.SET_DEFAULT)
    objects = RecipientQuerySet.as_manager()
    uploaded_signal = dispatcher.Signal(providing_args=["instance", ])
    '''File Uploaded Signal'''
    def save(self, *args, **kwargs):
        ''' If return_path is not set, create it before `save` '''
        if not self.return_path and self.mail and self.to:
            self.return_path = utils.to_return_path(
                'mail', self.mail.sender.domain,
                self.mail.id, self.to.id, )
        super(Recipient, self).save(*args, **kwargs)
class Attachment(BaseModel):
    '''Attachments for a Mail
    '''
    mail = models.ForeignKey(
        Mail, verbose_name=_('Mail'), help_text=_('Mail Help'))
    # NOTE(review): no import for 'FileField' is visible in this chunk --
    # confirm whether it is a project-local field class.
    file = FileField(
        _('Attachment File'),
        help_text=_('Attrachment File Help'),)
class Message(BaseModel, MailMessage, RelayedMessage):
    ''' Raw Message

    NOTE(review): MailMessage, RelayedMessage, MessageQuerySet and
    _cached() are not visible in this chunk -- presumably defined or
    imported elsewhere in the original file.
    '''
    server = models.ForeignKey(
        Server, verbose_name=_('Recipient Server'),
        default=None, blank=True, null=True)
    service = models.EmailField(
        _('Service Name'), help_text=_('Service Name Help'), max_length=50,
        default=None, blank=True, null=True, db_index=True, )
    sender = models.EmailField(
        _('Sender'), help_text=_('Sender Help'), max_length=100,
        default=None, blank=True, null=True)
    recipient = models.EmailField(
        _('Recipient'), help_text=_('Recipient Help'), max_length=100,
        default=None, blank=True, null=True)
    original_recipient = models.EmailField(
        _('Original Recipient'),
        help_text=_('Oringinal Recipient Help'), max_length=100,
        default=None, blank=True, null=True)
    processed_at = models.DateTimeField(
        _('Processed At'), null=True, blank=True, default=None)
    objects = MessageQuerySet.as_manager()
    @property
    def bounced_parameters(self):
        ''' Email handlers of this Message object
        '''
        # Falls back to _cached() (defined elsewhere) when the cached
        # attribute has not been populated yet.
        return getattr(self, '_bounced_parameters', _cached())
    @property
    def forward_return_path(self):
        '''Return-Path for forwarding

        - used for forwarding an error message for a Relayed message
        '''
        # NOTE(review): raises IndexError if 'recipient' lacks an '@'.
        domain = self.recipient.split('@')[1]
        address = utils.to_return_path(
            "fwd", domain, str(self.id), )
        return address
    def process_message(self):
        ''' Process this Message

        Delegates to the recipient server's handler when configured;
        silently does nothing otherwise.
        '''
        if self.server and self.server.handler:
            self.server.handler.process_message(self)
| [
7061,
6,
9570,
28682,
3834,
10057,
198,
7061,
6,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
26791,
13,
24... | 2.371851 | 4,128 |
import numpy as np
import cantera as ct
import os
import sys
from . import _ember
import time
class Struct(object):
    """
    A dictionary-like data structure where fields are accessible as both
    attributes and dictionary keys::

        >>> s = Struct()
        >>> s['foo'] = 6
        >>> s.foo
        6
        >>> s.bar = 'x'
        >>> 'bar' in s:
        True
        >>> s['bar']
        'x'
        >>> s.keys()
        ['foo', 'bar']

    Valid methods of initialization, equivalent to the above:

        >>> s = Struct(foo=6, bar='x')
        >>> s = Struct({'foo': 6, 'bar': 'x'})
    """
    # NOTE(review): the method bodies of this class (constructor,
    # attribute/item access, keys(), etc.) are missing from this copy of
    # the file -- the docstring documents the intended behavior only.
class HDFStruct(Struct):
    """
    Like :class:`Struct`, but converts HDF5 structs to numpy arrays.
    """
    # NOTE(review): the loading/conversion methods are missing from this
    # copy of the file.
class NpzStruct(Struct):
    """
    Like :class:`Struct`, but loads data from NumPy 'npz' data files
    """
    # NOTE(review): the loading methods are missing from this copy of the
    # file.
def load(filename):
    """
    Generate an :class:`Struct` object from a saved profile.

    :param filename: path to a profile saved either as HDF5 (``*.h5``) or
        NumPy (``*.npz``) data.
    :returns: an :class:`HDFStruct` or :class:`NpzStruct` wrapping the file.
    :raises ValueError: if the filename has an unrecognized extension.
        (The original silently returned ``None`` here, which only deferred
        the failure to the caller.)
    """
    if filename.endswith('h5'):
        return HDFStruct(filename)
    if filename.endswith('npz'):
        return NpzStruct(filename)
    raise ValueError(
        "Unrecognized profile format for '{}': expected a filename ending "
        "in 'h5' or 'npz'".format(filename))
def get_qdot(gas, profile, pressure=101325):
    """
    Calculate the heat release rate along the flame coordinate.

    :param gas:
        A Cantera `Solution` object made using the same mechanism file used
        for the simulation.
    :param profile:
        An :class:`.HDFStruct` object created by loading a `profNNNNNN.h5` file.
    :param pressure:
        The pressure at which the simulation was run.
    """
    n_points = len(profile.T)
    heat_release = np.empty(n_points)
    for j in range(n_points):
        # Set the thermodynamic state at this grid point, then evaluate
        # the volumetric heat release  -sum_k(wdot_k * h_k).
        gas.TPY = profile.T[j], pressure, profile.Y[:, j]
        heat_release[j] = -np.dot(gas.net_production_rates,
                                  gas.partial_molar_enthalpies)
    return heat_release
def expandProfile(prof, gas, diffusion=True, reaction_rates=True):
    """
    Reconstruct derived data associated with a flame profile.

    :param prof:
        An :class:`.HDFStruct` object created by loading a `profNNNNNN.h5` file
    :param gas:
        A Cantera `Solution` object made using the same mechanism file used
        for the simulation.
    :param diffusion:
        Set to `False` to disable calculating diffusion properties (which can
        be slow for large mechanisms)
    :param reaction_rates:
        Set to `False` to disable detailed reaction rates (creation /
        destruction / rates-of-progress)

    Arrays which are reconstructed:

    * grid properties: *hh*, *cfp*, *cf*, *cfm*, *rphalf*, *dlj*
    * thermodynamic properties: *rho*, *cp*, *Wmx*, *W*
    * kinetic properties: *wdot*, *q*; plus, when *reaction_rates* is True,
      *creation_rates*, *destruction_rates*, *forward_rates_of_progress*,
      *reverse_rates_of_progress*, *net_rates_of_progress*
    * transport properties (when *diffusion* is True): *rhoD*, *k*, *mu*,
      *Dkt*, *jFick*, *jSoret*, *jCorr*
    * other: *X* (mole fractions)
    """
    N = len(prof.x)
    I = gas.n_reactions

    # Grid properties.  Older profiles may lack gridAlpha; fall back to 0
    # (planar geometry).
    try:
        gridAlpha = prof.gridAlpha
    except AttributeError:
        gridAlpha = 0

    prof.hh = np.zeros(N)
    prof.cfp = np.zeros(N)
    prof.cf = np.zeros(N)
    prof.cfm = np.zeros(N)
    prof.rphalf = np.zeros(N)
    prof.dlj = np.zeros(N)

    for j in range(N-1):
        prof.hh[j] = prof.x[j+1] - prof.x[j]
        # Use the local fallback value; the original read prof.gridAlpha
        # here, which defeated the try/except above and raised
        # AttributeError for profiles without the attribute.
        prof.rphalf[j] = (0.5 * (prof.x[j]+prof.x[j+1]))**gridAlpha

    # Finite-difference coefficients for the interior grid points.
    hh = prof.hh
    for j in range(1, N-1):
        prof.cfp[j] = hh[j-1]/(hh[j]*(hh[j]+hh[j-1]))
        prof.cf[j] = (hh[j]-hh[j-1])/(hh[j]*hh[j-1])
        prof.cfm[j] = -hh[j]/(hh[j-1]*(hh[j]+hh[j-1]))
        prof.dlj[j] = 0.5 * (prof.x[j+1] - prof.x[j-1])

    # Thermodynamic / Transport / Kinetic properties
    K = gas.n_species
    try:
        P = prof.P
    except AttributeError:
        P = 101325

    if diffusion:
        prof.rhoD = np.zeros((K,N))
        prof.Dkt = np.zeros((K,N))
        prof.jFick = np.zeros((K,N))
        prof.jSoret = np.zeros((K,N))
        prof.jCorr = np.zeros(N)

    prof.rho = np.zeros(N)
    prof.wdot = np.zeros((K,N))
    prof.q = np.zeros(N)
    prof.k = np.zeros(N)
    prof.cp = np.zeros(N)
    prof.mu = np.zeros(N)
    prof.Wmx = np.zeros(N)
    prof.X = np.zeros((K,N))

    if reaction_rates:
        prof.creation_rates = np.zeros((K,N))
        prof.destruction_rates = np.zeros((K,N))
        prof.forward_rates_of_progress = np.zeros((I,N))
        prof.reverse_rates_of_progress = np.zeros((I,N))
        prof.net_rates_of_progress = np.zeros((I,N))

    for j in range(N):
        gas.TPY = prof.T[j], P, prof.Y[:,j]
        prof.rho[j] = gas.density
        wdot = gas.net_production_rates
        prof.wdot[:,j] = wdot
        prof.q[j] = -np.dot(wdot, gas.partial_molar_enthalpies)
        if reaction_rates:
            # These arrays only exist when reaction_rates is True; the
            # original assigned them unconditionally and crashed with
            # AttributeError for reaction_rates=False.
            prof.creation_rates[:,j] = gas.creation_rates
            prof.destruction_rates[:,j] = gas.destruction_rates
            prof.forward_rates_of_progress[:,j] = gas.forward_rates_of_progress
            prof.reverse_rates_of_progress[:,j] = gas.reverse_rates_of_progress
            prof.net_rates_of_progress[:,j] = gas.net_rates_of_progress
        prof.X[:,j] = gas.X
        prof.k[j] = gas.thermal_conductivity
        prof.cp[j] = gas.cp_mass
        prof.mu[j] = gas.viscosity
        prof.Wmx[j] = gas.mean_molecular_weight

        if diffusion:
            # Mixture-averaged diffusion coefficients built from the
            # binary coefficients; eps avoids division by zero when one
            # species' mass fraction approaches 1.
            Dbin = gas.binary_diff_coeffs
            prof.Dkt[:,j] = gas.thermal_diff_coeffs
            eps = 1e-15
            for k in range(K):
                X = gas.X
                Y = gas.Y
                sum1 = sum(X[i]/Dbin[k,i] for i in range(K) if i != k)
                sum2 = sum((Y[i]+eps/K)/Dbin[k,i] for i in range(K) if i != k)
                prof.rhoD[k,j] = prof.rho[j]/(sum1 + X[k]/(1+eps-Y[k])*sum2)

    if diffusion:
        # Fickian and Soret fluxes at interior points, plus the correction
        # flux that restores sum-to-zero of the species fluxes.
        for j in range(1, N-1):
            for k in range(K):
                prof.jFick[k,j] = -0.5 * ((prof.rhoD[k,j] + prof.rhoD[k,j+1]) *
                                          ((prof.Y[k,j+1]-prof.Y[k,j])/prof.hh[j]))
                prof.jSoret[k,j] = -0.5 * ((prof.Dkt[k,j]/prof.T[j] +
                                            prof.Dkt[k,j+1]/prof.T[j+1]) *
                                           (prof.T[j+1]-prof.T[j])/prof.hh[j])
                prof.jCorr[j] -= prof.jFick[k,j] + prof.jSoret[k,j]

    prof.W = gas.molecular_weights
| [
11748,
299,
32152,
355,
45941,
198,
11748,
460,
49600,
355,
269,
83,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
764,
1330,
4808,
1491,
198,
11748,
640,
198,
198,
4871,
32112,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
... | 2.001292 | 3,095 |
# test_explorer.py
# meant to be run with 'pytest'
#
# This file is part of scqubits: a Python package for superconducting qubits,
# arXiv:2107.08552 (2021). https://arxiv.org/abs/2107.08552
#
# Copyright (c) 2019 and later, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import numpy as np
import scqubits as scq
from scqubits import Explorer, InteractionTerm, ParameterSweep
| [
2,
1332,
62,
20676,
11934,
13,
9078,
198,
2,
4001,
284,
307,
1057,
351,
705,
9078,
9288,
6,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
629,
421,
9895,
25,
257,
11361,
5301,
329,
2208,
36495,
278,
627,
9895,
11,
198,
2,
610,
55,
... | 3.338798 | 183 |
import coreapi
import time
import random
from datetime import timedelta
from django.contrib.auth import get_user_model
from django.conf import settings
from django.utils import timezone
from rest_framework import parsers, renderers, status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.schemas import AutoSchema
from django_rest_passwordreset.serializers import *
from django_rest_passwordreset.models import ResetPasswordToken
from django_rest_passwordreset.signals import reset_password_token_created, pre_password_reset, post_password_reset
from django_rest_passwordreset.utils import get_client_masked_ip
User = get_user_model()
def get_password_reset_token_expiry_time(is_long_token=False):
    """
    Return the password reset token expiry time in hours.

    Defaults to 48 hours for long-lived tokens and 24 hours otherwise.
    Override via the Django settings
    DJANGO_REST_MULTITOKENAUTH_RESET_TOKEN_LONG_EXPIRY_TIME /
    DJANGO_REST_MULTITOKENAUTH_RESET_TOKEN_EXPIRY_TIME.

    :param is_long_token: whether to use the long-lived expiry setting
    :return: expiry time in hours
    """
    setting_name, fallback = (
        ('DJANGO_REST_MULTITOKENAUTH_RESET_TOKEN_LONG_EXPIRY_TIME', 48)
        if is_long_token
        else ('DJANGO_REST_MULTITOKENAUTH_RESET_TOKEN_EXPIRY_TIME', 24)
    )
    return getattr(settings, setting_name, fallback)
def get_use_username():
    """
    Tell whether password-reset user lookup should match on username
    rather than e-mail address.

    Override by setting DJANGO_REST_MULTITOKENAUTH_USE_USERNAME in the
    Django settings (defaults to False).

    :return: True to search by username, False to search by e-mail
    """
    use_username = getattr(
        settings, 'DJANGO_REST_MULTITOKENAUTH_USE_USERNAME', False
    )
    return use_username
def get_new_token(user, request):
    """
    Create and return a fresh password reset token for *user*.

    The token records the requesting client's user agent and masked IP
    address for auditing purposes.
    """
    token_fields = {
        'user': user,
        'user_agent': request.META['HTTP_USER_AGENT'],
        'ip_address': get_client_masked_ip(request),
    }
    return ResetPasswordToken.objects.create(**token_fields)
class ResetPasswordConfirm(APIView):
    """
    An Api View which provides a method to reset a password based on a unique token
    """
    # Open endpoint: no throttling or permission checks — access is gated
    # by possession of a valid reset token instead of authentication.
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    # Validates the (password, token) request payload.
    serializer_class = PasswordTokenSerializer
    # Manual coreapi schema: both fields arrive in the request body.
    schema = AutoSchema(
        manual_fields=[
            coreapi.Field('password', location='body', required=True),
            coreapi.Field('token', location='body', required=True),
        ]
    )
class ResetPasswordCheck(APIView):
    """
    An Api View which provides a method to check that a token is valid.
    """
    # Open endpoint: validity of the token itself is the only gate.
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    # Validates that a 'token' field is present and well-formed.
    serializer_class = TokenSerializer
    # Manual coreapi schema: the token arrives in the request body.
    schema = AutoSchema(
        manual_fields=[
            coreapi.Field('token', location='body', required=True),
        ]
    )
class ResetPasswordRequestToken(APIView):
    """
    An Api View which provides a method to request a password reset token based on an e-mail address
    Sends a signal reset_password_token_created when a reset token was created
    """
    # Open endpoint: anyone may request a reset for an e-mail address.
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    # Validates the 'email' field of the request payload.
    serializer_class = EmailSerializer
    # Manual coreapi schema: the address arrives in the request body.
    schema = AutoSchema(
        manual_fields=[
            coreapi.Field('email', location='body', required=True, type='email'),
        ]
    )
# Pre-bound view callables for wiring into a URLconf.
reset_password_confirm = ResetPasswordConfirm.as_view()
reset_password_check = ResetPasswordCheck.as_view()
reset_password_request_token = ResetPasswordRequestToken.as_view()
| [
11748,
4755,
15042,
198,
11748,
640,
198,
11748,
4738,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
67... | 2.867717 | 1,270 |
import requests
import json
from contextlib import closing
from openapi_server.models.query_graph import QueryGraph
from openapi_server.controllers.utils import translate_type
definition_file = 'transformer_chains.json'
knowledge_map = KnowledgeMap()
| [
11748,
7007,
198,
11748,
33918,
198,
6738,
4732,
8019,
1330,
9605,
198,
198,
6738,
1280,
15042,
62,
15388,
13,
27530,
13,
22766,
62,
34960,
1330,
43301,
37065,
198,
198,
6738,
1280,
15042,
62,
15388,
13,
3642,
36667,
13,
26791,
1330,
15... | 3.694444 | 72 |
# startstop.py
# Copyright 2020 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Functions to assist application start, stop, and exception display."""
import tkinter
import tkinter.messagebox
from .exceptionhandler import GRAB_ERROR
def start_application_exception(error, appname="Application", action="start"):
    """Report an application exception during startup.

    Shows *error* in a message box; if the message box itself cannot be
    displayed, terminates the process via SystemExit.

    :param error: the exception that prevented startup
    :param appname: application name used in the dialog title and text
    :param action: verb describing what failed (e.g. "start")
    """
    try:
        tkinter.messagebox.showerror(
            title=appname.join(("Start ", " Exception")),
            message=".\n\nThe reported exception is:\n\n".join(
                (action.join(("Unable to ", " " + appname)), str(error))
            ),
        )
    except tkinter.TclError as tkerror:
        # A grab failure means another dialog already holds the pointer
        # grab; anything else is a genuine tkinter failure.
        if str(tkerror) != GRAB_ERROR:
            raise SystemExit(
                "Problem in tkinter reporting failure to start application"
            )
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt propagate instead of being re-wrapped.
        raise SystemExit("Problem reporting failure to start application")
def stop_application(app, topwidget):
    """Destroy the application's top-level widget and drop *app*.

    Best-effort teardown: any error raised while destroying the widget
    is deliberately ignored so that shutdown always completes.

    :param app: application object to release (local reference only)
    :param topwidget: widget whose ``destroy()`` is invoked
    """
    try:
        topwidget.destroy()
    except Exception:
        # Narrowed from a bare 'except:'; destruction failures are
        # deliberately ignored during shutdown.
        pass
    try:
        # NOTE(review): this only removes the local name binding, not the
        # caller's reference; kept for parity with the original intent.
        del app
    except Exception:
        pass
def application_exception(
    error, app, topwidget, title="Application", appname="the application"
):
    """Report an application exception during run.

    Attempts to show *error* parented on *topwidget*; if that fails,
    falls back to a freshly created Tk root, and raises SystemExit when
    the failure cannot even be reported.  On the non-fatal paths the
    application is finally torn down via stop_application().
    """
    try:
        tkinter.messagebox.showerror(
            parent=topwidget,
            title=title,
            message="".join(
                (
                    "An exception which cannot be handled within ",
                    appname,
                    " has occurred.",
                    "\n\nThe reported exception is:\n\n",
                    str(error),
                )
            ),
        )
    # A grab failure just means another dialog holds the pointer grab;
    # any other TclError is fatal for reporting.
    except tkinter.TclError as local_error:
        if str(local_error) != GRAB_ERROR:
            raise SystemExit(
                "".join(
                    (
                        "Problem in tkinter reporting exception within ",
                        appname,
                        " : ",
                        str(local_error),
                    )
                )
            )
    # Non-Tcl failure: retry the dialog on a brand-new Tk root in case
    # the original topwidget is already unusable.
    except Exception as local_error:
        try:
            ser = tkinter.Tk()
            ser.wm_title(title)
            try:
                tkinter.messagebox.showerror(
                    parent=ser,
                    title=title,
                    message="".join(
                        (
                            "An exception which cannot be handled by ",
                            appname,
                            " has occurred.",
                            "\n\nThe reported exception is:\n\n",
                            str(local_error),
                        )
                    ),
                )
            except tkinter.TclError as _error:
                if str(_error) != GRAB_ERROR:
                    raise SystemExit(
                        "".join(
                            (
                                "Problem in tkinter reporting exception in ",
                                appname,
                                " : ",
                                str(_error),
                            )
                        )
                    )
            except Exception as _error:
                raise SystemExit(
                    "".join(
                        (
                            "Problem reporting exception in ",
                            appname,
                            " : ",
                            str(_error),
                        )
                    )
                )
            # Fallback root served its purpose; clean it up.
            ser.destroy()
            del ser
        # Even the fallback reporting path failed: give up and exit.
        except Exception as exc:
            raise SystemExit(
                "".join(
                    (
                        "Problem reporting problem in reporting exception in ",
                        appname,
                        " : ",
                        str(exc),
                    )
                )
            )
    stop_application(app, topwidget)
| [
2,
923,
11338,
13,
9078,
198,
2,
15069,
12131,
13637,
9786,
198,
2,
10483,
594,
25,
4091,
38559,
18310,
357,
21800,
17098,
8,
198,
198,
37811,
24629,
2733,
284,
3342,
3586,
923,
11,
2245,
11,
290,
6631,
3359,
526,
15931,
198,
198,
1... | 1.69344 | 2,378 |
import base64
import hashlib
import os
import work_wechat
webhook_key = os.environ.get("WEBHOOK_KEY")
| [
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
28686,
198,
198,
11748,
670,
62,
732,
17006,
198,
198,
12384,
25480,
62,
2539,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
8845,
33,
39,
15308,
62,
20373,
4943,
628,
628,
198
] | 2.634146 | 41 |
# ============================================================================
# zmatrix.py -- ZEntry, ZMatrix structs; prototypes for functions used in
# latch module
# ----------------------------------------------------------------------------
# Author: Benjamin P. Haley, Tongtong Shen, Purdue University
# Copyright (c) 2012 Purdue University
# ----------------------------------------------------------------------------
# See the LICENSE file for information on usage and redistribution of this
# file and for a DISCLAIMER OF ALL WARRANTIES.
# ============================================================================
import copy
import numpy as np
from .config import REAL_MAX, MAX_BONDS
from .element import getElementName
# from .stdio import FILE
from .types import AtomType
from .utils import DEG2RAD
# Sentinel coordinate marking an atom whose position has not been set.
NO_POSITION = -REAL_MAX
# Minimum number of stored positions
MIN_POSITIONS = 3
# Internal coordinate representation of a single atom
# ============================================================================
# setPosition()
# ----------------------------------------------------------------------------
# Result: set the position of the index-th atom; calls choke() is index is out
# of range
# ============================================================================
# ============================================================================
# clearPosition()
# ----------------------------------------------------------------------------
# Result: set the position of the index-th atom to NO_POSITION; calls choke()
# if index is out of range
# ============================================================================
# ============================================================================
# isBonded()
# ----------------------------------------------------------------------------
# Result: return 1 if entries n and m are separated by at most max_bonds
# bonds in zm, else return 0
# ============================================================================
# ============================================================================
# getPosition()
# ----------------------------------------------------------------------------
# Result: store the position of the index-th atom in *pos; calls choke() if
# index is out of range
# ============================================================================
# ============================================================================
# writeZMatrix()
# ----------------------------------------------------------------------------
# Result: write a ZMatrix to a FILE
# ============================================================================
# ============================================================================
# createZMatrix()
# ----------------------------------------------------------------------------
# Result: return a pointer to a newly allocated, initialized ZMatrix; calls
# choke() if allocation fails
# ============================================================================
| [
2,
38093,
2559,
18604,
198,
2,
1976,
6759,
8609,
13,
9078,
1377,
1168,
30150,
11,
1168,
46912,
2878,
82,
26,
32338,
329,
5499,
973,
287,
198,
2,
43140,
8265,
198,
2,
16529,
10541,
198,
2,
6434,
25,
14533,
350,
13,
30646,
11,
26565,
... | 5.717668 | 549 |
import re
from runner import Runner
from collections import Counter
from pretty_print import Print_C | [
11748,
302,
198,
6738,
17490,
1330,
21529,
198,
6738,
17268,
1330,
15034,
198,
6738,
2495,
62,
4798,
1330,
12578,
62,
34
] | 4.761905 | 21 |
from .colors import Colors
from voidpp_tools.terminal import get_size
| [
198,
6738,
764,
4033,
669,
1330,
29792,
198,
198,
6738,
7951,
381,
62,
31391,
13,
23705,
282,
1330,
651,
62,
7857,
198
] | 3.272727 | 22 |
from django.urls import path
from escape_rooms.organizations_app.views import CompanyListView, CompanyDetailView, CompanyCreateView, \
EmployeeListView, EmployeeDetailView, EmployeeCreateView
# Routing for the organizations app: list, detail (by primary key) and
# create endpoints for companies and employees.
urlpatterns = [
    path('companies/', CompanyListView.as_view()),
    path('company/<int:pk>/', CompanyDetailView.as_view()),
    path('company/', CompanyCreateView.as_view()),
    path('employees/', EmployeeListView.as_view()),
    path('employee/<int:pk>/', EmployeeDetailView.as_view()),
    path('employee/', EmployeeCreateView.as_view()),
]
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
6654,
62,
9649,
13,
9971,
4582,
62,
1324,
13,
33571,
1330,
5834,
8053,
7680,
11,
5834,
11242,
603,
7680,
11,
5834,
16447,
7680,
11,
3467,
198,
220,
220,
220,
36824,
8053,
... | 2.878307 | 189 |
"""
Trains a Pixel-CNN++ generative model on CIFAR-10 or Tiny ImageNet data.
Uses multiple GPUs, indicated by the flag --nr_gpu
Example usage:
CUDA_VISIBLE_DEVICES=0,1,2,3 python train_double_cnn.py --nr_gpu 4
"""
import os
import sys
import json
import argparse
import time
import numpy as np
import tensorflow.compat.v1 as tf
from pixel_cnn_pp import nn
from pixel_cnn_pp.model import model_spec
from utils import plotting
import datetime
# -----------------------------------------------------------------------------
# parser = argparse.ArgumentParser()
# # data I/O
# parser.add_argument('-i', '--data_dir', type=str, default='/local_home/tim/pxpp/data', help='Location for the dataset')
# parser.add_argument('-o', '--save_dir', type=str, default='/local_home/tim/pxpp/save', help='Location for parameter checkpoints and samples')
# parser.add_argument('-d', '--data_set', type=str, default='cifar', help='Can be either cifar|imagenet')
# parser.add_argument('-t', '--save_interval', type=int, default=20, help='Every how many epochs to write checkpoint/samples?')
# parser.add_argument('-r', '--load_params', dest='load_params', action='store_true', help='Restore training from previous model checkpoint?')
# # model
# parser.add_argument('-q', '--nr_resnet', type=int, default=5, help='Number of residual blocks per stage of the model')
# parser.add_argument('-n', '--nr_filters', type=int, default=160, help='Number of filters to use across the model. Higher = larger model.')
# parser.add_argument('-m', '--nr_logistic_mix', type=int, default=10, help='Number of logistic components in the mixture. Higher = more flexible model')
# parser.add_argument('-z', '--resnet_nonlinearity', type=str, default='concat_elu', help='Which nonlinearity to use in the ResNet layers. One of "concat_elu", "elu", "relu" ')
# parser.add_argument('-c', '--class_conditional', dest='class_conditional', action='store_true', help='Condition generative model on labels?')
# parser.add_argument('-ed', '--energy_distance', dest='energy_distance', action='store_true', help='use energy distance in place of likelihood')
# # optimization
# parser.add_argument('-l', '--learning_rate', type=float, default=0.001, help='Base learning rate')
# parser.add_argument('-e', '--lr_decay', type=float, default=0.999995, help='Learning rate decay, applied every step of the optimization')
# parser.add_argument('-b', '--batch_size', type=int, default=16, help='Batch size during training per GPU')
# parser.add_argument('-u', '--init_batch_size', type=int, default=16, help='How much data to use for data-dependent initialization.')
# parser.add_argument('-p', '--dropout_p', type=float, default=0.5, help='Dropout strength (i.e. 1 - keep_prob). 0 = No dropout, higher = more dropout.')
# parser.add_argument('-x', '--max_epochs', type=int, default=5000, help='How many epochs to run in total?')
# parser.add_argument('-g', '--nr_gpu', type=int, default=8, help='How many GPUs to distribute the training across?')
# # evaluation
# parser.add_argument('--polyak_decay', type=float, default=0.9995, help='Exponential decay rate of the sum of previous model iterates during Polyak averaging')
# parser.add_argument('-ns', '--num_samples', type=int, default=1, help='How many batches of samples to output.')
# # reproducibility
# parser.add_argument('-s', '--seed', type=int, default=1, help='Random seed to use')
# args = parser.parse_args()
# print('input args:\n', json.dumps(vars(args), indent=4, separators=(',',':'))) # pretty print args
args = parser_()
args.data_dir=r'C:\Users\justjo\Downloads\public_datasets'
args.save_dir=r'C:\Users\justjo\PycharmProjects\pixel-cnn\model_checkpoints'#, help='Location for parameter checkpoints and samples')
args.data_set='cifar'#, help='Can be either cifar|imagenet')
args.save_interval=1 #, help='Every how many epochs to write checkpoint/samples?')
args.load_params=False
# model
args.nr_resnet=5 #, help='Number of residual blocks per stage of the model')
args.nr_filters=120 #, help='Number of filters to use across the model. Higher = larger model.')
args.nr_logistic_mix=10 #, help='Number of logistic components in the mixture. Higher = more flexible model')
args.resnet_nonlinearity='concat_elu' #, help='Which nonlinearity to use in the ResNet layers. One of "concat_elu", "elu", "relu" ')
args.class_conditional=False
args.energy_distance=False
# optimization
args.learning_rate=0.001#, help='Base learning rate')
args.lr_decay=0.999995#, help='Learning rate decay, applied every step of the optimization')
args.batch_size=32#, help='Batch size during training per GPU')
args.init_batch_size=16#, help='How much data to use for data-dependent initialization.')
args.dropout_p=0#, help='Dropout strength (i.e. 1 - keep_prob). 0 = No dropout, higher = more dropout.')
args.max_epochs=5000#, help='How many epochs to run in total?')
args.nr_gpu=1#, help='How many GPUs to distribute the training across?')
args.svd_mat = False
# evaluation
args.polyak_decay=0.9995#, help='Exponential decay rate of the sum of previous model iterates during Polyak averaging')
args.num_samples=1#, help='How many batches of samples to output.')
# reproducibility
args.seed=1#, help='Random seed to use')
# -----------------------------------------------------------------------------
# fix random seed for reproducibility
rng = np.random.RandomState(args.seed)
tf.set_random_seed(args.seed)
# energy distance or maximum likelihood?
# if args.energy_distance:
# loss_fun = nn.energy_distance
# else:
loss_fun = nn.discretized_mix_logistic_loss
# initialize data loaders for train/test splits
if args.data_set == 'imagenet' and args.class_conditional:
raise("We currently don't have labels for the small imagenet data set")
if args.data_set == 'cifar':
import data.cifar10_data as cifar10_data
DataLoader = cifar10_data.DataLoader
elif args.data_set == 'imagenet':
import data.imagenet_data as imagenet_data
DataLoader = imagenet_data.DataLoader
else:
raise("unsupported dataset")
train_data = DataLoader(args.data_dir, 'train', args.batch_size, rng=rng, shuffle=True, return_labels=args.class_conditional, svd_mat=args.svd_mat)
val_data = DataLoader(args.data_dir, 'val', args.batch_size, shuffle=False, return_labels=args.class_conditional, svd_mat = train_data.svd_mat)
test_data = DataLoader(args.data_dir, 'test', args.batch_size, shuffle=False, return_labels=args.class_conditional, svd_mat = train_data.svd_mat)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
assert len(obs_shape) == 3, 'assumed right now'
# data place holders
x_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
# xs = [tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape) for i in range(args.nr_gpu)]
xs = tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape)
#tensorboard stuff
tf_loss_ph = tf.placeholder(tf.float32,shape=None,name='loss_summary')
# if the model is class-conditional we'll set up label placeholders + one-hot encodings 'h' to condition on
# if args.class_conditional:
# num_labels = train_data.get_num_labels()
# y_init = tf.placeholder(tf.int32, shape=(args.init_batch_size,))
# h_init = tf.one_hot(y_init, num_labels)
# y_sample = np.split(np.mod(np.arange(args.batch_size*args.nr_gpu), num_labels), args.nr_gpu)
# h_sample = [tf.one_hot(tf.Variable(y_sample[i], trainable=False), num_labels) for i in range(args.nr_gpu)]
# ys = [tf.placeholder(tf.int32, shape=(args.batch_size,)) for i in range(args.nr_gpu)]
# hs = [tf.one_hot(ys[i], num_labels) for i in range(args.nr_gpu)]
# else:
h_init = None
h_sample = [None] * args.nr_gpu
hs = h_sample
# create the model
model_opt = { 'nr_resnet': args.nr_resnet, 'nr_filters': args.nr_filters, 'nr_logistic_mix': args.nr_logistic_mix, 'resnet_nonlinearity': args.resnet_nonlinearity, 'energy_distance': args.energy_distance }
model = tf.make_template('model', model_spec)
# run once for data dependent initialization of parameters
init_pass = model(x_init, h_init, init=True, dropout_p=args.dropout_p, **model_opt)
# keep track of moving average
# all_params = tf.trainable_variables()
# ema = tf.train.ExponentialMovingAverage(decay=args.polyak_decay)
# maintain_averages_op = tf.group(ema.apply(all_params))
# ema_params = [ema.average(p) for p in all_params]
# get loss gradients over multiple GPUs + sampling
# grads = []
# loss_gen = []
# loss_gen_test = []
# loss_gen_val = []
# new_x_gen = []
# for i in range(args.nr_gpu):
# with tf.device('/gpu:%d' % i):
with tf.device('/gpu:0'):
# train
out = model(xs, h = None, ema=None, dropout_p=args.dropout_p, **model_opt)
loss_gen = loss_fun(tf.stop_gradient(xs), out)
optimizer = tf.train.AdamOptimizer().minimize(loss_gen)
# gradients
# grads.append(tf.gradients(loss_gen, all_params, colocate_gradients_with_ops=True))
# test
out = model(xs, h=None, ema=None, dropout_p=0., **model_opt)
loss_gen_test = loss_fun(xs, out)
# val
out = model(xs, h=None, ema=None, dropout_p=0., **model_opt)
loss_gen_val = loss_fun(xs, out)
# sample
# out = model(xs, h_sample, ema=ema, dropout_p=0, **model_opt)
# if args.energy_distance:
# new_x_gen.append(out[0])
# else:
# new_x_gen.append(nn.sample_from_discretized_mix_logistic(out, args.nr_logistic_mix))
# add losses and gradients together and get training updates
# tf_lr = tf.placeholder(tf.float32, shape=[])
# with tf.device('/gpu:0'):
# for i in range(1,args.nr_gpu):
# loss_gen[0] += loss_gen[i]
# loss_gen_test[0] += loss_gen_test[i]
# loss_gen_val[0] += loss_gen_val[i]
# for j in range(len(grads[0])):
# grads[0][j] += grads[i][j]
# training op
# optimizer = tf.group(nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)
# optimizer = tf.train.AdamOptimizer().minimize(loss_gen)
# convert loss to bits/dim
bits_per_dim = loss_gen/(np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_test = loss_gen_test/(np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_val = loss_gen_val/(np.log(2.)*np.prod(obs_shape)*args.batch_size)
training_summary_batch = tf.summary.scalar("training_accuracy_batch",bits_per_dim) #bits_per_dim
training_summary = tf.summary.scalar("training_accuracy",tf_loss_ph) #bits_per_dim
test_summary = tf.summary.scalar("test_accuracy", tf_loss_ph) #bits_per_dim_test
validation_summary = tf.summary.scalar("validation_accuracy", tf_loss_ph) #bits_per_dim_val
# sample from the model
# def sample_from_model(sess):
# x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
# for yi in range(obs_shape[0]):
# for xi in range(obs_shape[1]):
# new_x_gen_np = sess.run(new_x_gen, {xs[i]: x_gen[i] for i in range(args.nr_gpu)})
# for i in range(args.nr_gpu):
# x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
# return np.concatenate(x_gen, axis=0)
# init & save
initializer = tf.global_variables_initializer()
saver = tf.train.Saver()
# turn numpy inputs into feed_dict for use with tensorflow
# def make_feed_dict(data, init=False):
# if type(data) is tuple:
# x,y = data
# else:
# x = data
# y = None
# # x = np.cast[np.float32]((x - 127.5) / 127.5) # input to pixelCNN is scaled from uint8 [0,255] to float in range [-1,1]
# x = np.cast[np.float32](x) # input to pixelCNN is scaled from uint8 [0,255] to float in range [-1,1]
#
# if init:
# feed_dict = {x_init: x}
# if y is not None:
# feed_dict.update({y_init: y})
# else:
# x = np.split(x, args.nr_gpu)
# feed_dict = {xs[i]: x[i] for i in range(args.nr_gpu)}
# if y is not None:
# y = np.split(y, args.nr_gpu)
# feed_dict.update({ys[i]: y[i] for i in range(args.nr_gpu)})
# return feed_dict
args.path = os.path.join('checkpoint', 'nr_resnet{}_h{}nr_filters{}_{}'.format(
args.nr_filters, args.nr_resnet, args.nr_logistic_mix,
str(datetime.datetime.now())[:-7].replace(' ', '-').replace(':', '-')))
ckpt_file = os.path.join(args.save_dir, str(datetime.datetime.now())[:-7].replace(' ', '-').replace(':', '-'), 'params_' + args.data_set + '.ckpt')
writer = tf.summary.FileWriter(os.path.join('tensorboard', args.path))
# //////////// perform training //////////////
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
test_bpd = []
val_bpd = []
lr = args.learning_rate
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
test_sum_min = 999999
# with tf.Session(config=config) as sess:
for epoch in range(args.max_epochs):
begin = time.time()
# init
if epoch == 0:
train_data.reset() # rewind the iterator back to 0 to do one full epoch
if args.load_params:
ckpt_file = args.save_dir + '/params_' + args.data_set + '.ckpt' ## put the checkpoint file loc here when restoring model
print('restoring parameters from', ckpt_file)
saver.restore(sess, ckpt_file)
else:
print('initializing the model...')
sess.run(initializer)
# feed_dict = make_feed_dict(train_data.next(args.init_batch_size), init=True) # manually retrieve exactly init_batch_size examples
feed_dict = {x_init: train_data.next(args.init_batch_size).astype(np.float32)}
sess.run(init_pass, feed_dict)
################ caluclate starting losses
test_losses = []
for d in test_data:
# feed_dict = make_feed_dict(d)
feed_dict = {xs: d}
l = sess.run(bits_per_dim_test, feed_dict)
test_losses.append(l)
test_loss_gen = np.mean(test_losses)
# test_bpd.append(test_loss_gen)
feed_dict = {tf_loss_ph: test_loss_gen}
test_summ = sess.run(test_summary, feed_dict)
writer.add_summary(test_summ, epoch)
test_sum_min = test_loss_gen
##################################
val_losses = []
for d in val_data:
# feed_dict = make_feed_dict(d)
feed_dict = {xs: d}
l = sess.run(bits_per_dim_val, feed_dict)
val_losses.append(l)
val_loss_gen = np.mean(val_losses)
val_bpd.append(val_loss_gen)
feed_dict = {tf_loss_ph: val_loss_gen}
val_summ = sess.run(validation_summary, feed_dict)
writer.add_summary(val_summ, epoch)
#########################################
train_losses = []
for cnt, d in enumerate(train_data):
# feed_dict = make_feed_dict(d)
# forward/backward/update model on each gpu
# lr *= args.lr_decay
# feed_dict.update({ tf_lr: lr })
feed_dict = {xs: d}
l = sess.run(bits_per_dim, feed_dict)
train_losses.append(l)
train_loss_gen = np.mean(train_losses)
feed_dict = {tf_loss_ph: train_loss_gen}
train_summ = sess.run(training_summary, feed_dict)
writer.add_summary(train_summ, epoch)
#######################################
print('starting training')
# train for one epoch
train_losses = []
mult = len([x for x in train_data])
for cnt, d in enumerate(train_data):
# feed_dict = make_feed_dict(d)
# forward/backward/update model on each gpu
# lr *= args.lr_decay
# feed_dict.update({ tf_lr: lr })
feed_dict = {xs: d}
l,_, train_summ = sess.run([bits_per_dim, optimizer, training_summary_batch], feed_dict)
writer.add_summary(train_summ, mult*epoch + cnt)
train_losses.append(l)
train_loss_gen = np.mean(train_losses)
feed_dict = {tf_loss_ph: train_loss_gen}
train_summ = sess.run(training_summary, feed_dict)
writer.add_summary(train_summ, epoch)
# compute likelihood over test data
test_losses = []
for d in test_data:
# feed_dict = make_feed_dict(d)
feed_dict = {xs: d}
l = sess.run(bits_per_dim_test, feed_dict)
test_losses.append(l)
test_loss_gen = np.mean(test_losses)
# test_bpd.append(test_loss_gen)
feed_dict = {tf_loss_ph: test_loss_gen}
test_summ = sess.run(test_summary, feed_dict)
writer.add_summary(test_summ, epoch)
# compute likelihood over validation data
val_losses = []
for d in val_data:
# feed_dict = make_feed_dict(d)
feed_dict = {xs: d}
l= sess.run(bits_per_dim_val , feed_dict)
val_losses.append(l)
val_loss_gen = np.mean(val_losses)
val_bpd.append(val_loss_gen)
feed_dict = {tf_loss_ph: val_loss_gen}
val_summ = sess.run(validation_summary, feed_dict)
writer.add_summary(val_summ, epoch)
# log progress to console
print("Iteration %d, time = %ds, train bits_per_dim = %.4f, test bits_per_dim = %.4f" % (epoch, time.time()-begin, train_loss_gen, test_loss_gen))
sys.stdout.flush()
if test_sum_min > test_loss_gen:
test_sum_min = test_loss_gen
saver.save(sess, ckpt_file)
# if epoch % args.save_interval == 0:
#
# generate samples from the model
# sample_x = []
# for i in range(args.num_samples):
# sample_x.append(sample_from_model(sess))
# sample_x = np.concatenate(sample_x,axis=0)
# img_tile = plotting.img_tile(sample_x[:100], aspect_ratio=1.0, border_color=1.0, stretch=True)
# img = plotting.plot_img(img_tile, title=args.data_set + ' samples')
# plotting.plt.savefig(os.path.join(args.save_dir,'%s_sample%d.png' % (args.data_set, epoch)))
# plotting.plt.close('all')
# np.savez(os.path.join(args.save_dir,'%s_sample%d.npz' % (args.data_set, epoch)), sample_x)
# save params
# saver.save(sess, ckpt_file)
# np.savez(args.save_dir + '/test_bpd_' + args.data_set + '.npz', test_bpd=np.array(test_bpd))
sess.close()
| [
37811,
198,
2898,
1299,
257,
11349,
12,
18474,
4880,
1152,
876,
2746,
319,
327,
5064,
1503,
12,
940,
393,
20443,
7412,
7934,
1366,
13,
198,
5842,
274,
3294,
32516,
11,
8203,
416,
262,
6056,
1377,
48624,
62,
46999,
198,
198,
16281,
874... | 2.458849 | 7,351 |
import json
import pickle
import sys
from collections import Iterator
from typing import List, Dict, Any
import gym
import numpy as np
import torch
import torch.nn as nn
from dialogue_config import map_index_to_action, AGENT_ACTIONS
from error_model_controller import ErrorModelController
from rulebased_agent import RuleBasedAgent
from state_tracker import StateTracker
from user_simulator import UserSimulator, UserGoal
from utils import remove_empty_slots
if __name__ == "__main__":
params = get_params()
file_path_dict = params["db_file_paths"]
DATABASE_FILE_PATH = file_path_dict["database"]
DICT_FILE_PATH = file_path_dict["dict"]
USER_GOALS_FILE_PATH = file_path_dict["user_goals"]
train_params = params["run"]
slot2values, database, user_goals = load_data(
DATABASE_FILE_PATH, DICT_FILE_PATH, USER_GOALS_FILE_PATH
)
dialog_env = DialogEnv(
user_goals, params["emc"], params["run"]["max_round_num"], database, slot2values
)
# agent = DialogManagerAgent(dialog_env.observation_space, dialog_env.action_space)
rule_agent = RuleBasedAgent(params["agent"]["epsilon_init"])
experience_iterator = iter(experience_generator(rule_agent, dialog_env))
batch = gather_experience(experience_iterator)
print()
| [
11748,
33918,
198,
11748,
2298,
293,
198,
11748,
25064,
198,
6738,
17268,
1330,
40806,
1352,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4377,
198,
198,
11748,
11550,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
... | 2.848352 | 455 |
# encoding=UTF-8
from PyInstaller.utils.hooks import copy_metadata
# Bundle taskgraph's distribution metadata into the frozen executable so
# importlib.metadata / pkg_resources lookups still succeed at runtime.
datas = copy_metadata('taskgraph')
| [
2,
21004,
28,
48504,
12,
23,
198,
6738,
9485,
15798,
263,
13,
26791,
13,
25480,
82,
1330,
4866,
62,
38993,
198,
198,
19608,
292,
796,
4866,
62,
38993,
10786,
35943,
34960,
11537,
628
] | 3.151515 | 33 |
import pandas as pd
import numpy as np
import json
import re
from collections import namedtuple
from array import array
from . import marketdata
# Per-generic contract metadata: settlement currency, margin requirement,
# price multiplier, commission per trade, and an FX flag.
_metadata = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
                                    'commission', 'isFX'])
class Blotter():
"""
This is a financial blotter which is used for maintaing positions and PnL
in conjunction with backtesting a strategy historically.
The main purpose is for calculating historical PnL for both open and
closed trades as well as maintaining marked to market holdings.
This class maintains market pricing data for marking holdings to market as
well as interest rate data for charging interest and margin on open
positions. The class manages interest and margin charges based on a user
defined time of day and also provides functionality for repatriating closed
PnL to the user defined base currency on a daily user defined time.
"""
def __init__(self,
prices=None,
interest_rates=None,
accrual_time=pd.Timedelta(16, unit="h"),
eod_time=pd.Timedelta(16, unit="h"),
sweep_time=pd.Timedelta(16, unit="h"),
base_ccy="USD",
margin_charge=0.015):
"""
Parameters
----------
prices: str
path to folder of data for all traded instruments. Refer to
blotter.MarketData for more information on file format. Names for
FX instruments should be in the form 'XXXYYY' where XXX is the
first currency and YYY is the second currency, e.g. AUDUSD, USDCAD
interest_rates: str
Path to csv of data for all traded interest bearing instruments.
These rates should be daily annualized rates. Refer to
blotter.MarketData for more information on file format.
accrual_time: pandas.Timedelta
Time of day which interest is charged/paid for interest bearing
instruments (FX) as well as margin costs, if no automatic charges
desired set to None
eod_time: pandas.Timedelta
End of day time used for automatic PnL calculation, if no automatic
PnL calculation desired set to None
sweep_time: pandas.Timedelta
Automatic time used for sweeping PnL calculation, if no
automatic sweeping is desired set to None
base_ccy: str
Base currency of blotter, used when sweeping pnl to base currency
margin_charge: float
Interest rate spread above daily base currency interest rate which
is paid on margin, e.g. if daily interest rate is 0.5% annualized,
margin_charge=0.015 implies daily balance paid on margin is
(0.005 + 0.015)/365
"""
actions = []
if accrual_time is not None:
actions.append((accrual_time, "INTEREST"))
actions.append((accrual_time, "MARGIN"))
if eod_time is not None:
actions.append((eod_time, "PNL"))
if sweep_time is not None:
actions.append((sweep_time, "PNL_SWEEP"))
self._actions = actions
self._base_ccy = base_ccy
self._margin_charge = margin_charge
self._event_log = []
# dictionaries of instrument level data
self._gnrc_meta = dict()
self._instr_map = dict()
self._prices = prices
self._rates = interest_rates
self._holdings = Holdings()
self.get_holdings_history = self._holdings.get_holdings_history
self.get_instrument_pnl_history = self._holdings.get_instrument_pnl_history # NOQA
self.get_pnl_history = self._holdings.get_pnl_history # NOQA
def connect_market_data(self):
    """
    Set up the MarketData instance used for price and rate lookups.

    Must be invoked before trade() so that market data is available for
    automatic events (interest, margin, PnL, sweeps).
    """
    price_source = self._prices
    rate_source = self._rates
    self._mdata = marketdata.MarketData(prices=price_source,
                                        rates=rate_source)
@property
def event_log(self):
    """
    list of str: chronological log of string-serialized events (trades,
    interest charges, PnL snapshots, sweeps) which have acted on the
    Blotter. Note this returns the internal list itself, not a copy.
    """
    return self._event_log
def define_generic(self, generic, ccy=None, margin=0, multiplier=1,
                   commission=0, isFX=False):
    """
    Register meta data for tradeable instruments associated with a generic.

    Parameters
    ----------
    generic: str
        Name for the instrument type, used as the key when looking up meta
        data, e.g. define 'CL' once for all CL futures contracts
    ccy: str
        Currency the contract is traded in; defaults to the base currency
        of the blotter when None
    margin: float
        Amount of margin required for the contract
    multiplier: int
        Multiplier applied to the price to obtain the notional amount,
        should only be applied to futures
    commission: float
        Commission charged for trading the instrument
    isFX: boolean
        Whether this instrument is an FX instrument; affects whether cash
        balances are updated for calculating payable interest
    """
    denomination = self._base_ccy if ccy is None else ccy
    meta = _metadata(denomination, margin, multiplier, commission, isFX)
    self._gnrc_meta[generic] = meta
def map_instrument(self, generic, instrument):
    """
    Associate a tradeable instrument with a generic for meta data lookup.

    For a standalone instrument such as a currency pair, the generic and
    the instrument may simply be the same value.

    Parameters
    ----------
    generic: str
        Name of the instrument type whose meta data applies
    instrument: str
        Tradeable instrument name
    """
    self._instr_map.update({instrument: generic})
def trade(self, timestamp, instrument, quantity, price, ntc_price=None):
    """
    Record an instrument trade in the Blotter.

    This also makes a call to automatic_events() so that all scheduled
    events (interest, margin, PnL, sweeps) up to the time of this trade
    are processed first.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    instrument: str
        Tradeable instrument name
    quantity: int
        Number of instruments traded
    price: float
        Price of trade
    ntc_price: float
        No tcost price. Generally mid price but can be anything, this
        value is stored for downstream analytics but is unused in any
        calculations
    """
    # side effects since trade() also manages time state for when to sweep
    # pnl and charge/pay interest/margin
    self.automatic_events(timestamp)
    # explicit None check: a legitimate ntc_price of 0 is falsy and a
    # truthiness test would silently skip the float conversion
    if ntc_price is not None:
        ntc_price = float(ntc_price)
    self._trade(timestamp, instrument, int(quantity), float(price),
                ntc_price)
def automatic_events(self, timestamp):
    """
    Update the current time of the Blotter, triggering all scheduled events
    between previous clock time and new clock time such as interest
    charges, margin charges, PnL calculations and PnL sweeps. See
    create_events() for more information on the type of events.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time to update clock to and trigger internal events up until
    """
    current_time = self._holdings.timestamp
    # first event so there is nothing automatic that needs to be done
    if current_time is pd.NaT:
        return
    # expand the configured (time-of-day, action) schedule into concrete
    # (timestamp, action) pairs between the two clock readings
    actions = self._get_actions(current_time, timestamp, self._actions)
    for ts, action in actions.iteritems():
        events = self.create_events(ts, action)
        self.dispatch_events(events)
def create_events(self, timestamp, action):
    """
    Create internal events for updating the Holdings class contained within
    the Blotter instance. Manages creation of INTEREST, MARGIN, PNL and
    PNL_SWEEP events based on internal Blotter data.

    This method is exposed to allow users greater flexibility in calling
    internal events, however by default this is automatically called
    through automatic_events() and best not called unless the user
    understands what they are doing.

    MARGIN event charges interest in the base currency based on the margin
    required for the current open positions at a rate equal to the base
    currency interest rate + the margin_charge.
    INTEREST events charge interest on the outstanding cash balances in
    different currencies based on the current interest rates.
    PNL event calculates and saves the PNL based on current market prices
    for all open positions.
    PNL_SWEEP event repatriates closed PnL for non base currencies to the
    base currency based on the current FX rates.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time to create event for
    action: str
        Type of event to create, supports INTEREST, MARGIN, PNL and
        PNL_SWEEP

    Returns
    -------
    A list of events for being dispatched using dispatch_events()

    Raises
    ------
    NotImplementedError
        If action is not one of the supported event types
    """
    # NOTE: instance method (reads self._holdings, self._mdata, ...) and
    # invoked as self.create_events(...); the previous @staticmethod
    # decorator was wrong. String comparisons use == rather than `is`,
    # which only worked via CPython string interning.
    events = []
    if action == "INTEREST":
        cashs = self._holdings.get_cash_balances()
        if not cashs.empty:
            rates = self._mdata.rates.loc[timestamp, cashs.index]
            rates = self._adjusted_rates(timestamp, rates)
            interests = cashs * rates
            for ccy, qty in interests.iteritems():
                ev = _Event("INTEREST", {"timestamp": timestamp,
                                         "ccy": ccy,
                                         "quantity": qty})
                events.append(ev)
    elif action == "MARGIN":
        # calculate total margin charge over the absolute value of all
        # open positions marked in the base currency
        base_hlds_value = np.abs(self.get_holdings_value(timestamp))
        int_rate = self._mdata.rates.loc[timestamp, self._base_ccy]
        mrate = int_rate + self._margin_charge
        mrate = self._adjusted_rates(timestamp, mrate)
        charge = 0
        for instr, value in base_hlds_value.iteritems():
            metadata = self._gnrc_meta[self._instr_map[instr]]
            charge += mrate * metadata.margin * value
        if charge:
            ev = _Event("INTEREST", {"timestamp": timestamp,
                                     "ccy": self._base_ccy,
                                     "quantity": charge})
            events.append(ev)
    elif action == "PNL":
        assets = self._holdings.get_assets()
        if assets:
            prices = self._get_prices(timestamp, assets)
        else:
            prices = pd.Series([])
        ev = _Event("PNL", {"timestamp": timestamp, "prices": prices})
        events.append(ev)
    elif action == "PNL_SWEEP":
        assets = self._holdings.get_assets()
        if assets:
            prices = self._get_prices(timestamp, assets)
        else:
            prices = None
        pnls = self._holdings.get_pnl(timestamp, prices, cache=False)
        pnl_sweep = pnls.loc[:, 'closed pnl']
        for ccy, pnl in pnl_sweep.iteritems():
            # nothing to repatriate for the base currency itself
            if ccy == self._base_ccy:
                continue
            if pnl != 0:
                conv_rate = self._get_fx_conversion(timestamp, ccy)
                base_pnl = pnl * conv_rate
                ev = _Event("PNL_SWEEP", {"timestamp": timestamp,
                                          "ccy1": ccy, "quantity1": -pnl,
                                          "ccy2": self._base_ccy,
                                          "quantity2": base_pnl})
                events.append(ev)
    else:
        raise NotImplementedError("Unknown event type")
    return events
def dispatch_events(self, events):
    """
    Update Blotter._holdings based on events. See create_events() for the
    type of events supported. This method is best not called directly
    unless the user understands what is going on.

    Parameters
    ----------
    events: list
        list of _Event to dispatch

    Raises
    ------
    NotImplementedError
        If an event of unknown type is encountered
    """
    # NOTE: instance method (reads self._holdings / self._event_log) and
    # invoked as self.dispatch_events(...); the previous @staticmethod
    # decorator was wrong.
    for event in events:
        # serialize before dispatch since TRADE handling mutates event.data
        ev_str = str(event)
        if event.type == "TRADE":
            # ntc_price is analytics-only and not part of Holdings' API
            event.data.pop("ntc_price", None)
            self._holdings.record_trade(**event.data)
        elif event.type == "CASH":
            self._holdings.update_cash(**event.data)
        elif event.type == "INTEREST":
            self._holdings.charge_interest(**event.data)
        elif event.type == "PNL":
            self._holdings.get_instrument_pnl(**event.data)
        elif event.type == "PNL_SWEEP":
            self._holdings.sweep_pnl(**event.data)
        else:
            raise NotImplementedError("Unknown event type")
        self._event_log.append(ev_str)
def get_holdings_value(self, timestamp):
    """
    Return pandas.Series of values of holdings converted to Blotter base
    currency sorted by index name. Note that for each currency for which
    instruments are traded in, FX rates must be available for the given
    timestamp in order to convert. E.g. if Blotter base ccy is USD, and an
    instrument traded is in AUD, then AUDUSD or USDAUD must be available in
    the prices data folder.

    Parameters
    ----------
    timestamp: pandas.Timestamp which corresponds to the time for
    marking to market blotter holdings

    Returns
    -------
    A pandas.Series with an index of instruments sorted in lexographical
    order and values representing the market value of the positions in the
    base currency at the time given by the timestamp

    Raises
    ------
    ValueError
        If timestamp predates the Holdings' internal clock
    """
    if self._holdings.timestamp > timestamp:
        # NOTE(review): missing space between 'after' and
        # 'Holdings.timestamp' in this implicitly-concatenated message
        raise ValueError('Must mark to market holdings after'
                         'Holdings.timestamp')
    hlds = self._holdings.get_holdings()
    # no open positions anywhere: empty result
    if not hlds:
        return pd.Series()
    base_hlds_value = []
    for ccy in hlds:
        # price each instrument in its own currency, then convert the
        # whole currency bucket to base with a single FX rate
        prices_ccy = self._get_prices(timestamp, hlds[ccy].index)
        conv_rate = self._get_fx_conversion(timestamp, ccy)
        value = hlds[ccy] * prices_ccy * conv_rate
        base_hlds_value.append(value)
    base_hlds_value = pd.concat(base_hlds_value, axis=0)
    base_hlds_value.sort_index(inplace=True)
    return base_hlds_value
def get_trades(self):
    """
    Return quantity, multiplier, price, no tcost price, instrument,
    currency, and FX conversion rate of executed trades in order of
    execution.

    The quantity is the number of instruments traded. The multiplier is any
    multiplier associated with futures contracts, this should be 1 for FX.
    The price is the executed price of the trade. The costless price is an
    estimate of the price for execution without any transaction costs,
    provided by the user at the time of execution. This value will be NA if
    the user did not provide a value. The instrument is the name of the
    instrument traded. The currency is the denomination of the instrument
    and the FX conversion rate is the FX rate prevailing at the time to
    convert through multiplication the instrument currency to the base
    Blotter currency.

    Returns
    -------
    A pandas.DataFrame indexed by timestamp with columns ['instrument',
    'quantity', 'multiplier', 'price', 'ntc_price', 'ccy', 'fx_to_base'].
    Index has name 'timestamp'.
    """
    trade_data = []
    for ev in self.event_log:
        # log lines look like 'TRADE|{json payload}'; the pattern is not a
        # raw string but '\|' survives as an escaped pipe for re
        match = re.match("TRADE\|", ev)
        if match:
            data = _Event.parse_str_data(ev[match.end():])
            trade_data.append(data)
    trades = pd.DataFrame(trade_data)
    trades.set_index("timestamp", inplace=True)
    rates = []
    # timestamp can be repeated to unpack and iterate through
    for t, ccy in zip(trades.index, trades.loc[:, "ccy"].values):
        rates.append(self._get_fx_conversion(t, ccy))
    trades.loc[:, "fx_to_base"] = rates
    order = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
             'ccy', 'fx_to_base']
    trades = trades.loc[:, order]
    return trades
def get_instruments(self):
    """
    Get current set of instruments.

    Returns
    -------
    A pandas.Series indexed and lexicographically sorted by instrument
    name with numpy.int values representing the number of instruments
    """
    hlds = self._holdings.get_holdings()
    if not hlds:
        return pd.Series()
    instr_nums = []
    for ccy in hlds:
        instr_num = hlds[ccy]
        for ast in instr_num.index:
            gnrc = self._instr_map[ast]
            multiplier = self._gnrc_meta[gnrc].multiplier
            # holdings are stored in notional units; divide by the
            # multiplier to recover contract counts (mutates the Series
            # freshly built by get_holdings, so no caller state is touched)
            instr_num.loc[ast] = instr_num.loc[ast] / multiplier
        instr_nums.append(instr_num)
    instr_nums = pd.concat(instr_nums, axis=0)
    instr_nums.sort_index(inplace=True)
    # truncates to int; positions are whole contracts after the division
    instr_nums = instr_nums.astype(int)
    return instr_nums
def write_log(self, fp):
    """
    Write the blotter event log to a file, one serialized event per line.

    The output can later be replayed with read_log() to reconstitute a
    Blotter. An example output file would look like

    TRADE|{"timestamp": "2016-12-01 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 53.46, "quantity": 100}

    Parameters
    ----------
    fp: str
        path to write log to
    """  # NOQA
    with open(fp, 'w') as outfile:
        for entry in self._event_log:
            outfile.write(entry + "\n")
def read_log(self, fp):
    """
    Reconstitute a Blotter object from an event log written by write_log().

    Note that this only replays the events; meta data and market data
    sources must be reloaded separately. An example input file would look
    like

    TRADE|{"timestamp": "2016-12-01 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 53.46, "quantity": 100, "multiplier": 1}

    Parameters
    ----------
    fp: str
        path to read log from
    """  # NOQA
    self.dispatch_events(self._create_log_events(fp))
def write_meta(self, fp):
    """
    Write the meta data associated with instruments in a Blotter to a file,
    one generic per line. This can be used later to reconstitute a Blotter
    via read_meta(). An example output file is

    {"ccy": "CAD", "margin": 0.1, "multiplier": 100, "commission": 2.5, "isFX": false}|{"CL": ["CLU16", "CLZ16"]}
    {"ccy": "CAD", "margin": 0, "multiplier": 1, "commission": 2.5, "isFX": true}|{"USDCAD": ["USDCAD"]}

    Parameters
    ----------
    fp: str
        path to write meta data
    """  # NOQA
    # NOTE: instance method (reads self._instr_map / self._gnrc_meta) and
    # must not be decorated with @staticmethod as it previously was.
    # invert instrument -> generic into generic -> [instruments]
    # https://stackoverflow.com/questions/483666/python-reverse-invert-a-mapping#485368 # NOQA
    inv_map = {}
    for k, v in self._instr_map.items():
        inv_map[v] = inv_map.get(v, [])
        inv_map[v].append(k)
    for key in inv_map:
        inv_map[key].sort()
    keys = list(self._gnrc_meta.keys())
    keys.sort()
    with open(fp, 'w') as myfile:
        for key in keys:
            meta_data_str = json.dumps(self._gnrc_meta[key]._asdict())
            map_str = '{"' + str(key) + '": ' + json.dumps(inv_map[key]) + '}'  # NOQA
            line = meta_data_str + "|" + map_str + "\n"
            myfile.write(line)
def read_meta(self, fp):
    """
    Reconstitute the meta data of a Blotter from a file produced by
    write_meta().

    Parameters
    ----------
    fp: str
        Path to file. Each line should have the following format

        {"ccy": "CAD", "margin": 0.1, "multiplier": 100, "commission": 2.5,"isFX": false}|{"CL": ["CLU16", "CLZ16"]}
    """  # NOQA
    with open(fp, 'r') as metafile:
        for row in metafile:
            meta_part, map_part = row.split("|")
            meta_kwargs = json.loads(meta_part)
            generic_map = json.loads(map_part)
            generic = list(generic_map.keys())[0]
            meta_kwargs['generic'] = generic
            self.define_generic(**meta_kwargs)
            for tradeable in generic_map[generic]:
                self.map_instrument(generic, tradeable)
class Holdings():
    """
    The Holdings class is designed to manage holdings data and PnL data. The
    class stores instrument level holdings data on a per currency basis and
    calculates PnL on a per currency basis given instrument prices. The class
    is primarily designed to manage these aspects from within the context
    of the Blotter class however can also provide this functionality stand
    alone.

    The main features of the Holdings class include:

    - Store per currency per instrument holdings
    - Calculate per currency per instrument PnL
    - Maintain interest payable cash balances per currency
    - Maintain charged/paid interest per currency
    - Provide functionality to sweep PnL from one currency to another
    - Return historical holdings
    - Return historical PnL

    Calculating PnL is done on an as of current holdings basis, there is no
    functionality for looking up historical holdings for calculating historic
    PnL.

    Note: For interest bearing instruments, when users are using the Holdings
    class standalone, users are responsible for calling charge_interest() at
    appropriate intervals and with appropriate interest rates to ensure that
    the PnL calculations are correct. This is handled by the Blotter class.

    All actions on the Holdings class must follow in time sequential order.
    """

    # NOTE(review): three bare @staticmethod decorators were previously
    # stacked on top of the timestamp property below, corrupting the
    # descriptor (instance access returned the wrapped property object
    # rather than the stored timestamp). They appear to be residue of
    # removed helper methods and have been dropped.

    @property
    def timestamp(self):
        """
        pandas.Timestamp: time of the most recent operation applied to
        this Holdings instance
        """
        return self._timestamp
def get_holdings(self):
    """
    Current instrument holdings per currency (multiplier included).

    Returns
    -------
    dictionary
        Keys are currencies; values are pandas.Series of the most recent
        holdings for each instrument in that currency. Instruments whose
        latest position is 0 (closed) are filtered out, and currencies
        with no remaining open positions are omitted entirely.
    """
    per_ccy = self._position_data_per_ccy
    current = dict()
    for ccy, instr_data in per_ccy.items():
        names = sorted(instr_data)
        latest = pd.Series(index=names)
        for name in names:
            latest.loc[name] = instr_data[name].position[-1]
        # drop fully closed positions
        open_only = latest.loc[latest != 0]
        if not open_only.empty:
            current[ccy] = open_only
    return current
def get_holdings_history(self):
    """
    Full time series of holdings for each traded instrument (this includes
    any multiplier associated with the instrument).

    Returns
    -------
    dictionary
        Keys are currencies; values are dictionaries mapping instrument
        names to pandas.Series timeseries of holdings.
    """
    history = dict()
    for ccy, instr_data in self._position_data_per_ccy.items():
        ccy_history = dict()
        for name, record in instr_data.items():
            # stored timestamps are converted back to pandas Timestamps
            index = self._to_timestamp(record.timestamp)
            ccy_history[name] = pd.Series(record.position, index=index,
                                          copy=True)
        history[ccy] = ccy_history
    return history
def get_assets(self):
    """
    Get the names of instruments with currently open positions.

    Returns
    -------
    list
        Sorted list of strings of current assets which have holdings
    """
    # NOTE: instance method (reads self._position_data_per_ccy) and is
    # invoked as self._holdings.get_assets() from Blotter; the previous
    # @staticmethod decorator was wrong.
    pos_data = self._position_data_per_ccy
    asts = []
    for ccy in pos_data:
        ccy_pos_data = pos_data[ccy]
        for asst in ccy_pos_data:
            # only the latest position matters; closed (0) positions are
            # excluded
            if ccy_pos_data[asst].position[-1] != 0:
                asts.append(asst)
    asts.sort()
    return asts
def record_trade(self, timestamp, instrument, price, quantity, multiplier,
                 commission, ccy):
    """
    Record an instrument trade in Holdings. Trades must be time ordered.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    instrument: str
        Tradeable instrument name
    price: float
        Price of trade
    quantity: int
        Number of instruments traded.
    multiplier: int
        A number which when multiplied by the price gives the notional
        value of a contract. E.g. for trading an ES contract,
        the multipler is 50, therefore 1 ES contract with a price of 2081
        the notional value of the contract is 2081 x 50$.
    commission: float
        total commission for the trade
    ccy: str
        currency of instrument denomination

    Raises
    ------
    ValueError
        If quantity is 0 or NaN, multiplier is not a positive integer, or
        the trade predates the last recorded operation
    """
    if quantity == 0:
        raise ValueError("Cannot trade 0 quantity of an instrument")
    if np.isnan(quantity):
        raise ValueError("Cannot trade nan quantity of an instrument")
    if multiplier <= 0 or not isinstance(multiplier, int):
        raise ValueError("multiplier must be positive integer")
    # buys and sells are tracked separately so realized/unrealized PnL can
    # be split later; quantity is never 0 here so exactly one branch binds
    if quantity > 0:
        price_attr = "avg_buy_price"
        total_attr = "total_buy"
    elif quantity < 0:
        price_attr = "avg_sell_price"
        total_attr = "total_sell"
    # positions are stored in notional units (contracts * multiplier)
    amount = quantity * multiplier
    if ccy in self._position_data_per_ccy:
        ccy_holdings = self._position_data_per_ccy[ccy]
    else:
        ccy_holdings = {}
        self._position_data_per_ccy[ccy] = ccy_holdings
    if instrument in ccy_holdings:
        holdings = ccy_holdings[instrument]
    else:
        holdings = self._make_empty_holding()
        ccy_holdings[instrument] = holdings
    # deals with first access being non existent
    prev_hldings = self._get_last(holdings, 'position')
    avg_price = self._get_last(holdings, price_attr)
    total = self._get_last(holdings, total_attr)
    if self._timestamp > timestamp:
        raise ValueError('Operations on Holdings must follow in time'
                         ' sequential order')
    holdings.timestamp.append(timestamp.timestamp())
    holdings.position.append(prev_hldings + amount)
    holdings.trade.append(amount)
    self._timestamp = timestamp
    fees = self._get_last(holdings, "fees", default=0)
    holdings.fees.append(commission + fees)
    aamnt = np.abs(amount)
    # running volume-weighted average price for this trade's direction
    new_price = (total * avg_price + aamnt * price) / (total + aamnt)
    getattr(holdings, price_attr).append(new_price)
    getattr(holdings, total_attr).append(total + aamnt)
    # when adding to position or flipping position sign update
    # average price
    ADDING = np.sign(amount) == np.sign(prev_hldings)
    NEW_POS = np.sign(amount + prev_hldings) not in {np.sign(prev_hldings), 0} # NOQA
    if ADDING:
        a_price = holdings.avg_pos_price[-1]
        new_pos_price = (a_price * prev_hldings + price * amount) / (prev_hldings + amount) # NOQA
        holdings.avg_pos_price.append(new_pos_price)
    elif NEW_POS:
        # position flipped sign: the surviving exposure was opened at the
        # current trade price
        holdings.avg_pos_price.append(price)
    else:
        # position reduced without flipping: average entry price unchanged
        holdings.avg_pos_price.append(holdings.avg_pos_price[-1])
def update_cash(self, timestamp, ccy, quantity):
    """
    Adjust the interest-bearing cash balance for a currency, used for
    charging interest on that balance.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of the cash movement
    ccy: str
        currency of cash balance
    quantity: float
        Amount of cash to add to the running balance
    """
    # delegate to the shared accumulator, keyed on the '_cash' store
    self._update_property(timestamp, ccy, quantity, '_cash')
def charge_interest(self, timestamp, ccy, quantity):
    """
    Record interest charged/paid in the account of a currency.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of the charge
    ccy: str
        currency of interest charge/payment
    quantity: float
        Amount of interest (sign determines charge vs payment)
    """
    # delegate to the shared accumulator, keyed on the '_interest' store
    self._update_property(timestamp, ccy, quantity, '_interest')
def sweep_pnl(self, timestamp, ccy1, quantity1, ccy2, quantity2):
    """
    Convert PnL from one currency to another. The user is responsible for
    ensuring that the implicit FX rate between the two legs is sensible.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of the sweep
    ccy1: str
        currency of first leg of sweep
    quantity1: float
        Amount of currency from first leg of sweep
    ccy2: str
        currency of second leg of sweep
    quantity2: float
        Amount of currency from second leg of sweep

    Examples
    --------
    >>> ts = pd.Timestamp('2016-12-01T10:00:00')
    >>> holder.sweep_pnl(ts, 'AUD', -5000, 'USD', 5000 * 0.80)
    """
    # record both legs against the '_pnl_sweep' store, first leg first
    for leg_ccy, leg_qty in ((ccy1, quantity1), (ccy2, quantity2)):
        self._update_property(timestamp, leg_ccy, leg_qty, '_pnl_sweep')
def get_cash_balances(self):
    """
    Current cash balance per currency.

    Returns
    -------
    pandas.Series
        Indexed by currency (sorted), holding the latest balance for each
        currency; currencies whose balance is exactly 0 are dropped.
    """
    ccys = sorted(self._cash)
    balances = pd.Series(index=ccys)
    for ccy in ccys:
        balances.loc[ccy] = self._cash[ccy].amount[-1]
    return balances.loc[balances != 0]
def get_instrument_pnl(self, timestamp, prices=None, cache=True):
    """
    Calculate and return pnl, closed pnl and open pnl for traded
    instruments in each currency.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of PnL calculation, used for caching the result
    prices: pandas.Series
        series of instrument prices for current holdings
    cache: boolean
        Cache this result for later retrieval and advance internal Holdings
        event clock

    Returns
    -------
    dictionary
        Dictionary with currencies as keys and pandas.DataFrame as values
        where the DataFrame contains columns
        ['pnl', 'closed pnl', 'open pnl'] and the index is the set of
        holdings of current instruments

    Raises
    ------
    ValueError
        If timestamp predates the internal event clock
    """
    # allows PnL calculation without having to pass dummy series of prices
    # when all positions are closed
    if prices is None:
        prices = pd.Series()
    if self._timestamp > timestamp:
        raise ValueError('Operations on Holdings must follow in time'
                         ' sequential order')
    pos_data = self._position_data_per_ccy
    pnls = dict()
    for ccy in pos_data:
        ccy_pos_data = pos_data[ccy]
        asts = list(ccy_pos_data)
        asts.sort()
        # latest value of each tracked statistic, per instrument
        pos = pd.Series(index=asts)
        fees = pd.Series(index=asts)
        avg_buy_price = pd.Series(index=asts)
        tot_buy = pd.Series(index=asts)
        avg_sell_price = pd.Series(index=asts)
        tot_sell = pd.Series(index=asts)
        avg_pos_price = pd.Series(index=asts)
        for asst in ccy_pos_data:
            ast_dat = ccy_pos_data[asst]
            pos.loc[asst] = self._get_last(ast_dat, 'position')
            fees.loc[asst] = self._get_last(ast_dat, 'fees')
            avg_buy_price[asst] = self._get_last(ast_dat, 'avg_buy_price')
            tot_buy[asst] = self._get_last(ast_dat, 'total_buy')
            avg_sell_price[asst] = self._get_last(ast_dat,
                                                  'avg_sell_price')
            tot_sell[asst] = self._get_last(ast_dat, 'total_sell')
            avg_pos_price[asst] = self._get_last(ast_dat, 'avg_pos_price')
        # this is required to avoid needing to pass in prices for
        # instruments with 0 current holdings but holdings historically
        asts_not0 = pos.loc[pos != 0].index
        prices_ccy = prices.loc[asts_not0]
        if len(asts_not0) == 0:
            pos_value = 0.0
            ccy_open_pnl = pd.Series(0.0, index=asts)
        else:
            pos_value = pos.loc[asts_not0].mul(prices_ccy)
            ccy_open_pnl = pos.loc[asts_not0].mul(prices_ccy - avg_pos_price.loc[asts_not0]) # NOQA
        # total pnl = sell proceeds + market value of open position
        #             - cost of buys - fees
        ccy_pnl = tot_sell * avg_sell_price + pos_value - avg_buy_price * tot_buy - fees # NOQA
        ccy_closed_pnl = ccy_pnl - ccy_open_pnl
        df_pnl = pd.concat([ccy_pnl, ccy_closed_pnl, ccy_open_pnl], axis=1)
        df_pnl.columns = ['pnl', 'closed pnl', 'open pnl']
        pnls[ccy] = df_pnl
    if cache:
        # persist this snapshot for the *_history accessors and advance
        # the internal event clock
        for ccy in pnls:
            instr_pnls = pnls[ccy]
            for instr in instr_pnls.index:
                instr_pnl = instr_pnls.loc[instr, :].tolist()
                if ccy in self._pnl_data:
                    ccy_pnl_datas = self._pnl_data[ccy]
                else:
                    ccy_pnl_datas = {}
                    self._pnl_data[ccy] = ccy_pnl_datas
                if instr in ccy_pnl_datas:
                    instr_pnl_data = ccy_pnl_datas[instr]
                else:
                    instr_pnl_data = self._make_empty_hist_pnl()
                    ccy_pnl_datas[instr] = instr_pnl_data
                instr_pnl_data.time.append(timestamp)
                instr_pnl_data.pnl.append(instr_pnl)
        self._timestamp = timestamp
    return pnls
def get_pnl(self, timestamp, prices=None, cache=True):
    """
    Calculate open, closed and total pnl in each currency where instruments
    are traded based on given prices.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of PnL calculation
    prices: pandas.Series
        series of instrument prices
    cache: boolean
        Cache this result for later retrieval and advance internal Holdings
        event clock

    Returns
    -------
    pandas.DataFrame
        DataFrame with columns ['pnl', 'closed pnl', 'open pnl'] and an
        index of currencies of instrument denominations. Note that this
        will return a row for each currency that an instrument has ever
        been traded in, even if the current PnL in the currency is all
        0's due to sweeps.
    """
    # allows PnL calculation without having to pass dummy series of prices
    # when all positions are closed
    if prices is None:
        prices = pd.Series()
    pnls = self.get_instrument_pnl(timestamp, prices, cache)
    # include currencies that only ever saw interest or sweeps
    ccys = list(set().union(pnls, self._interest, self._pnl_sweep))
    ccys.sort()
    ccy_pnls = pd.DataFrame(index=ccys,
                            columns=['pnl', 'closed pnl', 'open pnl'],
                            dtype='float64')
    for ccy in ccys:
        try:
            pnl_sums = pnls[ccy].sum()
        except KeyError:
            # no instruments ever traded in this currency
            pnl_sums = pd.Series(0, index=['pnl', 'closed pnl',
                                           'open pnl'])
        if ccy in self._interest:
            interest = self._get_last(self._interest[ccy], 'amount')
        else:
            interest = 0
        if ccy in self._pnl_sweep:
            swept_pnl = self._get_last(self._pnl_sweep[ccy], 'amount')
        else:
            swept_pnl = 0
        # interest and sweeps are realized: they adjust total and closed
        # pnl but never open pnl
        pnl_sums.loc['pnl'] = pnl_sums.loc['pnl'] + interest + swept_pnl
        pnl_sums.loc['closed pnl'] = (pnl_sums.loc['closed pnl'] +
                                      interest + swept_pnl)
        ccy_pnls.loc[ccy] = pnl_sums
    return ccy_pnls
def get_pnl_history(self):
    """
    Return open, closed and total PnL in each currency where instruments
    are traded based on cached values from previous calls to
    get_instrument_pnl

    Returns
    -------
    dictionary
        Dictionary of pandas.DataFrames where keys are currencies and the
        DataFrames have columns ['pnl', 'closed pnl', 'open pnl'] and
        index of timestamps
    """
    ccy_pnls = self.get_instrument_pnl_history()
    # include currencies that only ever saw interest or sweeps
    ccys = list(set().union(ccy_pnls, self._interest, self._pnl_sweep))
    ccys.sort()
    hist_pnls = dict()
    PNL_COLS = ['pnl', 'closed pnl', 'open pnl']
    for ccy in ccys:
        # 1) sum the per-instrument pnl histories on the union of their
        #    timestamp indexes
        try:
            instr_pnls = ccy_pnls[ccy]
            instr_idx = pd.DatetimeIndex([])
            instrs = list(instr_pnls.keys())
            instrs.sort()
            for instr in instrs:
                instr_idx = instr_idx.union(instr_pnls[instr].index)
            # NOTE(review): reindex() is presumably a module-level helper
            # defined elsewhere in this file -- confirm its alignment
            # semantics (likely forward-fill onto the union index)
            instr_pnl_sum = reindex(instr_pnls[instrs[0]], instr_idx)
            for instr in instrs[1:]:
                pnl = reindex(instr_pnls[instr], instr_idx)
                instr_pnl_sum = instr_pnl_sum + pnl
        except KeyError:
            instr_pnl_sum = pd.DataFrame([], columns=PNL_COLS)
        # 2) interest adjusts closed and total pnl, never open pnl
        try:
            interest_data = self._interest[ccy]
            dts = self._to_timestamp(interest_data.timestamp)
            interest = pd.DataFrame(0, index=dts, columns=PNL_COLS)
            interest.loc[:, 'closed pnl'] = interest_data.amount
            interest.loc[:, 'pnl'] = interest_data.amount
            interest = interest.groupby(interest.index).last()
        except KeyError:
            interest = pd.DataFrame([], columns=PNL_COLS)
        # 3) sweeps likewise adjust closed and total pnl only
        try:
            sweep_data = self._pnl_sweep[ccy]
            dts = self._to_timestamp(sweep_data.timestamp)
            sweep = pd.DataFrame(0, index=dts, columns=PNL_COLS)
            sweep.loc[:, 'closed pnl'] = sweep_data.amount
            sweep.loc[:, 'pnl'] = sweep_data.amount
            # multiple sweeps can happen at same time which all build on
            # each other so only last one is relevant
            sweep = sweep.groupby(sweep.index).last()
        except KeyError:
            sweep = pd.DataFrame([], columns=PNL_COLS)
        idx = instr_pnl_sum.index.union(interest.index).union(sweep.index)
        pnl_ccy = (reindex(instr_pnl_sum, idx) + reindex(sweep, idx) +
                   reindex(interest, idx))
        hist_pnls[ccy] = pnl_ccy
    return hist_pnls
def get_instrument_pnl_history(self):
    """
    Cached per-instrument PnL history in each currency, built from values
    stored by previous calls to get_instrument_pnl() with cache=True.

    Returns
    -------
    dictionary
        Top level keys are currencies with historical PnL; the nested
        dictionaries map instrument names to pandas.DataFrames with
        columns ['pnl', 'closed pnl', 'open pnl'] indexed by timestamp.
    """
    history = dict()
    for ccy, ccy_cache in self._pnl_data.items():
        ccy_history = dict()
        for name, record in ccy_cache.items():
            frame = pd.DataFrame(record.pnl, index=record.time,
                                 columns=['pnl', 'closed pnl',
                                          'open pnl'])
            ccy_history[name] = frame
        history[ccy] = ccy_history
    return history
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
11748,
302,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
7177,
1330,
7177,
198,
6738,
764,
1330,
1910,
7890,
628,
198,
198,
62,
38993,
... | 2.174952 | 19,251 |
#!/usr/bin/python
import sys
import cPickle as pickle
import pycrfsuite
import random
train_size=0.8
if __name__=='__main__':
    # NOTE(review): Python 2 source (print statements, str.decode, cPickle
    # import above). extract() and transform_to_triples() are presumably
    # defined elsewhere -- confirm before running this chunk standalone.
    # Dead code kept as a string literal: word-embedding loading that is
    # currently unused.
    """
    emb={}
    for line in open('vocabulary.vec'):
        line=line.decode('utf8').strip().split(' ')
        emb[line[0]]=[float(e) for e in line[1:]]
    """
    # read sentences: blank-line-separated blocks of tab-separated tokens
    instances=[]
    for sentence in open(sys.argv[1]).read().decode('utf8').strip().split('\n\n'):
        instances.append([e.strip().split('\t') for e in sentence.split('\n')])
    # deterministic shuffle, then split into train/test by train_size
    random.seed(42)
    random.shuffle(instances)
    border=int(len(instances)*train_size)
    train=instances[:border]
    test=instances[border:]
    ### training
    trainer=pycrfsuite.Trainer(algorithm='pa',verbose=True)
    trainer.set_params({'max_iterations':10})
    for instance in train:
        # last column is the gold label; columns 1-3 feed the featurizer
        labels=[e[-1] for e in instance]
        feats=extract([(e[1],e[2],e[3]) for e in instance])
        #print feats
        #print labels
        trainer.append(feats,labels)
    trainer.train(sys.argv[1]+'.model')
    ### testing
    tagger=pycrfsuite.Tagger()
    tagger.open(sys.argv[1]+'.model')
    pred=[]
    true=[]
    # per-label [correct, total] counters for precision and recall
    label_pre={}
    label_rec={}
    f=open(sys.argv[1]+'.test.out','w')
    for instance in test:
        labels=[e[-1] for e in instance]
        feats=extract([(e[1],e[2],e[3]) for e in instance])
        pred_labels=tagger.tag(feats)
        # write tokens with predicted labels, one sentence per block
        tokens=['\t'.join(e[:3]) for e in instance]
        for token,label in zip(tokens,pred_labels):
            f.write((token+'\t'+label+'\n').encode('utf8'))
        f.write('\n')
        pred.extend(pred_labels)
        true.extend(labels)
        # span-level evaluation via triples (label, start, end)
        true_triples=set(transform_to_triples(labels))
        pred_triples=set(transform_to_triples(pred_labels))
        # recall: how many gold triples were predicted
        for triple in true_triples:
            if triple[0] not in label_rec:
                label_rec[triple[0]]=[0,0.]
            label_rec[triple[0]][1]+=1
            if triple in pred_triples:
                label_rec[triple[0]][0]+=1
        # precision: how many predicted triples are in the gold set
        for triple in pred_triples:
            if triple[0] not in label_pre:
                label_pre[triple[0]]=[0,0.]
            label_pre[triple[0]][1]+=1
            if triple in true_triples:
                label_pre[triple[0]][0]+=1
    print label_pre
    print label_rec
    from sklearn.metrics import classification_report
    print classification_report(true,pred,digits=3)
    # final training
    # retrain on the full data set and overwrite the evaluation model
    trainer=pycrfsuite.Trainer(algorithm='pa',verbose=True)
    trainer.set_params({'max_iterations':10})
    for instance in instances:
        feats=extract([(e[1],e[2],e[3]) for e in instance])
        labels=[e[-1] for e in instance]
        #print feats
        #print labels
        trainer.append(feats,labels)
    trainer.train(sys.argv[1]+'.model')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
269,
31686,
293,
355,
2298,
293,
198,
11748,
12972,
6098,
69,
2385,
578,
198,
11748,
4738,
198,
27432,
62,
7857,
28,
15,
13,
23,
198,
198,
361,
11593,
3672,
834,
8... | 2.340245 | 1,061 |
from base64 import b64decode, b64encode
from urllib import parse
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
def get_signature(key: str, signature_str: str) -> str:
    """
    Sign a signature string with SHA-1 RSA (PKCS#1 v1.5) and return the
    base64-encoded result.

    Args:
        key: private key in string representation
        signature_str: String to be signed

    Returns:
        Base64-encoded signature string.
    """
    # NOTE(review): SHA-1 is cryptographically weak; presumably mandated
    # by the remote gateway's signature scheme -- confirm before changing.
    rsa_key = RSA.importKey(key)
    digest = SHA.new(signature_str.encode('utf-8'))
    raw_signature = PKCS1_v1_5.new(rsa_key).sign(digest)
    return b64encode(raw_signature).decode('utf-8')
def get_url_signature(key, signature_str):
"""
Urlize signature from `csob.crypto.get_signature`.
Args:
key: private key in string representation
signature_str: String to be signed
Returns:
urlize signature
"""
return parse.quote_plus(get_signature(key, signature_str))
def verify_signature(public_key: str, signature_str: str, signature: str) -> bool:
"""
Verify incoming signature that it is correct.
Args:
public_key: Public key to use
signature_str: String that was signed
signature: The provided signature
Returns:
bool
"""
verifier = PKCS1_v1_5.new(RSA.importKey(public_key))
return verifier.verify(SHA.new(signature_str.encode('utf-8')), b64decode(signature))
| [
6738,
2779,
2414,
1330,
275,
2414,
12501,
1098,
11,
275,
2414,
268,
8189,
198,
6738,
2956,
297,
571,
1330,
21136,
198,
198,
6738,
36579,
13,
26257,
1330,
25630,
198,
6738,
36579,
13,
15202,
9218,
1330,
42319,
198,
6738,
36579,
13,
11712... | 2.603846 | 520 |
from tests.optimization_models import four_kp_model, three_kp_model, two_kp_model
| [
6738,
5254,
13,
40085,
1634,
62,
27530,
1330,
1440,
62,
74,
79,
62,
19849,
11,
1115,
62,
74,
79,
62,
19849,
11,
734,
62,
74,
79,
62,
19849,
628
] | 2.862069 | 29 |
import datetime, re
import warnings
from lxml import etree
def get_post(document, pid=None):
"""Get post data using lxml."""
# get the post
document = etree.HTML(document)
raw, tid, fid, user, text, time = (None,)*6 # le init
err = document.find(".//div[@id='msg']")
if err is not None:
raw = etree.tostring(err)
else:
if pid is not None:
# Check the "header"
header = document.find(".//ul[@class='crumbs']").findall(".//a[@href]")
tid = int(re.search(r"(\d+)", list(header[-1].values())[0]).group(1))
fid = int(re.search(r"(\d+)", list(header[-2].values())[0]).group(1))
# Check the post
post = document.find(f".//div[@id='p{pid}']")
else:
post = document
pid = int(document[0][0].get("id")[1:])
if post is not None:
user = etree.tostring(post.find(".//dl").find(".//dt")[0]).decode()[8:-9]
text = etree.tostring(post.find(".//div[@class='postmsg']")).decode()
text = re.search(r">(.*)</d",text,re.DOTALL).group(1).strip()
time = post.find(".//a[@href]").text.split(" ")
time[1] = datetime.datetime.strptime(time[1], "\u2009%H:%M:%S").time()
if time[0] == "Today":
time = datetime.datetime.combine(datetime.datetime.now().date(), time[1])
elif time[0] == "Yesterday":
time = datetime.datetime.combine(datetime.datetime.now().date(), time[1])
time += datetime.timedelta(days=-1)
else:
time = datetime.datetime.combine(datetime.datetime.strptime(time[0], "%Y-%b-%d").date(), time[1])
time = str(time)
raw = etree.tostring(post).decode()
else:
warnings.warn("Cannot find post ID in document", RuntimeWarning)
return {"rawHTML": raw, "pID": pid, "tID": tid, "fID": fid, "user": user, "text": text, "time": time}
def get_page(document):
"""Get page data using lxml."""
# get the post
document = etree.HTML(document)
raw, tid, fid, pages, posts = (None,)*5 # le init
raw = document.find(".//div[@id='brdmain']")
err = document.find(".//div[@id='msg']")
if err is None:
# Check the "header"
header = document.find(".//ul[@class='crumbs']").findall(".//a[@href]")
match = re.search(r"(\d+)", list(header[-1].values())[0])
if match:
tid = int(match.group(1))
match = re.search(r"(\d+)", list(header[-2].values())[0])
if match:
fid = int(match.group(1))
if header[-1].text is None:
name = header[-1][0].text
else:
name = header[-1].text
# Check the page count
header = document.find(".//p[@class='pagelink conl']")
if header is not None:
pages = sorted(int(x.text) for x in header if re.match(r"\d+", x.text))[-1]
# Check the post
if "id" in raw[1]: #
posts = [etree.tostring(x) for x in raw if re.match(r"p\d+", x.get("id") if x.get("id") is not None else "")]
else:
posts = [etree.tostring(x) for x in raw if "link" not in x.get("class")]
return {"rawHTML": etree.tostring(raw), "tID": tid, "fID": fid, "pages": pages, "posts": posts, "title": name}
def get_message(xml):
"""Get page data using lxml."""
# it's literally copy-pasted from html.py
# well the comment ruined it but you know what I mean
xml = etree.XML(xml.encode())
info = xml.find("infos")
userlist = xml.find("users")
msglist = xml.find("messages")
if info is not None:
info = {x.get("type"): x.text for x in info}
users = {}
if userlist is not None:
for x in userlist:
channel = x.get("channelID")
if channel not in users: users[channel] = []
users[channel].append({"uID": x.get("userID"), "username": x.text})
messages = {}
if msglist is not None:
messages = [{
"pID": x.get("id"),
"user": {"uID": x.get("userID"), "username": x[0].text.strip()},
"text": x[1].text.strip(),
"rawHTML": etree.tostring(x).decode(),
"time": datetime.datetime.strptime(x.get("dateTime"), "%a, %d %b %Y %H:%M:%S %z")
}
for x in msglist]
return {"messages": messages, "info": info, "users": users}
__all__ = dir(globals())
| [
11748,
4818,
8079,
11,
302,
201,
198,
11748,
14601,
201,
198,
6738,
300,
19875,
1330,
2123,
631,
201,
198,
201,
198,
201,
198,
4299,
651,
62,
7353,
7,
22897,
11,
46514,
28,
14202,
2599,
201,
198,
220,
220,
220,
37227,
3855,
1281,
13... | 1.974726 | 2,374 |
import json
print("loading function")
| [
11748,
33918,
198,
198,
4798,
7203,
25138,
2163,
4943,
628,
198
] | 3.727273 | 11 |
import animal
| [
11748,
5044,
628,
198
] | 4 | 4 |
# Copyright 2018 Oinam Romesh Meitei. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy,time
from math import sqrt,pow
from numpy.core.umath_tests import inner1d
from molepy.basis.bas import get_basis
from molepy.pymints.getints import getints1e,getints2e
from molepy.scf.orth import orthog
from molepy.scf.DIIS import DIIS
from molepy.scf.JK import JK,makeJK
from molepy.lib import mout
from molepy.lib.mole_param import hfparam
def RHF(coord,bfs):
" Restricted Hartree Fock "
out = mout.molog()
out.initiate()
out.writer('{:} \n',' Program : Restricted Hartree Fock (closed shell)')
out.writer('{:} \n',' O.R. Meitei')
out.writer('\n','')
out.writer('{:} \n',' Atomic Coordinates')
out.writer('\n','')
for i in coord:
out.writer(' {:<3} {:>10.6f} {:>10.6f} {:>10.6f}\n',i.sym,*i.center)
out.writer('\n','')
out.writer(' Basis Set : {:>10}\n',bfs)
basis,BASIS = get_basis(coord,bfs)
#-tmp
NSPH = 0
for i in BASIS.Shells:
NSPH += 2*i+1
aos = 0
nelec = 0
for i in coord:
nelec += i.anum
out.writer(' Number of electrons : {:>7} \n',nelec)
if hfparam.Spheri:
n_basis_ = NSPH
else:
n_basis_ = len(basis)
out.writer(' Number of Basis functions : {:>7} \n',n_basis_)
out.writer('\n','')
e_nuclear = nuclear(coord)
S,h = getints1e(basis,coord)
eri = getints2e(basis)
sval,svec = numpy.linalg.eigh(S)
if hfparam.Spheri:
S = flatit(S,len(basis))
h = flatit(h,len(basis))
S = hfparam.Cart2Spher2(BASIS.Shells,S,len(basis))
h = hfparam.Cart2Spher2(BASIS.Shells,h,len(basis))
S = deflatit(S,NSPH)
h = deflatit(h,NSPH)
eri = hfparam.Cart2Spher4(BASIS.Shells,eri)
out.writer(' Nuclear repulsion energy : {:>10.7f} \n',e_nuclear)
out.writer('\n','')
out.writer(' Minimum eigenvalue in the overlap matrix : {:>.4e} \n',sval[0])
out.writer('\n','')
X = orthog(S)
es1,vs1 = numpy.linalg.eigh(S)
orb_e,orb_c = m_eigh(h,X)
Nocc = nclosed(coord)
Maxiter = 50
diis = DIIS()
out.writer(' Initial guess is core Hamiltonian (one electron) \n')
out.writer(' Interpolation using DIIS \n')
out.writer('\n','')
out.writer(' Iteration Energy Ediff Ddiff \n')
out.writer('\n','')
prevE = 0.0
prevD = 0.0
J,K = makeJK(n_basis_,eri)
for iter in range(Maxiter):
Coefs = orb_c[:,0:Nocc]
Dens = numpy.dot(Coefs,Coefs.T)
Gmat = JK(n_basis_,Dens,J,K)
Fock = h+Gmat
Fock = diis.diis(Fock,Dens,S)
orb_e,orb_c = m_eigh(Fock,X)
etot,e1e,e2e = HFenergy(h,Fock,Dens,e_nuclear)
rmsD = Dens-prevD
rmsD = numpy.linalg.norm(rmsD)
delE = abs(etot-prevE)
out.writer(' {:>3} {:>15.10f} {:>.4e} {:>.4e} \n',\
iter+1,etot,delE,rmsD)
if iter:
if delE < hfparam.tolE and rmsD < hfparam.tolD:
" Call it converged "
break
prevE = etot
prevD = Dens
out.writer('\n','')
out.writer(' SCF converged \n')
out.writer('\n','')
out.writer(' Final Energy : {:>15.10f} \n',etot)
out.writer(' Nuclear Energy : {:>15.10f} \n',e_nuclear)
out.writer(' One electron Energy : {:>15.10f} \n',e1e)
out.writer(' Two electron Energy : {:>15.10f} \n',e2e)
out.writer('\n','')
return(etot,orb_e,orb_c)
| [
2,
15069,
2864,
440,
259,
321,
371,
2586,
71,
2185,
578,
72,
13,
1439,
6923,
33876,
13,
198,
2,
220,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
2... | 2.057719 | 2,131 |
x = input("Digite algo:")
print(
"""
[01]Essa variável é alfanumérica? {}
[02]Essa variável é alfabética? {}
[03]Essa variável faz parte da ASCII? {}
[04]Essa variável é decimal? {}
[05]Essa variável é um nº de 0 a 9? {}
[06]Essa variável é um identificador que pode ser
usado como função, classes ou variáveis? {}
[07]Essa variável está com todos os caracteres
minúsculos? {}
[08]Essa variável é um valor númerico? {}
[09]Essa variável é um valor imprimível? {}
[10]Essa variável é um espaço em branco? {}
[11]Essa variável é maiúscula? {}
[12]Essa variável é totalmente maiúscula? {}
""".format(
x.isalnum(),
x.isalpha(),
x.isascii(),
x.isdecimal(),
x.isdigit(),
x.isidentifier(),
x.islower(),
x.isnumeric(),
x.isprintable(),
x.isspace(),
x.istitle(),
x.isupper(),
)
)
| [
87,
796,
5128,
7203,
19511,
578,
435,
2188,
25,
4943,
198,
4798,
7,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
685,
486,
60,
29508,
64,
5553,
6557,
626,
38251,
435,
24408,
388,
2634,
30997,
30,
23884,
198,
220,
220,
... | 1.894118 | 510 |
import logging
| [
11748,
18931,
628
] | 5.333333 | 3 |
# coding=utf8
"""
Parsing data from https://github.com/jkkummerfeld/text2sql-data/tree/master/data
"""
import os
import json
import copy
if __name__ == '__main__':
base_path = os.path.join('data', 'atis')
raw_data_path = os.path.join('data', 'atis', 'atis.json')
get_sql_data(base_path, raw_data_path)
| [
2,
19617,
28,
40477,
23,
198,
37811,
198,
47,
945,
278,
1366,
422,
3740,
1378,
12567,
13,
785,
14,
73,
28747,
31647,
16265,
14,
5239,
17,
25410,
12,
7890,
14,
21048,
14,
9866,
14,
7890,
198,
37811,
198,
198,
11748,
28686,
198,
11748... | 2.52381 | 126 |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
import struct
from ezdxf.tools.binarydata import ByteStream
if __name__ == "__main__":
pytest.main([__file__])
| [
2,
15069,
357,
66,
8,
12131,
11,
1869,
39193,
4270,
4224,
72,
198,
2,
13789,
25,
17168,
13789,
198,
198,
11748,
12972,
9288,
198,
11748,
2878,
198,
6738,
304,
89,
67,
26152,
13,
31391,
13,
39491,
7890,
1330,
30589,
12124,
628,
628,
... | 2.855072 | 69 |
import unittest
from easyneuron._testutils import log_errors
from easyneuron.exceptions import DimensionsError
from easyneuron.genetic.genomes import Genome, child_of
from easyneuron.metrics import mean_squared_error
from easyneuron.genetic.optimisers import BasicOptimiser
from numpy.random import randint
| [
11748,
555,
715,
395,
198,
198,
6738,
2562,
710,
44372,
13557,
9288,
26791,
1330,
2604,
62,
48277,
198,
6738,
2562,
710,
44372,
13,
1069,
11755,
1330,
41265,
12331,
198,
6738,
2562,
710,
44372,
13,
5235,
5139,
13,
5235,
2586,
1330,
5215... | 3.551724 | 87 |
from __future__ import unicode_literals
import unittest
import utn11
CANONICAL_PAIRS = [
('\u1010\u102D\u103A', '\u1010\u103A\u102D'),
('\u1010\u103A\u102D', '\u1010\u103A\u102D'),
('\u101B\u1031\u1037\u103E', '\u101B\u103E\u1031\u1037'),
('\u101B\u1031\u103E\u1037', '\u101B\u103E\u1031\u1037'),
('\u101B\u1037\u1031\u103E', '\u101B\u103E\u1031\u1037'),
('\u101B\u1037\u103E\u1031', '\u101B\u103E\u1031\u1037'),
('\u101B\u103E\u1031\u1037', '\u101B\u103E\u1031\u1037'),
('\u101B\u103E\u1037\u1031', '\u101B\u103E\u1031\u1037'),
# Mon:
('\u1010\u1031\u103A\u103E', '\u1010\u103E\u103A\u1031'),
('\u1010\u1031\u103E\u103A', '\u1010\u103E\u103A\u1031'),
('\u1010\u103A\u1031\u103E', '\u1010\u103E\u103A\u1031'),
('\u1010\u103A\u103E\u1031', '\u1010\u103E\u103A\u1031'),
('\u1010\u103E\u1031\u103A', '\u1010\u103E\u103A\u1031'),
('\u1010\u103E\u103A\u1031', '\u1010\u103E\u103A\u1031'),
# Mon:
('\u1000\u102C\u102F\u1031\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u102F\u1036\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1031\u102F\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1031\u1036\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1036\u102F\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102C\u1036\u1031\u102F', '\u1000\u1031\u102F\u102C\u1036'),
#
('\u1000\u102F\u102C\u1031\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u102C\u1036\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1031\u102C\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1031\u1036\u102C', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1036\u102C\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u102F\u1036\u1031\u102C', '\u1000\u1031\u102F\u102C\u1036'),
#
('\u1000\u1031\u102C\u102F\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u102C\u1036\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u102F\u102C\u1036', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u102F\u1036\u102C', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u1036\u102C\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1031\u1036\u102F\u102C', '\u1000\u1031\u102F\u102C\u1036'),
#
('\u1000\u1036\u102C\u102F\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u102C\u1031\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u102F\u102C\u1031', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u102F\u1031\u102C', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u1031\u102C\u102F', '\u1000\u1031\u102F\u102C\u1036'),
('\u1000\u1036\u1031\u102F\u102C', '\u1000\u1031\u102F\u102C\u1036'),
]
if __name__ == '__main__':
unittest.main()
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
555,
715,
395,
198,
198,
11748,
3384,
77,
1157,
198,
198,
44565,
1340,
20151,
62,
4537,
4663,
50,
796,
685,
198,
220,
19203,
59,
84,
8784,
15,
59,
84,
1537... | 1.649879 | 1,648 |
from PIL import Image,ImageDraw,ImageFont
ASCII = ''
for i in range(33, 128):
ASCII += chr(i)
Size = 16
FontImage = Image.new("1", (1660,300), "white")
Draw = ImageDraw.Draw(FontImage)
for i in range(33, 129):
Draw.rectangle([((i - 33) * (Size + 1), 0), ((i - 32) * (Size + 1), (Size + 1))], outline='black')
if i >= 33 + 2:
Draw.text(((i - 33) * (Size + 1) + 1, 1), chr(i-2), fill="black", font=CalibriL(17))
#DrawFont.line([(4,4 + 3),(1000,4 + 3)],fill='black')
# DrawFont.line([(4,4+16 + 5),(1000,4+16 + 5)],fill='black')
# DrawFont.text((4,4),ASCII,fill = "black", font = 微软雅黑(16))
#DrawFont.line([(4,30 + 3),(1000,30 + 3)],fill='black')
# DrawFont.line([(4,25+18 + 5),(1000,25+18 + 5)],fill='black')
# DrawFont.text((4,25),ASCII,fill = "black", font = 微软雅黑(18))
# DrawFont.text((4,50),ASCII,fill = "black", font = consola(16))
# DrawFont.text((4,70),ASCII,fill = "black", font = consola(17))
#DrawFont.text((4,90),ASCII,fill = "black", font = consola(18))
#DrawFont.text((4,110),ASCII,fill = "black", font = CalibriL(16))
# DrawFont.text((4,130),ASCII,fill = "black", font = CalibriL(17))
# DrawFont.text((4,150),ASCII,fill = "black", font = CalibriL(18))
# DrawFont.text((4,170),ASCII,fill = "black", font = calibri(16))
# DrawFont.text((4,190),ASCII,fill = "black", font = calibri(17))
# DrawFont.text((4,210),ASCII,fill = "black", font = calibri(18))
# DrawFont.text((4,170),ASCII,fill = "black", font = simsun(16))
# DrawFont.text((4,190),ASCII,fill = "black", font = simsun(17))
# DrawFont.text((4,210),ASCII,fill = "black", font = simsun(18))
FontImage.show()
| [
6738,
350,
4146,
1330,
7412,
11,
5159,
25302,
11,
5159,
23252,
201,
198,
201,
198,
201,
198,
201,
198,
42643,
3978,
796,
10148,
201,
198,
1640,
1312,
287,
2837,
7,
2091,
11,
13108,
2599,
201,
198,
220,
220,
220,
37101,
15853,
442,
8... | 2.201333 | 750 |
from setuptools import setup
setup(
name='pyscrabble',
version='1.0',
author='Armands Gedroics',
author_email='armands.gedroics@gmail.com',
url='https://github.com/armands3312/pyscrabble',
description='Multiplayer word game',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Games/Entertainment :: Board Games'
],
packages=['pyscrabble'],
entry_points={
'gui_scripts': [
'pyscrabble = pyscrabble.__main__:main'
]
}
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
79,
893,
6098,
397,
903,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
15,
3256,
198,
220,
220,
220,
1772,
11639,
26560,
1746,
402,
276,
305,... | 2.314607 | 267 |
from flask import Flask,jsonify
app = Flask(__name__)
@app.route("/")
if __name__ == "__main__":
app.run(host='0.0.0.0')
| [
6738,
42903,
1330,
46947,
11,
17752,
1958,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
31,
1324,
13,
38629,
7203,
14,
4943,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
598,
13,
... | 2.267857 | 56 |
a = A()
b = B()
d = D('king') # calling init method of class D()
d.met() # get first inheritance class for D that is B so called B class met function
| [
628,
628,
198,
64,
796,
317,
3419,
198,
65,
796,
347,
3419,
198,
67,
796,
360,
10786,
3364,
11537,
220,
1303,
4585,
2315,
2446,
286,
1398,
360,
3419,
198,
198,
67,
13,
4164,
3419,
220,
1303,
651,
717,
24155,
1398,
329,
360,
326,
3... | 3.038462 | 52 |
"""WS2801 controller on an Raspberry Pi."""
| [
37811,
19416,
2078,
486,
10444,
319,
281,
24244,
13993,
526,
15931,
198
] | 3.666667 | 12 |
import rpyc
from rpyc.utils.server import ThreadedServer
import unittest
CONNECT_CONFIG = {"allow_setattr": True}
class Test_rpyc_over_rpyc(unittest.TestCase):
"""Issue #346 shows that exceptions are being raised when an RPyC service method
calls another RPyC service, forwarding a non-trivial (and thus given as a proxy) argument.
"""
def test_immutable_object_return(self):
"""Tests using rpyc over rpyc---issue #346 reported traceback for this use case"""
obj = Fee()
result = self.conn.root.fee_str(obj)
self.assertEqual(str(obj), "Fee", "String representation of obj should not have changed")
self.assertEqual(str(result), "Fee", "String representation of result should be the same as obj")
if __name__ == "__main__":
unittest.main()
| [
11748,
374,
9078,
66,
198,
6738,
374,
9078,
66,
13,
26791,
13,
15388,
1330,
14122,
276,
10697,
198,
11748,
555,
715,
395,
198,
198,
10943,
48842,
62,
10943,
16254,
796,
19779,
12154,
62,
2617,
35226,
1298,
6407,
92,
628,
628,
198,
198... | 2.825175 | 286 |
from couchdbkit import ResourceNotFound
from django.db import DEFAULT_DB_ALIAS, models
from memoized import memoized
from dimagi.ext.couchdbkit import (
BooleanProperty,
Document,
DocumentSchema,
SchemaListProperty,
StringProperty,
)
from corehq.apps.app_manager.models import Application
from corehq.apps.cachehq.mixins import QuickCachedDocumentMixin
from corehq.apps.groups.models import Group
| [
6738,
18507,
9945,
15813,
1330,
20857,
3673,
21077,
198,
6738,
42625,
14208,
13,
9945,
1330,
5550,
38865,
62,
11012,
62,
1847,
43429,
11,
4981,
198,
6738,
16155,
1143,
1330,
16155,
1143,
198,
198,
6738,
5391,
18013,
13,
2302,
13,
66,
76... | 3.095588 | 136 |
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client objects for Turbinia."""
from __future__ import unicode_literals
from datetime import datetime
from datetime import timedelta
import json
import logging
import sys
import time
# TODO(aarontp): Selectively load dependencies based on configured backends
import psq
from turbinia import config
from turbinia.config import logger
from turbinia.lib.google_cloud import GoogleCloudFunction
from turbinia import task_manager
from turbinia import TurbiniaException
log = logging.getLogger('turbinia')
logger.setup()
class TurbiniaClient(object):
"""Client class for Turbinia.
Attributes:
task_manager (TaskManager): Turbinia task manager
"""
def list_jobs(self):
"""List the available jobs."""
log.info('Available Jobs:')
for job in self.task_manager.jobs:
log.info('\t{0:s}'.format(job.name))
def wait_for_request(self, instance, project, region, request_id=None,
poll_interval=60):
"""Polls and waits for Turbinia Request to complete.
Args:
instance (string): The Turbinia instance name (by default the same as the
PUBSUB_TOPIC in the config).
project (string): The name of the project.
region (string): The name of the region to execute in.
request_id (string): The Id of the request we want tasks for.
poll_interval (int): Interval of seconds between polling cycles.
"""
while True:
task_results = self.get_task_data(
instance, project, region, request_id=request_id)
completed_count = 0
uncompleted_count = 0
for task in task_results:
if task.get('successful') is not None:
completed_count += 1
else:
uncompleted_count += 1
if completed_count and completed_count == len(task_results):
break
log.info(
'{0:d} Tasks found, {1:d} completed. Waiting {2:d} seconds.'.format(
len(task_results), completed_count, poll_interval))
time.sleep(poll_interval)
log.info('All {0:d} Tasks completed'.format(len(task_results)))
def get_task_data(self, instance, project, region, days=0, task_id=None,
request_id=None, function_name='gettasks'):
"""Gets task data from Google Cloud Functions.
Args:
instance (string): The Turbinia instance name (by default the same as the
PUBSUB_TOPIC in the config).
project (string): The name of the project.
region (string): The name of the region to execute in.
days (int): The number of days we want history for.
task_id (string): The Id of the task.
request_id (string): The Id of the request we want tasks for.
Returns:
List of Task dict objects.
"""
function = GoogleCloudFunction(project_id=project, region=region)
func_args = {'instance': instance, 'kind': 'TurbiniaTask'}
if days:
start_time = datetime.now() - timedelta(days=days)
# Format this like '1990-01-01T00:00:00z' so we can cast it directly to a
# javascript Date() object in the cloud function.
start_string = start_time.strftime('%Y-%m-%dT%H:%M:%S')
func_args.update({'start_time': start_string})
elif task_id:
func_args.update({'task_id': task_id})
elif request_id:
func_args.update({'request_id': request_id})
response = function.ExecuteFunction(function_name, func_args)
if not response.has_key('result'):
log.error('No results found')
if response.get('error', '{}') != '{}':
msg = 'Error executing Cloud Function: [{0!s}].'.format(
response.get('error'))
log.error(msg)
log.debug('GCF response: {0!s}'.format(response))
raise TurbiniaException(
'Cloud Function {0:s} returned no results.'.format(function_name))
try:
results = json.loads(response['result'])
except (TypeError, ValueError) as e:
raise TurbiniaException(
'Could not deserialize result from GCF: [{0!s}]'.format(e))
return results[0]
def format_task_status(self, instance, project, region, days=0, task_id=None,
request_id=None, all_fields=False):
"""Formats the recent history for Turbinia Tasks.
Args:
instance (string): The Turbinia instance name (by default the same as the
PUBSUB_TOPIC in the config).
project (string): The name of the project.
region (string): The name of the zone to execute in.
days (int): The number of days we want history for.
task_id (string): The Id of the task.
request_id (string): The Id of the request we want tasks for.
all_fields (bool): Include all fields for the task, including task,
request ids and saved file paths.
Returns: String of task status
"""
task_results = self.get_task_data(instance, project, region, days, task_id,
request_id)
num_results = len(task_results)
results = []
if not num_results:
msg = '\nNo Tasks found.'
log.info(msg)
return msg
results.append('\nRetrieved {0:d} Task results:'.format(num_results))
for task in task_results:
if task.get('successful', None):
success = 'Successful'
elif task.get('successful', None) is None:
success = 'Running'
else:
success = 'Failed'
status = task.get('status') if task.get('status') else 'No task status'
if all_fields:
results.append(
'{0:s} request: {1:s} task: {2:s} {3:s} {4:s} {5:s}: {6:s}'.format(
task['last_update'], task['request_id'], task['id'],
task['name'], task['worker_name'], success, status))
saved_paths = task.get('saved_paths') if task.get('saved_paths') else []
for path in saved_paths:
results.append('\t{0:s}'.format(path))
else:
results.append('{0:s} {1:s} {2:s}: {3:s}'.format(
task['last_update'], task['name'], success, status))
return '\n'.join(results)
def send_request(self, request):
"""Sends a TurbiniaRequest message.
Args:
request: A TurbiniaRequest object.
"""
self.task_manager.server_pubsub.send_request(request)
class TurbiniaServer(TurbiniaClient):
"""Turbinia Server class."""
def start(self):
"""Start Turbinia Server."""
log.info('Running Turbinia Server.')
self.task_manager.run()
def add_evidence(self, evidence_):
"""Add evidence to be processed."""
self.task_manger.add_evidence(evidence_)
class TurbiniaPsqWorker(TurbiniaClient):
"""Turbinia PSQ Worker class.
Attributes:
worker (psq.Worker): PSQ Worker object
"""
def __init__(self, *args, **kwargs):
"""Initialization for PSQ Worker."""
super(TurbiniaPsqWorker, self).__init__(*args, **kwargs)
log.info(
'Starting PSQ listener on queue {0:s}'.format(
self.task_manager.psq.name))
self.worker = psq.Worker(queue=self.task_manager.psq)
def start(self):
"""Start Turbinia PSQ Worker."""
log.info('Running Turbinia PSQ Worker.')
self.worker.listen()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2177,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 2.588571 | 2,975 |