| code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars) |
|---|---|---|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import errno
import os
import uuid
def rename_over_existing(src, dest):
try:
        # On Windows, this will throw EEXIST; on Linux it won't.
        # On Win32 / Python 2.7 it throws OSError instead of IOError.
os.rename(src, dest)
except (OSError, IOError) as e:
if e.errno == errno.EEXIST:
# Clearly this song-and-dance is not in fact atomic,
# but if something goes wrong putting the new file in
# place at least the backup file might still be
# around.
backup = dest + ".bak-" + str(uuid.uuid4())
os.rename(dest, backup)
try:
os.rename(src, dest)
except Exception as e:
os.rename(backup, dest)
raise e
finally:
try:
os.remove(backup)
except Exception:
pass
else:
raise e
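# A minimal usage sketch (hypothetical file names, not part of the original
# module): write a fresh file, then swap it into place over the old one.
if __name__ == "__main__":
    with open("config.new", "w") as f:
        f.write("key: value\n")
    rename_over_existing("config.new", "config.yml")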
|
[
"os.rename",
"uuid.uuid4",
"os.remove"
] |
[((660, 680), 'os.rename', 'os.rename', (['src', 'dest'], {}), '(src, dest)\n', (669, 680), False, 'import os\n'), ((1034, 1057), 'os.rename', 'os.rename', (['dest', 'backup'], {}), '(dest, backup)\n', (1043, 1057), False, 'import os\n'), ((1091, 1111), 'os.rename', 'os.rename', (['src', 'dest'], {}), '(src, dest)\n', (1100, 1111), False, 'import os\n'), ((1008, 1020), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1018, 1020), False, 'import uuid\n'), ((1163, 1186), 'os.rename', 'os.rename', (['backup', 'dest'], {}), '(backup, dest)\n', (1172, 1186), False, 'import os\n'), ((1273, 1290), 'os.remove', 'os.remove', (['backup'], {}), '(backup)\n', (1282, 1290), False, 'import os\n')]
|
import socket
HOST = '127.0.0.1'
PORT = 65432
def menu():
print("\n---- Calculadora Estatística Descritiva----")
print("---- Escolha uma opção: ")
print("---- 1) Média ")
print("---- 2) Mediana ")
print("---- 3) Moda")
print("---- 4) Variância")
print("---- 5) Desvio Padrão")
print("---- 6) Gráfico Distribuição De frequência")
print("---- 7) Sair\n")
escolha = input()
return escolha
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
        while True:
            data = s.recv(1024)
            if not data:  # server closed the connection
                break
            message = data.decode()  # decode once instead of on every comparison
            # 'exit', 'digite', 'menu', 'mistake', and 'resultado' are the
            # server's protocol tokens, so they are kept as-is.
            if message == 'exit':
                s.close()
                exit()
            elif message == 'digite':
                print("Enter the data to be processed, separated by ',' (e.g.: 10, 20, 25, 46, 123, 1.200, 23): ")
                dados = input()
                s.sendall(dados.encode())
            elif message == 'menu':
                escolha = menu()
                s.sendall(escolha.encode())
            elif message.lower() == 'mistake':
                print("Wrong option, choose one of the listed options.")
            elif 'resultado' in message.lower():
                print("\n------------------ RESULT ------------------")
                print(repr(message))
                print("\n")
|
[
"socket.socket"
] |
[((510, 559), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (523, 559), False, 'import socket\n')]
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI hotspot_portal API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_hotspot_portal_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_6cbcecf65a0155fcad602d3ac16531a7_v3_1_0').validate(obj.response)
return True
def get_hotspot_portal_by_id(api):
endpoint_result = api.hotspot_portal.get_hotspot_portal_by_id(
id='string'
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_get_hotspot_portal_by_id(api, validator):
try:
assert is_valid_get_hotspot_portal_by_id(
validator,
get_hotspot_portal_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_hotspot_portal_by_id_default(api):
endpoint_result = api.hotspot_portal.get_hotspot_portal_by_id(
id='string'
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_get_hotspot_portal_by_id_default(api, validator):
try:
assert is_valid_get_hotspot_portal_by_id(
validator,
get_hotspot_portal_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_hotspot_portal_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_0ae4af25df565334b20a24c4878b68e4_v3_1_0').validate(obj.response)
return True
def update_hotspot_portal_by_id(api):
endpoint_result = api.hotspot_portal.update_hotspot_portal_by_id(
active_validation=False,
customizations={'portalTheme': {'id': 'string', 'name': 'string', 'themeData': 'string'}, 'portalTweakSettings': {'bannerColor': 'string', 'bannerTextColor': 'string', 'pageBackgroundColor': 'string', 'pageLabelAndTextColor': 'string'}, 'language': {'viewLanguage': 'string'}, 'globalCustomizations': {'mobileLogoImage': {'data': 'string'}, 'desktopLogoImage': {'data': 'string'}, 'backgroundImage': {'data': 'string'}, 'bannerImage': {'data': 'string'}, 'bannerTitle': 'string', 'contactText': 'string', 'footerElement': 'string'}, 'pageCustomizations': {'data': [{'key': 'string', 'value': 'string'}]}},
description='string',
id='string',
name='string',
payload=None,
portal_test_url='string',
portal_type='string',
settings={'portalSettings': {'httpsPort': 0, 'allowedInterfaces': ['string'], 'certificateGroupTag': 'string', 'endpointIdentityGroup': 'string', 'coaType': 'string', 'displayLang': 'string', 'fallbackLanguage': 'string', 'alwaysUsedLanguage': 'string'}, 'aupSettings': {'requireAccessCode': True, 'accessCode': 'string', 'includeAup': True, 'requireScrolling': True}, 'postAccessBannerSettings': {'includePostAccessBanner': True}, 'authSuccessSettings': {'successRedirect': 'string', 'redirectUrl': 'string'}, 'postLoginBannerSettings': {'includePostAccessBanner': True}, 'supportInfoSettings': {'includeSupportInfoPage': True, 'includeMacAddr': True, 'includeIpAddress': True, 'includeBrowserUserAgent': True, 'includePolicyServer': True, 'includeFailureCode': True, 'emptyFieldDisplay': 'string', 'defaultEmptyFieldValue': 'string'}}
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_update_hotspot_portal_by_id(api, validator):
try:
assert is_valid_update_hotspot_portal_by_id(
validator,
update_hotspot_portal_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_hotspot_portal_by_id_default(api):
endpoint_result = api.hotspot_portal.update_hotspot_portal_by_id(
active_validation=False,
id='string',
customizations=None,
description=None,
name=None,
payload=None,
portal_test_url=None,
portal_type=None,
settings=None
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_update_hotspot_portal_by_id_default(api, validator):
try:
assert is_valid_update_hotspot_portal_by_id(
validator,
update_hotspot_portal_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_hotspot_portal_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_1a344d1c6f535789b7badbaa502e8d3b_v3_1_0').validate(obj.response)
return True
def delete_hotspot_portal_by_id(api):
endpoint_result = api.hotspot_portal.delete_hotspot_portal_by_id(
id='string'
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_delete_hotspot_portal_by_id(api, validator):
try:
assert is_valid_delete_hotspot_portal_by_id(
validator,
delete_hotspot_portal_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_hotspot_portal_by_id_default(api):
endpoint_result = api.hotspot_portal.delete_hotspot_portal_by_id(
id='string'
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_delete_hotspot_portal_by_id_default(api, validator):
try:
assert is_valid_delete_hotspot_portal_by_id(
validator,
delete_hotspot_portal_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_hotspot_portal(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_d912b1c21e2b5dca8b56332d3a8ad13d_v3_1_0').validate(obj.response)
return True
def get_hotspot_portal(api):
endpoint_result = api.hotspot_portal.get_hotspot_portal(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sortasc='string',
sortdsc='string'
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_get_hotspot_portal(api, validator):
try:
assert is_valid_get_hotspot_portal(
validator,
get_hotspot_portal(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_hotspot_portal_default(api):
endpoint_result = api.hotspot_portal.get_hotspot_portal(
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_get_hotspot_portal_default(api, validator):
try:
assert is_valid_get_hotspot_portal(
validator,
get_hotspot_portal_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_hotspot_portal(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_0df78c9a3f72584dbd1c7b667b0e312f_v3_1_0').validate(obj.response)
return True
def create_hotspot_portal(api):
endpoint_result = api.hotspot_portal.create_hotspot_portal(
active_validation=False,
customizations={'portalTheme': {'id': 'string', 'name': 'string', 'themeData': 'string'}, 'portalTweakSettings': {'bannerColor': 'string', 'bannerTextColor': 'string', 'pageBackgroundColor': 'string', 'pageLabelAndTextColor': 'string'}, 'language': {'viewLanguage': 'string'}, 'globalCustomizations': {'mobileLogoImage': {'data': 'string'}, 'desktopLogoImage': {'data': 'string'}, 'backgroundImage': {'data': 'string'}, 'bannerImage': {'data': 'string'}, 'bannerTitle': 'string', 'contactText': 'string', 'footerElement': 'string'}, 'pageCustomizations': {'data': [{'key': 'string', 'value': 'string'}]}},
description='string',
name='string',
payload=None,
portal_test_url='string',
portal_type='string',
settings={'portalSettings': {'httpsPort': 0, 'allowedInterfaces': ['string'], 'certificateGroupTag': 'string', 'endpointIdentityGroup': 'string', 'coaType': 'string', 'displayLang': 'string', 'fallbackLanguage': 'string', 'alwaysUsedLanguage': 'string'}, 'aupSettings': {'requireAccessCode': True, 'accessCode': 'string', 'includeAup': True, 'requireScrolling': True}, 'postAccessBannerSettings': {'includePostAccessBanner': True}, 'authSuccessSettings': {'successRedirect': 'string', 'redirectUrl': 'string'}, 'postLoginBannerSettings': {'includePostAccessBanner': True}, 'supportInfoSettings': {'includeSupportInfoPage': True, 'includeMacAddr': True, 'includeIpAddress': True, 'includeBrowserUserAgent': True, 'includePolicyServer': True, 'includeFailureCode': True, 'emptyFieldDisplay': 'string', 'defaultEmptyFieldValue': 'string'}}
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_create_hotspot_portal(api, validator):
try:
assert is_valid_create_hotspot_portal(
validator,
create_hotspot_portal(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_hotspot_portal_default(api):
endpoint_result = api.hotspot_portal.create_hotspot_portal(
active_validation=False,
customizations=None,
description=None,
name=None,
payload=None,
portal_test_url=None,
portal_type=None,
settings=None
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_create_hotspot_portal_default(api, validator):
try:
assert is_valid_create_hotspot_portal(
validator,
create_hotspot_portal_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_version(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_91257d81be4f5a0486cc085499c19b1c_v3_1_0').validate(obj.response)
return True
def get_version(api):
endpoint_result = api.hotspot_portal.get_version(
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_get_version(api, validator):
try:
assert is_valid_get_version(
validator,
get_version(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_version_default(api):
endpoint_result = api.hotspot_portal.get_version(
)
return endpoint_result
@pytest.mark.hotspot_portal
def test_get_version_default(api, validator):
try:
assert is_valid_get_version(
validator,
get_version_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
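# A minimal invocation sketch (hypothetical file path; the 'api' and 'validator'
# arguments are pytest fixtures supplied by the suite's conftest.py, not by this file):
#
#   pytest -m hotspot_portal tests/api/v3_1_0/test_hotspot_portal.py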
|
[
"pytest.raises",
"pytest.mark.skipif"
] |
[((1426, 1527), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0')"], {'reason': '"""version does not match"""'}), "(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason=\n 'version does not match')\n", (1444, 1527), False, 'import pytest\n'), ((2298, 2352), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest)'], {}), '((JsonSchemaException, MalformedRequest))\n', (2311, 2352), False, 'import pytest\n'), ((2889, 2954), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest, TypeError)'], {}), '((JsonSchemaException, MalformedRequest, TypeError))\n', (2902, 2954), False, 'import pytest\n'), ((5410, 5464), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest)'], {}), '((JsonSchemaException, MalformedRequest))\n', (5423, 5464), False, 'import pytest\n'), ((6224, 6289), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest, TypeError)'], {}), '((JsonSchemaException, MalformedRequest, TypeError))\n', (6237, 6289), False, 'import pytest\n'), ((7113, 7167), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest)'], {}), '((JsonSchemaException, MalformedRequest))\n', (7126, 7167), False, 'import pytest\n'), ((7719, 7784), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest, TypeError)'], {}), '((JsonSchemaException, MalformedRequest, TypeError))\n', (7732, 7784), False, 'import pytest\n'), ((8679, 8733), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest)'], {}), '((JsonSchemaException, MalformedRequest))\n', (8692, 8733), False, 'import pytest\n'), ((9348, 9413), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest, TypeError)'], {}), '((JsonSchemaException, MalformedRequest, TypeError))\n', (9361, 9413), False, 'import pytest\n'), ((11812, 11866), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest)'], {}), '((JsonSchemaException, MalformedRequest))\n', (11825, 11866), False, 'import pytest\n'), ((12575, 12640), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest, TypeError)'], {}), '((JsonSchemaException, MalformedRequest, TypeError))\n', (12588, 12640), False, 'import pytest\n'), ((13349, 13403), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest)'], {}), '((JsonSchemaException, MalformedRequest))\n', (13362, 13403), False, 'import pytest\n'), ((13856, 13921), 'pytest.raises', 'pytest.raises', (['(JsonSchemaException, MalformedRequest, TypeError)'], {}), '((JsonSchemaException, MalformedRequest, TypeError))\n', (13869, 13921), False, 'import pytest\n')]
|
def load_data(logfile=None):
    import csv
    import numpy as np
    from datetime import datetime
    from keras.preprocessing.sequence import pad_sequences
vocabulary = list()
csvfile = open(logfile, 'r')
if "receipt" in logfile: # For Receipt Dataset
logreader = csv.reader(csvfile, delimiter=',')
elif "helpdesk" in logfile:
logreader = csv.reader(csvfile, delimiter=';') # For Helpdesk Dataset
next(logreader, None) # skip the headers
lastcase = ''
casestarttime = None
lasteventtime = None
firstLine = True
    lines = []  # all the activity sequences
    timeseqs = []  # time sequences (differences between consecutive events)
trace_start_list = []
numcases = 0
max_length = 0
trace_start_index = 0
for row in logreader:
if "receipt" in logfile: # For Receipt Dataset
t = datetime.strptime(row[2], "%Y-%m-%d %H:%M:%S.%f")
elif "helpdesk" in logfile:
t = datetime.strptime(row[2], "%d.%m.%Y-%H:%M:%S") # For Helpdesk Dataset
if row[0]!=lastcase: #'lastcase' is to save the last executed case for the loop
casestarttime = t
lasteventtime = t
lastcase = row[0]
if not firstLine:
lines.append(line)
timeseqs.append(times)
if len(line) > max_length:
max_length = len(line)
line = []
times = []
numcases += 1
if trace_start_index != 0:
trace_start_index-=1 # To remove index for last case which is not counted as a prefix
trace_start_list.append(trace_start_index)
elif trace_start_index == 0:
trace_start_list.append(trace_start_index)
if row[1] not in vocabulary:
vocabulary.append(row[1])
line.append(row[1])
timesincelastevent = t - lasteventtime
timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds + timesincelastevent.microseconds/1000000
        # +1 to avoid zero
times.append(timediff+1)
lasteventtime = t
firstLine = False
trace_start_index+=1
lines.append(line)
timeseqs.append(times)
vocabulary = {key: idx for idx, key in enumerate(vocabulary)}
divisor = np.mean([item for sublist in timeseqs for item in sublist]) #average time between events
numcases += 1
print("Num cases: ", numcases)
elems_per_fold = int(round(numcases/3))
if len(line) > max_length:
max_length = len(line)
X = []
X1 = []
y = []
y_t = []
categorical_features_name = []
categorical_features_time = []
max_length = 0
prefix_sizes = []
seqs = 0
vocab = set()
count = 0
for seq, time in zip(lines, timeseqs):
code = []
code.append(vocabulary[seq[0]])
code1 = []
code1.append(np.log(time[0]+1))
vocab.add(seq[0])
for i in range(1,len(seq)):
prefix_sizes.append(len(code))
if len(code)>max_length:
max_length = len(code)
# Building Activity Names and Time from Index for Explainability part
sub_feature_name = []
sub_feature_time = []
vocabulary_clone = vocabulary.copy()
for j in code[:]:
for name, index in vocabulary_clone.items():
if index == j:
sub_feature_name.append(name)
sub_feature_time.append("Time corresponding to "+name)
categorical_features_name.append(sub_feature_name)
categorical_features_time.append(sub_feature_time)
X.append(code[:])
X1.append(code1[:])
y.append(vocabulary[seq[i]])
y_t.append(time[i]/divisor)
code.append(vocabulary[seq[i]])
code1.append(np.log(time[i]+1))
seqs += 1
vocab.add(seq[i])
prefix_sizes = np.array(prefix_sizes)
print("Num sequences:", seqs)
vocab_size = len(vocab)
X = np.array(X)
X1 = np.array(X1)
y = np.array(y)
y_t = np.array(y_t)
categorical_features_name = np.array(categorical_features_name)
categorical_features_time = np.array(categorical_features_time)
y_unique = np.unique(y)
dict_y = {}
i = 0
for el in y_unique:
dict_y[el] = i
i += 1
for i in range(len(y)):
y[i] = dict_y[y[i]]
y_unique = np.unique(y, return_counts=True)
print("Classes: ", y_unique)
n_classes = y_unique[0].shape[0]
    # Establish the vocabulary for classes by removing the non-predictable class from the vocabulary (for the Helpdesk dataset)
    rebel = int  # sentinel; replaced below by the index of the class that is not predicted
    vocabulary_class = {}
    # Find where the class occurs which is not to be predicted
for key,value in enumerate(dict_y):
if (key!=value):
rebel = key
break
# deleting that class from dictionary
for name in vocabulary_clone.copy():
if (vocabulary_clone[name] == rebel):
vocabulary_clone.pop(name)
vocabulary_class = vocabulary_clone.copy()
for index,name in enumerate(vocabulary_class.copy()):
vocabulary_class[name] = index
# padding
padded_X = pad_sequences(X, maxlen=max_length, padding='pre', dtype='float64')
padded_X1 = pad_sequences(X1, maxlen=max_length, padding='pre', dtype='float64')
padded_features = pad_sequences(categorical_features_name, maxlen=max_length, padding='pre', dtype=object, value="Zero Padded Feature") #Padding feature name for Padded feature
padded_features_time = pad_sequences(categorical_features_time, maxlen=max_length, padding='pre', dtype=object, value="Zero Padded Feature") #Padding feature time for Padded feature
return ( (padded_X, padded_X1), (y, y_t), vocab_size, max_length, n_classes, divisor, prefix_sizes, vocabulary, vocabulary_class, padded_features, padded_features_time, categorical_features_name, trace_start_list)
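# A minimal usage sketch (hypothetical CSV path; the branching above expects
# 'receipt' or 'helpdesk' to appear in the file name):
#
#   (padded_X, padded_X1), (y, y_t), vocab_size, max_length, n_classes, \
#       divisor, prefix_sizes, vocabulary, vocabulary_class, padded_features, \
#       padded_features_time, categorical_features_name, trace_start_list = \
#       load_data('helpdesk.csv')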
|
[
"csv.reader",
"numpy.log",
"keras.preprocessing.sequence.pad_sequences",
"datetime.datetime.strptime",
"numpy.mean",
"numpy.array",
"numpy.unique"
] |
[((2377, 2436), 'numpy.mean', 'np.mean', (['[item for sublist in timeseqs for item in sublist]'], {}), '([item for sublist in timeseqs for item in sublist])\n', (2384, 2436), True, 'import numpy as np\n'), ((4099, 4121), 'numpy.array', 'np.array', (['prefix_sizes'], {}), '(prefix_sizes)\n', (4107, 4121), True, 'import numpy as np\n'), ((4193, 4204), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4201, 4204), True, 'import numpy as np\n'), ((4214, 4226), 'numpy.array', 'np.array', (['X1'], {}), '(X1)\n', (4222, 4226), True, 'import numpy as np\n'), ((4235, 4246), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4243, 4246), True, 'import numpy as np\n'), ((4257, 4270), 'numpy.array', 'np.array', (['y_t'], {}), '(y_t)\n', (4265, 4270), True, 'import numpy as np\n'), ((4304, 4339), 'numpy.array', 'np.array', (['categorical_features_name'], {}), '(categorical_features_name)\n', (4312, 4339), True, 'import numpy as np\n'), ((4372, 4407), 'numpy.array', 'np.array', (['categorical_features_time'], {}), '(categorical_features_time)\n', (4380, 4407), True, 'import numpy as np\n'), ((4424, 4436), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4433, 4436), True, 'import numpy as np\n'), ((4596, 4628), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (4605, 4628), True, 'import numpy as np\n'), ((5384, 5451), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X'], {'maxlen': 'max_length', 'padding': '"""pre"""', 'dtype': '"""float64"""'}), "(X, maxlen=max_length, padding='pre', dtype='float64')\n", (5397, 5451), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5468, 5536), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X1'], {'maxlen': 'max_length', 'padding': '"""pre"""', 'dtype': '"""float64"""'}), "(X1, maxlen=max_length, padding='pre', dtype='float64')\n", (5481, 5536), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5559, 5680), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['categorical_features_name'], {'maxlen': 'max_length', 'padding': '"""pre"""', 'dtype': 'object', 'value': '"""Zero Padded Feature"""'}), "(categorical_features_name, maxlen=max_length, padding='pre',\n dtype=object, value='Zero Padded Feature')\n", (5572, 5680), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5745, 5866), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['categorical_features_time'], {'maxlen': 'max_length', 'padding': '"""pre"""', 'dtype': 'object', 'value': '"""Zero Padded Feature"""'}), "(categorical_features_time, maxlen=max_length, padding='pre',\n dtype=object, value='Zero Padded Feature')\n", (5758, 5866), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((331, 365), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (341, 365), False, 'import csv\n'), ((420, 454), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (430, 454), False, 'import csv\n'), ((919, 968), 'datetime.datetime.strptime', 'datetime.strptime', (['row[2]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(row[2], '%Y-%m-%d %H:%M:%S.%f')\n", (936, 968), False, 'from datetime import datetime\n'), ((2975, 2994), 'numpy.log', 'np.log', (['(time[0] + 1)'], {}), '(time[0] + 1)\n', (2981, 2994), True, 'import numpy as np\n'), ((1022, 1068), 'datetime.datetime.strptime', 'datetime.strptime', (['row[2]', '"""%d.%m.%Y-%H:%M:%S"""'], {}), "(row[2], '%d.%m.%Y-%H:%M:%S')\n", (1039, 1068), False, 'from datetime import datetime\n'), ((3995, 4014), 'numpy.log', 'np.log', (['(time[i] + 1)'], {}), '(time[i] + 1)\n', (4001, 4014), True, 'import numpy as np\n')]
|
# Made by Mr. Have fun!
# Version 0.3 by H1GHL4ND3R
import sys
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
qn = "217_TestimonyOfTrust"
MARK_OF_TRUST_ID = 2734
LETTER_TO_ELF_ID = 1558
LETTER_TO_DARKELF_ID = 1556
ADENA = 57
LETTER_TO_DWARF_ID, LETTER_TO_ORC_ID, LETTER_TO_SERESIN_ID, SCROLL_OF_DARKELF_TRUST_ID, \
SCROLL_OF_ELF_TRUST_ID, SCROLL_OF_DWARF_TRUST_ID,SCROLL_OF_ORC_TRUST_ID,RECOMMENDATION_OF_HOLLIN_ID,\
ORDER_OF_OZZY_ID, BREATH_OF_WINDS_ID, SEED_OF_VERDURE_ID, LETTER_OF_THIFIELL_ID, \
BLOOD_OF_GUARDIAN_BASILISK_ID,GIANT_APHID_ID, STAKATOS_FLUIDS_ID, BASILISK_PLASMA_ID, \
HONEY_DEW_ID, STAKATO_ICHOR_ID, ORDER_OF_CLAYTON_ID, PARASITE_OF_LOTA_ID, \
LETTER_TO_MANAKIA_ID, LETTER_OF_MANAKIA_ID, LETTER_TO_NICHOLA_ID, ORDER_OF_NICHOLA_ID, \
HEART_OF_PORTA_ID = range(2737,2762)
DROPLIST={
# For condition 2
27120:[ORDER_OF_OZZY_ID,BREATH_OF_WINDS_ID, 1],
27121:[ORDER_OF_OZZY_ID,SEED_OF_VERDURE_ID, 1],
# For condition 6
20550 :[ORDER_OF_CLAYTON_ID,BLOOD_OF_GUARDIAN_BASILISK_ID,5],
20082 :[ORDER_OF_CLAYTON_ID,GIANT_APHID_ID, 5],
20084 :[ORDER_OF_CLAYTON_ID,GIANT_APHID_ID, 5],
20086 :[ORDER_OF_CLAYTON_ID,GIANT_APHID_ID, 5],
20087 :[ORDER_OF_CLAYTON_ID,GIANT_APHID_ID, 5],
20088 :[ORDER_OF_CLAYTON_ID,GIANT_APHID_ID, 5],
20157 :[ORDER_OF_CLAYTON_ID,STAKATOS_FLUIDS_ID, 5],
20230 :[ORDER_OF_CLAYTON_ID,STAKATOS_FLUIDS_ID, 5],
20232 :[ORDER_OF_CLAYTON_ID,STAKATOS_FLUIDS_ID, 5],
20234 :[ORDER_OF_CLAYTON_ID,STAKATOS_FLUIDS_ID, 5],
# For condition 19
20213 :[ORDER_OF_NICHOLA_ID,HEART_OF_PORTA_ID, 1]
}
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [SCROLL_OF_DARKELF_TRUST_ID, SCROLL_OF_ELF_TRUST_ID, SCROLL_OF_DWARF_TRUST_ID, SCROLL_OF_ORC_TRUST_ID, BREATH_OF_WINDS_ID,
SEED_OF_VERDURE_ID, ORDER_OF_OZZY_ID, LETTER_TO_ELF_ID, ORDER_OF_CLAYTON_ID, BASILISK_PLASMA_ID, STAKATO_ICHOR_ID, HONEY_DEW_ID,
LETTER_TO_DARKELF_ID, LETTER_OF_THIFIELL_ID, LETTER_TO_SERESIN_ID, LETTER_TO_ORC_ID, LETTER_OF_MANAKIA_ID, LETTER_TO_MANAKIA_ID,
PARASITE_OF_LOTA_ID, LETTER_TO_DWARF_ID, LETTER_TO_NICHOLA_ID, HEART_OF_PORTA_ID, ORDER_OF_NICHOLA_ID, RECOMMENDATION_OF_HOLLIN_ID,
BLOOD_OF_GUARDIAN_BASILISK_ID, STAKATOS_FLUIDS_ID, GIANT_APHID_ID]
def onAdvEvent (self,event,npc, player) :
htmltext = event
st = player.getQuestState(qn)
if not st : return
if event == "30191-04.htm" :
st.set("cond","1")
st.setState(State.STARTED)
st.playSound("ItemSound.quest_accept")
st.giveItems(LETTER_TO_ELF_ID,1)
st.giveItems(LETTER_TO_DARKELF_ID,1)
elif event == "30154-03.htm" :
st.takeItems(LETTER_TO_ELF_ID,1)
st.giveItems(ORDER_OF_OZZY_ID,1)
st.set("cond","2")
elif event == "30358-02.htm" :
st.takeItems(LETTER_TO_DARKELF_ID,1)
st.giveItems(LETTER_OF_THIFIELL_ID,1)
st.set("cond","5")
elif event == "30657-03.htm" :
if st.getPlayer().getLevel() >= 37 : # Condition 12 meet the Lord Kakai (Orc Master)
st.takeItems(LETTER_TO_SERESIN_ID,1)
st.giveItems(LETTER_TO_ORC_ID,1)
st.giveItems(LETTER_TO_DWARF_ID,1)
st.set("cond","12")
else: # Condition 11 A lack of Experience
htmltext = "30657-02.htm"
st.set("cond","11")
elif event == "30565-02.htm" :
st.takeItems(LETTER_TO_ORC_ID,1)
st.giveItems(LETTER_TO_MANAKIA_ID,1)
st.set("cond","13")
elif event == "30515-02.htm" :
st.takeItems(LETTER_TO_MANAKIA_ID,1)
st.set("cond","14")
elif event == "30531-02.htm" :
st.takeItems(LETTER_TO_DWARF_ID,1)
st.giveItems(LETTER_TO_NICHOLA_ID,1)
st.set("cond","18")
elif event == "30621-02.htm" :
st.takeItems(LETTER_TO_NICHOLA_ID,1)
st.giveItems(ORDER_OF_NICHOLA_ID,1)
st.set("cond","19")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != 30191 and id != State.STARTED : return htmltext
if id == State.CREATED :
st.set("cond","0")
st.set("id","0")
if npcId == 30191 :
if player.getRace().ordinal() == 0 :
if player.getLevel() >= 37 :
htmltext = "30191-03.htm"
else:
htmltext = "30191-01.htm"
st.exitQuest(1)
else:
htmltext = "30191-02.htm"
st.exitQuest(1)
elif id == State.COMPLETED : # Check if the quest is already made
if npcId == 30191 :
htmltext = "<html><body>This quest has already been completed.</body></html>"
        else : # The quest itself
try :
cond = st.getInt("cond")
except :
cond = None
if cond == 1 : # Condition 1 take the letter to Hierarch Asterios (Elven Master)
if npcId == 30191 :
htmltext = "30191-08.htm"
elif npcId == 30154 and st.getQuestItemsCount(LETTER_TO_ELF_ID) :
htmltext = "30154-01.htm"
elif cond == 2 : # Condition 2 kill the Luel of Zephy and Aktea of the Woods
if npcId == 30154 and st.getQuestItemsCount(ORDER_OF_OZZY_ID) :
htmltext = "30154-04.htm"
elif cond == 3 : # Condition 3 bring back the Breath of winds and Seed of Verdure to Asterios
if npcId == 30154 and st.getQuestItemsCount(BREATH_OF_WINDS_ID) and st.getQuestItemsCount(SEED_OF_VERDURE_ID) :
htmltext = "30154-05.htm"
st.takeItems(BREATH_OF_WINDS_ID,1)
st.takeItems(SEED_OF_VERDURE_ID,1)
st.takeItems(ORDER_OF_OZZY_ID,1)
st.giveItems(SCROLL_OF_ELF_TRUST_ID,1)
st.set("cond","4")
elif cond == 4 : # Condition 4 take the letter to Tetrarch Thifiell (Dark Elven Master)
if npcId == 30154 :
htmltext = "30154-06.htm"
elif npcId == 30358 and st.getQuestItemsCount(LETTER_TO_DARKELF_ID) :
htmltext = "30358-01.htm"
elif cond == 5 : # Condition 5 meet the Magister Clayton
if npcId == 30358 :
htmltext = "30358-05.htm"
elif npcId == 30464 and st.getQuestItemsCount(LETTER_OF_THIFIELL_ID) :
htmltext = "30464-01.htm"
st.takeItems(LETTER_OF_THIFIELL_ID,1)
st.giveItems(ORDER_OF_CLAYTON_ID,1)
st.set("cond","6")
elif cond == 6 : # Condition 6 get 10 of each, Stakato ichor, honey dew and basilisk plasma
if npcId == 30464 and st.getQuestItemsCount(ORDER_OF_CLAYTON_ID) :
htmltext = "30464-02.htm"
elif cond == 7 : # Condition 7 bring back the Stakato ichor, honey dew and basilisk plasma to Magister Clayton
if npcId == 30464 and st.getQuestItemsCount(ORDER_OF_CLAYTON_ID) and st.getQuestItemsCount(STAKATO_ICHOR_ID) and st.getQuestItemsCount(HONEY_DEW_ID) and st.getQuestItemsCount(BASILISK_PLASMA_ID) :
htmltext = "30464-03.htm"
st.set("cond","8")
elif cond == 8 : # Condition 8 take the Stakato ichor, honey dew and basilisk plasma to Thifiell
if npcId == 30358 and st.getQuestItemsCount(ORDER_OF_CLAYTON_ID) and st.getQuestItemsCount(STAKATO_ICHOR_ID) and st.getQuestItemsCount(HONEY_DEW_ID) and st.getQuestItemsCount(BASILISK_PLASMA_ID) :
htmltext = "30358-03.htm"
st.takeItems(ORDER_OF_CLAYTON_ID,1)
st.takeItems(BASILISK_PLASMA_ID,1)
st.takeItems(STAKATO_ICHOR_ID,1)
st.takeItems(HONEY_DEW_ID,1)
st.giveItems(SCROLL_OF_DARKELF_TRUST_ID,1)
st.set("cond","9")
elif cond == 9 : # Condition 9 take the Elven and Dark Elven scroll to Hollint
if npcId == 30191 and st.getQuestItemsCount(SCROLL_OF_ELF_TRUST_ID) and st.getQuestItemsCount(SCROLL_OF_DARKELF_TRUST_ID) :
htmltext = "30191-05.htm"
st.takeItems(SCROLL_OF_DARKELF_TRUST_ID,1)
st.takeItems(SCROLL_OF_ELF_TRUST_ID,1)
st.giveItems(LETTER_TO_SERESIN_ID,1)
st.set("cond","10")
elif npcId == 30358 :
htmltext = "30358-04.htm"
elif cond in [ 10, 11 ] : # Condition 10 meet the Seresin or Condition 11 A lack of Experience
if npcId == 30191 :
htmltext = "30191-09.htm"
elif npcId == 30657 and st.getQuestItemsCount(LETTER_TO_SERESIN_ID) :
htmltext = "30657-01.htm"
elif cond == 12 : # Condition 12 meet the Lord Kakai (Orc Master)
if npcId == 30657 :
htmltext = "30657-04.htm"
elif npcId == 30565 and st.getQuestItemsCount(LETTER_TO_ORC_ID) :
htmltext = "30565-01.htm"
elif cond == 13 : # Condition 13 meet the Seer Manakia
if npcId == 30565 :
htmltext = "30565-03.htm"
elif npcId == 30515 and st.getQuestItemsCount(LETTER_TO_MANAKIA_ID) :
htmltext = "30515-01.htm"
elif cond == 14 : # Condition 14 get 10 Parasite of lota
if npcId == 30515 :
htmltext = "30515-03.htm"
elif cond == 15 : # Condition 15 bring back the Parasite of lota to Seer Manakia
if npcId == 30515 and st.getQuestItemsCount(PARASITE_OF_LOTA_ID)==10 :
htmltext = "30515-04.htm"
st.takeItems(PARASITE_OF_LOTA_ID,10)
st.giveItems(LETTER_OF_MANAKIA_ID,1)
st.set("cond","16")
elif cond == 16 : # Condition 16 bring the letter of Manakia to the Lord Kakai
if npcId == 30565 and st.getQuestItemsCount(LETTER_OF_MANAKIA_ID) :
htmltext = "30565-04.htm"
st.takeItems(LETTER_OF_MANAKIA_ID,1)
st.giveItems(SCROLL_OF_ORC_TRUST_ID,1)
st.set("cond","17")
elif npcId == 30515 :
htmltext = "30515-05.htm"
elif cond == 17 : # Condition 17 meet the Lockirin (Dwarven Master)
if npcId == 30565 :
htmltext = "30565-05.htm"
elif npcId == 30531 and st.getQuestItemsCount(LETTER_TO_DWARF_ID) :
htmltext = "30531-01.htm"
elif cond == 18 : # Condition 18 take the letter to Nichola
if npcId == 30531 :
htmltext = "30531-03.htm"
elif npcId == 30621 and st.getQuestItemsCount(LETTER_TO_NICHOLA_ID) :
htmltext = "30621-01.htm"
elif cond == 19 : # Condition 19 get 1 Heart of Porta
if npcId == 30621 :
htmltext = "30621-03.htm"
elif cond == 20 : # Condition 20 bring the 1 Heart of Porta to Nichola
if npcId == 30621 and st.getQuestItemsCount(ORDER_OF_NICHOLA_ID) and st.getQuestItemsCount(HEART_OF_PORTA_ID) :
htmltext = "30621-04.htm"
st.takeItems(HEART_OF_PORTA_ID,1)
st.takeItems(ORDER_OF_NICHOLA_ID,1)
st.set("cond","21")
elif cond == 21 : # Condition 21 take the letter to Lockirin
if npcId == 30621 :
htmltext = "30621-05.htm"
elif npcId == 30531 :
htmltext = "30531-04.htm"
st.giveItems(SCROLL_OF_DWARF_TRUST_ID,1)
st.set("cond","22")
elif cond == 22 : # Condition 22 take the Orc and Dwarven scroll to High Priest Hollint
if npcId == 30191 and st.getQuestItemsCount(SCROLL_OF_DWARF_TRUST_ID) and st.getQuestItemsCount(SCROLL_OF_ORC_TRUST_ID) :
htmltext = "30191-06.htm"
st.takeItems(SCROLL_OF_DWARF_TRUST_ID,1)
st.takeItems(SCROLL_OF_ORC_TRUST_ID,1)
st.giveItems(RECOMMENDATION_OF_HOLLIN_ID,1)
st.set("cond","23")
elif npcId == 30657 :
htmltext = "30657-05.htm"
elif npcId == 30531 :
htmltext = "30531-05.htm"
elif cond == 23 : # Condition 23 take the Recommendation of Hollin to the High Priest Biotin
if npcId == 30191 :
htmltext = "30191-07.htm"
elif npcId == 30031 and st.getQuestItemsCount(RECOMMENDATION_OF_HOLLIN_ID) :
st.giveItems(ADENA,126106)
st.addExpAndSp(659149,46391)
st.giveItems(7562,96)
htmltext = "30031-01.htm"
st.takeItems(RECOMMENDATION_OF_HOLLIN_ID,1)
st.giveItems(MARK_OF_TRUST_ID,1)
st.unset("cond")
st.unset("id")
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != State.STARTED : return
npcId = npc.getNpcId()
cond = st.getInt("cond")
if cond == 2 and npcId in [ 20013, 20019, 20036, 20044 ] : # Condition 2 kill the Luel of Zephy and Aktea of the Woods
if npcId in [ 20036,20044 ] and st.getQuestItemsCount(BREATH_OF_WINDS_ID) == 0 :
st.set("id",str(st.getInt("id")+1))
if st.getRandom(100)<(st.getInt("id")*33) :
st.playSound("Itemsound.quest_before_battle")
st.addSpawn(27120,npc.getX(),npc.getY(),npc.getZ(),600000)
elif npcId in [ 20013,20019 ] and st.getQuestItemsCount(SEED_OF_VERDURE_ID) == 0 :
st.set("id",str(st.getInt("id")+1))
if st.getRandom(100)<(st.getInt("id")*33) :
st.playSound("Itemsound.quest_before_battle")
st.addSpawn(27121,npc.getX(),npc.getY(),npc.getZ(),600000)
elif cond == 14 : # Condition 14 get 10 Parasite of lota
parasite = st.getQuestItemsCount(PARASITE_OF_LOTA_ID)
if npcId == 20553 and parasite < 10 :
if st.getRandom(2) == 1 :
st.giveItems(PARASITE_OF_LOTA_ID,1)
if parasite+1 == 10 :
st.set("cond","15")
st.playSound("Itemsound.quest_middle")
else:
st.playSound("Itemsound.quest_itemget")
elif cond in [ 2,6,19 ] and npcId in DROPLIST.keys() :
required,item,maxqty=DROPLIST[npcId]
count = st.getQuestItemsCount(item)
if st.getQuestItemsCount(required) and count < maxqty :
st.giveItems(item,1)
if count+1 == maxqty : # Check if got enough number of items
# Special Sound event
if npcId in [ 20550, 20082, 20084, 20086, 20087, 20088, 20157, 20230, 20232, 20234 ] :
# Condition 6 get 10 of each, Stakato ichor, honey dew and basilisk plasma, and transform it
if item == BLOOD_OF_GUARDIAN_BASILISK_ID :
st.takeItems(BLOOD_OF_GUARDIAN_BASILISK_ID, maxqty)
st.giveItems(BASILISK_PLASMA_ID, 1)
elif item == GIANT_APHID_ID :
st.takeItems(GIANT_APHID_ID, maxqty)
st.giveItems(HONEY_DEW_ID, 1)
elif item == STAKATOS_FLUIDS_ID :
st.takeItems(STAKATOS_FLUIDS_ID, maxqty)
st.giveItems(STAKATO_ICHOR_ID, 1)
# Check if player got all the items of condition 6 and set the condition to 7
if st.getQuestItemsCount(BASILISK_PLASMA_ID) and st.getQuestItemsCount(HONEY_DEW_ID) and st.getQuestItemsCount(STAKATO_ICHOR_ID) :
st.set("cond","7")
st.playSound("Itemsound.quest_middle")
else:
st.playSound("Itemsound.quest_itemget")
elif npcId in [ 27120,27121 ] : # Condition 2 kill the Luel of Zephy and Aktea of the Woods
# Check if player got all the items of condition 2 and set the condition to 3
if st.getQuestItemsCount(SEED_OF_VERDURE_ID) and st.getQuestItemsCount(BREATH_OF_WINDS_ID) :
st.set("cond","3")
st.playSound("Itemsound.quest_middle")
else :
st.playSound("Itemsound.quest_itemget")
elif npcId == 20213 : # Condition 19 Porta
st.set("cond","20")
st.playSound("ItemSound.quest_middle")
else:
st.playSound("Itemsound.quest_itemget")
return
QUEST = Quest(217,qn,"Testimony Of Trust")
QUEST.addStartNpc(30191)
QUEST.addTalkId(30191)
QUEST.addTalkId(30031)
QUEST.addTalkId(30154)
QUEST.addTalkId(30358)
QUEST.addTalkId(30464)
QUEST.addTalkId(30515)
QUEST.addTalkId(30531)
QUEST.addTalkId(30565)
QUEST.addTalkId(30621)
QUEST.addTalkId(30657)
for i in DROPLIST.keys()+[20013,20019,20036,20044,20553] :
QUEST.addKillId(i)
|
[
"ru.catssoftware.gameserver.model.quest.jython.QuestJython.__init__"
] |
[((2004, 2042), 'ru.catssoftware.gameserver.model.quest.jython.QuestJython.__init__', 'JQuest.__init__', (['self', 'id', 'name', 'descr'], {}), '(self, id, name, descr)\n', (2019, 2042), True, 'from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest\n')]
|
# Copyright 2006 <NAME> and contributors
# Copyright (C) 2009 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from Widget import Widget
from pyjamas.ui import Event
from MouseListener import MouseHandler
from ClickListener import ClickHandler
prefetchImages = {}
class Image(Widget, MouseHandler, ClickHandler):
def __init__(self, url="", **kwargs):
if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-Image"
if url: kwargs['Url'] = url
if kwargs.has_key('Element'):
element = kwargs.pop('Element')
else:
element = DOM.createImg()
self.setElement(element)
Widget.__init__(self, **kwargs)
MouseHandler.__init__(self)
ClickHandler.__init__(self)
self.sinkEvents(Event.ONLOAD | Event.ONERROR)
self.loadListeners = []
def addLoadListener(self, listener):
self.loadListeners.append(listener)
def removeLoadListener(self, listener):
self.loadListeners.remove(listener)
def getUrl(self):
return DOM.getAttribute(self.getElement(), "src")
def onBrowserEvent(self, event):
Widget.onBrowserEvent(self, event)
type = DOM.eventGetType(event)
if type == "load":
for listener in self.loadListeners:
listener.onLoad(self)
elif type == "error":
for listener in self.loadListeners:
listener.onError(self)
def prefetch(self, url):
img = DOM.createImg()
DOM.setElemAttribute(img, "src", url)
prefetchImages[url] = img
def setUrl(self, url):
DOM.setElemAttribute(self.getElement(), "src", url)
Factory.registerClass('pyjamas.ui.Image', Image)
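# A minimal usage sketch (hypothetical image URL and panel setup, following
# pyjamas conventions):
#
#   from pyjamas.ui.RootPanel import RootPanel
#   img = Image("images/logo.png")
#   img.addLoadListener(listener)   # 'listener' implements onLoad/onError
#   RootPanel().add(img)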
|
[
"pyjamas.DOM.createImg",
"pyjamas.DOM.eventGetType",
"MouseListener.MouseHandler.__init__",
"pyjamas.Factory.registerClass",
"pyjamas.DOM.setElemAttribute",
"ClickListener.ClickHandler.__init__",
"Widget.Widget.__init__",
"Widget.Widget.onBrowserEvent"
] |
[((2237, 2285), 'pyjamas.Factory.registerClass', 'Factory.registerClass', (['"""pyjamas.ui.Image"""', 'Image'], {}), "('pyjamas.ui.Image', Image)\n", (2258, 2285), False, 'from pyjamas import Factory\n'), ((1212, 1243), 'Widget.Widget.__init__', 'Widget.__init__', (['self'], {}), '(self, **kwargs)\n', (1227, 1243), False, 'from Widget import Widget\n'), ((1252, 1279), 'MouseListener.MouseHandler.__init__', 'MouseHandler.__init__', (['self'], {}), '(self)\n', (1273, 1279), False, 'from MouseListener import MouseHandler\n'), ((1288, 1315), 'ClickListener.ClickHandler.__init__', 'ClickHandler.__init__', (['self'], {}), '(self)\n', (1309, 1315), False, 'from ClickListener import ClickHandler\n'), ((1704, 1738), 'Widget.Widget.onBrowserEvent', 'Widget.onBrowserEvent', (['self', 'event'], {}), '(self, event)\n', (1725, 1738), False, 'from Widget import Widget\n'), ((1754, 1777), 'pyjamas.DOM.eventGetType', 'DOM.eventGetType', (['event'], {}), '(event)\n', (1770, 1777), False, 'from pyjamas import DOM\n'), ((2052, 2067), 'pyjamas.DOM.createImg', 'DOM.createImg', ([], {}), '()\n', (2065, 2067), False, 'from pyjamas import DOM\n'), ((2076, 2113), 'pyjamas.DOM.setElemAttribute', 'DOM.setElemAttribute', (['img', '"""src"""', 'url'], {}), "(img, 'src', url)\n", (2096, 2113), False, 'from pyjamas import DOM\n'), ((1155, 1170), 'pyjamas.DOM.createImg', 'DOM.createImg', ([], {}), '()\n', (1168, 1170), False, 'from pyjamas import DOM\n')]
|
#!/usr/bin/env python
import logging
import os
import sys
from argparse import ArgumentParser
from pprint import pformat
import yaml
from future.utils import lfilter, lmap
from foxylib.tools.collections.collections_tool import lchain, DictTool
from foxylib.tools.env.env_tool import EnvTool
from foxylib.tools.env.yaml.filepath2envvars import Filepath2Envvar
from foxylib.tools.env.yaml.yaml_env_tool import Yaml2EnvTool, Lpassline
from foxylib.tools.jinja2.jinja2_tool import Jinja2Renderer
from foxylib.tools.log.foxylib_logger import FoxylibLogger
from foxylib.tools.string.string_tool import str2stripped
def lpasslines_context2envvars(lpasslines, h_context, value_wrapper):
for lpassline in lpasslines:
filepath = Lpassline.lpassline_context2filepath(lpassline, h_context)
yield from Filepath2Envvar.filepath_context2envvars(filepath, h_context, value_wrapper)
def main():
logger = FoxylibLogger.func_level2logger(main, logging.DEBUG)
h_context = dict(os.environ)
assert "ENV" in h_context
logger.debug(pformat({
'h_context_major': DictTool.filter_keys(h_context, {"REPO_DIR", "HOME_DIR", "ENV"}),
}))
# how to output bash pipe friendly in python
# reference: https://stackoverflow.com/q/34459274/1902064
value_wrapper = Filepath2Envvar.args2value_wrapper(sys.argv[1:])
for envvar in lpasslines_context2envvars(sys.stdin, h_context, value_wrapper):
# logger.debug(pformat({'envvar':envvar}))
print(envvar)
def main_old():
    logger = FoxylibLogger.func_level2logger(main_old, logging.DEBUG)
# if len(sys.argv) < 2:
# print("usage: {} <listfile_filepath> <repo_dir>".format(sys.argv[0]))
# sys.exit(1)
l = lfilter(bool, (map(str2stripped, sys.stdin)))
# tmplt_filepath = sys.argv[1]
# env = sys.argv[2]
# repo_dir = sys.argv[2]
# l = lfilter(bool, map(str2stripped, FileTool.filepath2utf8_lines(tmplt_filepath)))
logger.debug({"l": l})
h_env = dict(os.environ)
filepath_list = lmap(lambda s:Jinja2Renderer.text2text(s.split(maxsplit=1)[1], data=h_env), l)
# data = {"ENV": env, "REPO_DIR":repo_dir, "HOME_DIR":os.path.expanduser('~')}
str_tmplt = "\n".join([Jinja2Renderer.textfile2text(fp, h_env)
for fp in filepath_list
if fp.endswith(".yaml") or fp.endswith(".yml")])
envname_list = lfilter(bool, [h_env.get("ENV"), "__DEFAULT__"])
json_yaml = yaml.load(str_tmplt, Loader=yaml.SafeLoader)
kv_list = EnvTool.yaml_envnames2kv_list(json_yaml, h_env, envname_list)
# logger.debug({"envname_list": envname_list})
# logger.debug({"str_tmplt": str_tmplt})
# logger.debug({"json_yaml": json_yaml})
# logger.debug({"kv_list": kv_list})
str_export_list = ['export {0}="{1}"'.format(k, v_yaml)
for k, v_yaml in kv_list]
str_export = "\n".join(str_export_list)
# logger.debug(pformat({
# 'json_yaml':json_yaml,
# 'kv_list':kv_list,
# "str_export_list": str_export_list
# }))
print(str_export)
if __name__== "__main__":
FoxylibLogger.attach_stderr2loggers(logging.DEBUG)
main()
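# A minimal invocation sketch (hypothetical file names and output shape; the
# script reads lpass-style lines from stdin and prints one entry per line):
#
#   ENV=dev python yaml2envvars.py < lpasslines.txt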
|
[
"foxylib.tools.collections.collections_tool.DictTool.filter_keys",
"yaml.load",
"foxylib.tools.log.foxylib_logger.FoxylibLogger.attach_stderr2loggers",
"foxylib.tools.log.foxylib_logger.FoxylibLogger.func_level2logger",
"foxylib.tools.env.yaml.filepath2envvars.Filepath2Envvar.args2value_wrapper",
"foxylib.tools.env.yaml.yaml_env_tool.Lpassline.lpassline_context2filepath",
"foxylib.tools.jinja2.jinja2_tool.Jinja2Renderer.textfile2text",
"foxylib.tools.env.env_tool.EnvTool.yaml_envnames2kv_list",
"foxylib.tools.env.yaml.filepath2envvars.Filepath2Envvar.filepath_context2envvars"
] |
[((919, 971), 'foxylib.tools.log.foxylib_logger.FoxylibLogger.func_level2logger', 'FoxylibLogger.func_level2logger', (['main', 'logging.DEBUG'], {}), '(main, logging.DEBUG)\n', (950, 971), False, 'from foxylib.tools.log.foxylib_logger import FoxylibLogger\n'), ((1297, 1345), 'foxylib.tools.env.yaml.filepath2envvars.Filepath2Envvar.args2value_wrapper', 'Filepath2Envvar.args2value_wrapper', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (1331, 1345), False, 'from foxylib.tools.env.yaml.filepath2envvars import Filepath2Envvar\n'), ((1533, 1585), 'foxylib.tools.log.foxylib_logger.FoxylibLogger.func_level2logger', 'FoxylibLogger.func_level2logger', (['main', 'logging.DEBUG'], {}), '(main, logging.DEBUG)\n', (1564, 1585), False, 'from foxylib.tools.log.foxylib_logger import FoxylibLogger\n'), ((2471, 2515), 'yaml.load', 'yaml.load', (['str_tmplt'], {'Loader': 'yaml.SafeLoader'}), '(str_tmplt, Loader=yaml.SafeLoader)\n', (2480, 2515), False, 'import yaml\n'), ((2530, 2591), 'foxylib.tools.env.env_tool.EnvTool.yaml_envnames2kv_list', 'EnvTool.yaml_envnames2kv_list', (['json_yaml', 'h_env', 'envname_list'], {}), '(json_yaml, h_env, envname_list)\n', (2559, 2591), False, 'from foxylib.tools.env.env_tool import EnvTool\n'), ((3128, 3178), 'foxylib.tools.log.foxylib_logger.FoxylibLogger.attach_stderr2loggers', 'FoxylibLogger.attach_stderr2loggers', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (3163, 3178), False, 'from foxylib.tools.log.foxylib_logger import FoxylibLogger\n'), ((736, 794), 'foxylib.tools.env.yaml.yaml_env_tool.Lpassline.lpassline_context2filepath', 'Lpassline.lpassline_context2filepath', (['lpassline', 'h_context'], {}), '(lpassline, h_context)\n', (772, 794), False, 'from foxylib.tools.env.yaml.yaml_env_tool import Yaml2EnvTool, Lpassline\n'), ((815, 891), 'foxylib.tools.env.yaml.filepath2envvars.Filepath2Envvar.filepath_context2envvars', 'Filepath2Envvar.filepath_context2envvars', (['filepath', 'h_context', 'value_wrapper'], {}), '(filepath, h_context, value_wrapper)\n', (855, 891), False, 'from foxylib.tools.env.yaml.filepath2envvars import Filepath2Envvar\n'), ((2219, 2258), 'foxylib.tools.jinja2.jinja2_tool.Jinja2Renderer.textfile2text', 'Jinja2Renderer.textfile2text', (['fp', 'h_env'], {}), '(fp, h_env)\n', (2247, 2258), False, 'from foxylib.tools.jinja2.jinja2_tool import Jinja2Renderer\n'), ((1091, 1155), 'foxylib.tools.collections.collections_tool.DictTool.filter_keys', 'DictTool.filter_keys', (['h_context', "{'REPO_DIR', 'HOME_DIR', 'ENV'}"], {}), "(h_context, {'REPO_DIR', 'HOME_DIR', 'ENV'})\n", (1111, 1155), False, 'from foxylib.tools.collections.collections_tool import lchain, DictTool\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for test_runner script."""
from __future__ import print_function
import sys
import unittest
import benchmark
import mock
class TestBenchmarkRunner(unittest.TestCase):
def test_get_benchmark_methods_filter(self):
"""Tests returning methods on a class based on a filter."""
config = mock.Mock()
config.workspace = 'workspace'
config.benchmark_method_patterns = ['new_foo.BenchmarkClass.filter:bench.*']
benchmark_runner = benchmark.BenchmarkRunner(config)
mock_benchmark_class = mock.Mock()
mock_benchmark_class.benchmark_method_1 = 'foo'
mock_module = mock.Mock()
sys.modules['new_foo'] = mock_module
mock_module.BenchmarkClass.return_value = mock_benchmark_class
methods = benchmark_runner._get_benchmark_methods()
self.assertEqual(1, len(methods))
self.assertEqual('new_foo.BenchmarkClass.benchmark_method_1', methods[0])
def test_get_benchmark_methods_exact_match(self):
"""Tests returning methods on a class based on a filter."""
config = mock.Mock()
config.workspace = 'workspace'
config.benchmark_method_patterns = [
'new_foo.BenchmarkClass.benchmark_method_1',
'new_foo.BenchmarkClass.benchmark_method_2']
benchmark_runner = benchmark.BenchmarkRunner(config)
methods = benchmark_runner._get_benchmark_methods()
self.assertEqual(['new_foo.BenchmarkClass.benchmark_method_1',
'new_foo.BenchmarkClass.benchmark_method_2'], methods)
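# A minimal invocation sketch (assumes the local 'benchmark' module and the
# 'mock' package are importable; the module name is hypothetical):
#
#   python -m unittest test_benchmark_runner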
|
[
"benchmark.BenchmarkRunner",
"mock.Mock"
] |
[((994, 1005), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1003, 1005), False, 'import mock\n'), ((1145, 1178), 'benchmark.BenchmarkRunner', 'benchmark.BenchmarkRunner', (['config'], {}), '(config)\n', (1170, 1178), False, 'import benchmark\n'), ((1207, 1218), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1216, 1218), False, 'import mock\n'), ((1290, 1301), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1299, 1301), False, 'import mock\n'), ((1714, 1725), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1723, 1725), False, 'import mock\n'), ((1931, 1964), 'benchmark.BenchmarkRunner', 'benchmark.BenchmarkRunner', (['config'], {}), '(config)\n', (1956, 1964), False, 'import benchmark\n')]
|
from abc import ABCMeta
import json
import jsonpickle
try:
from .numpy_handlers import NPReadable
from .numpy_handlers import register_numpy_handlers
np_readable = NPReadable()
register_numpy_handlers()
except Exception:  # numpy handlers are optional; fall back silently if unavailable
    np_readable = None
class JSONer(object):
"""The abstract class **JSONer** adds to its descendants the required
functions in order to export/import any given object as json. This can be
useful to share the data between different languages or simply to store
pre-calculated data.
"""
__metaclass__ = ABCMeta
def to_json(self, unpicklable = True, readable = False, api = False):
"""Export the object to a json formated string.
:param bool unpicklable:
When :py:data:`False` the resulting json cannot
be reloaded as the same object again. Makes the json smaller.
:param bool readable:
When flattening complex object variables to json,
this will include a human-readable version together with the stored
json. See `jsonpickle <http://jsonpickle.github.io/>`_ on how to
flatten and restore complex variable types. It does not affect the
conversion of the json string into the object again.
:param bool api:
When flattening complex object variables to json, this
will substitute the compressed data by the human readable version.
Although it might allow for the conversion of the json into the
object, some attributes will become their simplest representation.
            For example, a `numpy array <http://www.numpy.org/>`_ will be
            reloaded as a simple python array.
:return: a json representation of the object
:rtype: :py:data:`str`
"""
if np_readable is not None:
np_readable.status = readable
np_readable.api = api
return jsonpickle.encode(self, unpicklable=unpicklable)
def to_dict(self, unpicklable = True, readable = False, api = False):
"""Export the object to a json as a dictionary.
:param bool unpicklable:
When :py:data:`False` the resulting json cannot
be reloaded as the same object again. Makes the json smaller.
:param bool readable:
When flattening complex object variables to json,
this will include a human-readable version together with the stored
json. See `jsonpickle <http://jsonpickle.github.io/>`_ on how to
flatten and restore complex variable types. It does not affect the
conversion of the json string into the object again.
:param bool api:
When flattening complex object variables to json, this
will substitute the compressed data by the human readable version.
Although it might allow for the conversion of the json into the
object, some attributes will become their simplest representation.
            For example, a `numpy array <http://www.numpy.org/>`_ will be
            reloaded as a simple python array.
:return: a json dictionary object
:rtype: :py:data:`dict`
"""
return json.loads(self.to_json(unpicklable, readable, api))
@classmethod
def from_json(cls, json_data):
"""Given a json-formated string, it recreates the object.
:param str json_data: json-formated string.
:return: an instance of the caller object type.
:rtype: :py:data:`object instance`
"""
return jsonpickle.decode(json_data)
@classmethod
def from_dict(cls, json_dict):
"""Given a json dictionary, it recreates the object.
:param dict json_dict: json dictionary.
:return: an instance of the caller object type.
:rtype: :py:data:`object instance`
"""
return cls.from_json(json.dumps(json_dict))
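# Illustrative usage sketch (added for clarity; the Point class below is
# hypothetical, not part of the original module). Any subclass of JSONer
# gains a json round-trip:
#
#     class Point(JSONer):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     p = Point(1, 2)
#     restored = Point.from_json(p.to_json())  # restored.x == 1, restored.y == 2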
|
[
"jsonpickle.decode",
"json.dumps",
"jsonpickle.encode"
] |
[((1934, 1982), 'jsonpickle.encode', 'jsonpickle.encode', (['self'], {'unpicklable': 'unpicklable'}), '(self, unpicklable=unpicklable)\n', (1951, 1982), False, 'import jsonpickle\n'), ((3578, 3606), 'jsonpickle.decode', 'jsonpickle.decode', (['json_data'], {}), '(json_data)\n', (3595, 3606), False, 'import jsonpickle\n'), ((3911, 3932), 'json.dumps', 'json.dumps', (['json_dict'], {}), '(json_dict)\n', (3921, 3932), False, 'import json\n')]
|
from rest_framework.serializers import ModelSerializer,HyperlinkedIdentityField,SerializerMethodField
from posts.models import Post
from comments.api.serializers import CommentSerializer
from comments.models import Comment
from accounts.api.serializers import UserDetailSerializer
class PostCreateUpdateSerializer(ModelSerializer):
class Meta:
model = Post
fields = ['title','content','publish']
post_detail_url = HyperlinkedIdentityField(
view_name='posts-api:detail',
lookup_field='slug',
)
class PostListSerializer(ModelSerializer):
url = post_detail_url
user = UserDetailSerializer(read_only=True)
delete_url = HyperlinkedIdentityField(
view_name='posts-api:delete',
lookup_field='slug',
)
class Meta:
model = Post
fields = ['url','delete_url','user','title','content','publish']
class PostDetailSerializer(ModelSerializer):
url = post_detail_url
user = UserDetailSerializer(read_only=True)
image = SerializerMethodField()
html = SerializerMethodField()
comments = SerializerMethodField()
class Meta:
model = Post
fields = ['comments','html','user','image','url','id','title','slug','content','publish']
def get_html(self,obj):
return obj.get_markdown()
def get_image(self,obj):
try:
image = obj.image.url
except:
image = None
return image
def get_comments(self,obj):
c_qs = Comment.objects.filter_by_instance(obj)
comments = CommentSerializer(c_qs,many=True).data
return comments
|
[
"rest_framework.serializers.HyperlinkedIdentityField",
"rest_framework.serializers.SerializerMethodField",
"comments.models.Comment.objects.filter_by_instance",
"comments.api.serializers.CommentSerializer",
"accounts.api.serializers.UserDetailSerializer"
] |
[((436, 511), 'rest_framework.serializers.HyperlinkedIdentityField', 'HyperlinkedIdentityField', ([], {'view_name': '"""posts-api:detail"""', 'lookup_field': '"""slug"""'}), "(view_name='posts-api:detail', lookup_field='slug')\n", (460, 511), False, 'from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField\n'), ((605, 641), 'accounts.api.serializers.UserDetailSerializer', 'UserDetailSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (625, 641), False, 'from accounts.api.serializers import UserDetailSerializer\n'), ((659, 734), 'rest_framework.serializers.HyperlinkedIdentityField', 'HyperlinkedIdentityField', ([], {'view_name': '"""posts-api:delete"""', 'lookup_field': '"""slug"""'}), "(view_name='posts-api:delete', lookup_field='slug')\n", (683, 734), False, 'from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField\n'), ((953, 989), 'accounts.api.serializers.UserDetailSerializer', 'UserDetailSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (973, 989), False, 'from accounts.api.serializers import UserDetailSerializer\n'), ((1002, 1025), 'rest_framework.serializers.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (1023, 1025), False, 'from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField\n'), ((1037, 1060), 'rest_framework.serializers.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (1058, 1060), False, 'from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField\n'), ((1076, 1099), 'rest_framework.serializers.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (1097, 1099), False, 'from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField\n'), ((1487, 1526), 'comments.models.Comment.objects.filter_by_instance', 'Comment.objects.filter_by_instance', (['obj'], {}), '(obj)\n', (1521, 1526), False, 'from comments.models import Comment\n'), ((1546, 1580), 'comments.api.serializers.CommentSerializer', 'CommentSerializer', (['c_qs'], {'many': '(True)'}), '(c_qs, many=True)\n', (1563, 1580), False, 'from comments.api.serializers import CommentSerializer\n')]
|
import inspect
import imp
import os
import sys
import types
import time
import importlib
import pico
_mtimes = {}
def module_dict(module):
module_dict = {}
pico_exports = getattr(module, 'pico_exports', None)
members = inspect.getmembers(module)
def function_filter(x):
(name, f) = x
return ((inspect.isfunction(f) or inspect.ismethod(f))
and (pico_exports is None or name in pico_exports)
and f.__module__ == module.__name__
and not name.startswith('_')
and not hasattr(f, 'private'))
def class_filter(x):
(name, c) = x
return (inspect.isclass(c)
and (issubclass(c, pico.Pico) or issubclass(c, pico.object))
and (pico_exports is None or name in pico_exports)
and c.__module__ == module.__name__)
class_defs = map(class_dict, filter(class_filter, members))
function_defs = map(func_dict, filter(function_filter, members))
module_dict['classes'] = class_defs
module_dict['functions'] = function_defs
module_dict['__doc__'] = module.__doc__
module_dict['__headers__'] = getattr(module, '__headers__', {})
return module_dict
def class_dict(x):
name, cls = x
def method_filter(x):
(name, f) = x
return ((inspect.isfunction(f) or inspect.ismethod(f))
and (not name.startswith('_') or name == '__init__')
and not hasattr(f, 'private'))
class_dict = {'__class__': cls.__name__}
class_dict['name'] = name
methods = filter(method_filter, inspect.getmembers(cls))
class_dict['__init__'] = func_dict(methods.pop(0))
class_dict['functions'] = map(func_dict, methods)
class_dict['__doc__'] = cls.__doc__
class_dict['__headers__'] = getattr(cls, '__headers__', {})
return class_dict
def func_dict(x):
name, f = x
func_dict = {}
func_dict['name'] = name
func_dict['cache'] = ((hasattr(f, 'cacheable') and f.cacheable))
func_dict['stream'] = ((hasattr(f, 'stream') and f.stream))
a = inspect.getargspec(f)
arg_list_r = reversed(a.args)
defaults_list_r = reversed(a.defaults or [None])
args = reversed(map(None, arg_list_r, defaults_list_r))
args = filter(lambda x: x[0] and x[0] != 'self', args)
func_dict['args'] = args
func_dict['doc'] = f.__doc__
return func_dict
def load(module_name, RELOAD=False):
if module_name == 'pico':
return sys.modules['pico']
if module_name == 'pico.modules':
if module_name in sys.modules:
return sys.modules[module_name]
else:
return sys.modules[__name__]
modules_path = './'
if not sys.path.__contains__(modules_path):
sys.path.insert(0, modules_path)
m = importlib.import_module(module_name)
if RELOAD:
mtime = os.stat(m.__file__.replace('.pyc', '.py')).st_mtime
if _mtimes.get(module_name, mtime) < mtime:
if module_name in sys.modules:
del sys.modules[module_name]
m = importlib.import_module(module_name)
m = reload(m)
print("Reloaded module %s, changed at %s" % (module_name,
time.ctime(mtime)))
_mtimes[module_name] = mtime
if not (hasattr(m, 'pico') and m.pico == pico):
raise ImportError('This module has not imported pico!')
return m
def module_proxy(cls):
module_name = cls.__module__
module = imp.new_module(module_name)
module.pico = pico
def method_filter(x):
(name, f) = x
return ((inspect.isfunction(f) or inspect.ismethod(f))
and (not name.startswith('_') or name == '__init__')
and not hasattr(f, 'private'))
methods = filter(method_filter, inspect.getmembers(cls))
for (name, f) in methods:
setattr(module, name, f)
return module
json_dumpers = {
types.ModuleType: module_dict
}
|
[
"inspect.ismethod",
"importlib.import_module",
"inspect.isclass",
"time.ctime",
"sys.path.insert",
"inspect.getargspec",
"sys.path.__contains__",
"imp.new_module",
"inspect.isfunction",
"inspect.getmembers"
] |
[((235, 261), 'inspect.getmembers', 'inspect.getmembers', (['module'], {}), '(module)\n', (253, 261), False, 'import inspect\n'), ((2083, 2104), 'inspect.getargspec', 'inspect.getargspec', (['f'], {}), '(f)\n', (2101, 2104), False, 'import inspect\n'), ((2795, 2831), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (2818, 2831), False, 'import importlib\n'), ((3518, 3545), 'imp.new_module', 'imp.new_module', (['module_name'], {}), '(module_name)\n', (3532, 3545), False, 'import imp\n'), ((1598, 1621), 'inspect.getmembers', 'inspect.getmembers', (['cls'], {}), '(cls)\n', (1616, 1621), False, 'import inspect\n'), ((2709, 2744), 'sys.path.__contains__', 'sys.path.__contains__', (['modules_path'], {}), '(modules_path)\n', (2730, 2744), False, 'import sys\n'), ((2754, 2786), 'sys.path.insert', 'sys.path.insert', (['(0)', 'modules_path'], {}), '(0, modules_path)\n', (2769, 2786), False, 'import sys\n'), ((3833, 3856), 'inspect.getmembers', 'inspect.getmembers', (['cls'], {}), '(cls)\n', (3851, 3856), False, 'import inspect\n'), ((651, 669), 'inspect.isclass', 'inspect.isclass', (['c'], {}), '(c)\n', (666, 669), False, 'import inspect\n'), ((3071, 3107), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (3094, 3107), False, 'import importlib\n'), ((330, 351), 'inspect.isfunction', 'inspect.isfunction', (['f'], {}), '(f)\n', (348, 351), False, 'import inspect\n'), ((355, 374), 'inspect.ismethod', 'inspect.ismethod', (['f'], {}), '(f)\n', (371, 374), False, 'import inspect\n'), ((1325, 1346), 'inspect.isfunction', 'inspect.isfunction', (['f'], {}), '(f)\n', (1343, 1346), False, 'import inspect\n'), ((1350, 1369), 'inspect.ismethod', 'inspect.ismethod', (['f'], {}), '(f)\n', (1366, 1369), False, 'import inspect\n'), ((3635, 3656), 'inspect.isfunction', 'inspect.isfunction', (['f'], {}), '(f)\n', (3653, 3656), False, 'import inspect\n'), ((3660, 3679), 'inspect.ismethod', 'inspect.ismethod', (['f'], {}), '(f)\n', (3676, 3679), False, 'import inspect\n'), ((3261, 3278), 'time.ctime', 'time.ctime', (['mtime'], {}), '(mtime)\n', (3271, 3278), False, 'import time\n')]
|
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from .models import Movie
def home(request):
searchTerm = request.GET.get('searchMovie')
if searchTerm:
movies = Movie.objects.filter(title__icontains=searchTerm)
else:
movies = Movie.objects.all()
return render(request, 'home.html', {'searchTerm':searchTerm, 'movies': movies})
def about(request):
return HttpResponse('<h1>Welcome to About Page</h1>')
def signup(request):
email = request.GET.get('email')
return render(request, 'signup.html', {'email':email})
def detail(request, movie_id):
movie = get_object_or_404(Movie,pk=movie_id)
return render(request, 'detail.html', {'movie':movie})
|
[
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.http.HttpResponse"
] |
[((358, 432), 'django.shortcuts.render', 'render', (['request', '"""home.html"""', "{'searchTerm': searchTerm, 'movies': movies}"], {}), "(request, 'home.html', {'searchTerm': searchTerm, 'movies': movies})\n", (364, 432), False, 'from django.shortcuts import render\n'), ((464, 510), 'django.http.HttpResponse', 'HttpResponse', (['"""<h1>Welcome to About Page</h1>"""'], {}), "('<h1>Welcome to About Page</h1>')\n", (476, 510), False, 'from django.http import HttpResponse\n'), ((581, 629), 'django.shortcuts.render', 'render', (['request', '"""signup.html"""', "{'email': email}"], {}), "(request, 'signup.html', {'email': email})\n", (587, 629), False, 'from django.shortcuts import render\n'), ((673, 710), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Movie'], {'pk': 'movie_id'}), '(Movie, pk=movie_id)\n', (690, 710), False, 'from django.shortcuts import get_object_or_404\n'), ((721, 769), 'django.shortcuts.render', 'render', (['request', '"""detail.html"""', "{'movie': movie}"], {}), "(request, 'detail.html', {'movie': movie})\n", (727, 769), False, 'from django.shortcuts import render\n')]
|
import ast
import _peg_parser as peg_parser
import unittest
from typing import Any, Union, Iterable, Tuple
from textwrap import dedent
from test import support
TEST_CASES = [
('annotated_assignment', 'x: int = 42'),
('annotated_assignment_with_tuple', 'x: tuple = 1, 2'),
('annotated_assignment_with_parens', '(paren): int = 3+2'),
('annotated_assignment_with_yield', 'x: int = yield 42'),
('annotated_no_assignment', 'x: int'),
('annotation_with_multiple_parens', '((parens)): int'),
('annotation_with_parens', '(parens): int'),
('annotated_assignment_with_attr', 'a.b: int'),
('annotated_assignment_with_subscript', 'a[b]: int'),
('annotated_assignment_with_attr_and_parens', '(a.b): int'),
('annotated_assignment_with_subscript_and_parens', '(a[b]): int'),
('assert', 'assert a'),
('assert_message', 'assert a, b'),
('assignment_false', 'a = False'),
('assignment_none', 'a = None'),
('assignment_true', 'a = True'),
('assignment_paren', '(a) = 42'),
('assignment_paren_multiple', '(a, b) = (0, 1)'),
('asyncfor',
'''
async for i in a:
pass
'''),
('attribute_call', 'a.b()'),
('attribute_multiple_names', 'abcd.efg.hij'),
('attribute_simple', 'a.b'),
('attributes_subscript', 'a.b[0]'),
('augmented_assignment', 'x += 42'),
('augmented_assignment_attribute', 'a.b.c += 42'),
('augmented_assignment_paren', '(x) += 42'),
('augmented_assignment_paren_subscript', '(x[0]) -= 42'),
('binop_add', '1 + 1'),
('binop_add_multiple', '1 + 1 + 1 + 1'),
('binop_all', '1 + 2 * 5 + 3 ** 2 - -3'),
('binop_boolop_comp', '1 + 1 == 2 or 1 + 1 == 3 and not b'),
('boolop_or', 'a or b'),
('boolop_or_multiple', 'a or b or c'),
('class_def_bases',
'''
class C(A, B):
pass
'''),
('class_def_decorators',
'''
@a
class C:
pass
'''),
('class_def_decorator_with_expression',
'''
@lambda x: 42
class C:
pass
'''),
('class_def_decorator_with_expression_and_walrus',
'''
@x:=lambda x: 42
class C:
pass
'''),
('class_def_keywords',
'''
class C(keyword=a+b, **c):
pass
'''),
('class_def_mixed',
'''
class C(A, B, keyword=0, **a):
pass
'''),
('class_def_simple',
'''
class C:
pass
'''),
('class_def_starred_and_kwarg',
'''
class C(A, B, *x, **y):
pass
'''),
('class_def_starred_in_kwargs',
'''
class C(A, x=2, *[B, C], y=3):
pass
'''),
('call_attribute', 'f().b'),
('call_genexp', 'f(i for i in a)'),
('call_mixed_args', 'f(a, b, *c, **d)'),
('call_mixed_args_named', 'f(a, b, *c, d=4, **v)'),
('call_one_arg', 'f(a)'),
('call_posarg_genexp', 'f(a, (i for i in a))'),
('call_simple', 'f()'),
('call_subscript', 'f()[0]'),
('comp', 'a == b'),
('comp_multiple', 'a == b == c'),
('comp_paren_end', 'a == (b-1)'),
('comp_paren_start', '(a-1) == b'),
('decorator',
'''
@a
def f():
pass
'''),
('decorator_async',
'''
@a
async def d():
pass
'''),
('decorator_with_expression',
'''
@lambda x: 42
def f():
pass
'''),
('decorator_with_expression_and_walrus',
'''
@x:=lambda x: 42
def f():
pass
'''),
('del_attribute', 'del a.b'),
('del_call_attribute', 'del a().c'),
('del_call_genexp_attribute', 'del a(i for i in b).c'),
('del_empty', 'del()'),
('del_list', 'del a, [b, c]'),
('del_mixed', 'del a[0].b().c'),
('del_multiple', 'del a, b'),
('del_multiple_calls_attribute', 'del a()().b'),
('del_paren', 'del(a,b)'),
('del_paren_single_target', 'del(a)'),
('del_subscript_attribute', 'del a[0].b'),
('del_tuple', 'del a, (b, c)'),
('delete', 'del a'),
('dict',
'''
{
a: 1,
b: 2,
c: 3
}
'''),
('dict_comp', '{x:1 for x in a}'),
('dict_comp_if', '{x:1+2 for x in a if b}'),
('dict_empty', '{}'),
('for',
'''
for i in a:
pass
'''),
('for_else',
'''
for i in a:
pass
else:
pass
'''),
('for_star_target_in_paren', 'for (a) in b: pass'),
('for_star_targets_attribute', 'for a.b in c: pass'),
('for_star_targets_call_attribute', 'for a().c in b: pass'),
('for_star_targets_empty', 'for () in a: pass'),
('for_star_targets_mixed', 'for a[0].b().c in d: pass'),
('for_star_targets_mixed_starred',
'''
for a, *b, (c, d) in e:
pass
'''),
('for_star_targets_multiple', 'for a, b in c: pass'),
('for_star_targets_nested_starred', 'for *[*a] in b: pass'),
('for_star_targets_starred', 'for *a in b: pass'),
('for_star_targets_subscript_attribute', 'for a[0].b in c: pass'),
('for_star_targets_trailing_comma',
'''
for a, (b, c), in d:
pass
'''),
('for_star_targets_tuple', 'for a, (b, c) in d: pass'),
('for_underscore',
'''
for _ in a:
pass
'''),
('function_return_type',
'''
def f() -> Any:
pass
'''),
('f-string_slice', "f'{x[2]}'"),
('f-string_slice_upper', "f'{x[2:3]}'"),
('f-string_slice_step', "f'{x[2:3:-2]}'"),
('f-string_constant', "f'{42}'"),
('f-string_boolop', "f'{x and y}'"),
('f-string_named_expr', "f'{(x:=42)}'"),
('f-string_binop', "f'{x+y}'"),
('f-string_unaryop', "f'{not x}'"),
('f-string_lambda', "f'{(lambda x, /, y, y2=42 , *z, k1, k2=34, **k3: 42)}'"),
('f-string_lambda_call', "f'{(lambda: 2)(2)}'"),
('f-string_ifexpr', "f'{x if y else z}'"),
('f-string_dict', "f'{ {2:34, 3:34} }'"),
('f-string_set', "f'{ {2,-45} }'"),
('f-string_list', "f'{ [2,-45] }'"),
('f-string_tuple', "f'{ (2,-45) }'"),
('f-string_listcomp', "f'{[x for x in y if z]}'"),
('f-string_setcomp', "f'{ {x for x in y if z} }'"),
('f-string_dictcomp', "f'{ {x:x for x in y if z} }'"),
('f-string_genexpr', "f'{ (x for x in y if z) }'"),
('f-string_yield', "f'{ (yield x) }'"),
('f-string_yieldfrom', "f'{ (yield from x) }'"),
('f-string_await', "f'{ await x }'"),
('f-string_compare', "f'{ x == y }'"),
('f-string_call', "f'{ f(x,y,z) }'"),
('f-string_attribute', "f'{ f.x.y.z }'"),
('f-string_starred', "f'{ *x, }'"),
('f-string_doublestarred', "f'{ {**x} }'"),
('f-string_escape_brace', "f'{{Escape'"),
('f-string_escape_closing_brace', "f'Escape}}'"),
('f-string_repr', "f'{a!r}'"),
('f-string_str', "f'{a!s}'"),
('f-string_ascii', "f'{a!a}'"),
('f-string_debug', "f'{a=}'"),
('f-string_padding', "f'{a:03d}'"),
('f-string_multiline',
"""
f'''
{hello}
'''
"""),
('f-string_multiline_in_expr',
"""
f'''
{
hello
}
'''
"""),
('f-string_multiline_in_call',
"""
f'''
{f(
a, b, c
)}
'''
"""),
('global', 'global a, b'),
('group', '(yield a)'),
('if_elif',
'''
if a:
pass
elif b:
pass
'''),
('if_elif_elif',
'''
if a:
pass
elif b:
pass
elif c:
pass
'''),
('if_elif_else',
'''
if a:
pass
elif b:
pass
else:
pass
'''),
('if_else',
'''
if a:
pass
else:
pass
'''),
('if_simple', 'if a: pass'),
('import', 'import a'),
('import_alias', 'import a as b'),
('import_dotted', 'import a.b'),
('import_dotted_alias', 'import a.b as c'),
('import_dotted_multichar', 'import ab.cd'),
('import_from', 'from a import b'),
('import_from_alias', 'from a import b as c'),
('import_from_dotted', 'from a.b import c'),
('import_from_dotted_alias', 'from a.b import c as d'),
('import_from_multiple_aliases', 'from a import b as c, d as e'),
('import_from_one_dot', 'from .a import b'),
('import_from_one_dot_alias', 'from .a import b as c'),
('import_from_star', 'from a import *'),
('import_from_three_dots', 'from ...a import b'),
('import_from_trailing_comma', 'from a import (b,)'),
('kwarg',
'''
def f(**a):
pass
'''),
('kwonly_args',
'''
def f(*, a, b):
pass
'''),
('kwonly_args_with_default',
'''
def f(*, a=2, b):
pass
'''),
('lambda_kwarg', 'lambda **a: 42'),
('lambda_kwonly_args', 'lambda *, a, b: 42'),
('lambda_kwonly_args_with_default', 'lambda *, a=2, b: 42'),
('lambda_mixed_args', 'lambda a, /, b, *, c: 42'),
('lambda_mixed_args_with_default', 'lambda a, b=2, /, c=3, *e, f, **g: 42'),
('lambda_no_args', 'lambda: 42'),
('lambda_pos_args', 'lambda a,b: 42'),
('lambda_pos_args_with_default', 'lambda a, b=2: 42'),
('lambda_pos_only_args', 'lambda a, /: 42'),
('lambda_pos_only_args_with_default', 'lambda a=0, /: 42'),
('lambda_pos_posonly_args', 'lambda a, b, /, c, d: 42'),
('lambda_pos_posonly_args_with_default', 'lambda a, b=0, /, c=2: 42'),
('lambda_vararg', 'lambda *a: 42'),
('lambda_vararg_kwonly_args', 'lambda *a, b: 42'),
('list', '[1, 2, a]'),
('list_comp', '[i for i in a]'),
('list_comp_if', '[i for i in a if b]'),
('list_trailing_comma', '[1+2, a, 3+4,]'),
('mixed_args',
'''
def f(a, /, b, *, c):
pass
'''),
('mixed_args_with_default',
'''
def f(a, b=2, /, c=3, *e, f, **g):
pass
'''),
('multipart_string_bytes', 'b"Hola" b"Hello" b"Bye"'),
('multipart_string_triple', '"""Something here""" "and now"'),
('multipart_string_different_prefixes', 'u"Something" "Other thing" r"last thing"'),
('multiple_assignments', 'x = y = z = 42'),
('multiple_assignments_with_yield', 'x = y = z = yield 42'),
('multiple_pass',
'''
pass; pass
pass
'''),
('namedexpr', '(x := [1, 2, 3])'),
('namedexpr_false', '(x := False)'),
('namedexpr_none', '(x := None)'),
('namedexpr_true', '(x := True)'),
('nonlocal', 'nonlocal a, b'),
('number_complex', '-2.234+1j'),
('number_float', '-34.2333'),
('number_imaginary_literal', '1.1234j'),
('number_integer', '-234'),
('number_underscores', '1_234_567'),
('pass', 'pass'),
('pos_args',
'''
def f(a, b):
pass
'''),
('pos_args_with_default',
'''
def f(a, b=2):
pass
'''),
('pos_only_args',
'''
def f(a, /):
pass
'''),
('pos_only_args_with_default',
'''
def f(a=0, /):
pass
'''),
('pos_posonly_args',
'''
def f(a, b, /, c, d):
pass
'''),
('pos_posonly_args_with_default',
'''
def f(a, b=0, /, c=2):
pass
'''),
('primary_mixed', 'a.b.c().d[0]'),
('raise', 'raise'),
('raise_ellipsis', 'raise ...'),
('raise_expr', 'raise a'),
('raise_from', 'raise a from b'),
('return', 'return'),
('return_expr', 'return a'),
('set', '{1, 2+4, 3+5}'),
('set_comp', '{i for i in a}'),
('set_trailing_comma', '{1, 2, 3,}'),
('simple_assignment', 'x = 42'),
('simple_assignment_with_yield', 'x = yield 42'),
('string_bytes', 'b"hello"'),
('string_concatenation_bytes', 'b"hello" b"world"'),
('string_concatenation_simple', '"abcd" "efgh"'),
('string_format_simple', 'f"hello"'),
('string_format_with_formatted_value', 'f"hello {world}"'),
('string_simple', '"hello"'),
('string_unicode', 'u"hello"'),
('subscript_attribute', 'a[0].b'),
('subscript_call', 'a[b]()'),
('subscript_multiple_slices', 'a[0:a:2, 1]'),
('subscript_simple', 'a[0]'),
('subscript_single_element_tuple', 'a[0,]'),
('subscript_trailing_comma', 'a[0, 1, 2,]'),
('subscript_tuple', 'a[0, 1, 2]'),
('subscript_whole_slice', 'a[0+1:b:c]'),
('try_except',
'''
try:
pass
except:
pass
'''),
('try_except_else',
'''
try:
pass
except:
pass
else:
pass
'''),
('try_except_else_finally',
'''
try:
pass
except:
pass
else:
pass
finally:
pass
'''),
('try_except_expr',
'''
try:
pass
except a:
pass
'''),
('try_except_expr_target',
'''
try:
pass
except a as b:
pass
'''),
('try_except_finally',
'''
try:
pass
except:
pass
finally:
pass
'''),
('try_finally',
'''
try:
pass
finally:
pass
'''),
('unpacking_binop', '[*([1, 2, 3] + [3, 4, 5])]'),
('unpacking_call', '[*b()]'),
('unpacking_compare', '[*(x < y)]'),
('unpacking_constant', '[*3]'),
('unpacking_dict', '[*{1: 2, 3: 4}]'),
('unpacking_dict_comprehension', '[*{x:y for x,y in z}]'),
('unpacking_ifexpr', '[*([1, 2, 3] if x else y)]'),
('unpacking_list', '[*[1,2,3]]'),
('unpacking_list_comprehension', '[*[x for x in y]]'),
('unpacking_namedexpr', '[*(x:=[1, 2, 3])]'),
('unpacking_set', '[*{1,2,3}]'),
('unpacking_set_comprehension', '[*{x for x in y}]'),
('unpacking_string', '[*"myvalue"]'),
('unpacking_tuple', '[*(1,2,3)]'),
('unpacking_unaryop', '[*(not [1, 2, 3])]'),
('unpacking_yield', '[*(yield 42)]'),
('unpacking_yieldfrom', '[*(yield from x)]'),
('tuple', '(1, 2, 3)'),
('vararg',
'''
def f(*a):
pass
'''),
('vararg_kwonly_args',
'''
def f(*a, b):
pass
'''),
('while',
'''
while a:
pass
'''),
('while_else',
'''
while a:
pass
else:
pass
'''),
('with',
'''
with a:
pass
'''),
('with_as',
'''
with a as b:
pass
'''),
('with_as_paren',
'''
with a as (b):
pass
'''),
('with_as_empty', 'with a as (): pass'),
('with_list_recursive',
'''
with a as [x, [y, z]]:
pass
'''),
('with_tuple_recursive',
'''
with a as ((x, y), z):
pass
'''),
('with_tuple_target',
'''
with a as (x, y):
pass
'''),
('with_list_target',
'''
with a as [x, y]:
pass
'''),
('yield', 'yield'),
('yield_expr', 'yield a'),
('yield_from', 'yield from a'),
]
FAIL_TEST_CASES = [
("annotation_multiple_targets", "(a, b): int = 42"),
("annotation_nested_tuple", "((a, b)): int"),
("annotation_list", "[a]: int"),
("annotation_lambda", "lambda: int = 42"),
("annotation_tuple", "(a,): int"),
("annotation_tuple_without_paren", "a,: int"),
("assignment_keyword", "a = if"),
("augmented_assignment_list", "[a, b] += 1"),
("augmented_assignment_tuple", "a, b += 1"),
("augmented_assignment_tuple_paren", "(a, b) += (1, 2)"),
("comprehension_lambda", "(a for a in lambda: b)"),
("comprehension_else", "(a for a in b if c else d"),
("del_call", "del a()"),
("del_call_genexp", "del a(i for i in b)"),
("del_subscript_call", "del a[b]()"),
("del_attribute_call", "del a.b()"),
("del_mixed_call", "del a[0].b().c.d()"),
("for_star_targets_call", "for a() in b: pass"),
("for_star_targets_subscript_call", "for a[b]() in c: pass"),
("for_star_targets_attribute_call", "for a.b() in c: pass"),
("for_star_targets_mixed_call", "for a[0].b().c.d() in e: pass"),
("for_star_targets_in", "for a, in in b: pass"),
("f-string_assignment", "f'{x = 42}'"),
("f-string_empty", "f'{}'"),
("f-string_function_def", "f'{def f(): pass}'"),
("f-string_lambda", "f'{lambda x: 42}'"),
("f-string_singe_brace", "f'{'"),
("f-string_single_closing_brace", "f'}'"),
("from_import_invalid", "from import import a"),
("from_import_trailing_comma", "from a import b,"),
# This test case checks error paths involving tokens with uninitialized
# values of col_offset and end_col_offset.
("invalid indentation",
"""
def f():
a
a
"""),
("not_terminated_string", "a = 'example"),
("try_except_attribute_target",
"""
try:
pass
except Exception as a.b:
pass
"""),
("try_except_subscript_target",
"""
try:
pass
except Exception as a[0]:
pass
"""),
]
FAIL_SPECIALIZED_MESSAGE_CASES = [
("f(x, y, z=1, **b, *a", "iterable argument unpacking follows keyword argument unpacking"),
("f(x, y=1, *z, **a, b", "positional argument follows keyword argument unpacking"),
("f(x, y, z=1, a=2, b", "positional argument follows keyword argument"),
("True = 1", "cannot assign to True"),
("a() = 1", "cannot assign to function call"),
("(a, b): int", "only single target (not tuple) can be annotated"),
("[a, b]: int", "only single target (not list) can be annotated"),
("a(): int", "illegal target for annotation"),
("1 += 1", "'literal' is an illegal expression for augmented assignment"),
("pass\n pass", "unexpected indent"),
("def f():\npass", "expected an indented block"),
("def f(*): pass", "named arguments must follow bare *"),
("def f(*,): pass", "named arguments must follow bare *"),
("def f(*, **a): pass", "named arguments must follow bare *"),
("lambda *: pass", "named arguments must follow bare *"),
("lambda *,: pass", "named arguments must follow bare *"),
("lambda *, **a: pass", "named arguments must follow bare *"),
("f(g()=2", "expression cannot contain assignment, perhaps you meant \"==\"?"),
("f(a, b, *c, d.e=2", "expression cannot contain assignment, perhaps you meant \"==\"?"),
("f(*a, **b, c=0, d[1]=3)", "expression cannot contain assignment, perhaps you meant \"==\"?"),
]
GOOD_BUT_FAIL_TEST_CASES = [
('string_concatenation_format', 'f"{hello} world" f"again {and_again}"'),
('string_concatenation_multiple',
'''
f"hello" f"{world} again" f"and_again"
'''),
('f-string_multiline_comp',
"""
f'''
{(i for i in a
if b)}
'''
"""),
]
FSTRINGS_TRACEBACKS = {
'multiline_fstrings_same_line_with_brace': (
"""
f'''
{a$b}
'''
""",
'(a$b)',
),
'multiline_fstring_brace_on_next_line': (
"""
f'''
{a$b
}'''
""",
'(a$b',
),
'multiline_fstring_brace_on_previous_line': (
"""
f'''
{
a$b}'''
""",
'a$b)',
),
}
EXPRESSIONS_TEST_CASES = [
("expression_add", "1+1"),
("expression_add_2", "a+b"),
("expression_call", "f(a, b=2, **kw)"),
("expression_tuple", "1, 2, 3"),
("expression_tuple_one_value", "1,")
]
def cleanup_source(source: Any) -> str:
if isinstance(source, str):
result = dedent(source)
elif not isinstance(source, (list, tuple)):
result = "\n".join(source)
else:
raise TypeError(f"Invalid type for test source: {source}")
return result
def prepare_test_cases(
test_cases: Iterable[Tuple[str, Union[str, Iterable[str]]]]
) -> Tuple[Iterable[str], Iterable[str]]:
test_ids, _test_sources = zip(*test_cases)
test_sources = list(_test_sources)
for index, source in enumerate(test_sources):
result = cleanup_source(source)
test_sources[index] = result
return test_ids, test_sources
TEST_IDS, TEST_SOURCES = prepare_test_cases(TEST_CASES)
GOOD_BUT_FAIL_TEST_IDS, GOOD_BUT_FAIL_SOURCES = prepare_test_cases(
GOOD_BUT_FAIL_TEST_CASES
)
FAIL_TEST_IDS, FAIL_SOURCES = prepare_test_cases(FAIL_TEST_CASES)
EXPRESSIONS_TEST_IDS, EXPRESSIONS_TEST_SOURCES = prepare_test_cases(
EXPRESSIONS_TEST_CASES
)
class ASTGenerationTest(unittest.TestCase):
def test_correct_ast_generation_on_source_files(self) -> None:
self.maxDiff = None
for source in TEST_SOURCES:
actual_ast = peg_parser.parse_string(source)
expected_ast = peg_parser.parse_string(source, oldparser=True)
self.assertEqual(
ast.dump(actual_ast, include_attributes=True),
ast.dump(expected_ast, include_attributes=True),
f"Wrong AST generation for source: {source}",
)
def test_incorrect_ast_generation_on_source_files(self) -> None:
for source in FAIL_SOURCES:
with self.assertRaises(SyntaxError, msg=f"Parsing {source} did not raise an exception"):
peg_parser.parse_string(source)
def test_incorrect_ast_generation_with_specialized_errors(self) -> None:
for source, error_text in FAIL_SPECIALIZED_MESSAGE_CASES:
exc = IndentationError if "indent" in error_text else SyntaxError
with self.assertRaises(exc) as se:
peg_parser.parse_string(source)
self.assertTrue(
error_text in se.exception.msg,
f"Actual error message does not match expexted for {source}"
)
@unittest.expectedFailure
def test_correct_but_known_to_fail_ast_generation_on_source_files(self) -> None:
for source in GOOD_BUT_FAIL_SOURCES:
actual_ast = peg_parser.parse_string(source)
expected_ast = peg_parser.parse_string(source, oldparser=True)
self.assertEqual(
ast.dump(actual_ast, include_attributes=True),
ast.dump(expected_ast, include_attributes=True),
f"Wrong AST generation for source: {source}",
)
def test_correct_ast_generation_without_pos_info(self) -> None:
for source in GOOD_BUT_FAIL_SOURCES:
actual_ast = peg_parser.parse_string(source)
expected_ast = peg_parser.parse_string(source, oldparser=True)
self.assertEqual(
ast.dump(actual_ast),
ast.dump(expected_ast),
f"Wrong AST generation for source: {source}",
)
def test_fstring_parse_error_tracebacks(self) -> None:
for source, error_text in FSTRINGS_TRACEBACKS.values():
with self.assertRaises(SyntaxError) as se:
peg_parser.parse_string(dedent(source))
self.assertEqual(error_text, se.exception.text)
    def test_correct_ast_generation_eval(self) -> None:
for source in EXPRESSIONS_TEST_SOURCES:
actual_ast = peg_parser.parse_string(source, mode='eval')
expected_ast = peg_parser.parse_string(source, mode='eval', oldparser=True)
self.assertEqual(
ast.dump(actual_ast, include_attributes=True),
ast.dump(expected_ast, include_attributes=True),
f"Wrong AST generation for source: {source}",
)
def test_tokenizer_errors_are_propagated(self) -> None:
n=201
with self.assertRaisesRegex(SyntaxError, "too many nested parentheses"):
peg_parser.parse_string(n*'(' + ')'*n)
|
[
"textwrap.dedent",
"ast.dump",
"_peg_parser.parse_string"
] |
[((19758, 19772), 'textwrap.dedent', 'dedent', (['source'], {}), '(source)\n', (19764, 19772), False, 'from textwrap import dedent\n'), ((20857, 20888), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {}), '(source)\n', (20880, 20888), True, 'import _peg_parser as peg_parser\n'), ((20916, 20963), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {'oldparser': '(True)'}), '(source, oldparser=True)\n', (20939, 20963), True, 'import _peg_parser as peg_parser\n'), ((22124, 22155), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {}), '(source)\n', (22147, 22155), True, 'import _peg_parser as peg_parser\n'), ((22183, 22230), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {'oldparser': '(True)'}), '(source, oldparser=True)\n', (22206, 22230), True, 'import _peg_parser as peg_parser\n'), ((22604, 22635), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {}), '(source)\n', (22627, 22635), True, 'import _peg_parser as peg_parser\n'), ((22663, 22710), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {'oldparser': '(True)'}), '(source, oldparser=True)\n', (22686, 22710), True, 'import _peg_parser as peg_parser\n'), ((23321, 23365), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {'mode': '"""eval"""'}), "(source, mode='eval')\n", (23344, 23365), True, 'import _peg_parser as peg_parser\n'), ((23393, 23453), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {'mode': '"""eval"""', 'oldparser': '(True)'}), "(source, mode='eval', oldparser=True)\n", (23416, 23453), True, 'import _peg_parser as peg_parser\n'), ((23856, 23898), '_peg_parser.parse_string', 'peg_parser.parse_string', (["(n * '(' + ')' * n)"], {}), "(n * '(' + ')' * n)\n", (23879, 23898), True, 'import _peg_parser as peg_parser\n'), ((21010, 21055), 'ast.dump', 'ast.dump', (['actual_ast'], {'include_attributes': '(True)'}), '(actual_ast, include_attributes=True)\n', (21018, 21055), False, 'import ast\n'), ((21073, 21120), 'ast.dump', 'ast.dump', (['expected_ast'], {'include_attributes': '(True)'}), '(expected_ast, include_attributes=True)\n', (21081, 21120), False, 'import ast\n'), ((21421, 21452), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {}), '(source)\n', (21444, 21452), True, 'import _peg_parser as peg_parser\n'), ((21738, 21769), '_peg_parser.parse_string', 'peg_parser.parse_string', (['source'], {}), '(source)\n', (21761, 21769), True, 'import _peg_parser as peg_parser\n'), ((22277, 22322), 'ast.dump', 'ast.dump', (['actual_ast'], {'include_attributes': '(True)'}), '(actual_ast, include_attributes=True)\n', (22285, 22322), False, 'import ast\n'), ((22340, 22387), 'ast.dump', 'ast.dump', (['expected_ast'], {'include_attributes': '(True)'}), '(expected_ast, include_attributes=True)\n', (22348, 22387), False, 'import ast\n'), ((22757, 22777), 'ast.dump', 'ast.dump', (['actual_ast'], {}), '(actual_ast)\n', (22765, 22777), False, 'import ast\n'), ((22795, 22817), 'ast.dump', 'ast.dump', (['expected_ast'], {}), '(expected_ast)\n', (22803, 22817), False, 'import ast\n'), ((23500, 23545), 'ast.dump', 'ast.dump', (['actual_ast'], {'include_attributes': '(True)'}), '(actual_ast, include_attributes=True)\n', (23508, 23545), False, 'import ast\n'), ((23563, 23610), 'ast.dump', 'ast.dump', (['expected_ast'], {'include_attributes': '(True)'}), '(expected_ast, include_attributes=True)\n', (23571, 23610), False, 'import ast\n'), ((23114, 23128), 'textwrap.dedent', 'dedent', (['source'], {}), '(source)\n', (23120, 23128), False, 'from textwrap import dedent\n')]
|
"""
Saves preprocessed text so external scorers can operate on files.
BLEU and METEOR are flexible about where things are located, but ROUGE is more
strict, so we follow rouge conventions.
"""
# builtins
import code
import shutil
import os
import tempfile
from typing import Optional
# local
from textmetrics.common import References, Candidates
# LOL!
REF_LABELS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def save(references: Optional[References], candidates: Candidates) -> None:
# ROUGE has the strictest requirements for how things are laid out, so
# we're using their scheme for saving the temporary files.
#
# Model (reference) directory:
# - reference.A.001.txt (ref 1)
# - reference.B.001.txt (ref 2)
# - ...
# - reference.N.001.txt (ref N)
#
# System (candidate) 1 directory:
# - candidate.001.txt
#
# System (candidate) 2 directory:
# - candidate.001.txt
#
# ...
#
# System (candidate) M directory:
# - candidate.001.txt
# save candidates. Each lives in its own dir and gets a single file.
for cCorpus in candidates['corpora'].values():
cCorpus['tmpdir'] = tempfile.mkdtemp()
cCorpus['tmpfile'] = os.path.join(
cCorpus['tmpdir'], 'candidate.001.txt')
with open(cCorpus['tmpfile'], 'w') as f:
f.write(cCorpus['contents'])
# save references if they exist
if references is None:
return
references['tmpdir'] = tempfile.mkdtemp()
for i, rCorpus in enumerate(references['corpora'].values()):
rCorpus['tmpfile'] = os.path.join( # type: ignore
references['tmpdir'],
'reference.{}.001.txt'.format(REF_LABELS[i])
)
with open(rCorpus['tmpfile'], 'w') as f:
f.write(rCorpus['contents'])
def cleanup(references: Optional[References], candidates: Candidates) -> None:
# cleanup candidates' tmp files
for cCorpus in candidates['corpora'].values():
shutil.rmtree(cCorpus['tmpdir'])
# cleanup references' tmp files if they exist
if references is None or references['tmpdir'] is None:
return
shutil.rmtree(references['tmpdir'])
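# Illustrative call order (a sketch; `refs` and `cands` are hypothetical
# dicts shaped like the References/Candidates structures used above):
#
#     save(refs, cands)     # lays out reference.A.001.txt ... candidate.001.txt
#     # ... run the external BLEU/METEOR/ROUGE scorers over the tmp dirs ...
#     cleanup(refs, cands)  # removes all temporary directories again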
|
[
"shutil.rmtree",
"tempfile.mkdtemp",
"os.path.join"
] |
[((1574, 1592), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1590, 1592), False, 'import tempfile\n'), ((2246, 2281), 'shutil.rmtree', 'shutil.rmtree', (["references['tmpdir']"], {}), "(references['tmpdir'])\n", (2259, 2281), False, 'import shutil\n'), ((1264, 1282), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1280, 1282), False, 'import tempfile\n'), ((1312, 1364), 'os.path.join', 'os.path.join', (["cCorpus['tmpdir']", '"""candidate.001.txt"""'], {}), "(cCorpus['tmpdir'], 'candidate.001.txt')\n", (1324, 1364), False, 'import os\n'), ((2084, 2116), 'shutil.rmtree', 'shutil.rmtree', (["cCorpus['tmpdir']"], {}), "(cCorpus['tmpdir'])\n", (2097, 2116), False, 'import shutil\n')]
|
from ckan import plugins
from ckanext.ytp_tasks import logic
from .cli import get_commands
class YtpTasksPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurable)
plugins.implements(plugins.IActions)
plugins.implements(plugins.IAuthFunctions)
plugins.implements(plugins.IClick)
def configure(self, config):
pass
def get_actions(self):
return {'ytp_tasks_add': logic.action_ytp_tasks_add}
def get_auth_functions(self):
return {'ytp_tasks_add': logic.auth_ytp_tasks_add}
def get_commands(self):
return get_commands()
|
[
"ckan.plugins.implements"
] |
[((145, 186), 'ckan.plugins.implements', 'plugins.implements', (['plugins.IConfigurable'], {}), '(plugins.IConfigurable)\n', (163, 186), False, 'from ckan import plugins\n'), ((191, 227), 'ckan.plugins.implements', 'plugins.implements', (['plugins.IActions'], {}), '(plugins.IActions)\n', (209, 227), False, 'from ckan import plugins\n'), ((232, 274), 'ckan.plugins.implements', 'plugins.implements', (['plugins.IAuthFunctions'], {}), '(plugins.IAuthFunctions)\n', (250, 274), False, 'from ckan import plugins\n'), ((279, 313), 'ckan.plugins.implements', 'plugins.implements', (['plugins.IClick'], {}), '(plugins.IClick)\n', (297, 313), False, 'from ckan import plugins\n')]
|
#!/usr/local/bin/python3.5
from time import sleep
from multiprocessing import Lock
import logging
import pychromecast
import bot
POLL_INTERVAL = 1800
class ChromecastListener(object):
def __init__(self, player, bot):
self._song = None
self._player = player
self._bot = bot
self._lock = Lock()
def new_media_status(self, status):
logging.debug("[%s] Got new_media_status %s" % (self._player, status.player_state))
if status.player_state != 'PLAYING':
logging.debug("[%s] Skipping due to status" % (self._player,))
return
song = status.media_metadata.get('songName', status.media_metadata['title'])
self._lock.acquire(True)
try:
if song == self._song:
logging.debug("[%s] Skipping due to same song again (%s)" % (self._player, self._song))
return
self._song = song
finally:
self._lock.release()
logging.info("Posting song %s" % (self._song, ))
artist = "?"
try:
artist = status.media_metadata['artist']
except:
logging.exception("Failed to get artist")
image = ""
try:
image = status.media_metadata['images'][0]['url']
except:
logging.exception("Failed to get image")
try:
self.postSong(artist, song, image)
except Exception as e:
logging.exception("Failed to post song")
def postSong(self, artist, song_name, image=None):
logging.info("[%s]\t%s - %s (%s)" % (self._player, artist, song_name, image))
self._bot.sayEx("%s - %s" % (song_name, artist), image, self._player)
def active_devices():
casts, _browser = pychromecast.get_chromecasts()
return casts
class ChromecastManager(object):
def __init__(self, bot):
self.active_list = {}
self.bot = bot
def poll(self):
for chromecast in active_devices():
if chromecast.uuid in self.active_list:
continue
self.register(chromecast)
    def register(self, cs):
        # check before use: cs.device would raise if cs were None
        if cs is None:
            logging.error("[%s] Registration failed" % (cs, ))
            return
        chromecast = cs.device.friendly_name
        l = ChromecastListener(chromecast, self.bot)
cs.wait()
mc = cs.media_controller
mc.register_status_listener(l)
logging.info("[%s] Registered" % (chromecast, ))
self.active_list[cs.uuid] = [l, mc]
def main():
m = ChromecastManager(bot.Bot())
while True:
m.poll()
sleep(POLL_INTERVAL)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
[
"logging.error",
"logging.exception",
"logging.debug",
"logging.basicConfig",
"multiprocessing.Lock",
"bot.Bot",
"time.sleep",
"logging.info",
"pychromecast.get_chromecasts"
] |
[((1787, 1817), 'pychromecast.get_chromecasts', 'pychromecast.get_chromecasts', ([], {}), '()\n', (1815, 1817), False, 'import pychromecast\n'), ((2707, 2746), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2726, 2746), False, 'import logging\n'), ((326, 332), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (330, 332), False, 'from multiprocessing import Lock\n'), ((382, 470), 'logging.debug', 'logging.debug', (["('[%s] Got new_media_status %s' % (self._player, status.player_state))"], {}), "('[%s] Got new_media_status %s' % (self._player, status.\n player_state))\n", (395, 470), False, 'import logging\n'), ((996, 1043), 'logging.info', 'logging.info', (["('Posting song %s' % (self._song,))"], {}), "('Posting song %s' % (self._song,))\n", (1008, 1043), False, 'import logging\n'), ((1586, 1663), 'logging.info', 'logging.info', (["('[%s]\\t%s - %s (%s)' % (self._player, artist, song_name, image))"], {}), "('[%s]\\t%s - %s (%s)' % (self._player, artist, song_name, image))\n", (1598, 1663), False, 'import logging\n'), ((2470, 2517), 'logging.info', 'logging.info', (["('[%s] Registered' % (chromecast,))"], {}), "('[%s] Registered' % (chromecast,))\n", (2482, 2517), False, 'import logging\n'), ((2602, 2611), 'bot.Bot', 'bot.Bot', ([], {}), '()\n', (2609, 2611), False, 'import bot\n'), ((2654, 2674), 'time.sleep', 'sleep', (['POLL_INTERVAL'], {}), '(POLL_INTERVAL)\n', (2659, 2674), False, 'from time import sleep\n'), ((523, 585), 'logging.debug', 'logging.debug', (["('[%s] Skipping due to status' % (self._player,))"], {}), "('[%s] Skipping due to status' % (self._player,))\n", (536, 585), False, 'import logging\n'), ((2294, 2351), 'logging.error', 'logging.error', (["('[%s] Registration failed' % (chromecast,))"], {}), "('[%s] Registration failed' % (chromecast,))\n", (2307, 2351), False, 'import logging\n'), ((796, 887), 'logging.debug', 'logging.debug', (["('[%s] Skipping due to same song again (%s)' % (self._player, self._song))"], {}), "('[%s] Skipping due to same song again (%s)' % (self._player,\n self._song))\n", (809, 887), False, 'import logging\n'), ((1160, 1201), 'logging.exception', 'logging.exception', (['"""Failed to get artist"""'], {}), "('Failed to get artist')\n", (1177, 1201), False, 'import logging\n'), ((1337, 1377), 'logging.exception', 'logging.exception', (['"""Failed to get image"""'], {}), "('Failed to get image')\n", (1354, 1377), False, 'import logging\n'), ((1481, 1521), 'logging.exception', 'logging.exception', (['"""Failed to post song"""'], {}), "('Failed to post song')\n", (1498, 1521), False, 'import logging\n')]
|
import os
import scipy.misc
import numpy as np
from partAE_model import PAE
import tensorflow as tf
import h5py
flags = tf.app.flags
flags.DEFINE_integer("epoch", 100, "Epoch to train")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate for adam")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_string("dataset", "03001627_vox", "The name of dataset")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("data_dir", "./03001627_Chair/03001627_sampling_erode0.05_256", "Root directory of dataset [data]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("FeedforwardTrainSet", False, "feed Training Set as test input")
flags.DEFINE_integer("ptsBatchSize", 16384, "point samples batch size")
flags.DEFINE_integer("partLabel", 1, "")
flags.DEFINE_string("gpu", '0', "")
##
flags.DEFINE_boolean("debug", False, "")
###
flags.DEFINE_string("workdir", "./default", "")
FLAGS = flags.FLAGS
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu
print(FLAGS)
def main(_):
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
with tf.Session(config=run_config) as sess:
pae = PAE(
sess,
256,
FLAGS,
is_training = FLAGS.train,
dataset_name=FLAGS.dataset,
checkpoint_dir=FLAGS.checkpoint_dir,
data_dir=FLAGS.data_dir,
partLabel=FLAGS.partLabel,
ptsBatchSize=FLAGS.ptsBatchSize )
if FLAGS.train:
pae.train(learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
for_debug=FLAGS.debug,
epochNum=FLAGS.epoch )
else:
pae.test( specEpoch=FLAGS.epoch )
if __name__ == '__main__':
tf.app.run()
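# Illustrative invocation (a sketch; the script name is hypothetical, the
# flag names come from the definitions above):
#
#     python train_pae.py --train=True --dataset=03001627_vox --gpu=0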
|
[
"tensorflow.ConfigProto",
"tensorflow.Session",
"tensorflow.app.run",
"partAE_model.PAE"
] |
[((1199, 1215), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1213, 1215), True, 'import tensorflow as tf\n'), ((1778, 1790), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (1788, 1790), True, 'import tensorflow as tf\n'), ((1265, 1294), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (1275, 1294), True, 'import tensorflow as tf\n'), ((1312, 1517), 'partAE_model.PAE', 'PAE', (['sess', '(256)', 'FLAGS'], {'is_training': 'FLAGS.train', 'dataset_name': 'FLAGS.dataset', 'checkpoint_dir': 'FLAGS.checkpoint_dir', 'data_dir': 'FLAGS.data_dir', 'partLabel': 'FLAGS.partLabel', 'ptsBatchSize': 'FLAGS.ptsBatchSize'}), '(sess, 256, FLAGS, is_training=FLAGS.train, dataset_name=FLAGS.dataset,\n checkpoint_dir=FLAGS.checkpoint_dir, data_dir=FLAGS.data_dir, partLabel\n =FLAGS.partLabel, ptsBatchSize=FLAGS.ptsBatchSize)\n', (1315, 1517), False, 'from partAE_model import PAE\n')]
|
#####################################################################
# #
#                 This code written by <NAME>                      #
# Using the tutorial from Web Scraping with Python #
# You may use this code to retrieve information about #
# shows from IMDB and create a spread sheet #
# I then sorted using Excel, because it was simple #
# Please use this scraper carefully and don't overload #
# IMDB servers too much. #
# Thanks, enjoy! #
# #
#####################################################################
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
import json
import csv
'''
In case I ever decide to put error catching in
def getTitle(url):
try:
html = urlopen(url)
except HTTPError as e:
return None
try:
bsObj = BeautifulSoup(html.read(), "lxml")
title = bsObj.body.h3
except AttributeError as e:
return None
return title
'''
#Retrieve all of the seasons from the show and return a list of the seasons
def getAllLinks(firstLink, allLinks):
html = urlopen(firstLink)
bs = BeautifulSoup(html, 'html.parser')
for link in bs.find_all('a', href=re.compile('^(/title/)(.)*(/episodes)(.)(season)(.)*$')):
if 'href' in link.attrs:
print(link.attrs['href'])
allLinks.append(link.attrs['href'])
return(allLinks)
#Retrieve and copy the episodes to a CSV
def getDataPoints(allLinks, show, csvwriter):
for link in allLinks:
html = urlopen("https://www.imdb.com" + link)
bs = BeautifulSoup(html, "html.parser")
strong_tag = bs.find('div', {'id':'episodes_content'}).find_all('strong')
airdates = bs.find_all(class_='airdate')
#print (strong_tag)
for i in range(0, len(airdates)):
#print("Show = " + show + ", Season = " + strong_tag[len(airdates)].text + ", Episode " + str(i+1) + " = " + strong_tag[i].text + ', ' + airdates[i].text[13:-5])
tmpRow = [show, strong_tag[len(airdates)].text, str(i+1), strong_tag[i].text, airdates[i].text[13:-5]]
#print (tmpRow)
csvwriter.writerow(tmpRow)
#print("Season = " + strong_tag[len(airdates)].text + '\n')
#Header entry into the csv and open it.
header = ['Show', 'Season', 'Episode Number', 'Episode Title', 'Airdate']
file_data = open('/Users/leahz/Documents/Developer/WebScraperTutorial/WebScraperTutorial/allDCUniverseEpisodes.csv', 'w')
csvwriter = csv.writer(file_data)
csvwriter.writerow(header)
print(header)
#Super Girl
allLinksSuperGirl = []
allLinksSuperGirl = getAllLinks('https://www.imdb.com/title/tt4016454/', allLinksSuperGirl)
getDataPoints(allLinksSuperGirl, 'Super Girl', csvwriter)
#Arrow
allLinksArrow = []
allLinksArrow = getAllLinks('https://www.imdb.com/title/tt2193021/', allLinksArrow)
getDataPoints(allLinksArrow, 'Arrow', csvwriter)
#Legends of Tomorrow
allLinksLegends = []
allLinksLegends = getAllLinks('https://www.imdb.com/title/tt4532368/', allLinksLegends)
getDataPoints(allLinksLegends, 'Legends of Tomorrow', csvwriter)
#Flash
allLinksFlash = []
allLinksFlash = getAllLinks('https://www.imdb.com/title/tt3107288/', allLinksFlash)
getDataPoints(allLinksFlash, 'Flash', csvwriter)
#Close file
file_data.close()
|
[
"bs4.BeautifulSoup",
"csv.writer",
"re.compile",
"urllib.request.urlopen"
] |
[((2806, 2827), 'csv.writer', 'csv.writer', (['file_data'], {}), '(file_data)\n', (2816, 2827), False, 'import csv\n'), ((1415, 1433), 'urllib.request.urlopen', 'urlopen', (['firstLink'], {}), '(firstLink)\n', (1422, 1433), False, 'from urllib.request import urlopen\n'), ((1443, 1477), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1456, 1477), False, 'from bs4 import BeautifulSoup\n'), ((1844, 1882), 'urllib.request.urlopen', 'urlopen', (["('https://www.imdb.com' + link)"], {}), "('https://www.imdb.com' + link)\n", (1851, 1882), False, 'from urllib.request import urlopen\n'), ((1896, 1930), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1909, 1930), False, 'from bs4 import BeautifulSoup\n'), ((1516, 1571), 're.compile', 're.compile', (['"""^(/title/)(.)*(/episodes)(.)(season)(.)*$"""'], {}), "('^(/title/)(.)*(/episodes)(.)(season)(.)*$')\n", (1526, 1571), False, 'import re\n')]
|
"""
Extended Room
Evennia Contribution - Griatch 2012
This is an extended Room typeclass for Evennia. It is supported
by an extended `Look` command and an extended `@desc` command, also
in this module.
Features:
1) Time-changing description slots
    This allows changing the full description text the room shows
depending on larger time variations. Four seasons (spring, summer,
autumn and winter) are used by default. The season is calculated
on-demand (no Script or timer needed) and updates the full text block.
There is also a general description which is used as fallback if
one or more of the seasonal descriptions are not set when their
time comes.
An updated `@desc` command allows for setting seasonal descriptions.
The room uses the `evennia.utils.gametime.GameTime` global script. This is
started by default, but if you have deactivated it, you need to
supply your own time keeping mechanism.
2) In-description changing tags
Within each seasonal (or general) description text, you can also embed
time-of-day dependent sections. Text inside such a tag will only show
    during that particular time of day. The tags look like `<timeslot> ...
</timeslot>`. By default there are four timeslots per day - morning,
afternoon, evening and night.
3) Details
The Extended Room can be "detailed" with special keywords. This makes
use of a special `Look` command. Details are "virtual" targets to look
at, without there having to be a database object created for it. The
Details are simply stored in a dictionary on the room and if the look
command cannot find an object match for a `look <target>` command it
will also look through the available details at the current location
if applicable. An extended `@desc` command is used to set details.
4) Extra commands
CmdExtendedLook - look command supporting room details
CmdExtendedDesc - @desc command allowing to add seasonal descs and details,
as well as listing them
CmdGameTime - A simple `time` command, displaying the current
time and season.
Installation/testing:
1) Add `CmdExtendedLook`, `CmdExtendedDesc` and `CmdGameTime` to the default `cmdset`
(see Wiki for how to do this).
2) `@dig` a room of type `contrib.extended_room.ExtendedRoom` (or make it the
default room type)
3) Use `@desc` and `@detail` to customize the room, then play around!
"""
from __future__ import division
import datetime
import re
from django.conf import settings
from evennia import DefaultRoom
from evennia import gametime
from evennia import default_cmds
from evennia import utils
# error return function, needed by Extended Look command
_AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
# regexes for in-desc replacements
RE_MORNING = re.compile(r"<morning>(.*?)</morning>", re.IGNORECASE)
RE_AFTERNOON = re.compile(r"<afternoon>(.*?)</afternoon>", re.IGNORECASE)
RE_EVENING = re.compile(r"<evening>(.*?)</evening>", re.IGNORECASE)
RE_NIGHT = re.compile(r"<night>(.*?)</night>", re.IGNORECASE)
# this map is just a faster way to select the right regexes (the first
# regex in each tuple will be parsed, the following will always be weeded out)
REGEXMAP = {"morning": (RE_MORNING, RE_AFTERNOON, RE_EVENING, RE_NIGHT),
"afternoon": (RE_AFTERNOON, RE_MORNING, RE_EVENING, RE_NIGHT),
"evening": (RE_EVENING, RE_MORNING, RE_AFTERNOON, RE_NIGHT),
"night": (RE_NIGHT, RE_MORNING, RE_AFTERNOON, RE_EVENING)}
# set up the seasons and time slots. This assumes gametime started at the
# beginning of the year (so month 1 is equivalent to January), and that
# one CAN divide the game's year into four seasons in the first place ...
MONTHS_PER_YEAR = 12
SEASONAL_BOUNDARIES = (3 / 12.0, 6 / 12.0, 9 / 12.0)
HOURS_PER_DAY = 24
DAY_BOUNDARIES = (0, 6 / 24.0, 12 / 24.0, 18 / 24.0)
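# Worked example of the boundary math above: game month 7 gives
# season = 7/12 ~ 0.58, which falls in [6/12, 9/12) -> "summer"; game hour 14
# gives timeslot = 14/24 ~ 0.58, which falls in [12/24, 18/24) -> "afternoon".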
# implements the Extended Room
class ExtendedRoom(DefaultRoom):
"""
This room implements a more advanced `look` functionality depending on
time. It also allows for "details", together with a slightly modified
look command.
"""
def at_object_creation(self):
"""Called when room is first created only."""
self.db.spring_desc = ""
self.db.summer_desc = ""
self.db.autumn_desc = ""
self.db.winter_desc = ""
# the general desc is used as a fallback if a seasonal one is not set
self.db.general_desc = ""
# will be set dynamically. Can contain raw timeslot codes
self.db.raw_desc = ""
# this will be set dynamically at first look. Parsed for timeslot codes
self.db.desc = ""
# these will be filled later
self.ndb.last_season = None
self.ndb.last_timeslot = None
# detail storage
self.db.details = {}
def get_time_and_season(self):
"""
Calculate the current time and season ids.
"""
# get the current time as parts of year and parts of day.
# we assume a standard calendar here and use 24h format.
timestamp = gametime.gametime(absolute=True)
# note that fromtimestamp includes the effects of server time zone!
datestamp = datetime.datetime.fromtimestamp(timestamp)
season = float(datestamp.month) / MONTHS_PER_YEAR
timeslot = float(datestamp.hour) / HOURS_PER_DAY
# figure out which slots these represent
if SEASONAL_BOUNDARIES[0] <= season < SEASONAL_BOUNDARIES[1]:
curr_season = "spring"
elif SEASONAL_BOUNDARIES[1] <= season < SEASONAL_BOUNDARIES[2]:
curr_season = "summer"
elif SEASONAL_BOUNDARIES[2] <= season < 1.0 + SEASONAL_BOUNDARIES[0]:
curr_season = "autumn"
else:
curr_season = "winter"
if DAY_BOUNDARIES[0] <= timeslot < DAY_BOUNDARIES[1]:
curr_timeslot = "night"
elif DAY_BOUNDARIES[1] <= timeslot < DAY_BOUNDARIES[2]:
curr_timeslot = "morning"
elif DAY_BOUNDARIES[2] <= timeslot < DAY_BOUNDARIES[3]:
curr_timeslot = "afternoon"
else:
curr_timeslot = "evening"
return curr_season, curr_timeslot
def replace_timeslots(self, raw_desc, curr_time):
"""
Filter so that only time markers `<timeslot>...</timeslot>` of
        the correct timeslot remain in the description.
Args:
raw_desc (str): The unmodified description.
curr_time (str): A timeslot identifier.
Returns:
            description (str): A possibly modified description.
"""
if raw_desc:
regextuple = REGEXMAP[curr_time]
raw_desc = regextuple[0].sub(r"\1", raw_desc)
raw_desc = regextuple[1].sub("", raw_desc)
raw_desc = regextuple[2].sub("", raw_desc)
return regextuple[3].sub("", raw_desc)
return raw_desc
def return_detail(self, key):
"""
This will attempt to match a "detail" to look for in the room.
Args:
key (str): A detail identifier.
Returns:
            detail (str or None): A detail matching the given key.
Notes:
A detail is a way to offer more things to look at in a room
without having to add new objects. For this to work, we
require a custom `look` command that allows for `look
<detail>` - the look command should defer to this method on
the current location (if it exists) before giving up on
finding the target.
Details are not season-sensitive, but are parsed for timeslot
markers.
"""
try:
detail = self.db.details.get(key.lower(), None)
except AttributeError:
# this happens if no attribute details is set at all
return None
if detail:
season, timeslot = self.get_time_and_season()
detail = self.replace_timeslots(detail, timeslot)
return detail
return None
def return_appearance(self, looker):
"""
This is called when e.g. the look command wants to retrieve
the description of this object.
Args:
looker (Object): The object looking at us.
Returns:
description (str): Our description.
"""
update = False
# get current time and season
curr_season, curr_timeslot = self.get_time_and_season()
# compare with previously stored slots
last_season = self.ndb.last_season
last_timeslot = self.ndb.last_timeslot
if curr_season != last_season:
# season changed. Load new desc, or a fallback.
if curr_season == 'spring':
new_raw_desc = self.db.spring_desc
elif curr_season == 'summer':
new_raw_desc = self.db.summer_desc
elif curr_season == 'autumn':
new_raw_desc = self.db.autumn_desc
else:
new_raw_desc = self.db.winter_desc
if new_raw_desc:
raw_desc = new_raw_desc
else:
# no seasonal desc set. Use fallback
raw_desc = self.db.general_desc or self.db.desc
self.db.raw_desc = raw_desc
self.ndb.last_season = curr_season
update = True
if curr_timeslot != last_timeslot:
# timeslot changed. Set update flag.
self.ndb.last_timeslot = curr_timeslot
update = True
if update:
# if anything changed we have to re-parse
# the raw_desc for time markers
# and re-save the description again.
self.db.desc = self.replace_timeslots(self.db.raw_desc, curr_timeslot)
# run the normal return_appearance method, now that desc is updated.
return super(ExtendedRoom, self).return_appearance(looker)
# Custom Look command supporting Room details. Add this to
# the Default cmdset to use.
class CmdExtendedLook(default_cmds.CmdLook):
"""
look
Usage:
look
look <obj>
look <room detail>
look *<account>
Observes your location, details at your location or objects in your vicinity.
"""
def func(self):
"""
Handle the looking - add fallback to details.
"""
caller = self.caller
args = self.args
if args:
looking_at_obj = caller.search(args,
candidates=caller.location.contents + caller.contents,
use_nicks=True,
quiet=True)
if not looking_at_obj:
# no object found. Check if there is a matching
# detail at location.
location = caller.location
if location and hasattr(location, "return_detail") and callable(location.return_detail):
detail = location.return_detail(args)
if detail:
# we found a detail instead. Show that.
caller.msg(detail)
return
# no detail found. Trigger delayed error messages
_AT_SEARCH_RESULT(looking_at_obj, caller, args, quiet=False)
return
else:
# we need to extract the match manually.
looking_at_obj = utils.make_iter(looking_at_obj)[0]
else:
looking_at_obj = caller.location
if not looking_at_obj:
caller.msg("You have no location to look at!")
return
if not hasattr(looking_at_obj, 'return_appearance'):
# this is likely due to us having an account instead
looking_at_obj = looking_at_obj.character
if not looking_at_obj.access(caller, "view"):
caller.msg("Could not find '%s'." % args)
return
# get object's appearance
caller.msg(looking_at_obj.return_appearance(caller))
# the object's at_desc() method.
looking_at_obj.at_desc(looker=caller)
# Custom build commands for setting seasonal descriptions
# and detailing extended rooms.
class CmdExtendedDesc(default_cmds.CmdDesc):
"""
`@desc` - describe an object or room.
Usage:
@desc[/switch] [<obj> =] <description>
@detail[/del] [<key> = <description>]
Switches for `@desc`:
spring - set description for <season> in current room.
summer
autumn
winter
Switch for `@detail`:
del - delete a named detail.
Sets the "desc" attribute on an object. If an object is not given,
describe the current room.
    The alias `@detail` allows you to assign a "detail" (a non-object
    target for the `look` command) to the current room (only).
You can also embed special time markers in your room description, like this:
```
<night>In the darkness, the forest looks foreboding.</night>.
```
Text marked this way will only display when the server is truly at the given
timeslot. The available times are night, morning, afternoon and evening.
Note that `@detail`, seasons and time-of-day slots only work on rooms in this
version of the `@desc` command.
"""
aliases = ["@describe", "@detail"]
def reset_times(self, obj):
"""By deleteting the caches we force a re-load."""
obj.ndb.last_season = None
obj.ndb.last_timeslot = None
def func(self):
"""Define extended command"""
caller = self.caller
location = caller.location
if self.cmdname == 'detail':
# switch to detailing mode. This operates only on current location
if not location:
caller.msg("No location to detail!")
return
if location.db.details is None:
caller.msg("|rThis location does not support details.|n")
return
if self.switches and self.switches[0] in 'del':
# removing a detail.
if self.lhs in location.db.details:
del location.db.details[self.lhs]
caller.msg("Detail %s deleted, if it existed." % self.lhs)
self.reset_times(location)
return
if not self.args:
# No args given. Return all details on location
string = "|wDetails on %s|n:" % location
details = "\n".join(" |w%s|n: %s"
% (key, utils.crop(text)) for key, text in location.db.details.items())
caller.msg("%s\n%s" % (string, details) if details else "%s None." % string)
return
if not self.rhs:
# no '=' used - list content of given detail
if self.args in location.db.details:
string = "|wDetail '%s' on %s:\n|n" % (self.args, location)
string += str(location.db.details[self.args])
caller.msg(string)
else:
caller.msg("Detail '%s' not found." % self.args)
return
# setting a detail
location.db.details[self.lhs] = self.rhs
caller.msg("Set Detail %s to '%s'." % (self.lhs, self.rhs))
self.reset_times(location)
return
else:
# we are doing a @desc call
if not self.args:
if location:
string = "|wDescriptions on %s|n:\n" % location.key
string += " |wspring:|n %s\n" % location.db.spring_desc
string += " |wsummer:|n %s\n" % location.db.summer_desc
string += " |wautumn:|n %s\n" % location.db.autumn_desc
string += " |wwinter:|n %s\n" % location.db.winter_desc
string += " |wgeneral:|n %s" % location.db.general_desc
caller.msg(string)
return
if self.switches and self.switches[0] in ("spring", "summer", "autumn", "winter"):
# a seasonal switch was given
if self.rhs:
caller.msg("Seasonal descs only work with rooms, not objects.")
return
switch = self.switches[0]
if not location:
caller.msg("No location was found!")
return
if switch == 'spring':
location.db.spring_desc = self.args
elif switch == 'summer':
location.db.summer_desc = self.args
elif switch == 'autumn':
location.db.autumn_desc = self.args
elif switch == 'winter':
location.db.winter_desc = self.args
# clear flag to force an update
self.reset_times(location)
caller.msg("Seasonal description was set on %s." % location.key)
else:
# No seasonal desc set, maybe this is not an extended room
if self.rhs:
text = self.rhs
obj = caller.search(self.lhs)
if not obj:
return
else:
text = self.args
obj = location
obj.db.desc = text # a compatibility fallback
if obj.attributes.has("general_desc"):
obj.db.general_desc = text
self.reset_times(obj)
caller.msg("General description was set on %s." % obj.key)
else:
# this is not an ExtendedRoom.
caller.msg("The description was set on %s." % obj.key)
# Simple command to view the current time and season
class CmdGameTime(default_cmds.MuxCommand):
"""
Check the game time
Usage:
time
Shows the current in-game time and season.
"""
key = "time"
locks = "cmd:all()"
help_category = "General"
def func(self):
"""Reads time info from current room"""
location = self.caller.location
if not location or not hasattr(location, "get_time_and_season"):
self.caller.msg("No location available - you are outside time.")
else:
season, timeslot = location.get_time_and_season()
prep = "a"
if season == "autumn":
prep = "an"
self.caller.msg("It's %s %s day, in the %s." % (prep, season, timeslot))
|
[
"evennia.gametime.gametime",
"evennia.utils.crop",
"django.conf.settings.SEARCH_AT_RESULT.rsplit",
"datetime.datetime.fromtimestamp",
"evennia.utils.make_iter",
"re.compile"
] |
[((2791, 2844), 're.compile', 're.compile', (['"""<morning>(.*?)</morning>"""', 're.IGNORECASE'], {}), "('<morning>(.*?)</morning>', re.IGNORECASE)\n", (2801, 2844), False, 'import re\n'), ((2861, 2918), 're.compile', 're.compile', (['"""<afternoon>(.*?)</afternoon>"""', 're.IGNORECASE'], {}), "('<afternoon>(.*?)</afternoon>', re.IGNORECASE)\n", (2871, 2918), False, 'import re\n'), ((2933, 2986), 're.compile', 're.compile', (['"""<evening>(.*?)</evening>"""', 're.IGNORECASE'], {}), "('<evening>(.*?)</evening>', re.IGNORECASE)\n", (2943, 2986), False, 'import re\n'), ((2999, 3048), 're.compile', 're.compile', (['"""<night>(.*?)</night>"""', 're.IGNORECASE'], {}), "('<night>(.*?)</night>', re.IGNORECASE)\n", (3009, 3048), False, 'import re\n'), ((2700, 2740), 'django.conf.settings.SEARCH_AT_RESULT.rsplit', 'settings.SEARCH_AT_RESULT.rsplit', (['"""."""', '(1)'], {}), "('.', 1)\n", (2732, 2740), False, 'from django.conf import settings\n'), ((5071, 5103), 'evennia.gametime.gametime', 'gametime.gametime', ([], {'absolute': '(True)'}), '(absolute=True)\n', (5088, 5103), False, 'from evennia import gametime\n'), ((5200, 5242), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (5231, 5242), False, 'import datetime\n'), ((11494, 11525), 'evennia.utils.make_iter', 'utils.make_iter', (['looking_at_obj'], {}), '(looking_at_obj)\n', (11509, 11525), False, 'from evennia import utils\n'), ((14655, 14671), 'evennia.utils.crop', 'utils.crop', (['text'], {}), '(text)\n', (14665, 14671), False, 'from evennia import utils\n')]
|
from path import Path
import cv2
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
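# Render each ground-truth image as an inverted, mean-normalized PNG using the plasma colormap.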
root = Path('/home/roit/aws/aprojects/xdr94_mono2/mc_test_gt')
out_p = Path('./plasma_gt')
out_p.mkdir_p()
files = root.files()
def main():
cnt=0
for item in tqdm(files):
img = cv2.imread(item)
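        # invert the grayscale image and normalize by its mean before colorizing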
        img = 255 - cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img = img / np.mean(img)
        plt.imsave(out_p / item.stem + '.png', img, cmap='plasma')
        cnt += 1
if __name__ == '__main__':
main()
|
[
"tqdm.tqdm",
"cv2.cvtColor",
"cv2.imread",
"path.Path",
"numpy.mean",
"matplotlib.pyplot.imsave"
] |
[((112, 167), 'path.Path', 'Path', (['"""/home/roit/aws/aprojects/xdr94_mono2/mc_test_gt"""'], {}), "('/home/roit/aws/aprojects/xdr94_mono2/mc_test_gt')\n", (116, 167), False, 'from path import Path\n'), ((176, 195), 'path.Path', 'Path', (['"""./plasma_gt"""'], {}), "('./plasma_gt')\n", (180, 195), False, 'from path import Path\n'), ((273, 284), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (277, 284), False, 'from tqdm import tqdm\n'), ((301, 317), 'cv2.imread', 'cv2.imread', (['item'], {}), '(item)\n', (311, 317), False, 'import cv2\n'), ((412, 470), 'matplotlib.pyplot.imsave', 'plt.imsave', (["(out_p / item.stem + '.png')", 'img'], {'cmap': '"""plasma"""'}), "(out_p / item.stem + '.png', img, cmap='plasma')\n", (422, 470), True, 'import matplotlib.pyplot as plt\n'), ((336, 373), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (348, 373), False, 'import cv2\n'), ((391, 403), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (398, 403), True, 'import numpy as np\n')]
|
from gnutools.fs import listfiles, name
import wave
import contextlib
from tqdm import tqdm
import json
from asr_deepspeech.audio import duration
class JSUTManifest(dict):
def __init__(self, root):
super(JSUTManifest, self).__init__()
self.__root=root
self.__labels = set()
def build(self):
wav_files = listfiles(self.__root, [".wav"])
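        # map each utterance id (wav file basename) to its full path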
daudio = dict([(name(file), file) for file in wav_files])
transcript_files = listfiles(self.__root, ["transcript_utf8.txt"])
dtext = {}
for transcript_file in transcript_files:
records = dict([l.rsplit()[0].split(":") for l in open(transcript_file, "r").readlines()])
dtext.update(records)
assert len(daudio)==len(dtext)
for id, text in tqdm(dtext.items(), total=len(dtext), desc="Building the manifest"):
d = {
"audio_filepath": daudio[id],
"duration": duration(daudio[id]),
"text": dtext[id]
}
self.__labels = self.__labels.union(set(dtext[id]))
self.setdefault(id, d)
def export(self, manifest, labels=None):
json.dump(self, open(manifest, "w"), indent=4, ensure_ascii=False)
json.dump(list(self.__labels), open(labels, "w"), indent=4, ensure_ascii=False) if labels is not None else None
if __name__=="__main__":
manifest = JSUTManifest(root="/mnt/.cdata/ASR/ja/raw/CLEAN/JSUT/jsut_ver1.1")
manifest.build()
manifest.export(manifest="jsut_ver1.1.json", labels="labels_jsut.json")
|
[
"asr_deepspeech.audio.duration",
"gnutools.fs.name",
"gnutools.fs.listfiles"
] |
[((345, 377), 'gnutools.fs.listfiles', 'listfiles', (['self.__root', "['.wav']"], {}), "(self.__root, ['.wav'])\n", (354, 377), False, 'from gnutools.fs import listfiles, name\n'), ((471, 518), 'gnutools.fs.listfiles', 'listfiles', (['self.__root', "['transcript_utf8.txt']"], {}), "(self.__root, ['transcript_utf8.txt'])\n", (480, 518), False, 'from gnutools.fs import listfiles, name\n'), ((948, 968), 'asr_deepspeech.audio.duration', 'duration', (['daudio[id]'], {}), '(daudio[id])\n', (956, 968), False, 'from asr_deepspeech.audio import duration\n'), ((402, 412), 'gnutools.fs.name', 'name', (['file'], {}), '(file)\n', (406, 412), False, 'from gnutools.fs import listfiles, name\n')]
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.blob_trait as blob_trait
from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.python.framework.hob as hob
import oneflow.python.eager.eager_blob_util as eager_blob_util
import oneflow.python.eager.blob_register as blob_register_util
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.gradient_util as gradient_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow_api.oneflow.core.job.placement as placement_cfg
import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util
import oneflow_api
import traceback
import sys
blob_register = blob_register_util.GetDefaultBlobRegister()
def RemoteBlob(lbi, **kw):
api = enable_if.unique([EagerLogicalBlob, LazyRemoteBlob])
return api(lbi, **kw)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def EagerLogicalBlob(lbi, **kw):
job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
lbn = lbi.op_name + "/" + lbi.blob_name
if not isinstance(lbi, lbi_util.LogicalBlobId):
cfg_lbi = lbi_util.LogicalBlobId()
cfg_lbi.set_op_name(lbi.op_name)
cfg_lbi.set_blob_name(lbi.blob_name)
lbi = cfg_lbi
blob_type = oneflow_api.EagerConsistentBlob
if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(job_name, lbn):
blob_type = oneflow_api.EagerMirroredBlob
job_name = ""
if ("job_name" in kw) and (kw["job_name"] is not None):
job_name = kw["job_name"]
blob_object = None
if "blob_object" in kw:
blob_object = kw["blob_object"]
distribute = oneflow_api.distribute.auto()
if "distribute" in kw:
distribute = kw["distribute"]
return blob_type(lbi, blob_object, blob_register, job_name, distribute)
@enable_if.condition(~hob.eager_execution_enabled)
def LazyRemoteBlob(lbi, **kw):
job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
lbn = lbi.op_name + "/" + lbi.blob_name
blob_type = oneflow_api.LazyConsistentBlob
if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(job_name, lbn):
blob_type = oneflow_api.LazyMirroredBlob
if not isinstance(lbi, lbi_util.LogicalBlobId):
cfg_lbi = lbi_util.LogicalBlobId()
cfg_lbi.set_op_name(lbi.op_name)
cfg_lbi.set_blob_name(lbi.blob_name)
lbi = cfg_lbi
job_name = ""
if ("job_name" in kw) and (kw["job_name"] is not None):
job_name = kw["job_name"]
distribute = oneflow_api.distribute.auto()
if "distribute" in kw:
distribute = kw["distribute"]
return blob_type(lbi, job_name, distribute)
@property
def dtype(self):
ret = convert_proto_dtype_to_oneflow_dtype(self.get_dtype())
assert issubclass(ret, dtype_util.dtype)
return ret
def with_distribute(self, distribute):
new = type(self)(self.lbi, self.job_name, oneflow_api.distribute.auto())
new.set_distribute(distribute)
return new
def with_gradient_distribute(self, distribute):
return oneflow.parallel_cast(self, gradient_distribute=distribute)
def get_lazy_shape_log_warning(self):
if oneflow.scope.mirrored_view_enabled():
return ("%s\n%s\n%s") % (
"WARNING:",
"You access a consistent blob shape in mirrored view, there may be problems,",
"you should add 'x = flow.cast_to_current_logical_view(x)'.",
)
else:
return ""
def get_mirror_shape_log_warning(self):
if oneflow.scope.consistent_view_enabled():
return ("%s\n%s\n%s") % (
"WARNING:",
"You access a mirrored blob shape in consistent view, there may be problems,",
"you should add 'x = flow.cast_to_current_logical_view(x)'.",
)
else:
return ""
def RegisterMethod4BlobDef(blob_class):
blob_class.dtype = dtype
blob_class.with_distribute = with_distribute
blob_class.with_gradient_distribute = with_gradient_distribute
def RegisterMethod4LazyConsistentBlob():
RegisterMethod4BlobDef(oneflow_api.LazyConsistentBlob)
oneflow_api.LazyConsistentBlob.get_lazy_shape_log_warning = (
get_lazy_shape_log_warning
)
def RegisterMethod4LazyMirroredBlob():
RegisterMethod4BlobDef(oneflow_api.LazyMirroredBlob)
oneflow_api.LazyMirroredBlob.get_mirror_shape_log_warning = (
get_mirror_shape_log_warning
)
@property
def sub_consistent_blob_list(self):
raise NotImplementedError
def numpy(self, rank=None):
if rank is None:
if self.numpy_size() == 1:
return self._NumpyAt(0)
else:
assert not self.is_dynamic
assert not self.is_tensor_list
return self._Numpy()
else:
return self._NumpyAt(rank)
def numpy_list(self, rank=None):
assert self.is_tensor_list
assert self.is_dynamic
mirrored_list = self._NumpyMirroredList()
if rank is None:
return mirrored_list
else:
parallel_num = self.blob_object_.parallel_desc_symbol.parallel_num
assert rank >= 0
assert rank < parallel_num
assert len(mirrored_list) == parallel_num
return mirrored_list[rank]
def _NumpyAt(self, rank):
assert self.is_tensor_list is not True
assert rank >= 0
assert rank < self.blob_object.parallel_desc_symbol.parallel_num
ndarray_list = self._NumpyMirroredList()
return ndarray_list[rank]
def _Numpy(self):
assert self.is_tensor_list is not True
def FetchBlobNumpy(blob_object):
consistent_blob_name = None
def BoxingToSingleDevice(builder):
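            # box the blob onto a single device (0:0) so it can be fetched as one ndarray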
parallel_conf = placement_cfg.ParallelConf()
parallel_conf.set_device_tag(blob_object.parallel_desc_symbol.device_tag)
parallel_conf.add_device_name("{}:{}".format(0, 0))
tmp_parallel_desc_symbol = builder.GetParallelDescSymbol(parallel_conf)
tmp_op_arg_parallel_attr = oneflow_api.OpArgParallelAttribute(
tmp_parallel_desc_symbol,
str(blob_object.op_arg_parallel_attr.sbp_parallel),
str(blob_object.op_arg_parallel_attr.opt_mirrored_parallel),
)
with oneflow.scope.placement(
self.parallel_conf.device_tag(), list(self.parallel_conf.device_name()),
):
tmp_blob_object = boxing_util.BoxingTo(
builder, blob_object, tmp_op_arg_parallel_attr
)
nonlocal consistent_blob_name
consistent_blob_name = "{}-consistent".format(self.logical_blob_name)
if not blob_register.HasObject4BlobName(consistent_blob_name):
blob_register.SetObject4BlobName(consistent_blob_name, tmp_blob_object)
vm_util.LogicalRun(BoxingToSingleDevice)
return oneflow_api.EagerPhysicalBlob(
consistent_blob_name,
blob_register,
eager_blob_util._GetPhysicalBlobHeaderCache,
).numpy()
blob_cache = blob_cache_util.FindOrCreateBlobCache(self.blob_object)
return blob_cache.GetCachedNumpy(FetchBlobNumpy)
def _NumpyMirroredList(self):
physical_blob_objects = []
def UnpackLogicalBlobToPhysicalBlobs(builder):
nonlocal physical_blob_objects
physical_blob_objects = builder.UnpackLogicalBlobToPhysicalBlobs(
self.blob_object
)
def GetPhyBlobNumpy(i, phy_blob_object):
name = "{}/{}".format(self.logical_blob_name, i)
blob_register.SetObject4BlobName(name, phy_blob_object)
return (
oneflow_api.EagerPhysicalBlob(
name, blob_register, eager_blob_util._GetPhysicalBlobHeaderCache
).numpy_list()
if self.is_tensor_list
else oneflow_api.EagerPhysicalBlob(
name, blob_register, eager_blob_util._GetPhysicalBlobHeaderCache
).numpy()
)
def FetchBlobNumpyMirroredList(blob_object):
vm_util.LogicalRun(UnpackLogicalBlobToPhysicalBlobs)
return [
GetPhyBlobNumpy(i, phy_blob_object)
for i, phy_blob_object in enumerate(physical_blob_objects)
]
blob_cache = blob_cache_util.FindOrCreateBlobCache(self.blob_object)
return blob_cache.GetCachedNumpyMirroredList(FetchBlobNumpyMirroredList)
def RegisterMethod4EagerBlobTrait():
oneflow_api.EagerBlobTrait.sub_consistent_blob_list = sub_consistent_blob_list
oneflow_api.EagerBlobTrait.dtype = dtype
oneflow_api.EagerBlobTrait._NumpyMirroredList = _NumpyMirroredList
oneflow_api.EagerBlobTrait._Numpy = _Numpy
oneflow_api.EagerBlobTrait._NumpyAt = _NumpyAt
oneflow_api.EagerBlobTrait.numpy_list = numpy_list
oneflow_api.EagerBlobTrait.numpy = numpy
def eager_with_distribute(self, distribute):
new = type(self)(
self.lbi,
blob_object=self.blob_object,
blob_register=blob_register,
job_name=self.job_name,
distribute=self.distribute,
)
new.set_distribute(distribute)
return new
def RegisterMethod4EagerConsistentBlob():
oneflow_api.EagerConsistentBlob.dtype = dtype
oneflow_api.EagerConsistentBlob.with_distribute = eager_with_distribute
oneflow_api.EagerConsistentBlob.with_gradient_distribute = with_gradient_distribute
|
[
"oneflow_api.distribute.auto",
"oneflow_api.oneflow.core.job.placement.ParallelConf",
"oneflow.python.eager.boxing_util.BoxingTo",
"oneflow.python.lib.core.enable_if.condition",
"oneflow.python.eager.blob_register.GetDefaultBlobRegister",
"oneflow.scope.mirrored_view_enabled",
"oneflow.python.eager.vm_util.LogicalRun",
"oneflow_api.EagerPhysicalBlob",
"oneflow_api.oneflow.core.register.logical_blob_id.LogicalBlobId",
"oneflow.parallel_cast",
"oneflow.python.eager.blob_cache.FindOrCreateBlobCache",
"oneflow_api.JobBuildAndInferCtx_GetCurrentJobName",
"oneflow.python.lib.core.enable_if.unique",
"oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob",
"oneflow.scope.consistent_view_enabled"
] |
[((1670, 1713), 'oneflow.python.eager.blob_register.GetDefaultBlobRegister', 'blob_register_util.GetDefaultBlobRegister', ([], {}), '()\n', (1711, 1713), True, 'import oneflow.python.eager.blob_register as blob_register_util\n'), ((1835, 1904), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (1854, 1904), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2813, 2862), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(~hob.eager_execution_enabled)'], {}), '(~hob.eager_execution_enabled)\n', (2832, 2862), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1753, 1805), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerLogicalBlob, LazyRemoteBlob]'], {}), '([EagerLogicalBlob, LazyRemoteBlob])\n', (1769, 1805), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1953, 2004), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (2002, 2004), False, 'import oneflow_api\n'), ((2307, 2367), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob', 'c_api_util.JobBuildAndInferCtx_IsMirroredBlob', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (2352, 2367), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((2639, 2668), 'oneflow_api.distribute.auto', 'oneflow_api.distribute.auto', ([], {}), '()\n', (2666, 2668), False, 'import oneflow_api\n'), ((2909, 2960), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (2958, 2960), False, 'import oneflow_api\n'), ((3059, 3119), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob', 'c_api_util.JobBuildAndInferCtx_IsMirroredBlob', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (3104, 3119), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((3502, 3531), 'oneflow_api.distribute.auto', 'oneflow_api.distribute.auto', ([], {}), '()\n', (3529, 3531), False, 'import oneflow_api\n'), ((4028, 4087), 'oneflow.parallel_cast', 'oneflow.parallel_cast', (['self'], {'gradient_distribute': 'distribute'}), '(self, gradient_distribute=distribute)\n', (4049, 4087), False, 'import oneflow\n'), ((4135, 4172), 'oneflow.scope.mirrored_view_enabled', 'oneflow.scope.mirrored_view_enabled', ([], {}), '()\n', (4170, 4172), False, 'import oneflow\n'), ((4484, 4523), 'oneflow.scope.consistent_view_enabled', 'oneflow.scope.consistent_view_enabled', ([], {}), '()\n', (4521, 4523), False, 'import oneflow\n'), ((7990, 8045), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['self.blob_object'], {}), '(self.blob_object)\n', (8027, 8045), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((9172, 9227), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['self.blob_object'], {}), '(self.blob_object)\n', (9209, 9227), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((2119, 2143), 'oneflow_api.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (2141, 2143), True, 'import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util\n'), ((3240, 3264), 'oneflow_api.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', 
(3262, 3264), True, 'import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util\n'), ((3886, 3915), 'oneflow_api.distribute.auto', 'oneflow_api.distribute.auto', ([], {}), '()\n', (3913, 3915), False, 'import oneflow_api\n'), ((7749, 7789), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['BoxingToSingleDevice'], {}), '(BoxingToSingleDevice)\n', (7767, 7789), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((8955, 9007), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['UnpackLogicalBlobToPhysicalBlobs'], {}), '(UnpackLogicalBlobToPhysicalBlobs)\n', (8973, 9007), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((6627, 6655), 'oneflow_api.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (6653, 6655), True, 'import oneflow_api.oneflow.core.job.placement as placement_cfg\n'), ((7346, 7414), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['builder', 'blob_object', 'tmp_op_arg_parallel_attr'], {}), '(builder, blob_object, tmp_op_arg_parallel_attr)\n', (7366, 7414), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((7805, 7920), 'oneflow_api.EagerPhysicalBlob', 'oneflow_api.EagerPhysicalBlob', (['consistent_blob_name', 'blob_register', 'eager_blob_util._GetPhysicalBlobHeaderCache'], {}), '(consistent_blob_name, blob_register,\n eager_blob_util._GetPhysicalBlobHeaderCache)\n', (7834, 7920), False, 'import oneflow_api\n'), ((8562, 8662), 'oneflow_api.EagerPhysicalBlob', 'oneflow_api.EagerPhysicalBlob', (['name', 'blob_register', 'eager_blob_util._GetPhysicalBlobHeaderCache'], {}), '(name, blob_register, eager_blob_util.\n _GetPhysicalBlobHeaderCache)\n', (8591, 8662), False, 'import oneflow_api\n'), ((8753, 8853), 'oneflow_api.EagerPhysicalBlob', 'oneflow_api.EagerPhysicalBlob', (['name', 'blob_register', 'eager_blob_util._GetPhysicalBlobHeaderCache'], {}), '(name, blob_register, eager_blob_util.\n _GetPhysicalBlobHeaderCache)\n', (8782, 8853), False, 'import oneflow_api\n')]
|
from __future__ import absolute_import
import jwt, time
from plivo.utils.validators import *
"""
Class to represent plivo token for endpoint authentication
"""
class AccessToken:
auth_id = ''
username = ''
valid_from = 0
lifetime = 86400
key = ''
grants = {}
uid = 0
@validate_args(
auth_id=[is_account_id()],
auth_token=[optional(of_type(six.text_type))],
username=[all_of(
of_type(six.text_type),
check(lambda username: len(username) > 0, 'empty username')
)],
valid_from=[optional(of_type(*six.integer_types))],
lifetime=[
optional(
all_of(
of_type(*six.integer_types),
check(lambda lifetime: 180 <= lifetime <= 86400,
'180 < lifetime <= 86400')))
],
valid_till=[optional(of_type(*six.integer_types))],
)
def __init__(self,
auth_id,
auth_token,
username,
valid_from=None,
lifetime=None,
valid_till=None,
uid=None):
self.auth_id = auth_id
self.username = username
if valid_from:
self.valid_from = int(valid_from)
else:
self.valid_from = int(time.time())
if lifetime:
self.lifetime = int(lifetime)
if valid_till is not None:
raise ValidationError("use either lifetime or valid_till")
elif valid_till:
self.lifetime = valid_till - self.valid_from
if self.lifetime < 0:
raise ValidationError(
"validity expires %s seconds before it starts" %
self.lifetime)
if self.lifetime < 180 or self.lifetime > 86400:
raise ValidationError(
"validity of %s seconds is out of permitted range [180, 86400]" %
self.lifetime)
self.key = auth_token
if uid:
self.uid = uid
else:
self.uid = "%s-%s" % (username, time.time())
@validate_args(
incoming=[optional(of_type_exact(bool))],
outgoing=[optional(of_type_exact(bool))],
)
def add_voice_grants(self, incoming=False, outgoing=False):
self.grants['voice'] = {
'incoming_allow': incoming,
'outgoing_allow': outgoing
}
def to_jwt(self):
headers = {'typ': 'JWT', 'cty': 'plivo;v=1'}
algorithm = 'HS256'
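        # registered JWT claims: jti (unique token id), iss (issuer), sub (subject),
        # nbf (not valid before), exp (expiry); 'grants' is a custom Plivo claim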
claims = {
'jti': self.uid,
'iss': self.auth_id,
'sub': self.username,
'nbf': self.valid_from,
'exp': self.valid_from + self.lifetime,
'grants': self.grants
}
return jwt.encode(claims, self.key, algorithm, headers).decode('utf-8')
|
[
"jwt.encode",
"time.time"
] |
[((1342, 1353), 'time.time', 'time.time', ([], {}), '()\n', (1351, 1353), False, 'import jwt, time\n'), ((2837, 2885), 'jwt.encode', 'jwt.encode', (['claims', 'self.key', 'algorithm', 'headers'], {}), '(claims, self.key, algorithm, headers)\n', (2847, 2885), False, 'import jwt, time\n'), ((2145, 2156), 'time.time', 'time.time', ([], {}), '()\n', (2154, 2156), False, 'import jwt, time\n')]
|
from fastapi import APIRouter, Depends
from fastapi.security import OAuth2PasswordRequestForm
from app.models.user import Token, User, UserCreate
from app.services.user import UserService
router = APIRouter(prefix="/users", tags=["Users"])
@router.post("/login", response_model=Token)
def login(form_data: OAuth2PasswordRequestForm = Depends()):
    return UserService().login_user(form_data)
@router.get("/me", response_model=User)
def get_current_user(
current_user=Depends(UserService().get_current_user),
):
return current_user
@router.post("", response_model=User)
def create_user(new_user: UserCreate):
return UserService().create_user(new_user)
|
[
"app.services.user.UserService",
"fastapi.APIRouter"
] |
[((144, 186), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/users"""', 'tags': "['Users']"}), "(prefix='/users', tags=['Users'])\n", (153, 186), False, 'from fastapi import APIRouter, Depends\n'), ((258, 271), 'app.services.user.UserService', 'UserService', ([], {}), '()\n', (269, 271), False, 'from app.services.user import UserService\n'), ((383, 396), 'app.services.user.UserService', 'UserService', ([], {}), '()\n', (394, 396), False, 'from app.services.user import UserService\n'), ((533, 546), 'app.services.user.UserService', 'UserService', ([], {}), '()\n', (544, 546), False, 'from app.services.user import UserService\n')]
|
import rpyc
c = rpyc.connect_by_service("TIME")
print( "server's time is", c.root.get_time())
|
[
"rpyc.connect_by_service"
] |
[((18, 49), 'rpyc.connect_by_service', 'rpyc.connect_by_service', (['"""TIME"""'], {}), "('TIME')\n", (41, 49), False, 'import rpyc\n')]
|
"""
General purpose rational polynomial tools
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
import logging
from typing import List, Sequence
import numpy
from numpy.polynomial import polynomial
from scipy.linalg import lstsq, LinAlgError
from sarpy.compliance import SarpyError
logger = logging.getLogger(__name__)
class SarpyRatPolyError(SarpyError):
"""A custom exception class for rational polynomial fitting errors."""
#################
# helper functions
def _get_num_variables(coeff_list):
"""
Determine the number of variables by inspection of the coefficient list
Parameters
----------
coeff_list : Sequence
Returns
-------
int
"""
variables = None
for entry in coeff_list:
if isinstance(entry, int):
if variables is None:
variables = 1
elif variables != 1:
raise ValueError('Entry order mismatch')
else:
if variables is None:
variables = len(entry)
elif variables != len(entry):
raise ValueError('Entry order mismatch')
if variables is None:
raise ValueError('Unable to determine the number of variables')
return variables
def _map_list_to_poly_matrix(coeffs, coeff_list):
"""
Maps the coefficients and coefficient listing to corresponding
numpy polynomial coefficient matrix.
Parameters
----------
coeffs : Sequence
coeff_list : Sequence
Returns
-------
coefficient_array : numpy.ndarray
"""
variables = _get_num_variables(coeff_list)
matrix_shape = []
for i in range(variables):
matrix_shape.append(max(entry[i] for entry in coeff_list)+1)
coefficient_array = numpy.zeros(tuple(matrix_shape), dtype='float64')
for i, entry in enumerate(coeff_list):
coefficient_array[entry] = coeffs[i]
return coefficient_array
def get_default_coefficient_ordering(variables, order):
"""
Gets a sensible coefficient ordering of a polynomial of given number of
variables and order.
Parameters
----------
variables : int
order : int
Returns
-------
coefficient_list : List[tuple]
List of the form `[(exponent 0, exponent 1, ...)]`, determining the ordering
of monomial terms in the associated multivariable polynomial.
"""
variables = int(variables)
order = int(order)
if variables < 1:
raise ValueError('variables must be at least 1')
if order < 1:
raise ValueError('order must be at least 1')
shape_details = tuple([order + 1 for _ in range(variables)])
coefficient_list = []
for index in numpy.ndindex(shape_details):
total_exponent = sum(index)
if total_exponent <= order:
coefficient_list.append(index)
return coefficient_list
###################
# base rational polynomial fitting functions
def rational_poly_fit_1d(x, data, coeff_list, cond=None):
"""
Fits a one variable rational polynomial according to the input coefficient
listing order.
Parameters
----------
x : numpy.ndarray
data : numpy.ndarray
coeff_list : List
cond : None|float
Passed through to :func:`scipy.linalg.lstsq`.
"""
if coeff_list[0] not in [0, (0, )]:
raise ValueError(
'The first entry of coeff_list is required to be the constant term `0`')
if not (x.size == data.size):
raise ValueError('Size mismatch among data entries')
x = x.flatten()
data = data.flatten()
# Enforcing that the denominator has constant term 1,
# P(x)/(1 + Q(x)) = d ->
# P(x) - d*Q(x) = d
# This can be formulated as a strictly linear problem A*t = d
A = numpy.empty((x.size, 2*len(coeff_list) - 1), dtype=numpy.float64)
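    # columns [0, n) hold the numerator monomials evaluated at x; columns [n, 2n-1)
    # hold -data * (each non-constant denominator monomial), where n = len(coeff_list)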
for i, entry in enumerate(coeff_list):
if not (isinstance(entry, int) or (isinstance(entry, tuple) and len(entry) == 1 and isinstance(entry[0], int))):
raise TypeError('coeff_list must be a list of integers or length 1 tuples of ints')
if isinstance(entry, tuple):
entry = entry[0]
u = 1
if entry > 0:
u *= numpy.power(x, entry)
A[:, i] = u
if i > 0:
A[:, i+len(coeff_list) - 1] = -u*data
# perform least squares fit
try:
sol, residuals, rank, sing_values = lstsq(A, data, cond=cond)
except LinAlgError as e:
raise SarpyRatPolyError(str(e))
#if len(residuals) != 0:
residuals /= float(x.size)
logger.info(
'Performed rational polynomial fit, got\n\t'
'residuals {}\n\t'
'rank {}\n\t'
'singular values {}'.format(residuals, rank, sing_values))
numerator = numpy.zeros((len(coeff_list), ), dtype='float64')
denominator = numpy.zeros((len(coeff_list), ), dtype='float64')
denominator[0] = 1.0
numerator[:] = sol[:len(coeff_list)]
denominator[1:] = sol[len(coeff_list):]
return numerator, denominator
def rational_poly_fit_2d(x, y, data, coeff_list, cond=None):
"""
Fits a two variable rational polynomial according to the input coefficient
listing order.
Parameters
----------
x : numpy.ndarray
y : numpy.ndarray
data : numpy.ndarray
coeff_list : List
cond : None|float
Passed through to :func:`scipy.linalg.lstsq`.
"""
if coeff_list[0] != (0, 0):
raise ValueError(
'The first entry of coeff_list is required to be the constant term `(0, 0)`')
if not (x.size == y.size and x.size == data.size):
raise ValueError('Size mismatch among data entries')
x = x.flatten()
y = y.flatten()
data = data.flatten()
# Enforcing that the denominator has constant term 1,
# P(x, y)/(1 + Q(x, y)) = d ->
# P(x, y) - d*Q(x, y) = d
# This can be formulated as a strictly linear problem A*t = d
A = numpy.empty((x.size, 2 * len(coeff_list) - 1), dtype=numpy.float64)
for i, entry in enumerate(coeff_list):
if len(entry) != 2:
raise TypeError('coeff_list must be a list of tuples of length 2')
u = 1
if entry[0] > 0:
u *= numpy.power(x, entry[0])
if entry[1] > 0:
u *= numpy.power(y, entry[1])
A[:, i] = u
if i > 0:
A[:, i + len(coeff_list) - 1] = -u*data
# perform least squares fit
try:
sol, residuals, rank, sing_values = lstsq(A, data, cond=cond)
except LinAlgError as e:
raise SarpyRatPolyError(str(e))
# if len(residuals) != 0:
residuals /= float(x.size)
logger.info(
'Performed rational polynomial fit, got\n\t'
'residuals {}\n\t'
'rank {}\n\t'
'singular values {}'.format(residuals, rank, sing_values))
numerator = numpy.zeros((len(coeff_list),), dtype='float64')
denominator = numpy.zeros((len(coeff_list),), dtype='float64')
denominator[0] = 1.0
numerator[:] = sol[:len(coeff_list)]
denominator[1:] = sol[len(coeff_list):]
return numerator, denominator
def rational_poly_fit_3d(x, y, z, data, coeff_list, cond=None):
"""
Fits a three variable rational polynomial according to the input coefficient
listing order.
Parameters
----------
x : numpy.ndarray
y : numpy.ndarray
z : numpy.ndarray
data : numpy.ndarray
coeff_list : List
cond : None|float
Passed through to :func:`scipy.linalg.lstsq`.
"""
if coeff_list[0] != (0, 0, 0):
raise ValueError(
'The first entry of coeff_list is required to be the constant term `(0, 0, 0)`')
if not (x.size == y.size and x.size == z.size and x.size == data.size):
raise ValueError('Size mismatch among data entries')
x = x.flatten()
y = y.flatten()
z = z.flatten()
data = data.flatten()
# Enforcing that the denominator has constant term 1,
# P(x, y, z)/(1 + Q(x, y, z)) = d ->
# P(x, y, z) - d*Q(x, y, z) = d
# This can be formulated as a strictly linear problem A*t = d
A = numpy.empty((x.size, 2*len(coeff_list) - 1), dtype=numpy.float64)
for i, entry in enumerate(coeff_list):
if len(entry) != 3:
raise TypeError('coeff_list must be a list of tuples of length 3')
u = 1
if entry[0] > 0:
u *= numpy.power(x, entry[0])
if entry[1] > 0:
u *= numpy.power(y, entry[1])
if entry[2] > 0:
u *= numpy.power(z, entry[2])
A[:, i] = u
if i > 0:
A[:, i + len(coeff_list) - 1] = -u*data
# perform least squares fit
try:
sol, residuals, rank, sing_values = lstsq(A, data, cond=cond)
except LinAlgError as e:
raise SarpyRatPolyError(str(e))
#if len(residuals) != 0:
residuals /= float(x.size)
logger.info(
'Performed rational polynomial fit, got\n\t'
'residuals {}\n\t'
'rank {}\n\t'
'singular values {}'.format(residuals, rank, sing_values))
numerator = numpy.zeros((len(coeff_list),), dtype='float64')
denominator = numpy.zeros((len(coeff_list),), dtype='float64')
denominator[0] = 1.0
numerator[:] = sol[:len(coeff_list)]
denominator[1:] = sol[len(coeff_list):]
return numerator, denominator
####################
# rational polynomial definition
class RationalPolynomial(object):
r"""
A basic rational polynomial implementation. This assumes the data model
`input_data -> output_data` via the relation
.. math::
X = (x, y, ...) & = (input\_data - input\_offset)/input\_scale \\
(output\_data - output\_offset)/output\_scale & = Data = numerator(X)/denominator(X) \\
output\_data & = (numerator(X)/denominator(X))*output_scale + output\_offset
This object is callable, and acts as the evaluation function after construction.
That is, suppose we have
.. code::
rational_poly = RationalPolynomial(...) # suppose constructed as 2 variables
output_0 = rational_poly([x, y]) # pass in the two variables as a single array
output_1 = rational_poly(x, y) # pass in the two variables individually
# output_0 and output_1 should be identical
output_fail = rational_poly(x, y, z)
# this raises an exception for mismatch with the number of variables
"""
__slots__ = (
'_numerator', '_denominator', '_coeff_list', '_variables', '_input_offsets', '_input_scales',
'_output_offset', '_output_scale', '_numerator_array', '_denominator_array')
def __init__(self, numerator, denominator, coeff_list, input_offsets, input_scales, output_offset, output_scale):
"""
Parameters
----------
numerator : Sequence|numpy.ndarray
denominator : Sequence|numpy.ndarray
coeff_list : Sequence
input_offsets : Sequence
input_scales : Sequence
output_offset : float
output_scale : float
"""
self._coeff_list = coeff_list
self._variables = _get_num_variables(coeff_list)
if self._variables not in [1, 2, 3]:
raise ValueError('Functionality allows only 1, 2, or 3 variables.')
if len(numerator) != len(self._coeff_list):
raise ValueError('numerator must be the same length as coeff_list')
self._numerator = numerator
if len(denominator) != len(self._coeff_list):
raise ValueError('denominator must be the same length as coeff_list')
self._denominator = denominator
if len(input_offsets) != self._variables:
raise ValueError('The input_offsets must be the same length as the number of variables')
self._input_offsets = input_offsets
if len(input_scales) != self._variables:
raise ValueError('The input_scale must be the same length as the number of variables')
self._input_scales = input_scales
self._output_offset = float(output_offset)
self._output_scale = float(output_scale)
self._numerator_array = _map_list_to_poly_matrix(numerator, coeff_list)
self._denominator_array = _map_list_to_poly_matrix(denominator, coeff_list)
@property
def variables(self):
"""
The number of independent variables.
Returns
-------
int
"""
return self._variables
@property
def coefficient_list(self):
"""
The coefficient list.
Returns
-------
Sequence
"""
return self._coeff_list
@property
def numerator(self):
"""
The numerator coefficients.
Returns
-------
Sequence
"""
return self._numerator
@property
def denominator(self):
"""
The denominator coefficients.
Returns
-------
Sequence
"""
return self._denominator
def __call__(self, *input_variables):
def ensure_the_type(data):
if isinstance(data, (numpy.number, int, float, numpy.ndarray)):
return data
else:
return numpy.array(data)
if len(input_variables) not in [1, self.variables]:
raise ValueError('Got an unexpected number of input arguments')
if len(input_variables) == 1:
separate = False
inp_vars = ensure_the_type(input_variables[0])
else:
separate = True
inp_vars = [ensure_the_type(entry) for entry in input_variables]
# todo: should we evaluate the viability of the input?
if self.variables == 1:
x = (inp_vars - self._input_offsets[0])/self._input_scales[0]
value = polynomial.polyval(x, self._numerator_array) / \
polynomial.polyval(x, self._denominator_array)
elif self.variables == 2:
if separate:
x = (inp_vars[0] - self._input_offsets[0])/self._input_scales[0]
y = (inp_vars[1] - self._input_offsets[1])/self._input_scales[1]
else:
if inp_vars.shape[-1] != 2:
raise ValueError(
'Final dimension of input data ({}) must match the number '
'of variables ({}).'.format(inp_vars.shape, self.variables))
x = (inp_vars[..., 0] - self._input_offsets[0])/self._input_scales[0]
y = (inp_vars[..., 1] - self._input_offsets[1])/self._input_scales[1]
value = polynomial.polyval2d(x, y, self._numerator_array) / \
polynomial.polyval2d(x, y, self._denominator_array)
elif self.variables == 3:
if separate:
x = (inp_vars[0] - self._input_offsets[0])/self._input_scales[0]
y = (inp_vars[1] - self._input_offsets[1])/self._input_scales[1]
z = (inp_vars[2] - self._input_offsets[2])/self._input_scales[2]
else:
if inp_vars.shape[-1] != 3:
raise ValueError(
'Final dimension of input data ({}) must match the number '
'of variables ({}).'.format(inp_vars.shape, self.variables))
x = (inp_vars[..., 0] - self._input_offsets[0])/self._input_scales[0]
y = (inp_vars[..., 1] - self._input_offsets[1])/self._input_scales[1]
z = (inp_vars[..., 2] - self._input_offsets[2]) / self._input_scales[2]
value = polynomial.polyval3d(x, y, z, self._numerator_array) / \
polynomial.polyval3d(x, y, z, self._denominator_array)
else:
raise ValueError('More than 3 variables is unsupported')
return value*self._output_scale + self._output_offset
def _get_scale_and_offset(array):
# type: (numpy.ndarray) -> (float, float)
min_value = numpy.min(array)
max_value = numpy.max(array)
scale_value = 0.5*(max_value - min_value)
offset_value = 0.5*(max_value + min_value)
    return scale_value, offset_value  # order matches the function name and the callers' unpacking
def get_rational_poly_1d(x, data, coeff_list=None, order=None, cond=None):
"""
Gets the RationalPolynomial instance that comes from fitting the provided data.
Parameters
----------
x : numpy.ndarray
data : numpy.ndarray
coeff_list : None|Sequence
order : None|int
cond : None|float
Passed through to :func:`scipy.linalg.lstsq`.
"""
if (coeff_list is None and order is None) or \
(coeff_list is not None and order is not None):
        raise ValueError('Exactly one of coeff_list and order must be provided.')
if order is not None:
coeff_list = get_default_coefficient_ordering(1, int(order))
if _get_num_variables(coeff_list) != 1:
raise ValueError('The number of variables defined by the coefficient list must be 1.')
scale_x, offset_x = _get_scale_and_offset(x)
scale_data, offset_data = _get_scale_and_offset(data)
numerator, denominator = rational_poly_fit_1d(
(x-offset_x)/scale_x,
(data-offset_data)/scale_data, coeff_list, cond=cond)
return RationalPolynomial(
numerator, denominator, coeff_list,
(offset_x, ), (scale_x, ),
offset_data, scale_data)
def get_rational_poly_2d(x, y, data, coeff_list=None, order=None, cond=None):
"""
Gets the RationalPolynomial instance that comes from fitting the provided data.
Parameters
----------
x : numpy.ndarray
y : numpy.ndarray
data : numpy.ndarray
coeff_list : None|Sequence
order : None|int
cond : None|float
Passed through to :func:`scipy.linalg.lstsq`.
"""
if (coeff_list is None and order is None) or \
(coeff_list is not None and order is not None):
        raise ValueError('Exactly one of coeff_list and order must be provided.')
if order is not None:
coeff_list = get_default_coefficient_ordering(2, int(order))
if _get_num_variables(coeff_list) != 2:
raise ValueError('The number of variables defined by the coefficient list must be 2.')
scale_x, offset_x = _get_scale_and_offset(x)
scale_y, offset_y = _get_scale_and_offset(y)
scale_data, offset_data = _get_scale_and_offset(data)
numerator, denominator = rational_poly_fit_2d(
(x-offset_x)/scale_x, (y-offset_y)/scale_y,
(data-offset_data)/scale_data, coeff_list, cond=cond)
return RationalPolynomial(
numerator, denominator, coeff_list,
(offset_x, offset_y), (scale_x, scale_y),
offset_data, scale_data)
def get_rational_poly_3d(x, y, z, data, coeff_list=None, order=None, cond=None):
"""
Gets the RationalPolynomial instance that comes from fitting the provided data.
Parameters
----------
x : numpy.ndarray
y : numpy.ndarray
z : numpy.ndarray
data : numpy.ndarray
coeff_list : None|Sequence
order : None|int
cond : None|float
Passed through to :func:`scipy.linalg.lstsq`.
"""
if (coeff_list is None and order is None) or \
(coeff_list is not None and order is not None):
        raise ValueError('Exactly one of coeff_list and order must be provided.')
if order is not None:
coeff_list = get_default_coefficient_ordering(3, int(order))
if _get_num_variables(coeff_list) != 3:
raise ValueError('The number of variables defined by the coefficient list must be 3.')
scale_x, offset_x = _get_scale_and_offset(x)
scale_y, offset_y = _get_scale_and_offset(y)
scale_z, offset_z = _get_scale_and_offset(z)
scale_data, offset_data = _get_scale_and_offset(data)
numerator, denominator = rational_poly_fit_3d(
(x-offset_x)/scale_x, (y-offset_y)/scale_y, (z-offset_z)/scale_z,
(data-offset_data)/scale_data, coeff_list, cond=cond)
return RationalPolynomial(
numerator, denominator, coeff_list,
(offset_x, offset_y, offset_z), (scale_x, scale_y, scale_z),
offset_data, scale_data)
####################
# collective rational polynomial function
class CombinedRationalPolynomial(object):
"""
Assemble a collection of RationalPolynomial objects with the same number of variables
into a single multi-variable output object.
"""
__slots__ = ('_collection', )
def __init__(self, *collection):
if len(collection) == 1 and isinstance(collection[0], (list, tuple)):
collection = collection[0]
if len(collection) < 2:
raise ValueError('This requires more than a single input')
coll = []
variables = None
for entry in collection:
if not isinstance(entry, RationalPolynomial):
raise TypeError(
'Every input must be an instance of RationalPolynomial,\n\t'
'got type `{}`'.format(type(entry)))
if variables is None:
variables = entry.variables
elif variables != entry.variables:
raise TypeError(
                    'Every input must be an instance of RationalPolynomial with\n\t'
                    'the same number of variables, got `{}` and `{}` variables'.format(variables, entry.variables))
coll.append(entry)
self._collection = tuple(coll)
def __call__(self, *args, combine=True):
out = tuple([entry(*args) for entry in self._collection])
if combine:
return numpy.stack(out, axis=-1)
else:
return out
|
[
"numpy.stack",
"numpy.ndindex",
"numpy.power",
"numpy.polynomial.polynomial.polyval2d",
"numpy.polynomial.polynomial.polyval3d",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.polynomial.polynomial.polyval",
"scipy.linalg.lstsq",
"logging.getLogger"
] |
[((308, 335), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (325, 335), False, 'import logging\n'), ((2705, 2733), 'numpy.ndindex', 'numpy.ndindex', (['shape_details'], {}), '(shape_details)\n', (2718, 2733), False, 'import numpy\n'), ((15941, 15957), 'numpy.min', 'numpy.min', (['array'], {}), '(array)\n', (15950, 15957), False, 'import numpy\n'), ((15974, 15990), 'numpy.max', 'numpy.max', (['array'], {}), '(array)\n', (15983, 15990), False, 'import numpy\n'), ((4421, 4446), 'scipy.linalg.lstsq', 'lstsq', (['A', 'data'], {'cond': 'cond'}), '(A, data, cond=cond)\n', (4426, 4446), False, 'from scipy.linalg import lstsq, LinAlgError\n'), ((6495, 6520), 'scipy.linalg.lstsq', 'lstsq', (['A', 'data'], {'cond': 'cond'}), '(A, data, cond=cond)\n', (6500, 6520), False, 'from scipy.linalg import lstsq, LinAlgError\n'), ((8719, 8744), 'scipy.linalg.lstsq', 'lstsq', (['A', 'data'], {'cond': 'cond'}), '(A, data, cond=cond)\n', (8724, 8744), False, 'from scipy.linalg import lstsq, LinAlgError\n'), ((4225, 4246), 'numpy.power', 'numpy.power', (['x', 'entry'], {}), '(x, entry)\n', (4236, 4246), False, 'import numpy\n'), ((6227, 6251), 'numpy.power', 'numpy.power', (['x', 'entry[0]'], {}), '(x, entry[0])\n', (6238, 6251), False, 'import numpy\n'), ((6294, 6318), 'numpy.power', 'numpy.power', (['y', 'entry[1]'], {}), '(y, entry[1])\n', (6305, 6318), False, 'import numpy\n'), ((8384, 8408), 'numpy.power', 'numpy.power', (['x', 'entry[0]'], {}), '(x, entry[0])\n', (8395, 8408), False, 'import numpy\n'), ((8451, 8475), 'numpy.power', 'numpy.power', (['y', 'entry[1]'], {}), '(y, entry[1])\n', (8462, 8475), False, 'import numpy\n'), ((8518, 8542), 'numpy.power', 'numpy.power', (['z', 'entry[2]'], {}), '(z, entry[2])\n', (8529, 8542), False, 'import numpy\n'), ((21516, 21541), 'numpy.stack', 'numpy.stack', (['out'], {'axis': '(-1)'}), '(out, axis=-1)\n', (21527, 21541), False, 'import numpy\n'), ((13213, 13230), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (13224, 13230), False, 'import numpy\n'), ((13803, 13847), 'numpy.polynomial.polynomial.polyval', 'polynomial.polyval', (['x', 'self._numerator_array'], {}), '(x, self._numerator_array)\n', (13821, 13847), False, 'from numpy.polynomial import polynomial\n'), ((13868, 13914), 'numpy.polynomial.polynomial.polyval', 'polynomial.polyval', (['x', 'self._denominator_array'], {}), '(x, self._denominator_array)\n', (13886, 13914), False, 'from numpy.polynomial import polynomial\n'), ((14597, 14646), 'numpy.polynomial.polynomial.polyval2d', 'polynomial.polyval2d', (['x', 'y', 'self._numerator_array'], {}), '(x, y, self._numerator_array)\n', (14617, 14646), False, 'from numpy.polynomial import polynomial\n'), ((14667, 14718), 'numpy.polynomial.polynomial.polyval2d', 'polynomial.polyval2d', (['x', 'y', 'self._denominator_array'], {}), '(x, y, self._denominator_array)\n', (14687, 14718), False, 'from numpy.polynomial import polynomial\n'), ((15570, 15622), 'numpy.polynomial.polynomial.polyval3d', 'polynomial.polyval3d', (['x', 'y', 'z', 'self._numerator_array'], {}), '(x, y, z, self._numerator_array)\n', (15590, 15622), False, 'from numpy.polynomial import polynomial\n'), ((15643, 15697), 'numpy.polynomial.polynomial.polyval3d', 'polynomial.polyval3d', (['x', 'y', 'z', 'self._denominator_array'], {}), '(x, y, z, self._denominator_array)\n', (15663, 15697), False, 'from numpy.polynomial import polynomial\n')]
|
# Unless explicitly stated otherwise all files in this repository are licensed
# under the 3-clause BSD style license (see LICENSE).
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019 Datadog, Inc.
from typing import Optional, List, Dict
from datadog_sync.utils.base_resource import BaseResource, ResourceConfig
from datadog_sync.utils.custom_client import CustomClient
from datadog_sync.utils.resource_utils import ResourceConnectionError
class ServiceLevelObjectives(BaseResource):
resource_type = "service_level_objectives"
resource_config = ResourceConfig(
resource_connections={"monitors": ["monitor_ids"], "synthetics_tests": []},
base_path="/api/v1/slo",
excluded_attributes=["creator", "id", "created_at", "modified_at"],
)
# Additional ServiceLevelObjectives specific attributes
def get_resources(self, client: CustomClient) -> List[Dict]:
resp = client.get(self.resource_config.base_path).json()
return resp["data"]
def import_resource(self, resource: Dict) -> None:
self.resource_config.source_resources[resource["id"]] = resource
def pre_resource_action_hook(self, _id, resource: Dict) -> None:
pass
def pre_apply_hook(self, resources: Dict[str, Dict]) -> Optional[list]:
pass
def create_resource(self, _id: str, resource: Dict) -> None:
destination_client = self.config.destination_client
resp = destination_client.post(self.resource_config.base_path, resource).json()
self.resource_config.destination_resources[_id] = resp["data"][0]
def update_resource(self, _id: str, resource: Dict) -> None:
destination_client = self.config.destination_client
resp = destination_client.put(
self.resource_config.base_path + f"/{self.resource_config.destination_resources[_id]['id']}", resource
).json()
self.resource_config.destination_resources[_id] = resp["data"][0]
def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> None:
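        # SLO monitor_ids may reference plain monitors or the monitors backing
        # synthetics tests; try a direct monitor lookup first, then fall back
        # to matching a synthetics test ID by suffix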
monitors = self.config.resources["monitors"].resource_config.destination_resources
synthetics_tests = self.config.resources["synthetics_tests"].resource_config.destination_resources
for i, obj in enumerate(r_obj[key]):
_id = str(obj)
# Check if resource exists in monitors
if _id in monitors:
r_obj[key][i] = monitors[_id]["id"]
continue
# Fall back on Synthetics and check
found = False
for k, v in synthetics_tests.items():
if k.endswith(_id):
r_obj[key][i] = v["monitor_id"]
found = True
break
if not found:
raise ResourceConnectionError(resource_to_connect, _id=_id)
|
[
"datadog_sync.utils.resource_utils.ResourceConnectionError",
"datadog_sync.utils.base_resource.ResourceConfig"
] |
[((606, 798), 'datadog_sync.utils.base_resource.ResourceConfig', 'ResourceConfig', ([], {'resource_connections': "{'monitors': ['monitor_ids'], 'synthetics_tests': []}", 'base_path': '"""/api/v1/slo"""', 'excluded_attributes': "['creator', 'id', 'created_at', 'modified_at']"}), "(resource_connections={'monitors': ['monitor_ids'],\n 'synthetics_tests': []}, base_path='/api/v1/slo', excluded_attributes=[\n 'creator', 'id', 'created_at', 'modified_at'])\n", (620, 798), False, 'from datadog_sync.utils.base_resource import BaseResource, ResourceConfig\n'), ((2838, 2891), 'datadog_sync.utils.resource_utils.ResourceConnectionError', 'ResourceConnectionError', (['resource_to_connect'], {'_id': '_id'}), '(resource_to_connect, _id=_id)\n', (2861, 2891), False, 'from datadog_sync.utils.resource_utils import ResourceConnectionError\n')]
|
# encoding: utf-8
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
browser = webdriver.Chrome()
def search():
browser.get('https://www.taobao.com/')
# input = WebDriverWait(browser, 10).until(
# EC.presence_of_element_located((By.CSS_SELECTOR, '#q'))
# )
    # submit = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')))
# input.send_keys('美食')
# submit.click()
browser.find_element_by_id("q").send_keys("美食")
browser.find_element_by_id("q").submit()
browser.find_element_by_class_name("btn-search tb-bg").click()
def main():
search()
if __name__ == '__main__':
main()
|
[
"selenium.webdriver.Chrome"
] |
[((227, 245), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (243, 245), False, 'from selenium import webdriver\n')]
|
import sys
import os
import argparse
from grammar import Grammar
parser = argparse.ArgumentParser()
parser.add_argument("--check_vocab", type=str, default=None,
help="location of vocab file to check entries against")
parser.add_argument("--grammars", type=str, help="directory containing grammars")
args = parser.parse_args()
assert os.path.isdir(args.grammars)
if args.grammars.endswith('/'):
args.grammars = args.grammars[:-1]
grammar = Grammar(args)
for filename in os.listdir(args.grammars):
if filename.endswith('common.avg'):
continue
grammar = Grammar(args)
grammar.read(os.path.join(args.grammars,'common.avg'))
grammar.read(os.path.join(args.grammars,filename))
if not os.path.isdir(args.grammars+'_evalset'):
os.mkdir(args.grammars+'_evalset')
grammar.sen_generate(os.path.join(args.grammars+'_evalset', os.path.basename(filename).split(".")[0]+".txt"))
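# Usage sketch (the script name below is illustrative):
#   python make_evalsets.py --grammars path/to/grammars
# Each grammar file (combined with common.avg) yields generated sentences in
# <grammars>_evalset/<name>.txt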
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.path.basename",
"os.path.isdir",
"grammar.Grammar",
"os.path.join",
"os.listdir"
] |
[((76, 101), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (99, 101), False, 'import argparse\n'), ((356, 384), 'os.path.isdir', 'os.path.isdir', (['args.grammars'], {}), '(args.grammars)\n', (369, 384), False, 'import os\n'), ((467, 480), 'grammar.Grammar', 'Grammar', (['args'], {}), '(args)\n', (474, 480), False, 'from grammar import Grammar\n'), ((498, 523), 'os.listdir', 'os.listdir', (['args.grammars'], {}), '(args.grammars)\n', (508, 523), False, 'import os\n'), ((596, 609), 'grammar.Grammar', 'Grammar', (['args'], {}), '(args)\n', (603, 609), False, 'from grammar import Grammar\n'), ((627, 668), 'os.path.join', 'os.path.join', (['args.grammars', '"""common.avg"""'], {}), "(args.grammars, 'common.avg')\n", (639, 668), False, 'import os\n'), ((686, 723), 'os.path.join', 'os.path.join', (['args.grammars', 'filename'], {}), '(args.grammars, filename)\n', (698, 723), False, 'import os\n'), ((735, 776), 'os.path.isdir', 'os.path.isdir', (["(args.grammars + '_evalset')"], {}), "(args.grammars + '_evalset')\n", (748, 776), False, 'import os\n'), ((784, 820), 'os.mkdir', 'os.mkdir', (["(args.grammars + '_evalset')"], {}), "(args.grammars + '_evalset')\n", (792, 820), False, 'import os\n'), ((883, 909), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (899, 909), False, 'import os\n')]
|
import math
import numpy as np
from download_mnist import load
import operator
import time
# classify using kNN
# x_train = np.load('../x_train.npy')
# y_train = np.load('../y_train.npy')
# x_test = np.load('../x_test.npy')
# y_test = np.load('../y_test.npy')
x_train, y_train, x_test, y_test = load()
x_train = x_train.reshape(60000, 28, 28)
x_test = x_test.reshape(10000, 28, 28)
x_train = x_train.astype(float)
x_test = x_test.astype(float)
# print(y_test[0:10])
def kNNClassify(newInput, dataSet, labels, k):
result=[]
########################
# Input your code here #
########################
    test_len = len(newInput)
    train_len = len(dataSet)
    dist = np.zeros((test_len, train_len))
    for i in range(test_len):
        for j in range(train_len):
            d = np.linalg.norm(dataSet[j] - newInput[i])
            dist[i, j] = d
# print(dist)
# print(labels)
    for i in range(test_len):
        votes = np.zeros(10)
        x = np.argsort(dist[i])[:k]
        # print(x)
        for j in range(len(x)):
            num_label = labels[x[j]]
            # print(num_label)
            votes[num_label] += 1
        # print(votes)
        result.append(np.argmax(votes))
####################
# End of your code #
####################
return result
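# A vectorized alternative to the nested distance loop above (a sketch, not
# wired into kNNClassify; fine for small test sets, memory-heavy for large ones):
#
#   flat_test = newInput.reshape(len(newInput), -1)
#   flat_train = dataSet.reshape(len(dataSet), -1)
#   dist = np.linalg.norm(flat_test[:, None, :] - flat_train[None, :, :], axis=2)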
start_time = time.time()
outputlabels=kNNClassify(x_test[0:20],x_train,y_train,12)
print(outputlabels)
result = y_test[0:20] - outputlabels
result = 1 - np.count_nonzero(result) / len(outputlabels)
print("---classification accuracy for knn on mnist: %s ---" % result)
print("---execution time: %s seconds ---" % (time.time() - start_time))
|
[
"numpy.count_nonzero",
"numpy.argmax",
"download_mnist.load",
"numpy.zeros",
"time.time",
"numpy.argsort",
"numpy.linalg.norm"
] |
[((311, 317), 'download_mnist.load', 'load', ([], {}), '()\n', (315, 317), False, 'from download_mnist import load\n'), ((1385, 1396), 'time.time', 'time.time', ([], {}), '()\n', (1394, 1396), False, 'import time\n'), ((709, 740), 'numpy.zeros', 'np.zeros', (['(test_len, train_len)'], {}), '((test_len, train_len))\n', (717, 740), True, 'import numpy as np\n'), ((989, 1001), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (997, 1001), True, 'import numpy as np\n'), ((1530, 1554), 'numpy.count_nonzero', 'np.count_nonzero', (['result'], {}), '(result)\n', (1546, 1554), True, 'import numpy as np\n'), ((823, 863), 'numpy.linalg.norm', 'np.linalg.norm', (['(dataSet[j] - newInput[i])'], {}), '(dataSet[j] - newInput[i])\n', (837, 863), True, 'import numpy as np\n'), ((1014, 1033), 'numpy.argsort', 'np.argsort', (['dist[i]'], {}), '(dist[i])\n', (1024, 1033), True, 'import numpy as np\n'), ((1247, 1263), 'numpy.argmax', 'np.argmax', (['votes'], {}), '(votes)\n', (1256, 1263), True, 'import numpy as np\n'), ((1692, 1703), 'time.time', 'time.time', ([], {}), '()\n', (1701, 1703), False, 'import time\n')]
|
import datetime
import logging
from django.core.management.base import BaseCommand
from main.models import Announcement
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Deletes announcements for which eol_datetime is older than current datetime.'
def handle(self, **options):
announcements = Announcement.objects.all()
num_deleted = 0
for announcement in list(announcements):
if announcement.eol_datetime < datetime.datetime.now(datetime.timezone.utc):
announcement.delete()
logger.info(f"Deleted announcement {announcement.id}, "
f"EOL was {announcement.eol_datetime}.")
num_deleted += 1
logger.info(f"Deleted {num_deleted} announcements.")
|
[
"datetime.datetime.now",
"main.models.Announcement.objects.all",
"logging.getLogger"
] |
[((131, 158), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (148, 158), False, 'import logging\n'), ((336, 362), 'main.models.Announcement.objects.all', 'Announcement.objects.all', ([], {}), '()\n', (360, 362), False, 'from main.models import Announcement\n'), ((479, 523), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (500, 523), False, 'import datetime\n')]
|
from datetime import datetime, timedelta
import logging
from time import sleep
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from whoare.whoare import WhoAre
from dominios.models import Dominio, PreDominio
from zonas.models import Zona
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = 'Import text files with possible new domains into the database'
def add_arguments(self, parser):
        parser.add_argument('--path', nargs='?', type=str, help='Path of the file with the data (a plain text list)')
parser.add_argument('--priority', nargs='?', type=int, default=50)
def handle(self, *args, **options):
        with open(options['path'], 'r') as f:
            doms = f.read()
dlist = doms.split('\n')
c = 0
skipped = 0
for dominio in dlist:
dominio = dominio.strip().lower()
c += 1
self.stdout.write(self.style.SUCCESS(f"{c} [{skipped}] {dominio}"))
pd, created = PreDominio.objects.get_or_create(dominio=dominio)
            # ID=0 means it already exists as a Dominio
            if not created or pd.id == 0:
                skipped += 1
            pd.priority = options['priority']
            pd.save()
self.stdout.write(self.style.SUCCESS(f"DONE. {c} processed {skipped} skipped"))
|
[
"dominios.models.PreDominio.objects.get_or_create",
"logging.getLogger"
] |
[((332, 359), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (349, 359), False, 'import logging\n'), ((1106, 1155), 'dominios.models.PreDominio.objects.get_or_create', 'PreDominio.objects.get_or_create', ([], {'dominio': 'dominio'}), '(dominio=dominio)\n', (1138, 1155), False, 'from dominios.models import Dominio, PreDominio\n')]
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from crispy_forms.bootstrap import *
from crispy_forms.layout import Layout, Submit, Reset
from django import forms
from .models import CustomDashboard, DashboardTheme, DashboardComponent, ComponentDataSource
class CustomDashboardCreateForm(forms.ModelForm):
class Meta:
model = CustomDashboard
exclude = ['create_date', 'edit_date', 'component_map', 'components']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop('request')
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
self.helper.add_input(Submit('submit', 'Save'))
super(CustomDashboardCreateForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
# Commit is already set to false
obj = super(CustomDashboardCreateForm, self).save(*args, **kwargs)
return obj
class CustomDashboardModalForm(forms.ModelForm):
class Meta:
model = CustomDashboard
exclude = ['create_date', 'edit_date', 'component_map', 'components']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop("request")
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
self.helper.add_input(Submit('submit', 'Save'))
super(CustomDashboardModalForm, self).__init__(*args, **kwargs)
class CustomDashboardMapForm(forms.ModelForm):
class Meta:
model = CustomDashboard
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop("request")
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = False
super(CustomDashboardMapForm, self).__init__(*args, **kwargs)
class CustomDashboardForm(forms.ModelForm):
class Meta:
model = CustomDashboard
exclude = ['create_date', 'edit_date']
# components_offered = forms.ModelChoiceField(queryset=getDashboardComponents.filter(component_type=value))
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop("request")
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
self.helper.form_id = "dashboard"
self.helper.layout = Layout(
HTML("""<br/>"""),
TabHolder(
Tab('Build Your View',
Fieldset("Step 1: View Properties",
HTML("""
<div class='panel panel-default'>
<!-- Default panel contents -->
{% if getCustomDashboard %}
<!-- Table -->
<table class="table">
<tr >
<th>Name</th>
<th>Description</th>
<th>Public?</th>
<th>Theme</th>
<th>Program</th>
<th></th>
</tr>
<tr>
<td>{{ getCustomDashboard.dashboard_name }}</td>
<td>{{ getCustomDashboard.dashboard_description }}</td>
<td> {% if getCustomDashboard.is_public == 1 %} Yes {% else %} No {% endif %}</td>
<td>{{ getCustomDashboard.theme }}</td>
<td>{{ getCustomDashboard.program }}</td>
<td> <a class="dashboards" data-toggle="modal" data-target="#myModal" href='/configurabledashboard/edit/{{pk}}'>Edit</a> </td>
</tr>
</table>
<a class="dashboard_components btn btn-primary" data-target="#add-components" data-toggle="tab">Next Step: Add Components</a>
<a class="dashboards btn btn-link" style='float: right;' href='/configurabledashboard/delete/{{pk}}' data-toggle="modal" data-target="#myModal">Cancel / Return to Menu</a>
{% endif %}
</div>
"""),
),
),
Tab('Add Components',
Fieldset("Step 2: Place Components on Your Page",
HTML("""
<div class='panel panel-default'>
<div class='panel panel-heading'>Layout For Your Page:</div>
<div class='panel panel-body'>
{% if getCustomDashboard.theme == 'test_theme' %}
Layout Image for Test Theme Goes Here<br><br>
{% elif getCustomDashboard.theme %}
Layout Image for your Theme Goes Here: {{getCustomDashboard.theme}}<br><br>
{% endif %}
<div class='panel panel-default'>
<div class='panel panel-body'>
<table class="table">
<tr>
<th>Layout Position</th>
<th>Component Type</th>
<th>Select Existing Component</th>
<th></th>
<th>Add New Component</th>
</tr>
{% for key, value in getDashboardLayoutList.items %}
<tr>
<td> {{key}} </td>
<td> {{value}} </td>
<td> {{getCustomDashboard.componentset}}
<div class="form-group">
<select class="form-control" id="sel1">
{% for component in getDashboardComponents %}
{% if component.component_type == value %}
<option value={{component.id}}> {{component.component_name}} </option>
{% endif %}
{% empty %}
<option value=0> None </option>
{% endfor %}
</select>
</div>
</td>
<td></td>
<td><a class="dashboards" data-toggle="modal" data-target="#myModal" href='/configurabledashboard/component_add/{{getCustomDashboard.id}}/'>New</a></td>
</tr>
{% endfor %}
</table>
<div>
<div class="panel panel-footer">Don't see a component or need to edit an existing component?<br>
<a class="dashboards" data-toggle="modal" data-target="#myModal" href='/configurabledashboard/component/{{pk}}'> View Component Inventory </a></td>
</div>
</div>
</div>
</div>
<div>
<a class="btn btn-primary" data-target="#add-dashboard-data-sources" data-toggle="tab">Next Step: Add Data Sources</a>
</div>
</div>
"""),
),
                    # PrependedAppendedText expects a form field name; the leftover
                    # expression string below would break rendering, so it is disabled:
                    # PrependedAppendedText('forms.ModelChoiceField(queryset=getDashboardComponents.filter(component_type=value)'),
),
Tab('Add Data Sources',
Fieldset("Step 3: Add Data Sources for Components",
HTML("""
<div class='panel panel-default'>
<div class='panel panel-heading'>Assigned Data Sources</div>
<div class='panel panel-body'>Layout Image for your Theme Goes Here<br><br>
<div class='panel panel-default'>
<div class='panel panel-body'>
<table class="table">
<tr>
<th>Position</th>
<th>Component Type</th>
<th>Component Name</th>
<th>Data Type</th>
<th>Data Source Type</th>
<th>Select Data Source(s)</th>
<th></th>
<th>Add New Data Source</th>
</tr>
{% if getComponentOrder.items %}
{% for key, component in getComponentOrder.items %}
<tr>
<td>{{key}}</td>
<td>{% if component.component_type %}
{{component.component_type}}
{% else %} Not Mapped {% endif %} </td>
<td>{% if component.component_name %}
{{component.component_name}}
{% else %} N/A {% endif %} </td>
<td>{% if component.data_reqiored %}
{{component.data_required}}
{% else %} N/A {% endif %} </td>
<td><div class="form-group" style="width: 75%;">
<select class="form-control" id="sel2">
<option value="external"> External </option>
<option value="internal"> Internal </option>
</select>
</div></td>
<td> <div class="form-group">
<select class="form-control" id="sel3">
<option value=0> None </option>
{% for data in getDataSources %}
{% if data.data_type == component.data_required %}
<option value=data.id> {{data.data_name}} </option>
{% endif %}
{% endfor %}
</select>
</div>
</td>
<td></td>
<td><a class="dashboards" data-toggle="modal" data-target="#myModal" href='configurabledashboard/data_add/'> New </a></td>
{% for component in getCustomDashboard.components.all %}
<td>{{component.component_name}}</td>
<td>{{component.data_required}} </td>
<td><a class="dashboards" data-toggle="modal" data-target="#myModal" href='configurabledashboard/component/{{pk}}/'>View</a></td>
<td>{% if component.data_sources %} Yes
{% else %}
No
{% endif %}
</td>
<td> <a class="dashboards" data-toggle="modal" data-target="#myModal" href='configurabledashboard/data_add/'> New </a></td>
{% endfor %}
</tr>
{% endfor %}
{% else %}
<tr>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
<td>*Missing Map*</td>
</tr>
{% endif %}
</table>
<div>
<div class="panel panel-footer">Don't see your data source or need to edit an existing data source?<br>
<a class="dashboards" data-toggle="modal" data-target="#myModal" href='../../custom_dashboard/data/{{pk}}/'> View All Data Sources</a></td>
</div>
</div>
</div>
</div>
<div>
<a class="btn btn-primary" data-target="#assign-data" data-toggle="tab">Next Step: Assign Data Values</a>
</div>
</div>
"""),
),
),
Tab('Assign Data',
Fieldset("Step 4: Assign Data Values",
HTML("""
<div class='panel panel-default'>
<table class="table">
<tr>
<th>Component</th>
<th>Data Type</th>
<th>Data Source</th>
<th>Data Set</th>
</tr>
{% for component in getDashboardComponents %}
<tr>
<td>{{component.component_name}}</td>
{% for map_entry in getAllComponentMaps.component %}
<tr>
<td>{{map_entry.name}}</td>
<td>{{map_entry.source}}</td>
<td>{% if map_entry.name == "title" %}
<input type="text" name="title">
{% else %}
<select>
{% for data_filter in map_entry.source %}
<option value={{data_filter}}>{{data_filter}}</option>
{% endfor %}
</select>
{% endif %}
</td>
</tr>
{% endfor %}
</tr>
{% endfor %}
</table>
<div class="panel-footer">
<a class="btn btn-primary" data-target="#preview-submit" data-toggle="tab">Next Step: Preview & Submit</a>
</div>
</div>
"""),
),
),
Tab('Preview & Submit',
Fieldset("Step 5: Preview & Finalize ",
HTML("""
<div class='panel panel-body'>
{% if getCustomDashboard %}
<a class="btn btn-info" data-toggle="modal" data-target="#myPreviewModal" aria-hidden="true" href="/workflow/custom_dashboard_preview/{{ pk }}">Preview Dashboard</a>
{% else %}
No dashboard to display.
{% endif %}
<p></p><p>To keep this dashboard, select "Save" below. </p>
</div>
<div id="myPreviewModal" class="modal fade" role="dialog">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header"></div>
<div class="modal-body"></div>
<div class="modal-footer">
<button type="button" class="btn btn-primary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
"""),
),
FormActions(
Submit('submit', 'Save', css_class='btn-default'),
Reset('reset', 'Reset', css_class='btn-warning'),
),
),
),
HTML("""<br/>"""),
)
# component_selection = forms.ChoiceField(
# choices=[],
# initial='None',
# required=True,
# )
super(CustomDashboardForm, self).__init__(*args, **kwargs)
#here go the filters and overrides
class CustomDashboardDetailForm(forms.ModelForm):
class Meta:
model = CustomDashboard
exclude = ['create_date', 'edit_date','global_item']
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
# self.helper = FormHelper()
# self.helper.form_class = 'form-horizontal'
# self.helper.label_class = 'col-sm-2'
# self.helper.field_class = 'col-sm-6'
# self.helper.form_error_title = 'Form Errors'
# self.helper.error_text_inline = True
# self.helper.help_text_inline = True
# self.helper.html5_required = True
        super(CustomDashboardDetailForm, self).__init__(*args, **kwargs)
## Dashboard Theme Form Classes
class DashboardThemeCreateForm(forms.ModelForm):
class Meta:
model = DashboardTheme
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = False
super(DashboardThemeCreateForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
# Commit is already set to false
obj = super(DashboardThemeCreateForm, self).save(*args, **kwargs)
return obj
class DashboardThemeForm(forms.ModelForm):
class Meta:
model = DashboardTheme
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop('request')
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = False
self.helper.form_id = "dashboard_theme"
# self.helper.add_input(Submit('submit', 'Save'))
super(DashboardThemeForm, self).__init__(*args, **kwargs)
## --------Dashboard Component Form Classes-------------
class DashboardComponentCreateForm(forms.ModelForm):
class Meta:
model = DashboardComponent
exclude = ['create_date', 'edit_date','data_sources']
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = False
self.helper.form_id = "new-component"
self.helper.layout = Layout(
HTML("""<br/>"""),
'component_name' ,'component_description','is_public','component_type','data_required','data_sources',
)
super(DashboardComponentCreateForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
# Commit is already set to false
obj = super(DashboardComponentCreateForm, self).save(*args, **kwargs)
return obj
class DashboardComponentForm(forms.ModelForm):
class Meta:
model = DashboardComponent
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop("request")
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
self.helper.add_input(Submit('submit', 'Save'))
super(DashboardComponentForm, self).__init__(*args, **kwargs)
class DashboardComponentUpdateForm(forms.ModelForm):
class Meta:
model = DashboardComponent
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
# get the user object from request to check permissions
self.request = kwargs.pop("request")
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = False
super(DashboardComponentUpdateForm, self).__init__(*args, **kwargs)
## --------Data Source Form Classes-------------
class ComponentDataSourceCreateForm(forms.ModelForm):
class Meta:
model = ComponentDataSource
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = False
self.helper.form_id = "data-source"
super(ComponentDataSourceCreateForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
obj = super(ComponentDataSourceCreateForm, self).save(*args, **kwargs)
return obj
class ComponentDataSourceForm(forms.ModelForm):
class Meta:
model = ComponentDataSource
exclude = ['create_date', 'edit_date', 'data_filter_key']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop('request')
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2 col-sm-2'
self.helper.field_class = 'col-md-6 col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
self.helper.form_id = "component_data_source"
self.helper.add_input(Submit('submit', 'Save'))
super(ComponentDataSourceForm, self).__init__(*args, **kwargs)
class ComponentDataSourceUpdateForm(forms.ModelForm):
class Meta:
model = ComponentDataSource
exclude = ['create_date', 'edit_date']
def __init__(self, *args, **kwargs):
#get the user object from request to check permissions
self.request = kwargs.pop("request")
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-6'
self.helper.form_error_title = 'Form Errors'
self.helper.error_text_inline = True
self.helper.help_text_inline = True
self.helper.html5_required = True
self.helper.form_tag = True
self.helper.add_input(Submit('submit', 'Save'))
super(ComponentDataSourceUpdateForm, self).__init__(*args, **kwargs)
|
[
"crispy_forms.layout.Reset",
"crispy_forms.helper.FormHelper",
"crispy_forms.layout.Submit"
] |
[((637, 649), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (647, 649), False, 'from crispy_forms.helper import FormHelper\n'), ((1716, 1728), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (1726, 1728), False, 'from crispy_forms.helper import FormHelper\n'), ((2592, 2604), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (2602, 2604), False, 'from crispy_forms.helper import FormHelper\n'), ((3518, 3530), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (3528, 3530), False, 'from crispy_forms.helper import FormHelper\n'), ((23186, 23198), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (23196, 23198), False, 'from crispy_forms.helper import FormHelper\n'), ((24117, 24129), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (24127, 24129), False, 'from crispy_forms.helper import FormHelper\n'), ((25040, 25052), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (25050, 25052), False, 'from crispy_forms.helper import FormHelper\n'), ((26284, 26296), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (26294, 26296), False, 'from crispy_forms.helper import FormHelper\n'), ((27157, 27169), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (27167, 27169), False, 'from crispy_forms.helper import FormHelper\n'), ((27971, 27983), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (27981, 27983), False, 'from crispy_forms.helper import FormHelper\n'), ((28989, 29001), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (28999, 29001), False, 'from crispy_forms.helper import FormHelper\n'), ((29936, 29948), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (29946, 29948), False, 'from crispy_forms.helper import FormHelper\n'), ((1082, 1106), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', '"""Save"""'], {}), "('submit', 'Save')\n", (1088, 1106), False, 'from crispy_forms.layout import Layout, Submit, Reset\n'), ((2161, 2185), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', '"""Save"""'], {}), "('submit', 'Save')\n", (2167, 2185), False, 'from crispy_forms.layout import Layout, Submit, Reset\n'), ((26729, 26753), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', '"""Save"""'], {}), "('submit', 'Save')\n", (26735, 26753), False, 'from crispy_forms.layout import Layout, Submit, Reset\n'), ((29506, 29530), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', '"""Save"""'], {}), "('submit', 'Save')\n", (29512, 29530), False, 'from crispy_forms.layout import Layout, Submit, Reset\n'), ((30381, 30405), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', '"""Save"""'], {}), "('submit', 'Save')\n", (30387, 30405), False, 'from crispy_forms.layout import Layout, Submit, Reset\n'), ((21719, 21768), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', '"""Save"""'], {'css_class': '"""btn-default"""'}), "('submit', 'Save', css_class='btn-default')\n", (21725, 21768), False, 'from crispy_forms.layout import Layout, Submit, Reset\n'), ((21798, 21846), 'crispy_forms.layout.Reset', 'Reset', (['"""reset"""', '"""Reset"""'], {'css_class': '"""btn-warning"""'}), "('reset', 'Reset', css_class='btn-warning')\n", (21803, 21846), False, 'from crispy_forms.layout import Layout, Submit, Reset\n')]
|
import argparse
import os
import random
import sys
from pathlib import Path
import numpy as np
import toml
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader, DistributedSampler
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", ".."))) # without installation, add /path/to/Audio-ZEN
import audio_zen.loss as loss
from audio_zen.utils import initialize_module
def entry(rank, config, resume, only_validation):
torch.manual_seed(config["meta"]["seed"]) # For both CPU and GPU
np.random.seed(config["meta"]["seed"])
random.seed(config["meta"]["seed"])
torch.cuda.set_device(rank)
# Initialize the process group
    # The environment variables needed to initialize a Torch process group are
    # provided by the launcher, so there is no need to pass ``RANK`` manually.
torch.distributed.init_process_group(backend="nccl")
print(f"{rank + 1} process initialized.")
    # The DistributedSampler splits the dataset into per-process parts.
    # By contrast, with "sampler=None, shuffle=True" every GPU would iterate over the whole dataset.
train_dataset = initialize_module(config["train_dataset"]["path"], args=config["train_dataset"]["args"])
sampler = DistributedSampler(dataset=train_dataset, rank=rank, shuffle=True)
train_dataloader = DataLoader(
dataset=train_dataset,
sampler=sampler,
shuffle=False,
**config["train_dataset"]["dataloader"],
)
valid_dataloader = DataLoader(
dataset=initialize_module(config["validation_dataset"]["path"], args=config["validation_dataset"]["args"]),
num_workers=0,
batch_size=1
)
model = initialize_module(config["model"]["path"], args=config["model"]["args"])
optimizer = torch.optim.Adam(
params=model.parameters(),
lr=config["optimizer"]["lr"],
betas=(config["optimizer"]["beta1"], config["optimizer"]["beta2"])
)
loss_function = getattr(loss, config["loss_function"]["name"])(**config["loss_function"]["args"])
trainer_class = initialize_module(config["trainer"]["path"], initialize=False)
trainer = trainer_class(
dist=dist,
rank=rank,
config=config,
resume=resume,
only_validation=only_validation,
model=model,
loss_function=loss_function,
optimizer=optimizer,
train_dataloader=train_dataloader,
validation_dataloader=valid_dataloader
)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="FullSubNet")
parser.add_argument("-C", "--configuration", required=True, default="recipes/etri_drone1/fullsubnet/train.toml", type=str, help="Configuration (*.toml).")
parser.add_argument("-R", "--resume", action="store_true", help="Resume the experiment from latest checkpoint.")
parser.add_argument("-V", "--only_validation", action="store_true", help="Only run validation, which is used for debugging.")
parser.add_argument("-P", "--preloaded_model_path", type=str, help="Path of the *.pth file of a model.")
args = parser.parse_args()
local_rank = int(os.environ["LOCAL_RANK"])
#local_rank = 0
if args.preloaded_model_path:
assert not args.resume, "The 'resume' conflicts with the 'preloaded_model_path'."
config_path = Path(args.configuration).expanduser().absolute()
configuration = toml.load(config_path.as_posix())
# append the parent dir of the config path to python's context
# /path/to/recipes/dns_interspeech_2020/exp/'
sys.path.append(config_path.parent.as_posix())
configuration["meta"]["experiment_name"], _ = os.path.splitext(os.path.basename(args.configuration))
configuration["meta"]["config_path"] = args.configuration
configuration["meta"]["preloaded_model_path"] = args.preloaded_model_path
entry(local_rank, configuration, args.resume, args.only_validation)
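# This script is meant to be launched with torchrun (or python -m
# torch.distributed.run), which sets the LOCAL_RANK environment variable read
# above. A sketch of an invocation (script name and GPU count illustrative):
#
#   torchrun --nproc_per_node=2 train.py -C recipes/etri_drone1/fullsubnet/train.toml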
|
[
"numpy.random.seed",
"torch.distributed.init_process_group",
"torch.utils.data.DataLoader",
"argparse.ArgumentParser",
"os.path.basename",
"torch.manual_seed",
"torch.utils.data.DistributedSampler",
"pathlib.Path",
"random.seed",
"audio_zen.utils.initialize_module",
"torch.cuda.set_device",
"os.path.join"
] |
[((470, 511), 'torch.manual_seed', 'torch.manual_seed', (["config['meta']['seed']"], {}), "(config['meta']['seed'])\n", (487, 511), False, 'import torch\n'), ((540, 578), 'numpy.random.seed', 'np.random.seed', (["config['meta']['seed']"], {}), "(config['meta']['seed'])\n", (554, 578), True, 'import numpy as np\n'), ((583, 618), 'random.seed', 'random.seed', (["config['meta']['seed']"], {}), "(config['meta']['seed'])\n", (594, 618), False, 'import random\n'), ((623, 650), 'torch.cuda.set_device', 'torch.cuda.set_device', (['rank'], {}), '(rank)\n', (644, 650), False, 'import torch\n'), ((858, 910), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (894, 910), False, 'import torch\n'), ((1178, 1271), 'audio_zen.utils.initialize_module', 'initialize_module', (["config['train_dataset']['path']"], {'args': "config['train_dataset']['args']"}), "(config['train_dataset']['path'], args=config[\n 'train_dataset']['args'])\n", (1195, 1271), False, 'from audio_zen.utils import initialize_module\n'), ((1281, 1347), 'torch.utils.data.DistributedSampler', 'DistributedSampler', ([], {'dataset': 'train_dataset', 'rank': 'rank', 'shuffle': '(True)'}), '(dataset=train_dataset, rank=rank, shuffle=True)\n', (1299, 1347), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((1372, 1483), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'sampler': 'sampler', 'shuffle': '(False)'}), "(dataset=train_dataset, sampler=sampler, shuffle=False, **config[\n 'train_dataset']['dataloader'])\n", (1382, 1483), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((1733, 1805), 'audio_zen.utils.initialize_module', 'initialize_module', (["config['model']['path']"], {'args': "config['model']['args']"}), "(config['model']['path'], args=config['model']['args'])\n", (1750, 1805), False, 'from audio_zen.utils import initialize_module\n'), ((2119, 2181), 'audio_zen.utils.initialize_module', 'initialize_module', (["config['trainer']['path']"], {'initialize': '(False)'}), "(config['trainer']['path'], initialize=False)\n", (2136, 2181), False, 'from audio_zen.utils import initialize_module\n'), ((2583, 2632), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FullSubNet"""'}), "(description='FullSubNet')\n", (2606, 2632), False, 'import argparse\n'), ((247, 287), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '""".."""', '""".."""'], {}), "(__file__, '..', '..', '..')\n", (259, 287), False, 'import os\n'), ((3731, 3767), 'os.path.basename', 'os.path.basename', (['args.configuration'], {}), '(args.configuration)\n', (3747, 3767), False, 'import os\n'), ((1570, 1673), 'audio_zen.utils.initialize_module', 'initialize_module', (["config['validation_dataset']['path']"], {'args': "config['validation_dataset']['args']"}), "(config['validation_dataset']['path'], args=config[\n 'validation_dataset']['args'])\n", (1587, 1673), False, 'from audio_zen.utils import initialize_module\n'), ((3391, 3415), 'pathlib.Path', 'Path', (['args.configuration'], {}), '(args.configuration)\n', (3395, 3415), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Top-level module for librosa"""
import warnings
import re
from .version import version as __version__
from .version import show_versions
# And all the librosa sub-modules
from ._cache import cache
from . import core
from . import beat
from . import decompose
from . import effects
from . import feature
from . import filters
from . import onset
from . import output
from . import segment
from . import sequence
from . import util
# Exporting exception classes at the top level
from .util.exceptions import * # pylint: disable=wildcard-import
# Exporting all core functions is okay here: suppress the import warning
from .core import * # pylint: disable=wildcard-import
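# Re-enable DeprecationWarning for anything raised from within this package;
# the module regex matches librosa itself and all of its sub-modules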
warnings.filterwarnings('always',
category=DeprecationWarning,
module='^{0}'.format(re.escape(__name__)))
|
[
"re.escape"
] |
[((857, 876), 're.escape', 're.escape', (['__name__'], {}), '(__name__)\n', (866, 876), False, 'import re\n')]
|
# -----------------------------------------------------------------------------
# @author:
# <NAME>
# @brief:
# generate the videos into the same directory
# -----------------------------------------------------------------------------
import env_wrapper
import numpy as np
import argparse
import glob
import cv2
import os
# import matplotlib.pyplot as plt
if __name__ == '__main__':
'''
@brief:
            Either render every npy file in a directory, or a single npy file
'''
parser = argparse.ArgumentParser(description="Plot results from a dir")
parser.add_argument(
"-i",
"--file_name",
type=str,
required=True,
help="The directory of the summary file"
)
parser.add_argument(
"-s",
"--size",
type=int,
required=False,
default=480
)
args = parser.parse_args()
# file list
if args.file_name.endswith('.npy'):
candidate_list = [args.file_name]
else:
candidate_list = glob.glob(os.path.join(args.file_name, '*.npy'))
# make the environment
env_name = os.path.abspath(candidate_list[0]).split('/')[-2].split('_20')[0]
args.task = env_name.replace('IM', '') # use the original environment
env, is_deepmind_env = env_wrapper.make_env(
args=args, rand_seed=1, allow_monitor=0
)
for candidate in candidate_list:
# process each environment
data = np.load(candidate)
if os.path.exists(candidate.replace('.npy', '.mp4')):
continue
video = cv2.VideoWriter(
candidate.replace('.npy', '.mp4'),
cv2.VideoWriter_fourcc(*'mp4v'),
40,
(args.size * 2, args.size)
)
env.reset()
for i_frame in range(len(data)):
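            # replay the recorded qpos trajectory: reset_context() lets us
            # write simulator state directly instead of stepping the physics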
with env.env.physics.reset_context():
if 'fish' in args.task:
# set the target position
env.env.physics.named.model.geom_pos['target', 'x'] = \
data[i_frame][-2]
env.env.physics.named.model.geom_pos['target', 'y'] = \
data[i_frame][-1]
env.env.physics.data.qpos[:] = data[i_frame][:-2]
else:
env.env.physics.data.qpos[:] = data[i_frame]
image = np.hstack(
[env.env.physics.render(args.size, args.size, camera_id=0),
env.env.physics.render(args.size, args.size, camera_id=1)]
)
# rgb to bgr
image = image[:, :, [2, 1, 0]]
video.write(image)
video.release()
|
[
"numpy.load",
"os.path.abspath",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"env_wrapper.make_env",
"os.path.join"
] |
[((509, 571), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot results from a dir"""'}), "(description='Plot results from a dir')\n", (532, 571), False, 'import argparse\n'), ((1282, 1343), 'env_wrapper.make_env', 'env_wrapper.make_env', ([], {'args': 'args', 'rand_seed': '(1)', 'allow_monitor': '(0)'}), '(args=args, rand_seed=1, allow_monitor=0)\n', (1302, 1343), False, 'import env_wrapper\n'), ((1446, 1464), 'numpy.load', 'np.load', (['candidate'], {}), '(candidate)\n', (1453, 1464), True, 'import numpy as np\n'), ((1032, 1069), 'os.path.join', 'os.path.join', (['args.file_name', '"""*.npy"""'], {}), "(args.file_name, '*.npy')\n", (1044, 1069), False, 'import os\n'), ((1641, 1672), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (1663, 1672), False, 'import cv2\n'), ((1114, 1148), 'os.path.abspath', 'os.path.abspath', (['candidate_list[0]'], {}), '(candidate_list[0])\n', (1129, 1148), False, 'import os\n')]
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from ..overridable.abstract_models import AbstractArticle as BaseArticle
class AbstractArticle(BaseArticle):
    extract = models.TextField(blank=True)
class Meta:
abstract = True
|
[
"django.db.models.TextField"
] |
[((221, 249), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (237, 249), False, 'from django.db import models\n')]
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Currency'
db.create_table(u'currencies_currency', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=3)),
('name', self.gf('django.db.models.fields.CharField')(max_length=35)),
('symbol', self.gf('django.db.models.fields.CharField')(max_length=4, blank=True)),
('factor', self.gf('django.db.models.fields.DecimalField')(default=1.0, max_digits=30, decimal_places=10)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_base', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_default', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'currencies', ['Currency'])
def backwards(self, orm):
# Deleting model 'Currency'
db.delete_table(u'currencies_currency')
models = {
u'currencies.currency': {
'Meta': {'ordering': "['name']", 'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'factor': ('django.db.models.fields.DecimalField', [], {'default': '1.0', 'max_digits': '30', 'decimal_places': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_base': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
}
}
complete_apps = ['currencies']
|
[
"south.db.db.delete_table",
"south.db.db.send_create_signal"
] |
[((1071, 1121), 'south.db.db.send_create_signal', 'db.send_create_signal', (['u"""currencies"""', "['Currency']"], {}), "(u'currencies', ['Currency'])\n", (1092, 1121), False, 'from south.db import db\n'), ((1198, 1237), 'south.db.db.delete_table', 'db.delete_table', (['u"""currencies_currency"""'], {}), "(u'currencies_currency')\n", (1213, 1237), False, 'from south.db import db\n')]
|
import pkg_resources
import inspect
from cklib.logging import log
from typing import List, Optional
from cklib.args import ArgumentParser
from cklib.baseplugin import BasePlugin, BaseCliPlugin, PluginType
plugins = {}
initialized = False
class PluginLoader:
"""Cloudkeeper Plugin Loader"""
def __init__(self, plugin_type: Optional[PluginType] = None) -> None:
# self.__plugins is a dict with key PluginType and value List
# The List will hold all the Plugins of a PluginType
# Current PluginTypes are COLLECTOR, CLI and PERSISTENT. So the Dict could look
# something like this:
# {
# PluginType.COLLECTOR: [AWSPlugin, GCPPlugin, AzurePlugin],
# PluginType.CLI: [CliDebugPlugin]
# PluginType.PERSISTENT: [SlackNotificationPlugin, VolumeCleanupPlugin]
# }
global plugins
if plugin_type is not None:
log.debug(f"Only loading plugins of type {plugin_type}")
plugins[plugin_type] = []
else:
for plugin_type in PluginType:
if plugin_type not in plugins:
log.debug(f"Loading plugins of type {plugin_type}")
plugins[plugin_type] = []
def find_plugins(self) -> None:
"""Finds Cloudkeeper Plugins
Cloudkeeper Plugins have an entry point cloudkeeper.plugins.
Any package resource with an entry point of that name will be handed to
        add_plugin() which validates that the package resource is a subclass of
BasePlugin.
"""
global initialized
log.debug("Finding plugins")
for entry_point in pkg_resources.iter_entry_points("cloudkeeper.plugins"):
plugin = entry_point.load()
self.add_plugin(plugin)
initialized = True
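    # A plugin package registers itself through a "cloudkeeper.plugins" entry
    # point in its packaging metadata. A minimal sketch (package, module and
    # class names below are illustrative, not taken from this codebase):
    #
    #   setup(
    #       name="cloudkeeper-plugin-example",
    #       entry_points={
    #           "cloudkeeper.plugins": [
    #               "example = cloudkeeper_plugin_example:ExamplePlugin",
    #           ]
    #       },
    #   )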
def add_plugin(self, plugin) -> bool:
"""Adds a Plugin class to the list of Plugins"""
global plugins
if (
inspect.isclass(plugin)
and not inspect.isabstract(plugin)
and issubclass(plugin, (BasePlugin, BaseCliPlugin))
and plugin.plugin_type in plugins
):
if plugin.plugin_type == PluginType.COLLECTOR:
if (
ArgumentParser.args.collector
and plugin.cloud not in ArgumentParser.args.collector
):
return False
log.debug(f"Found plugin {plugin} ({plugin.plugin_type.name})")
if plugin not in plugins[plugin.plugin_type]:
plugins[plugin.plugin_type].append(plugin)
return True
def plugins(self, plugin_type: PluginType) -> List:
"""Returns the list of Plugins of a certain PluginType"""
if not initialized:
self.find_plugins()
return plugins.get(plugin_type, [])
@staticmethod
def add_args(arg_parser: ArgumentParser) -> None:
"""Add args to the arg parser
This adds the PluginLoader()'s own args.
"""
arg_parser.add_argument(
"--collector",
help="Collectors to load (default: all)",
dest="collector",
type=str,
default=None,
nargs="+",
)
def add_plugin_args(self, arg_parser: ArgumentParser) -> None:
"""Add args to the arg parser
This adds all the Plugin's args.
"""
if not initialized:
self.find_plugins()
log.debug("Adding plugin args")
for type_plugins in plugins.values(): # iterate over all PluginTypes
for Plugin in type_plugins: # iterate over each Plugin of each PluginType
Plugin.add_args(
arg_parser
) # add that Plugin's args to the ArgumentParser
|
[
"inspect.isclass",
"inspect.isabstract",
"pkg_resources.iter_entry_points",
"cklib.logging.log.debug"
] |
[((1605, 1633), 'cklib.logging.log.debug', 'log.debug', (['"""Finding plugins"""'], {}), "('Finding plugins')\n", (1614, 1633), False, 'from cklib.logging import log\n'), ((1661, 1715), 'pkg_resources.iter_entry_points', 'pkg_resources.iter_entry_points', (['"""cloudkeeper.plugins"""'], {}), "('cloudkeeper.plugins')\n", (1692, 1715), False, 'import pkg_resources\n'), ((3483, 3514), 'cklib.logging.log.debug', 'log.debug', (['"""Adding plugin args"""'], {}), "('Adding plugin args')\n", (3492, 3514), False, 'from cklib.logging import log\n'), ((917, 973), 'cklib.logging.log.debug', 'log.debug', (['f"""Only loading plugins of type {plugin_type}"""'], {}), "(f'Only loading plugins of type {plugin_type}')\n", (926, 973), False, 'from cklib.logging import log\n'), ((1968, 1991), 'inspect.isclass', 'inspect.isclass', (['plugin'], {}), '(plugin)\n', (1983, 1991), False, 'import inspect\n'), ((2429, 2492), 'cklib.logging.log.debug', 'log.debug', (['f"""Found plugin {plugin} ({plugin.plugin_type.name})"""'], {}), "(f'Found plugin {plugin} ({plugin.plugin_type.name})')\n", (2438, 2492), False, 'from cklib.logging import log\n'), ((2012, 2038), 'inspect.isabstract', 'inspect.isabstract', (['plugin'], {}), '(plugin)\n', (2030, 2038), False, 'import inspect\n'), ((1136, 1187), 'cklib.logging.log.debug', 'log.debug', (['f"""Loading plugins of type {plugin_type}"""'], {}), "(f'Loading plugins of type {plugin_type}')\n", (1145, 1187), False, 'from cklib.logging import log\n')]
|
import copy
import itertools
import re
from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import IterableDataset
from classy.data.data_drivers import ClassySample, DataDriver
from classy.utils.commons import add_noise_to_value, chunks, flatten
from classy.utils.log import get_project_logger
from classy.utils.vocabulary import Vocabulary
logger = get_project_logger(__name__)
def batchify(tensors: List[torch.Tensor], padding_value: int) -> torch.Tensor:
return pad_sequence(tensors, batch_first=True, padding_value=padding_value)
def batchify_matrices(tensors: List[torch.Tensor], padding_value: int) -> torch.Tensor:
x = max([t.shape[0] for t in tensors])
y = max([t.shape[1] for t in tensors])
out_matrix = torch.zeros((len(tensors), x, y))
out_matrix += padding_value
for i, tensor in enumerate(tensors):
out_matrix[i][0 : tensor.shape[0], 0 : tensor.shape[1]] = tensor
return out_matrix
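# A quick illustration of the two padding helpers above (shapes illustrative):
#
#   batchify([torch.ones(2), torch.ones(4)], padding_value=0).shape
#   # -> torch.Size([2, 4])
#   batchify_matrices([torch.ones(2, 3), torch.ones(4, 5)], padding_value=0).shape
#   # -> torch.Size([2, 4, 5])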
class BaseDataset(IterableDataset):
@staticmethod
def requires_vocab() -> bool:
return True
@staticmethod
def fit_vocabulary(samples: Iterator[ClassySample]) -> Vocabulary:
raise NotImplementedError
@classmethod
def adapt_dataset_from(cls, training_dataset: DictConfig, setting: str):
if setting == "validation":
validation_dataset = copy.deepcopy(training_dataset)
validation_dataset["materialize"] = True
validation_dataset["for_inference"] = True
return validation_dataset
elif setting == "prediction":
prediction_dataset = copy.deepcopy(training_dataset)
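            # prediction consumes in-memory samples, so point the Hydra-style
            # _target_ at the from_samples constructor instead of from_file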
prediction_dataset["_target_"] = re.sub(
".from_file$", ".from_samples", prediction_dataset["_target_"]
)
prediction_dataset["min_length"] = -1
prediction_dataset["max_length"] = -1
prediction_dataset["for_inference"] = True
return prediction_dataset
else:
raise ValueError(
f"Setting {setting} not supported. Choose between [validation, prediction] or change config."
)
@classmethod
def from_file(
cls,
path: Union[str, Dict[str, DataDriver]],
data_driver: Optional[DataDriver] = None,
vocabulary: Vocabulary = None,
**kwargs,
) -> "BaseDataset":
dataset_bundle: Dict[str, Any] = (
{path: data_driver} if type(path) == str else path
)
if vocabulary is None and cls.requires_vocab():
# vocabulary fitting here
logger.info("Fitting vocabulary")
vocabulary = cls.fit_vocabulary(
itertools.chain(
*[dd.read_from_path(p) for p, dd in dataset_bundle.items()]
)
)
logger.info("Vocabulary fitting completed")
return cls(
samples_iterator=lambda: itertools.chain(
*[dd.read_from_path(p) for p, dd in dataset_bundle.items()]
),
vocabulary=vocabulary,
**kwargs,
)
@classmethod
def from_samples(
cls,
samples: Iterator[ClassySample],
vocabulary: Vocabulary,
**kwargs,
):
return cls(samples_iterator=lambda: samples, vocabulary=vocabulary, **kwargs)
def __init__(
self,
samples_iterator: Callable[[], Iterator[ClassySample]],
vocabulary: Vocabulary,
fields_batchers: Optional[Dict[str, Union[None, Callable[[list], Any]]]],
for_inference: bool,
batch_size: Optional[int] = None,
tokens_per_batch: Optional[int] = None,
max_batch_size: Optional[int] = None,
batching_fields: Optional[List[str]] = None,
section_size: Optional[int] = None,
prebatch: bool = False,
materialize: bool = False,
drop_last: bool = False,
min_length: int = -1,
max_length: int = -1,
):
super().__init__()
self.samples_iterator = samples_iterator
self.vocabulary = vocabulary
self.fields_batcher = fields_batchers
self.prebatch, self.section_size = prebatch, section_size
self.materialize = materialize
self.drop_last = drop_last
self.min_length, self.max_length = min_length, max_length
self.for_inference = for_inference
self.batch_size = batch_size
self.tokens_per_batch, self.max_batch_size, self.batching_fields = (
tokens_per_batch,
max_batch_size,
batching_fields,
)
assert bool(self.batch_size is not None) or bool(
self.tokens_per_batch is not None
), f"Either batch_size or tokens_per_batch must be provided, but found {batch_size} and {tokens_per_batch}"
if self.batch_size is not None:
if max_batch_size is not None:
logger.warning(
f"max_batch_size has no meaning when not using token batching"
)
else:
assert len(batching_fields) > 0, "At least 1 batching field is required"
if self.tokens_per_batch < self.max_length:
logger.warning(
f"Token batch size {self.tokens_per_batch} < max length {self.max_length}. This might result in batches with only 1 sample that contain more token than the specified token batch size"
)
# used to store the materialized dataset
self._dataset_store = None
if materialize:
logger.warning("Materializing dataset.")
self.materialize_dataset()
def dataset_iterator_func(self):
raise NotImplementedError
def prebatch_elements(self, dataset_elements: List):
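        # sort by (noisy, at training time) length so that similarly sized
        # elements share a batch; the chunk-level shuffle below keeps the
        # overall order randomized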
sorting_fn = (
lambda elem: add_noise_to_value(
sum(len(elem[k]) for k in self.batching_fields), noise_param=0.1
)
if not self.for_inference
else sum(len(elem[k]) for k in self.batching_fields)
)
dataset_elements = sorted(dataset_elements, key=sorting_fn)
ds = list(chunks(dataset_elements, 512))
np.random.shuffle(ds)
return flatten(ds)
def materialize_dataset(self) -> None:
if self._dataset_store is not None:
logger.info("The dataset is already materialized skipping materialization")
return
logger.info("Starting dataset materialization")
self._dataset_store = list(self.dataset_iterator_func())
logger.info("Materialization completed")
def materialize_batches(
self, dataset_elements: List[Dict[str, Any]]
) -> Generator[Dict[str, Any], None, None]:
if self.prebatch:
dataset_elements = self.prebatch_elements(dataset_elements)
current_batch = []
# function that creates a batch from the 'current_batch' list
def output_batch() -> Dict[str, Any]:
batch_dict = dict()
de_values_by_field = {
fn: [de[fn] for de in current_batch if fn in de]
for fn in self.fields_batcher
}
# in case you provide fields batchers but in the batch there are no elements for that field
de_values_by_field = {
fn: fvs for fn, fvs in de_values_by_field.items() if len(fvs) > 0
}
            # every retained field must contribute the same number of elements
            assert len(set([len(v) for v in de_values_by_field.values()])) == 1
            # todo: maybe we should report to the user about possible fields filtering due to "None" instances
de_values_by_field = {
fn: fvs
for fn, fvs in de_values_by_field.items()
if all([fv is not None for fv in fvs])
}
for field_name, field_values in de_values_by_field.items():
field_batch = (
self.fields_batcher[field_name](field_values)
if self.fields_batcher[field_name] is not None
else field_values
)
batch_dict[field_name] = field_batch
return batch_dict
max_len_discards, min_len_discards = 0, 0
should_token_batch = self.batch_size is None
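        # with token batching the batch size is dynamic: samples are appended
        # until the padded token count would exceed tokens_per_batch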
for de in dataset_elements:
if (
should_token_batch
and self.max_batch_size != -1
and len(current_batch) == self.max_batch_size
) or (not should_token_batch and len(current_batch) == self.batch_size):
yield output_batch()
current_batch = []
# todo support max length (and min length) as dicts
too_long_fields = [
k
for k in de
if self.max_length != -1
and torch.is_tensor(de[k])
and len(de[k]) > self.max_length
]
if len(too_long_fields) > 0:
max_len_discards += 1
continue
too_short_fields = [
k
for k in de
if self.min_length != -1
and torch.is_tensor(de[k])
and len(de[k]) < self.min_length
]
if len(too_short_fields) > 0:
min_len_discards += 1
continue
if should_token_batch:
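                # padded size of the batch if `de` were added: longest sample
                # length times the new number of samples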
de_len = sum(len(de[k]) for k in self.batching_fields)
future_max_len = max(
de_len,
max(
[
sum(len(bde[k]) for k in self.batching_fields)
for bde in current_batch
],
default=0,
),
)
future_tokens_per_batch = future_max_len * (len(current_batch) + 1)
if (
len(current_batch) > 0
and future_tokens_per_batch >= self.tokens_per_batch
):
yield output_batch()
current_batch = []
current_batch.append(de)
if len(current_batch) != 0 and not self.drop_last:
yield output_batch()
if max_len_discards > 0:
if self.for_inference:
logger.warning(
f"WARNING: Inference mode is True but {max_len_discards} samples longer than max length were "
f"found. The {max_len_discards} samples will be DISCARDED. If you are doing some kind of evaluation"
f", this can INVALIDATE results. This might happen if the max length was not set to -1 or if the "
f"sample length exceeds the maximum length supported by the current model."
)
else:
logger.warning(
f"During iteration, {max_len_discards} elements were "
f"discarded since longer than max length {self.max_length}"
)
if min_len_discards > 0:
if self.for_inference:
logger.warning(
f"WARNING: Inference mode is True but {min_len_discards} samples shorter than min length were "
f"found. The {min_len_discards} samples will be DISCARDED. If you are doing some kind of evaluation"
f", this can INVALIDATE results. This might happen if the min length was not set to -1 or if the "
f"sample length is shorter than the minimum length supported by the current model."
)
else:
logger.warning(
f"During iteration, {min_len_discards} elements were "
f"discarded since shorter than min length {self.min_length}"
)
def __iter__(self):
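        # Stream the dataset in sections of `section_size` elements,
        # batching each section independently to bound memory usage.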
dataset_iterator = (
self.dataset_iterator_func()
if self._dataset_store is None
else self._dataset_store
)
current_dataset_elements = []
i = None
for i, dataset_elem in enumerate(dataset_iterator, start=1):
if (
self.section_size is not None
and len(current_dataset_elements) == self.section_size
):
for batch in self.materialize_batches(current_dataset_elements):
yield batch
current_dataset_elements = []
current_dataset_elements.append(dataset_elem)
if i % 50_000 == 0:
logger.info(f"Processed: {i} number of elements")
if len(current_dataset_elements) != 0:
for batch in self.materialize_batches(current_dataset_elements):
yield batch
if i is not None:
logger.info(f"Dataset finished: {i} number of elements processed")
else:
logger.warning("Dataset empty")
|
[
"copy.deepcopy",
"classy.utils.commons.flatten",
"classy.utils.commons.chunks",
"re.sub",
"torch.nn.utils.rnn.pad_sequence",
"torch.is_tensor",
"classy.utils.log.get_project_logger",
"numpy.random.shuffle"
] |
[((514, 542), 'classy.utils.log.get_project_logger', 'get_project_logger', (['__name__'], {}), '(__name__)\n', (532, 542), False, 'from classy.utils.log import get_project_logger\n'), ((635, 703), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['tensors'], {'batch_first': '(True)', 'padding_value': 'padding_value'}), '(tensors, batch_first=True, padding_value=padding_value)\n', (647, 703), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((6329, 6350), 'numpy.random.shuffle', 'np.random.shuffle', (['ds'], {}), '(ds)\n', (6346, 6350), True, 'import numpy as np\n'), ((6366, 6377), 'classy.utils.commons.flatten', 'flatten', (['ds'], {}), '(ds)\n', (6373, 6377), False, 'from classy.utils.commons import add_noise_to_value, chunks, flatten\n'), ((1497, 1528), 'copy.deepcopy', 'copy.deepcopy', (['training_dataset'], {}), '(training_dataset)\n', (1510, 1528), False, 'import copy\n'), ((6290, 6319), 'classy.utils.commons.chunks', 'chunks', (['dataset_elements', '(512)'], {}), '(dataset_elements, 512)\n', (6296, 6319), False, 'from classy.utils.commons import add_noise_to_value, chunks, flatten\n'), ((1746, 1777), 'copy.deepcopy', 'copy.deepcopy', (['training_dataset'], {}), '(training_dataset)\n', (1759, 1777), False, 'import copy\n'), ((1823, 1893), 're.sub', 're.sub', (['""".from_file$"""', '""".from_samples"""', "prediction_dataset['_target_']"], {}), "('.from_file$', '.from_samples', prediction_dataset['_target_'])\n", (1829, 1893), False, 'import re\n'), ((8963, 8985), 'torch.is_tensor', 'torch.is_tensor', (['de[k]'], {}), '(de[k])\n', (8978, 8985), False, 'import torch\n'), ((9294, 9316), 'torch.is_tensor', 'torch.is_tensor', (['de[k]'], {}), '(de[k])\n', (9309, 9316), False, 'import torch\n')]
|
from hadooplib.mapper import MapperBase
from hadooplib.inputformat import KeyValueInput
class TFIDFMapper1(MapperBase):
"""
    keep only the word in the key field;
    move the filename from the key into the value
    (word filename, number) -> (word, filename,number)
    e.g. (dog 1.txt, 1) -> (dog, 1.txt,1)
"""
def __init__(self):
MapperBase.__init__(self)
self.set_inputformat(KeyValueInput)
def map(self, key, value):
"""
extract filename from key and put it into value
@param key: word and filename
@param value: term frequency
"""
word, filename = key.split()
self.outputcollector.collect(word, filename + "," + value)
if __name__ == "__main__":
TFIDFMapper1().call_map()
|
[
"hadooplib.mapper.MapperBase.__init__"
] |
[((358, 383), 'hadooplib.mapper.MapperBase.__init__', 'MapperBase.__init__', (['self'], {}), '(self)\n', (377, 383), False, 'from hadooplib.mapper import MapperBase\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
# In[3]:
def process_kfold(model, X_titanic_df, y_titanic_df, folds=5):
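    # Evaluate `model` with k-fold cross-validation, printing per-fold accuracy.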
kfold = KFold(n_splits=folds)
scores = []
for iter_count , (train_index, test_index) in enumerate(kfold.split(X_titanic_df)):
X_train, X_test = X_titanic_df.values[train_index], X_titanic_df.values[test_index]
y_train, y_test = y_titanic_df.values[train_index], y_titanic_df.values[test_index]
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
scores.append(accuracy)
print("KFold {0} Accuracy: {1:.4f}".format(iter_count, accuracy))
mean_score = np.mean(scores)
print("Average Accuracy: {0:.4f}".format(mean_score))
# In[4]:
def processing(X_train, X_test, y_train, X_titanic_df, y_titanic_df, algorithm='dtc'):
predict_ret = np.empty([1, 1])
if algorithm == 'dtc':
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
predict_ret = dtc.predict(X_test)
# process_kfold(dtc, X_titanic_df, y_titanic_df)
elif algorithm == 'rfc':
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
predict_ret = rfc.predict(X_test)
# process_kfold(rfc, X_titanic_df, y_titanic_df)
    elif algorithm == 'lr':
        lr = LogisticRegression()
lr.fit(X_train, y_train)
predict_ret = lr.predict(X_test)
# process_kfold(lr, X_titanic_df, y_titanic_df)
return predict_ret
# In[ ]:
|
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.empty",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.KFold",
"sklearn.tree.DecisionTreeClassifier",
"numpy.mean"
] |
[((408, 429), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds'}), '(n_splits=folds)\n', (413, 429), False, 'from sklearn.model_selection import KFold\n'), ((986, 1001), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (993, 1001), True, 'import numpy as np\n'), ((1184, 1200), 'numpy.empty', 'np.empty', (['[1, 1]'], {}), '([1, 1])\n', (1192, 1200), True, 'import numpy as np\n'), ((821, 856), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (835, 856), False, 'from sklearn.metrics import accuracy_score\n'), ((1247, 1271), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1269, 1271), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1448, 1472), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1470, 1472), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1647, 1671), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1669, 1671), False, 'from sklearn.ensemble import RandomForestClassifier\n')]
|
from PIL import Image
import pandas as pd
import numpy as np
import time
import os
import random
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from config import CONFIG
def parse_config():
assets_path = os.path.join(currentdir, 'assets')
for layer in CONFIG:
layer_path = os.path.join(assets_path, layer['directory'])
traits = sorted([trait for trait in os.listdir(layer_path) if trait[0] != '.'])
        # If a layer is not required, add a 'None' at the start of its traits list
if not layer['required']:
traits = [None] + traits
if layer['rarity_weights'] is None:
rarities = [1 for x in traits]
elif layer['rarity_weights'] == 'random':
rarities = [random.random() for x in traits]
        elif isinstance(layer['rarity_weights'], list):
            assert len(traits) == len(layer['rarity_weights']), "Make sure you have the correct number of rarity weights"
rarities = layer['rarity_weights']
else:
            raise ValueError("Invalid rarity weights")
rarities = get_weighted_rarities(rarities)
layer['rarity_weights'] = rarities
layer['cum_rarity_weights'] = np.cumsum(rarities)
        print(layer['cum_rarity_weights'])
layer['traits'] = traits
def get_weighted_rarities(arr):
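    # Normalize the raw weights so they sum to 1.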
    return np.array(arr) / sum(arr)
def generate_single_image(filepaths, output_filename=None):
bg = Image.open(os.path.join(currentdir, 'assets', filepaths[0]))
for filepath in filepaths[1:]:
img = Image.open(os.path.join(currentdir, 'assets', filepath))
bg.paste(img, (0,0), img)
if output_filename is not None:
bg.save(output_filename)
else:
if not os.path.exists(os.path.join('output', 'single_images')):
os.makedirs(os.path.join('output', 'single_images'))
bg.save(os.path.join('output', 'single_images', str(int(time.time())) + '.png'))
def get_total_combinations():
total = 1
for layer in CONFIG:
total = total * len(layer['traits'])
return total
def select_index(cum_rarities, rand):
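    # Return the index of the cumulative-weight interval that `rand` falls into.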
cum_rarities = [0] + list(cum_rarities)
for i in range(len(cum_rarities) - 1):
if rand >= cum_rarities[i] and rand <= cum_rarities[i+1]:
return i
return None
def generate_trait_set_from_config():
trait_set = []
trait_paths = []
for layer in CONFIG:
traits, cum_rarities = layer['traits'], layer['cum_rarity_weights']
print(layer['id'],traits, layer['rarity_weights'])
rand_num = random.random()
idx = select_index(cum_rarities, rand_num)
trait_set.append(traits[idx])
if traits[idx] is not None:
trait_path = os.path.join(layer['directory'], traits[idx])
trait_paths.append(trait_path)
return trait_set, trait_paths
def generate_images(edition, count, drop_dup=True):
rarity_table = {}
for layer in CONFIG:
rarity_table[layer['name']] = []
op_path = os.path.join('output', 'edition ' + str(edition), 'images')
zfill_count = len(str(count - 1))
if not os.path.exists(op_path):
os.makedirs(op_path)
for n in range(count):
image_name = str(n).zfill(zfill_count) + '.png'
trait_sets, trait_paths = generate_trait_set_from_config()
generate_single_image(trait_paths, os.path.join(op_path, image_name))
for idx, trait in enumerate(trait_sets):
if trait is not None:
rarity_table[CONFIG[idx]['name']].append(trait[: -1 * len('.png')])
else:
rarity_table[CONFIG[idx]['name']].append('none')
rarity_table = pd.DataFrame(rarity_table).drop_duplicates()
print("生成第 %i 张图片, %i张不同" % (count, rarity_table.shape[0]))
if drop_dup:
img_tb_removed = sorted(list(set(range(count)) - set(rarity_table.index)))
print("移除 %i 张图片..." % (len(img_tb_removed)))
for i in img_tb_removed:
os.remove(os.path.join(op_path, str(i).zfill(zfill_count) + '.png'))
for idx, img in enumerate(sorted(os.listdir(op_path))):
os.rename(os.path.join(op_path, img), os.path.join(op_path, str(idx).zfill(zfill_count) + '.png'))
rarity_table = rarity_table.reset_index()
rarity_table = rarity_table.drop('index', axis=1)
return rarity_table
def main():
print("检查素材...")
parse_config()
tot_comb = get_total_combinations()
print("您可以创建总共%i个不同的NFT" % (tot_comb))
print("您希望创建多少个NFT?输入一个大于0的数字:")
while True:
num_avatars = int(input())
if num_avatars > 0:
break
print("您想把这些NFT命名为:")
edition_name = input()
print("开始生成...")
rt = generate_images(edition_name, num_avatars)
print("保存元数据...")
rt.to_csv(os.path.join('output', 'edition ' + str(edition_name), 'metadata.csv'))
print("生成成功!")
# Run the main function
main()
|
[
"pandas.DataFrame",
"warnings.simplefilter",
"os.makedirs",
"os.path.exists",
"time.time",
"numpy.cumsum",
"random.random",
"numpy.array",
"inspect.currentframe",
"os.path.join",
"os.listdir"
] |
[((216, 278), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (237, 278), False, 'import warnings\n'), ((346, 380), 'os.path.join', 'os.path.join', (['currentdir', '"""assets"""'], {}), "(currentdir, 'assets')\n", (358, 380), False, 'import os\n'), ((427, 472), 'os.path.join', 'os.path.join', (['assets_path', "layer['directory']"], {}), "(assets_path, layer['directory'])\n", (439, 472), False, 'import os\n'), ((1232, 1251), 'numpy.cumsum', 'np.cumsum', (['rarities'], {}), '(rarities)\n', (1241, 1251), True, 'import numpy as np\n'), ((1373, 1386), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (1381, 1386), True, 'import numpy as np\n'), ((1479, 1527), 'os.path.join', 'os.path.join', (['currentdir', '"""assets"""', 'filepaths[0]'], {}), "(currentdir, 'assets', filepaths[0])\n", (1491, 1527), False, 'import os\n'), ((2611, 2626), 'random.random', 'random.random', ([], {}), '()\n', (2624, 2626), False, 'import random\n'), ((3182, 3205), 'os.path.exists', 'os.path.exists', (['op_path'], {}), '(op_path)\n', (3196, 3205), False, 'import os\n'), ((3215, 3235), 'os.makedirs', 'os.makedirs', (['op_path'], {}), '(op_path)\n', (3226, 3235), False, 'import os\n'), ((173, 195), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (193, 195), False, 'import inspect\n'), ((1589, 1633), 'os.path.join', 'os.path.join', (['currentdir', '"""assets"""', 'filepath'], {}), "(currentdir, 'assets', filepath)\n", (1601, 1633), False, 'import os\n'), ((2780, 2825), 'os.path.join', 'os.path.join', (["layer['directory']", 'traits[idx]'], {}), "(layer['directory'], traits[idx])\n", (2792, 2825), False, 'import os\n'), ((3430, 3463), 'os.path.join', 'os.path.join', (['op_path', 'image_name'], {}), '(op_path, image_name)\n', (3442, 3463), False, 'import os\n'), ((3734, 3760), 'pandas.DataFrame', 'pd.DataFrame', (['rarity_table'], {}), '(rarity_table)\n', (3746, 3760), True, 'import pandas as pd\n'), ((1778, 1817), 'os.path.join', 'os.path.join', (['"""output"""', '"""single_images"""'], {}), "('output', 'single_images')\n", (1790, 1817), False, 'import os\n'), ((1844, 1883), 'os.path.join', 'os.path.join', (['"""output"""', '"""single_images"""'], {}), "('output', 'single_images')\n", (1856, 1883), False, 'import os\n'), ((4157, 4176), 'os.listdir', 'os.listdir', (['op_path'], {}), '(op_path)\n', (4167, 4176), False, 'import os\n'), ((4202, 4228), 'os.path.join', 'os.path.join', (['op_path', 'img'], {}), '(op_path, img)\n', (4214, 4228), False, 'import os\n'), ((517, 539), 'os.listdir', 'os.listdir', (['layer_path'], {}), '(layer_path)\n', (527, 539), False, 'import os\n'), ((832, 847), 'random.random', 'random.random', ([], {}), '()\n', (845, 847), False, 'import random\n'), ((1949, 1960), 'time.time', 'time.time', ([], {}), '()\n', (1958, 1960), False, 'import time\n')]
|
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from cradmin_legacy.crinstance import reverse_cradmin_url
from devilry.apps.core.models import Delivery
@login_required
def show_delivery(request, delivery_id):
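    # Look up the delivery and redirect to its details page in the
    # student group cradmin app.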
try:
delivery = Delivery.objects\
.select_related('deadline', 'deadline__assignment_group')\
.get(id=delivery_id)
except Delivery.DoesNotExist as e:
raise Http404()
else:
return HttpResponseRedirect(
reverse_cradmin_url(
instanceid='devilry_student_group',
appname='deliveries',
roleid=delivery.assignment_group.id,
viewname='deliverydetails',
kwargs={'pk': delivery.id}))
|
[
"cradmin_legacy.crinstance.reverse_cradmin_url",
"django.http.Http404",
"devilry.apps.core.models.Delivery.objects.select_related"
] |
[((511, 520), 'django.http.Http404', 'Http404', ([], {}), '()\n', (518, 520), False, 'from django.http import Http404, HttpResponseRedirect\n'), ((580, 760), 'cradmin_legacy.crinstance.reverse_cradmin_url', 'reverse_cradmin_url', ([], {'instanceid': '"""devilry_student_group"""', 'appname': '"""deliveries"""', 'roleid': 'delivery.assignment_group.id', 'viewname': '"""deliverydetails"""', 'kwargs': "{'pk': delivery.id}"}), "(instanceid='devilry_student_group', appname=\n 'deliveries', roleid=delivery.assignment_group.id, viewname=\n 'deliverydetails', kwargs={'pk': delivery.id})\n", (599, 760), False, 'from cradmin_legacy.crinstance import reverse_cradmin_url\n'), ((336, 409), 'devilry.apps.core.models.Delivery.objects.select_related', 'Delivery.objects.select_related', (['"""deadline"""', '"""deadline__assignment_group"""'], {}), "('deadline', 'deadline__assignment_group')\n", (367, 409), False, 'from devilry.apps.core.models import Delivery\n')]
|
from ldpc.utils import FramesManager
from bitstring import Bits
# Frames shouldn't be instantiated on their own, but instead using the manager
fm = FramesManager()
# using the manager to crete a frame
bits = Bits(bytes=b"\x01\x02")
tx_frame = fm.create_frame(bits)
# create a copy of the tx_frame to simulate rx
rx_frame = fm.copy_frame(tx_frame)
# Note that copying copies only the bits; ids are still unique
print(rx_frame.uid != tx_frame.uid)
# two frames can be registered as a tx, rx pair:
fm.register_pair(tx_frame, rx_frame)
|
[
"ldpc.utils.FramesManager",
"bitstring.Bits"
] |
[((149, 164), 'ldpc.utils.FramesManager', 'FramesManager', ([], {}), '()\n', (162, 164), False, 'from ldpc.utils import FramesManager\n'), ((209, 232), 'bitstring.Bits', 'Bits', ([], {'bytes': "b'\\x01\\x02'"}), "(bytes=b'\\x01\\x02')\n", (213, 232), False, 'from bitstring import Bits\n')]
|
from setuptools import find_packages, setup
setup(
name='eurito_funding',
packages=find_packages(),
version='0.1.0',
description='EURITO: Advanced R&I funding analytics pilot',
author='nesta',
license='MIT',
)
|
[
"setuptools.find_packages"
] |
[((92, 107), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (105, 107), False, 'from setuptools import find_packages, setup\n')]
|
import math
# from settings import *
from numba import njit
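# Tuple-based 2D/3D vector helpers, JIT-compiled with numba.
# fastmath=True trades strict IEEE float semantics for speed.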
@njit(fastmath=True)#, cache=True)
def mod_vec3_n(vec, n):
return (vec[0] % n, vec[1] % n, vec[2] % n)
@njit(fastmath=True)#, cache=True)
def length_vec3(vec):
return math.sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
@njit(fastmath=True)#, cache=True)
def length_vec2(vec):
return math.sqrt(vec[0] ** 2 + vec[1] ** 2)
@njit(fastmath=True)#, cache=True)
def sub_vecs3(v1, v2):
return (v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2])
@njit(fastmath=True)#, cache=True)
def sub_vecs2(v1, v2):
return (v1[0] - v2[0], v1[1] - v2[1])
@njit(fastmath=True)#, cache=True)
def sub_vec3_n(v1, n):
return (v1[0] - n, v1[1] - n, v1[2] - n)
@njit(fastmath=True)#, cache=True)
def sub_n_vec3(n, v1):
return (-v1[0] + n, -v1[1] + n, -v1[2] + n)
@njit(fastmath=True)#, cache=True)
def sub_vec2_n(v1, n):
return (v1[0] - n, v1[1] - n)
@njit(fastmath=True)#, cache=True)
def sum_vecs3(v1, v2):
return (v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2])
@njit(fastmath=True)#, cache=True)
def sum_vec2_n(v1, n):
return (v1[0] + n, v1[1] + n)
@njit(fastmath=True)#, cache=True)
def mul_vec3_n(v1, n):
return (v1[0] * n, v1[1] * n, v1[2] * n)
@njit(fastmath=True)#, cache=True)
def mul_vec2_n(v1, n):
return (v1[0] * n, v1[1] * n)
@njit(fastmath=True)#, cache=True)
def div_vec3_n(v1, n):
n = 1 / n
return (v1[0] * n, v1[1] * n, v1[2] * n)
@njit(fastmath=True)#, cache=True)
def div_vecs2(v1, v2):
    return (v1[0] / v2[0], v1[1] / v2[1])
@njit(fastmath=True)#, cache=True)
def dot_vecs3(v1, v2):
return (v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2])
@njit(fastmath=True)#, cache=True)
def abs_vec3(vec):
return (abs(vec[0]), abs(vec[1]), abs(vec[2]))
@njit(fastmath=True)#, cache=True)
def normalize_vec3(vec):
len_vec = 1 / math.sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
return (vec[0] * len_vec, vec[1] * len_vec, vec[2] * len_vec)
@njit(fastmath=True)#, cache=True)
def cross_vecs3(v1, v2):
return (v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0])
@njit(fastmath=True)#, cache=True)
def view_matrix():
cam_pos = (0.0, 2.5, 9.0)
center = (0.0, 0.0, 0.0)
up = (0.0, 1.0, 0.0)
f = normalize_vec3(sub_vecs3(center, cam_pos))
s = normalize_vec3(cross_vecs3(f, up))
u = cross_vecs3(s, f)
s = (s[0], s[1], s[2], 0.0)
u = (u[0], u[1], u[2], 0.0)
f = (f[0], f[1], f[2], 0.0)
return (s, u, f)
@njit(fastmath=True)#, cache=True)
def rotate_y_matrix(ray_dir, angle):
s = math.sin(angle)
c = math.cos(angle)
a0 = (c, 0, -s, 0)
a1 = (0, 1, 0, 0)
a2 = (s, 0, c, 0)
a = (a0, a1, a2)
return mul_matrix_vec3(a, ray_dir)
@njit(fastmath=True)#, cache=True)
def rotate_x_matrix(ray_dir, angle):
s = math.sin(angle)
c = math.cos(angle)
a0 = (1, 0, 0, 0)
a1 = (0, c, -s, 0)
a2 = (0, s, c, 0)
a = (a0, a1, a2)
return mul_matrix_vec3(a, ray_dir)
@njit(fastmath=True)#, cache=True)
def rotate_z_matrix(ray_dir, angle):
s = math.sin(angle)
c = math.cos(angle)
a0 = (c, -s, 0, 0)
a1 = (s, c, 0, 0)
a2 = (0, 0, 1, 0)
a = (a0, a1, a2)
return mul_matrix_vec3(a, ray_dir)
@njit(fastmath=True)#, cache=True)
def translation_matrix(ray_dir, vec):
a0 = (1, 0, 0, vec[0])
a1 = (0, 1, 0, vec[1])
a2 = (0, 0, 1, vec[2])
a = (a0, a1, a2)
return mul_matrix_vec3(a, ray_dir)
@njit(fastmath=True)#, cache=True)
def scale_matrix(ray_dir, n):
a0 = (n, 0, 0, 0)
a1 = (0, n, 0, 0)
a2 = (0, 0, n, 0)
a = (a0, a1, a2)
return mul_matrix_vec3(a, ray_dir)
@njit(fastmath=True)
def mul_matrix_vec3(a, b):
c0 = (a[0][0] * b[0] + a[0][1] * b[1] + a[0][2] * b[2] + a[0][3] * 1)
c1 = (a[1][0] * b[0] + a[1][1] * b[1] + a[1][2] * b[2] + a[1][3] * 1)
c2 = (a[2][0] * b[0] + a[2][1] * b[1] + a[2][2] * b[2] + a[2][3] * 1)
return (c0, c1, c2)
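# NOTE: the rotate_x/y/z helpers below only scale components by sin/cos;
# they are not true rotations. Use the rotate_*_matrix functions above
# when an exact rotation is needed.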
@njit(fastmath=True)#, cache=True)
def rotate_y(vec3, angle):
s = math.sin(angle)
c = math.cos(angle)
return (vec3[0] * s, vec3[1], vec3[2] * c)
@njit(fastmath=True)#, cache=True)
def rotate_x(vec3, angle):
s = math.sin(angle)
c = math.cos(angle)
return (vec3[0], vec3[1] * s, vec3[2] * c)
@njit(fastmath=True)#, cache=True)
def rotate_z(vec3, angle):
s = math.sin(angle)
c = math.cos(angle)
return (vec3[0] * s, vec3[1] * s, vec3[2])
|
[
"math.sin",
"numba.njit",
"math.cos",
"math.sqrt"
] |
[((66, 85), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (70, 89), False, 'from numba import njit\n'), ((180, 199), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (184, 203), False, 'from numba import njit\n'), ((306, 325), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (310, 329), False, 'from numba import njit\n'), ((418, 437), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (422, 441), False, 'from numba import njit\n'), ((540, 559), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (544, 563), False, 'from numba import njit\n'), ((647, 666), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (651, 670), False, 'from numba import njit\n'), ((757, 776), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (761, 780), False, 'from numba import njit\n'), ((870, 889), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (874, 893), False, 'from numba import njit\n'), ((969, 988), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (973, 992), False, 'from numba import njit\n'), ((1091, 1110), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1095, 1114), False, 'from numba import njit\n'), ((1190, 1209), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1194, 1213), False, 'from numba import njit\n'), ((1300, 1319), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1304, 1323), False, 'from numba import njit\n'), ((1399, 1418), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1403, 1422), False, 'from numba import njit\n'), ((1524, 1543), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1528, 1547), False, 'from numba import njit\n'), ((1648, 1667), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1652, 1671), False, 'from numba import njit\n'), ((1772, 1791), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1776, 1795), False, 'from numba import njit\n'), ((1884, 1903), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1888, 1907), False, 'from numba import njit\n'), ((2087, 2106), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (2091, 2110), False, 'from numba import njit\n'), ((2259, 2278), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (2263, 2282), False, 'from numba import njit\n'), ((2648, 2667), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (2652, 2671), False, 'from numba import njit\n'), ((2908, 2927), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (2912, 2931), False, 'from numba import njit\n'), ((3168, 3187), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (3172, 3191), False, 'from numba import njit\n'), ((3428, 3447), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (3432, 3451), False, 'from numba import njit\n'), ((3653, 3672), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (3657, 3676), False, 'from numba import njit\n'), ((3855, 3874), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (3859, 3878), False, 'from numba import njit\n'), ((4159, 4178), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (4163, 4182), False, 'from numba import njit\n'), ((4323, 4342), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (4327, 4346), False, 'from numba import njit\n'), ((4487, 4506), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (4491, 4510), False, 'from numba import njit\n'), ((249, 299), 'math.sqrt', 'math.sqrt', (['(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)'], {}), '(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)\n', (258, 299), False, 'import math\n'), ((375, 411), 'math.sqrt', 'math.sqrt', (['(vec[0] ** 2 + vec[1] ** 2)'], {}), '(vec[0] ** 2 + vec[1] ** 2)\n', (384, 411), False, 'import math\n'), ((2729, 2744), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (2737, 2744), False, 'import math\n'), ((2754, 2769), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (2762, 2769), False, 'import math\n'), ((2989, 3004), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (2997, 3004), False, 'import math\n'), ((3014, 3029), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (3022, 3029), False, 'import math\n'), ((3249, 3264), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (3257, 3264), False, 'import math\n'), ((3274, 3289), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (3282, 3289), False, 'import math\n'), ((4230, 4245), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4238, 4245), False, 'import math\n'), ((4255, 4270), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4263, 4270), False, 'import math\n'), ((4394, 4409), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4402, 4409), False, 'import math\n'), ((4419, 4434), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4427, 4434), False, 'import math\n'), ((4558, 4573), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4566, 4573), False, 'import math\n'), ((4583, 4598), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4591, 4598), False, 'import math\n'), ((1963, 2013), 'math.sqrt', 'math.sqrt', (['(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)'], {}), '(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)\n', (1972, 2013), False, 'import math\n')]
|
import numpy as np
import queue
def stdin_input():
N, Q = map(int, input().split())
a, b = [], []
for i in range(N - 1):
line = input()
a.append(int(line.split()[0]) - 1)
b.append(int(line.split()[1]) - 1)
c, d = [], []
for i in range(Q):
line = input()
c.append(int(line.split()[0]) - 1)
d.append(int(line.split()[1]) - 1)
return N, Q, a, b, c, d
def fixed_input():
input_text = '''9 9
8 9
2 3
4 8
4 5
5 6
3 4
1 9
3 7
7 9
2 5
2 6
4 6
2 4
5 8
7 8
3 6
5 6
'''
lines = input_text.splitlines()
N, Q = map(int, lines[0].split())
a, b = [], []
for i in range(N - 1):
a.append(int(lines[1 + i].split()[0]) - 1)
b.append(int(lines[1 + i].split()[1]) - 1)
c, d = [], []
for i in range(Q):
c.append(int(lines[N + i].split()[0]) - 1)
d.append(int(lines[N + i].split()[1]) - 1)
return N, Q, a, b, c, d
def get_distance_from_0(N, a, b):
#paths = sorted([(min(a[i], b[i]), max(a[i], b[i])) for i in range(N-1)])
# destination = [[] for i in range(N)]
# for i in range(N-1):
# destination[a[i]].append(b[i])
# destination[b[i]].append(a[i])
# dist = np.zeros(N, dtype=int)
# for path in paths:
# dist[path[1]] = dist[path[0]] + 1
# return dist
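    # BFS from town 0; dist[i] is the number of roads on the path from town 0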
dist = [-1 for i in range(N)]
dist[0] = 0
destination = [[] for i in range(N)]
for i in range(N - 1):
destination[a[i]].append(b[i])
destination[b[i]].append(a[i])
q = queue.Queue()
q.put(0)
while not q.empty():
i = q.get()
for j in destination[i]:
if dist[j] >= 0:
continue
q.put(j)
dist[j] = dist[i] + 1
return dist
def is_meet_town(in_obj):
N, Q, a, b, c, d = in_obj
d0 = get_distance_from_0(N, a, b)
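    # In a tree, dist(c, d) is even iff d0[c] and d0[d] share parity;
    # an even distance means the two travellers meet in a town.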
results = []
for i in range(Q):
results.append((d0[c[i]] + d0[d[i]]) % 2 == 0)
return results
def show_result(out_obj):
for result in out_obj:
print("Town" if result else "Road")
#in_obj = fixed_input()
in_obj = stdin_input()
out_obj = is_meet_town(in_obj)
show_result(out_obj)
|
[
"queue.Queue"
] |
[((1519, 1532), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1530, 1532), False, 'import queue\n')]
|
from PyQt5.QtWidgets import QAction, QFileDialog, QMenu, QPushButton
from inselect.lib.user_template import UserTemplate
from inselect.lib.utils import debug_print
from .user_template_choice import user_template_choice
from .utils import load_icon, reveal_path
class UserTemplatePopupButton(QPushButton):
"User template popup button"
FILE_FILTER = 'Inselect user templates (*{0})'.format(
UserTemplate.EXTENSION
)
def __init__(self, parent=None):
super(UserTemplatePopupButton, self).__init__(parent)
# Configure the UI
self._create_actions()
self.popup = QMenu()
self.inject_actions(self.popup)
self.setMenu(self.popup)
user_template_choice().template_changed.connect(self.changed)
# User template might already have been loaded so load the initial
if user_template_choice().current:
self.changed()
def __del__(self):
# Doing this prevents segfault on exit. Unsatisfactory.
del self.popup
def _create_actions(self):
self._choose_action = QAction(
"Choose...", self, triggered=self.choose,
icon=load_icon(':/icons/open.png')
)
self._refresh_action = QAction(
"Reload", self, triggered=self.refresh,
icon=load_icon(':/icons/refresh.png')
)
self._reveal_template_action = QAction(
"Reveal template", self, triggered=self.reveal
)
self._default_action = QAction(
"Default ({0})".format(user_template_choice().DEFAULT.name),
self, triggered=self.default, icon=load_icon(':/icons/close.png')
)
def inject_actions(self, menu):
"Adds user template actions to menu"
menu.addAction(self._choose_action)
menu.addAction(self._refresh_action)
menu.addAction(self._reveal_template_action)
menu.addSeparator()
menu.addAction(self._default_action)
def default(self, checked=False):
"Sets the default template"
user_template_choice().select_default()
def choose(self, checked=False):
"Shows a 'choose template' file dialog"
debug_print('UserTemplateWidget.choose')
path, selectedFilter = QFileDialog.getOpenFileName(
self, "Choose user template",
str(user_template_choice().last_directory()),
self.FILE_FILTER
)
if path:
# Save the user's choice
user_template_choice().load(path)
def refresh(self, checked=False):
debug_print('UserTemplateWidget.refresh')
user_template_choice().refresh()
def reveal(self, checked=False):
reveal_path(user_template_choice().current_path)
def changed(self):
"Slot for UserTemplateChoice.template_changed"
debug_print('UserTemplateWidget.changed')
choice = user_template_choice()
self.setText(choice.current.name)
self._default_action.setEnabled(not choice.current_is_default)
self._refresh_action.setEnabled(not choice.current_is_default)
self._reveal_template_action.setEnabled(not choice.current_is_default)
|
[
"PyQt5.QtWidgets.QMenu",
"PyQt5.QtWidgets.QAction",
"inselect.lib.utils.debug_print"
] |
[((619, 626), 'PyQt5.QtWidgets.QMenu', 'QMenu', ([], {}), '()\n', (624, 626), False, 'from PyQt5.QtWidgets import QAction, QFileDialog, QMenu, QPushButton\n'), ((1401, 1456), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Reveal template"""', 'self'], {'triggered': 'self.reveal'}), "('Reveal template', self, triggered=self.reveal)\n", (1408, 1456), False, 'from PyQt5.QtWidgets import QAction, QFileDialog, QMenu, QPushButton\n'), ((2194, 2234), 'inselect.lib.utils.debug_print', 'debug_print', (['"""UserTemplateWidget.choose"""'], {}), "('UserTemplateWidget.choose')\n", (2205, 2234), False, 'from inselect.lib.utils import debug_print\n'), ((2582, 2623), 'inselect.lib.utils.debug_print', 'debug_print', (['"""UserTemplateWidget.refresh"""'], {}), "('UserTemplateWidget.refresh')\n", (2593, 2623), False, 'from inselect.lib.utils import debug_print\n'), ((2847, 2888), 'inselect.lib.utils.debug_print', 'debug_print', (['"""UserTemplateWidget.changed"""'], {}), "('UserTemplateWidget.changed')\n", (2858, 2888), False, 'from inselect.lib.utils import debug_print\n')]
|
import os, shutil
def main():
# Will contain all directories of files that need to be sorted
sortableFiles = []
# Will contain all directories of folders that are within the directory to be sorted
unsortableFiles = []
# Will be populated with filepaths of all the items inside a given directory
directoryPaths = []
# Will be populated with all file types within given directory
fileTypes = []
# Will be populated with all generated directories to create new "sorted" directories
directoryToCreate = []
# Will be populated with all the file types that there are only one of
singularFiles = []
###### PROGRAM START
    chosenDirectory = input("Enter directory: ")
if os.path.isdir(chosenDirectory) == True:
directoryContents = os.listdir(chosenDirectory)
for x in range(len(directoryContents)):
directoryPaths.append(os.path.join(chosenDirectory, directoryContents[x]))
# This for loop views into directoryPaths, which is populated
# with all the filepaths found in the given directory.
# It then checks every filepath, and determines if they are files or folders
counter = 0
for entry in directoryPaths:
if os.path.isdir(entry) == True:
unsortableFiles.append(directoryPaths[counter])
elif os.path.isfile(entry) == True:
sortableFiles.append(directoryPaths[counter])
counter += 1
# Create list of all file extensions that need to have a new directory created
for x in range(len(sortableFiles)):
fileName, fileExtension = os.path.splitext(sortableFiles[x])
fileExtension = fileExtension.replace('.','')
fileTypes.append(fileExtension)
# Populates the directoryToCreate list that will contain every, well, directory to create
for x in range(len(fileTypes)):
directoryToCreate.append(os.path.join(chosenDirectory, fileTypes[x]))
### FILE MANIPULATION STARTS HERE
# This create all the folders that the sortableFiles will be place in
for x in range(len(fileTypes)):
try:
os.mkdir(directoryToCreate[x])
except OSError:
continue
# This for loop will move all the files in sortableFiles
# to their new "homes" that were created in the for loop above.
for x in range(len(fileTypes)):
try:
shutil.move(sortableFiles[x],directoryToCreate[x])
except shutil.Error:
continue
    ##### Development runtime info
print("\nQueued Directories")
for entry in directoryToCreate:
print(entry)
print("\nFile Types")
for entry in fileTypes:
print(entry)
print("\nSortable")
for entry in sortableFiles:
print(entry)
print("\nThere are only one of these file types therefore they will not be moved")
for entry in singularFiles:
print(entry)
print("\nUnsortable")
for entry in unsortableFiles:
print(entry)
print("\nSuccessfully sorted "+str(len(fileTypes))+" files!")
###### SUCCESSFUL PROGRAM FINISH
if __name__ == '__main__':
main()
|
[
"os.mkdir",
"os.path.isdir",
"os.path.isfile",
"os.path.splitext",
"shutil.move",
"os.path.join",
"os.listdir"
] |
[((686, 716), 'os.path.isdir', 'os.path.isdir', (['chosenDirectory'], {}), '(chosenDirectory)\n', (699, 716), False, 'import os, shutil\n'), ((748, 775), 'os.listdir', 'os.listdir', (['chosenDirectory'], {}), '(chosenDirectory)\n', (758, 775), False, 'import os, shutil\n'), ((1474, 1508), 'os.path.splitext', 'os.path.splitext', (['sortableFiles[x]'], {}), '(sortableFiles[x])\n', (1490, 1508), False, 'import os, shutil\n'), ((842, 893), 'os.path.join', 'os.path.join', (['chosenDirectory', 'directoryContents[x]'], {}), '(chosenDirectory, directoryContents[x])\n', (854, 893), False, 'import os, shutil\n'), ((1142, 1162), 'os.path.isdir', 'os.path.isdir', (['entry'], {}), '(entry)\n', (1155, 1162), False, 'import os, shutil\n'), ((1743, 1786), 'os.path.join', 'os.path.join', (['chosenDirectory', 'fileTypes[x]'], {}), '(chosenDirectory, fileTypes[x])\n', (1755, 1786), False, 'import os, shutil\n'), ((1939, 1969), 'os.mkdir', 'os.mkdir', (['directoryToCreate[x]'], {}), '(directoryToCreate[x])\n', (1947, 1969), False, 'import os, shutil\n'), ((2169, 2220), 'shutil.move', 'shutil.move', (['sortableFiles[x]', 'directoryToCreate[x]'], {}), '(sortableFiles[x], directoryToCreate[x])\n', (2180, 2220), False, 'import os, shutil\n'), ((1230, 1251), 'os.path.isfile', 'os.path.isfile', (['entry'], {}), '(entry)\n', (1244, 1251), False, 'import os, shutil\n')]
|
"""Shim for executing the openapi spec validator."""
import re
import sys
from openapi_spec_validator.__main__ import main
if __name__ == "__main__":
sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0])
sys.exit(main())
|
[
"re.sub",
"openapi_spec_validator.__main__.main"
] |
[((172, 223), 're.sub', 're.sub', (['"""(-script\\\\.pyw|\\\\.exe)?$"""', '""""""', 'sys.argv[0]'], {}), "('(-script\\\\.pyw|\\\\.exe)?$', '', sys.argv[0])\n", (178, 223), False, 'import re\n'), ((236, 242), 'openapi_spec_validator.__main__.main', 'main', ([], {}), '()\n', (240, 242), False, 'from openapi_spec_validator.__main__ import main\n')]
|
import json
import pandas as pd
from collections import Counter
from keras.optimizers import Adam
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import preprocess_input
train_data_dir = '/home/jianxig/training_set_17_combo/frames/'
validation_data_dir = '/home/jianxig/validation_set_17_combo/frames/'
csv_file_training = "/home/jianxig/training_set_17_combo/labels_for_training(d_17).csv"
csv_file_validation = "/home/jianxig/validation_set_17_combo/labels_for_validation(d_17).csv"
img_width = 112
img_height = 112
batch_size = 6
nb_validation_samples=2910
nb_train_samples=49962
epochs=50
# Load the filenames and labels
dataFrame=pd.read_csv(csv_file_training, delimiter=',')
dataFrame.Labels=dataFrame.Labels.astype(str) # Change the datatype of the column 'Labels'
# Specify the class names
class_names=['Preparation','In fistula','infiltration']
# Apply data augmentation to the training set.
# Note: The preprocessing method is the same as VGG16
datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
brightness_range=[0.5, 1.5])
# Preprocessing the training set.
generator_train = datagen.flow_from_dataframe(
dataFrame,
directory = train_data_dir,
x_col = 'File_Name',
y_col = 'Labels',
target_size = (img_height, img_width),
class_mode='categorical',
batch_size=batch_size,
shuffle=True)
# Calculate weights for unbalanced classes within training set
counter = Counter(dataFrame.iloc[:, 1].tolist()) # Count how many images within each class
max_val = float(max(counter.values()))
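# Each class gets weight max_class_count / class_count, so rarer classes
# contribute more to the loss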
class_weights = {int(class_id,10) : max_val/num_images for class_id, num_images in counter.items()}
# (Validation) Load the filenames and labels
dataFrame_valid=pd.read_csv(csv_file_validation, delimiter=',')
dataFrame_valid.Labels=dataFrame_valid.Labels.astype(str)
# Preprocess the validation set, but avoid data augmentation
datagen_val = ImageDataGenerator(preprocessing_function=preprocess_input)
# Preprocessing the validation set.
generator_validation = datagen_val.flow_from_dataframe(
dataFrame_valid,
directory = validation_data_dir,
x_col = 'File_Name',
y_col = 'Labels',
target_size = (img_height, img_width),
class_mode='categorical',
batch_size=batch_size,
shuffle=True)
# Construct the network
model=Sequential()
# The first CNN block
model.add(
Conv2D(input_shape=(112,112,3), filters=16, kernel_size=(3,3), strides=(1, 1), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
# The seccond CNN block
model.add(Conv2D(filters=32, kernel_size=(3,3), strides=(1, 1), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
# The third CNN block
model.add(Conv2D(filters=32, kernel_size=(3,3), strides=(1, 1), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
# Convert the 2D features to the 1D features
model.add(Flatten())
# The output layer
model.add(Dense(3, activation='softmax'))
# Display the architecture
model.summary()
# Save the architecture of the network
with open('/home/jianxig/CNN_17/dataset_17/16_32_32/arch_Jan_22.json','w') as f:
f.write(model.to_json())
# Configure the learning process of the customized network
model.compile(optimizer=Adam(lr = 0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
# Save the optimal weights
checkpointer = ModelCheckpoint(filepath='/home/jianxig/CNN_17/dataset_17/16_32_32/bestWeights_1_aug_Jan_22.hdf5',
monitor='val_accuracy',verbose=1, save_best_only=True, mode='max')
# Train the customized network (weights are used)
step_for_training = len(generator_train)
step_for_validation = len(generator_validation)
model_history = model.fit_generator(generator_train,
steps_per_epoch=(step_for_training),
epochs=epochs,
validation_data=generator_validation,
validation_steps=(step_for_validation),
callbacks=[checkpointer],
class_weight=class_weights)
# Save the history
with open('/home/jianxig/CNN_17/dataset_17/16_32_32/log_1_aug_Jan_22.json', 'w') as file_write:
json.dump(model_history.history, file_write)
|
[
"keras.preprocessing.image.ImageDataGenerator",
"json.dump",
"pandas.read_csv",
"keras.callbacks.ModelCheckpoint",
"keras.layers.MaxPool2D",
"keras.layers.Flatten",
"keras.optimizers.Adam",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential"
] |
[((817, 862), 'pandas.read_csv', 'pd.read_csv', (['csv_file_training'], {'delimiter': '","'}), "(csv_file_training, delimiter=',')\n", (828, 862), True, 'import pandas as pd\n'), ((1150, 1314), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input', 'rotation_range': '(10)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'brightness_range': '[0.5, 1.5]'}), '(preprocessing_function=preprocess_input, rotation_range=\n 10, width_shift_range=0.2, height_shift_range=0.2, brightness_range=[\n 0.5, 1.5])\n', (1168, 1314), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2089, 2136), 'pandas.read_csv', 'pd.read_csv', (['csv_file_validation'], {'delimiter': '","'}), "(csv_file_validation, delimiter=',')\n", (2100, 2136), True, 'import pandas as pd\n'), ((2272, 2331), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input'}), '(preprocessing_function=preprocess_input)\n', (2290, 2331), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2763, 2775), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2773, 2775), False, 'from keras.models import Sequential\n'), ((3868, 4043), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""/home/jianxig/CNN_17/dataset_17/16_32_32/bestWeights_1_aug_Jan_22.hdf5"""', 'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath=\n '/home/jianxig/CNN_17/dataset_17/16_32_32/bestWeights_1_aug_Jan_22.hdf5',\n monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')\n", (3883, 4043), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2814, 2935), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(112, 112, 3)', 'filters': '(16)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(input_shape=(112, 112, 3), filters=16, kernel_size=(3, 3), strides=(\n 1, 1), padding='same', activation='relu')\n", (2820, 2935), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((2940, 2983), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (2949, 2983), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3018, 3111), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), strides=(1, 1), padding='same',\n activation='relu')\n", (3024, 3111), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3119, 3162), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (3128, 3162), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3195, 3288), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), strides=(1, 1), padding='same',\n activation='relu')\n", (3201, 3288), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3296, 3339), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (3305, 3339), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3395, 3404), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3402, 3404), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((3436, 3466), 'keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (3441, 3466), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n'), ((4845, 4889), 'json.dump', 'json.dump', (['model_history.history', 'file_write'], {}), '(model_history.history, file_write)\n', (4854, 4889), False, 'import json\n'), ((3750, 3765), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (3754, 3765), False, 'from keras.optimizers import Adam\n')]
|
#!/usr/bin/env python
#coding=utf-8
from pyecharts import WordCloud
def test_wordcloud():
# wordcloud_0
name = ['<NAME>', 'Macys', '<NAME>', 'Jurassic World', 'Charter Communications',
'<NAME>', 'Planet Fitness', 'Pitch Perfect', 'Express', 'Home', '<NAME>',
'<NAME>', '<NAME>', 'KXAN', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', 'NCAA baseball tournament', 'Point Break']
value = [10000, 6181, 4386, 4055, 2467, 2244, 1898, 1484, 1112, 965, 847, 582, 555,
550, 462, 366, 360, 282, 273, 265]
wordcloud = WordCloud(width=1300, height=620)
wordcloud.add("", name, value, word_size_range=[30, 100], rotate_step=66)
wordcloud.show_config()
wordcloud.render()
# wordcloud_1
wordcloud = WordCloud(width=1300, height=620)
wordcloud.add("", name, value, word_size_range=[30, 100], shape='diamond')
wordcloud.show_config()
wordcloud.render()
|
[
"pyecharts.WordCloud"
] |
[((570, 603), 'pyecharts.WordCloud', 'WordCloud', ([], {'width': '(1300)', 'height': '(620)'}), '(width=1300, height=620)\n', (579, 603), False, 'from pyecharts import WordCloud\n'), ((768, 801), 'pyecharts.WordCloud', 'WordCloud', ([], {'width': '(1300)', 'height': '(620)'}), '(width=1300, height=620)\n', (777, 801), False, 'from pyecharts import WordCloud\n')]
|
#!/usr/bin/env python
'''
Tests for gtfutils / junctions
'''
import os
import unittest
import StringIO
import ngsutils.gtf.junctions
from ngsutils.gtf import GTF
# >test1
# 1 2 3 4 5 6 7 8 9 100
# 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
# aaaaaaaaaCCCCCCCATGCtttttttttGCGCTTTGATCcccccccccCTGAGGGGGGGGGGGGGATCGgggggggggACTgggggggTCGAGGGGGGG
# exons:
# 10,20
# 30,40
# 50,70
# 90,100
# opt: 80-82
fa = os.path.join(os.path.dirname(__file__), 'test-junc.fa')
class GTFJunctionsTest(unittest.TestCase):
def testJunctionsSimple(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:16-20,49-53
ATGCCTGA
>test1:16-20,89-93
ATGCTCGA
>test1:36-40,49-53
GATCCTGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
def testJunctionsMultiExon(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|80|82|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:36-40,49-53
GATCCTGA
>test1:36-40,79-82,89-93
GATCACTTCGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,79-82,89-93
ATCGACTTCGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
def testJunctionsIsoforms(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:16-20,49-53
ATGCCTGA
>test1:16-20,89-93
ATGCTCGA
>test1:36-40,49-53
GATCCTGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
def testJunctionsIsoformsKnown(self):
gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:36-40,89-93
GATCTCGA
>test1:16-20,49-53
ATGCCTGA
>test1:66-70,89-93
ATCGTCGA
'''
out = StringIO.StringIO('')
ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, known=True, out=out, quiet=True)
self.assertEqual(out.getvalue(), valid)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.path.dirname",
"StringIO.StringIO"
] |
[((561, 586), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (576, 586), False, 'import os\n'), ((4343, 4358), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4356, 4358), False, 'import unittest\n'), ((1315, 1336), 'StringIO.StringIO', 'StringIO.StringIO', (['""""""'], {}), "('')\n", (1332, 1336), False, 'import StringIO\n'), ((2149, 2170), 'StringIO.StringIO', 'StringIO.StringIO', (['""""""'], {}), "('')\n", (2166, 2170), False, 'import StringIO\n'), ((3162, 3183), 'StringIO.StringIO', 'StringIO.StringIO', (['""""""'], {}), "('')\n", (3179, 3183), False, 'import StringIO\n'), ((4123, 4144), 'StringIO.StringIO', 'StringIO.StringIO', (['""""""'], {}), "('')\n", (4140, 4144), False, 'import StringIO\n')]
|
import logging
from contextlib import contextmanager
from typing import NamedTuple
import ixmp
import message_ix
from d2ix.core import MessageInterface
logger = logging.getLogger(__name__)
RUN_CONFIG = '../config/run_config_server.yml'
class TechnologyOut(NamedTuple):
technology: str
commodity: str
level: str
class Costs(NamedTuple):
inv_cost: float
fix_cost: float
var_cost: float
class TestScenario(NamedTuple):
model: str
scenario: str
first_test_year: int
class TestConfig(NamedTuple):
model: str
scenario: str
base_xls: str
manual_parameter_xls: str
historical_data: bool
first_historical_year: int
first_model_year: int
last_model_year: int
historical_range_year: int
model_range_year: int
run_config: str
verbose: bool
yaml_export: bool
class RunScenario(object):
mp: ixmp.Platform
base_parallel_scenario: message_ix.Scenario
db_server: bool = False
def __init__(self, run_config: str, log_level: str = 'INFO'):
logger.setLevel(log_level)
self.log_level = log_level
self.run_config = run_config
self._define_platform()
def _define_platform(self) -> None:
_config = MessageInterface(self.run_config)
db_config = _config.config['db']
self.mp = ixmp.Platform(dbprops=db_config.get('dbprops'), dbtype=db_config.get('dbtype'))
self.mp.set_log_level(self.log_level)
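        # no explicit dbtype means a central DB server manages connections,
        # so the manual open_db()/close_db() calls below are skipped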
if not db_config.get('dbtype'):
self.db_server = True
@contextmanager
def make_scenario(self, clone_model: str, clone_scenario: str, new_scenario_name: str) -> message_ix.Scenario:
logger.info(f'Clone scenario: \'{clone_scenario}\' from model: \'{clone_model}\' to \'{new_scenario_name}\'.')
if not self.db_server:
self.mp.open_db()
base_ds = message_ix.Scenario(self.mp, clone_model, clone_scenario)
scenario = base_ds.clone(scenario=new_scenario_name, keep_solution=False)
scenario.check_out()
yield scenario
scenario.commit(f'Changes committed by \'{clone_model}\' - \'{new_scenario_name}\'')
scenario.set_as_default()
scenario.solve()
if not self.db_server:
self.mp.close_db()
@contextmanager
def read_scenario(self, model: str, scenario_name: str):
if not self.db_server:
self.mp.open_db()
scenario = message_ix.Scenario(mp=self.mp, model=model, scenario=scenario_name)
yield scenario
if not self.db_server:
self.mp.close_db()
|
[
"d2ix.core.MessageInterface",
"message_ix.Scenario",
"logging.getLogger"
] |
[((164, 191), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (181, 191), False, 'import logging\n'), ((1238, 1271), 'd2ix.core.MessageInterface', 'MessageInterface', (['self.run_config'], {}), '(self.run_config)\n', (1254, 1271), False, 'from d2ix.core import MessageInterface\n'), ((1865, 1922), 'message_ix.Scenario', 'message_ix.Scenario', (['self.mp', 'clone_model', 'clone_scenario'], {}), '(self.mp, clone_model, clone_scenario)\n', (1884, 1922), False, 'import message_ix\n'), ((2433, 2501), 'message_ix.Scenario', 'message_ix.Scenario', ([], {'mp': 'self.mp', 'model': 'model', 'scenario': 'scenario_name'}), '(mp=self.mp, model=model, scenario=scenario_name)\n', (2452, 2501), False, 'import message_ix\n')]
|
import common as com
import matplotlib.pyplot as plt
import os
import numpy as np
def get_xy(file):
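    """Read a two-column tab-separated file and return the columns as float lists (x, y)."""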
import csv
x = []
y = []
with open(file, 'r') as fh:
open_file = csv.reader(fh, delimiter='\t')
for line in open_file:
x_ = line[0]
y_ = line[1]
x_ = float(x_)
y_ = float(y_)
x.append(x_)
y.append(y_)
return x, y
def get_errors(file):
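    """Read the third tab-separated column of the file as a list of float error values."""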
import csv
err = []
with open(file, 'r') as fh:
open_file = csv.reader(fh, delimiter='\t')
for line in open_file:
err_ = line[2]
err_ = float(err_)
err.append(err_)
return err
def make_plot(plt, x, y, format_, config, hysteresis=False):
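    """Plot y against x, either as a plain matplotlib plot or as a hysteresis plot via common.plot_hysteresis."""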
if hysteresis:
try:
hyst_legend = config['legend']
except:
hyst_legend = True
com.plot_hysteresis(plt, x, y, format=format_, legend=hyst_legend)
else:
plt.plot(x, y, format_)
if __name__ == '__main__':
com.print_running_message(__file__)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', action='store', dest='input', help='input path')
parser.add_argument('-c', '--config', action='store', dest='config', help='config path')
parser.add_argument('-o', '--output', action='store', dest='output', help='output path')
args = parser.parse_args()
# input
input_path = args.input
if input_path is None:
input_path = com.get_path('Input')
if not os.path.isfile(input_path):
print('{} is not a valid input file.'.format(input_path))
input_path = com.get_path('Input')
input_file = input_path
input_dir = os.path.split(input_file)[0]
# output
output_path = args.output
if output_path is None:
output_path = input_dir
# config
config_path = args.config
if config_path is None:
config_path = com.get_path('Config')
if not os.path.isfile(config_path):
print('{} is not a valid config file.'.format(config_path))
config_path = com.get_path('Config')
config = com.read_yaml(config_path)
# read data
x, y = get_xy(input_file)
try:
normalize = config['norm']
if normalize:
print('Normalizing data.')
y = np.asarray(y)
y = com.normalize(y)
except:
pass
# # dummy data
# x = range(0,10)
# y = np.asarray(x) ** 3
# plotting
plt.figure(1)
try:
format_ = config['format']
except:
format_ = '-'
try:
hysteresis = config['hysteresis']
except:
hysteresis = False
make_plot(plt, x, y, format_, config, hysteresis=hysteresis)
try:
plottwo = config['plottwo']
except:
plottwo = False
if plottwo:
try:
input_file2 = config['plottwofp']
except:
input_file2 = None
if input_file2 is None:
input_file2 = com.get_path('Second input')
x2, y2 = get_xy(input_file2)
make_plot(plt, x2, y2, format_, config, hysteresis=hysteresis)
# TODO: Combine these statements to fix color
try:
error_provided_in_file = config['errorprovided']
except:
error_provided_in_file = False
if error_provided_in_file:
errors = get_errors(input_file)
plt.errorbar(x, y, yerr=errors, fmt='b{}'.format(format_))
if plottwo:
errors2 = get_errors(input_file2)
plt.errorbar(x2, y2, yerr=errors2, fmt='b{}'.format(format_))
try:
yerror = config['yerror']
except:
yerror = None
if yerror is not None: # TODO: Allow x error
plt.errorbar(x, y, yerr=yerror, fmt='b{}'.format(format_))
# FORMATTING
try:
grid = config['grid']
if grid:
plt.grid(b=True, which='major', color='0.50', linestyle='-')
plt.grid(b=True, which='minor', color='0.25', linestyle='--')
except:
pass
try:
biglabels = config['biglabels']
except:
biglabels = False
if biglabels:
from matplotlib import rc
font = {'size': 20}
rc('font', **font)
try:
title = config['title']
except:
title = None
if title is not None:
plt.title(title)
try:
xlabel = config['xlabel']
except:
xlabel = None
if xlabel is not None:
plt.xlabel(xlabel)
try:
ylabel = config['ylabel']
except:
ylabel = None
if ylabel is not None:
plt.ylabel(ylabel)
try:
fitline = config['fitline']
except:
fitline = False
if fitline:
# include line
try:
a_ = config['a']
b_ = config['b']
x0, xspan = config['linerange']
except:
a_ = None
b_ = None
x0 = None
xspan = None
print('Could not fit line. Please provide `a` and `b` for y = ax + b in `config.yaml`.')
if a_ is not None and b_ is not None:
if x0 is None:
x0 = 0
if xspan is None:
xspan = 100
x_ = range(x0, xspan, 1)
x_ = np.asarray(x_)
y_ = a_ * x_ + b_
plt.plot(x_, y_, color='b')
plt.axvline(0, color='k')
try:
fitline2 = config['fitline2']
except:
fitline2 = False
if fitline2:
# include line
try:
a2_ = config['a2']
b2_ = config['b2']
x20, x2span = config['linerange2']
except:
a2_ = None
b2_ = None
x20 = None
x2span = None
print('Could not fit line. Please provide `a2` and `b2` for y2 = a2x2 + b2 in `config.yaml`.')
if a2_ is not None and b2_ is not None:
if x20 is None:
x20 = 0
if x2span is None:
x2span = 100
x2_ = range(x20, x2span, 1)
x2_ = np.asarray(x2_)
y2_ = a2_ * x2_ + b2_
plt.plot(x2_, y2_, color='b')
plt.axvline(0, color='k')
# fit exponential
try:
fitexp = config['fitexp']
except:
fitexp = False
if fitexp:
try:
a_ = config['a']
b_ = config['b']
x0, xspan = config['linerange']
except:
a_ = None
b_ = None
x0 = None
xspan = None
print('Could not fit line. Please provide `a` and `b` for y = ax + b in `config.yaml`.')
if a_ is not None and b_ is not None:
if x0 is None:
x0 = 0
if xspan is None:
xspan = 100
x_ = range(x0, xspan, 1)
x_ = np.asarray(x_)
y_ = a_ * np.exp(b_ * x_)
plt.plot(x_, y_, color='b')
plt.axvline(0, color='k')
# output
try:
outname = config['outname']
outexts = config['outexts']
except:
outname = 'graph'
outexts = ['.png', '.svg']
for ext in outexts:
outfile = '{}{}'.format(outname, ext)
com.saveplot(plt, output_path, outfile, printmessage=True)
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.rc",
"csv.reader",
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"os.path.isfile",
"numpy.exp",
"common.plot_hysteresis",
"matplotlib.pyplot.axvline",
"common.saveplot",
"common.get_path",
"matplotlib.pyplot.show",
"common.normalize",
"common.read_yaml",
"numpy.asarray",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"common.print_running_message",
"matplotlib.pyplot.xlabel",
"os.path.split"
] |
[((1114, 1149), 'common.print_running_message', 'com.print_running_message', (['__file__'], {}), '(__file__)\n', (1139, 1149), True, 'import common as com\n'), ((1187, 1212), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1210, 1212), False, 'import argparse\n'), ((2263, 2289), 'common.read_yaml', 'com.read_yaml', (['config_path'], {}), '(config_path)\n', (2276, 2289), True, 'import common as com\n'), ((2641, 2654), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2651, 2654), True, 'import matplotlib.pyplot as plt\n'), ((7590, 7600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7598, 7600), True, 'import matplotlib.pyplot as plt\n'), ((224, 254), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '"""\t"""'}), "(fh, delimiter='\\t')\n", (234, 254), False, 'import csv\n'), ((597, 627), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '"""\t"""'}), "(fh, delimiter='\\t')\n", (607, 627), False, 'import csv\n'), ((966, 1032), 'common.plot_hysteresis', 'com.plot_hysteresis', (['plt', 'x', 'y'], {'format': 'format_', 'legend': 'hyst_legend'}), '(plt, x, y, format=format_, legend=hyst_legend)\n', (985, 1032), True, 'import common as com\n'), ((1053, 1076), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'format_'], {}), '(x, y, format_)\n', (1061, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1639), 'common.get_path', 'com.get_path', (['"""Input"""'], {}), "('Input')\n", (1630, 1639), True, 'import common as com\n'), ((1652, 1678), 'os.path.isfile', 'os.path.isfile', (['input_path'], {}), '(input_path)\n', (1666, 1678), False, 'import os\n'), ((1769, 1790), 'common.get_path', 'com.get_path', (['"""Input"""'], {}), "('Input')\n", (1781, 1790), True, 'import common as com\n'), ((1837, 1862), 'os.path.split', 'os.path.split', (['input_file'], {}), '(input_file)\n', (1850, 1862), False, 'import os\n'), ((2070, 2092), 'common.get_path', 'com.get_path', (['"""Config"""'], {}), "('Config')\n", (2082, 2092), True, 'import common as com\n'), ((2105, 2132), 'os.path.isfile', 'os.path.isfile', (['config_path'], {}), '(config_path)\n', (2119, 2132), False, 'import os\n'), ((2226, 2248), 'common.get_path', 'com.get_path', (['"""Config"""'], {}), "('Config')\n", (2238, 2248), True, 'import common as com\n'), ((4402, 4420), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **font)\n", (4404, 4420), False, 'from matplotlib import rc\n'), ((4535, 4551), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4544, 4551), True, 'import matplotlib.pyplot as plt\n'), ((4670, 4688), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (4680, 4688), True, 'import matplotlib.pyplot as plt\n'), ((4807, 4825), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (4817, 4825), True, 'import matplotlib.pyplot as plt\n'), ((7524, 7582), 'common.saveplot', 'com.saveplot', (['plt', 'output_path', 'outfile'], {'printmessage': '(True)'}), '(plt, output_path, outfile, printmessage=True)\n', (7536, 7582), True, 'import common as com\n'), ((2468, 2481), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2478, 2481), True, 'import numpy as np\n'), ((2499, 2515), 'common.normalize', 'com.normalize', (['y'], {}), '(y)\n', (2512, 2515), True, 'import common as com\n'), ((3169, 3197), 'common.get_path', 'com.get_path', (['"""Second input"""'], {}), "('Second input')\n", (3181, 3197), True, 'import common as com\n'), ((4056, 4116), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': 
'"""major"""', 'color': '"""0.50"""', 'linestyle': '"""-"""'}), "(b=True, which='major', color='0.50', linestyle='-')\n", (4064, 4116), True, 'import matplotlib.pyplot as plt\n'), ((4130, 4191), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'color': '"""0.25"""', 'linestyle': '"""--"""'}), "(b=True, which='minor', color='0.25', linestyle='--')\n", (4138, 4191), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5514), 'numpy.asarray', 'np.asarray', (['x_'], {}), '(x_)\n', (5510, 5514), True, 'import numpy as np\n'), ((5559, 5586), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_'], {'color': '"""b"""'}), "(x_, y_, color='b')\n", (5567, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5625), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""k"""'}), "(0, color='k')\n", (5611, 5625), True, 'import matplotlib.pyplot as plt\n'), ((6331, 6346), 'numpy.asarray', 'np.asarray', (['x2_'], {}), '(x2_)\n', (6341, 6346), True, 'import numpy as np\n'), ((6395, 6424), 'matplotlib.pyplot.plot', 'plt.plot', (['x2_', 'y2_'], {'color': '"""b"""'}), "(x2_, y2_, color='b')\n", (6403, 6424), True, 'import matplotlib.pyplot as plt\n'), ((6438, 6463), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""k"""'}), "(0, color='k')\n", (6449, 6463), True, 'import matplotlib.pyplot as plt\n'), ((7133, 7147), 'numpy.asarray', 'np.asarray', (['x_'], {}), '(x_)\n', (7143, 7147), True, 'import numpy as np\n'), ((7200, 7227), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_'], {'color': '"""b"""'}), "(x_, y_, color='b')\n", (7208, 7227), True, 'import matplotlib.pyplot as plt\n'), ((7241, 7266), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""k"""'}), "(0, color='k')\n", (7252, 7266), True, 'import matplotlib.pyplot as plt\n'), ((7171, 7186), 'numpy.exp', 'np.exp', (['(b_ * x_)'], {}), '(b_ * x_)\n', (7177, 7186), True, 'import numpy as np\n')]
|
"""A simple timer implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
class _CountDownTimer(object):
"""A simple count down timer implementation."""
def __init__(self, duration_secs):
"""Initializes a `_CountDownTimer`.
Args:
duration_secs: Float seconds for countdown.
Returns:
A `_CountDownTimer` instance.
"""
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
"""Returns the remaining countdown seconds."""
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0., diff)
|
[
"time.time"
] |
[((1053, 1064), 'time.time', 'time.time', ([], {}), '()\n', (1062, 1064), False, 'import time\n'), ((1220, 1231), 'time.time', 'time.time', ([], {}), '()\n', (1229, 1231), False, 'import time\n')]
|
import sqlite3
conn = sqlite3.connect('food.db')
curs = conn.cursor()
query = 'SELECT * FROM food'
curs.execute(query)
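# curs.description holds one 7-tuple per result column; element [0] is the column name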
names = [f[0] for f in curs.description]
for row in curs.fetchall():
    for pair in zip(names, row):
        print('%s:%s' % pair)
    print()
conn.close()
|
[
"sqlite3.connect"
] |
[((23, 49), 'sqlite3.connect', 'sqlite3.connect', (['"""food.db"""'], {}), "('food.db')\n", (38, 49), False, 'import sqlite3\n')]
|
"""
Tests of noise input.
"""
import unittest
import numpy as np
from chspy import CubicHermiteSpline
from neurolib.models.aln import ALNModel
from neurolib.utils.stimulus import (
ConcatenatedStimulus,
ExponentialInput,
LinearRampInput,
OrnsteinUhlenbeckProcess,
RectifiedInput,
SinusoidalInput,
SquareInput,
StepInput,
SummedStimulus,
WienerProcess,
ZeroInput,
)
TESTING_TIME = 5.3
DURATION = 10
DT = 0.1
STIM_START = 2
STIM_END = 8
SHAPE = (2, int(DURATION / DT))
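# all test stimuli below are generated for 2 nodes over DURATION sampled at DT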
class TestCubicSplines(unittest.TestCase):
RESULT_SPLINES = np.array([-0.214062, -0.215043])
RESULT_ARRAY = np.array([0.193429, 0.073445])
def test_splines(self):
dW = WienerProcess(n=2, seed=42).as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(dW, CubicHermiteSpline))
np.testing.assert_allclose(self.RESULT_SPLINES, dW.get_state(TESTING_TIME), atol=1e-05)
def test_arrays(self):
dW = WienerProcess(n=2, seed=42).as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(dW, np.ndarray))
time_idx = np.around(TESTING_TIME / DT).astype(int)
np.testing.assert_allclose(self.RESULT_ARRAY, dW[:, time_idx], atol=1e-05)
def test_shift_start_time(self):
SHIFT = 5.0
dW = WienerProcess(n=2, seed=42).as_cubic_splines(duration=DURATION, dt=DT, shift_start_time=SHIFT)
self.assertTrue(isinstance(dW, CubicHermiteSpline))
self.assertEqual(dW[0].time, SHIFT + DT)
np.testing.assert_allclose(self.RESULT_SPLINES, dW.get_state(TESTING_TIME + SHIFT), atol=1e-05)
class TestToModel(unittest.TestCase):
def test_single_node(self):
model = ALNModel()
model.params["duration"] = 2 * 1000
stim = SinusoidalInput(amplitude=1.0, frequency=1.0)
model_stim = stim.to_model(model)
model.params["ext_exc_current"] = model_stim
model.run()
self.assertTrue(isinstance(model_stim, np.ndarray))
self.assertTupleEqual(model_stim.shape, (1, int(model.params["duration"] / model.params["dt"])))
def test_multi_node_multi_stim(self):
model = ALNModel(Cmat=np.random.rand(5, 5), Dmat=np.zeros((5, 5)))
model.params["duration"] = 2 * 1000
stim = SinusoidalInput(amplitude=1.0, frequency=1.0)
model_stim = stim.to_model(model)
model.params["ext_exc_current"] = model_stim
model.run()
self.assertTrue(isinstance(model_stim, np.ndarray))
self.assertTupleEqual(model_stim.shape, (5, int(model.params["duration"] / model.params["dt"])))
class TestZeroInput(unittest.TestCase):
def test_generate_input(self):
nn = ZeroInput(n=2, seed=42).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(nn, np.ndarray))
self.assertTupleEqual(nn.shape, SHAPE)
np.testing.assert_allclose(nn, np.zeros(SHAPE))
def test_get_params(self):
nn = ZeroInput(n=2, seed=42)
params = nn.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42})
def test_set_params(self):
nn = ZeroInput(n=2, seed=42)
UPDATE = {"seed": 635}
nn.update_params(UPDATE)
params = nn.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, **UPDATE})
class TestWienerProcess(unittest.TestCase):
def test_generate_input(self):
dW = WienerProcess(n=2, seed=42).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(dW, np.ndarray))
self.assertTupleEqual(dW.shape, SHAPE)
def test_get_params(self):
dW = WienerProcess(n=2, seed=42)
params = dW.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42})
def test_set_params(self):
dW = WienerProcess(n=2, seed=42)
UPDATE = {"seed": 6152, "n": 5}
dW.update_params(UPDATE)
params = dW.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, **UPDATE})
class TestOrnsteinUhlenbeckProcess(unittest.TestCase):
def test_generate_input(self):
ou = OrnsteinUhlenbeckProcess(
mu=3.0,
sigma=0.1,
tau=2 * DT,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ou, np.ndarray))
self.assertTupleEqual(ou.shape, SHAPE)
def test_get_params(self):
ou = OrnsteinUhlenbeckProcess(
mu=3.0,
sigma=0.1,
tau=2 * DT,
n=2,
seed=42,
)
params = ou.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, "mu": 3.0, "sigma": 0.1, "tau": 2 * DT})
def test_set_params(self):
ou = OrnsteinUhlenbeckProcess(
mu=3.0,
sigma=0.1,
tau=2 * DT,
n=2,
seed=42,
)
UPDATE = {"mu": 2.3, "seed": 12}
ou.update_params(UPDATE)
params = ou.get_params()
params.pop("type")
self.assertDictEqual(params, {"n": 2, "seed": 42, "mu": 3.0, "sigma": 0.1, "tau": 2 * DT, **UPDATE})
class TestStepInput(unittest.TestCase):
STEP_SIZE = 2.3
def test_generate_input(self):
step = StepInput(
step_size=self.STEP_SIZE,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(step, np.ndarray))
self.assertTupleEqual(step.shape, SHAPE)
np.testing.assert_allclose(step, self.STEP_SIZE)
def test_start_end_input(self):
step = StepInput(
start=STIM_START,
end=STIM_END,
step_size=self.STEP_SIZE,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(step[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(step[:, int(STIM_END / DT) :], 0.0)
class TestSinusoidalInput(unittest.TestCase):
AMPLITUDE = 2.3
FREQUENCY = 1000.0
def test_generate_input(self):
sin = SinusoidalInput(
amplitude=self.AMPLITUDE, frequency=self.FREQUENCY, n=2, seed=42, dc_bias=True
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(sin, np.ndarray))
self.assertTupleEqual(sin.shape, SHAPE)
np.testing.assert_almost_equal(np.mean(sin, axis=1), np.array(2 * [self.AMPLITUDE]))
def test_start_end_input(self):
sin = SinusoidalInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(sin[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(sin[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
sin = SinusoidalInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
params = sin.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"start": STIM_START,
"dc_bias": False,
"end": STIM_END,
},
)
def test_set_params(self):
sin = SinusoidalInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
UPDATE = {"amplitude": 43.0, "seed": 12, "start": "None"}
sin.update_params(UPDATE)
params = sin.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"dc_bias": False,
"end": STIM_END,
**UPDATE,
"start": None,
},
)
class TestSquareInput(unittest.TestCase):
AMPLITUDE = 2.3
FREQUENCY = 20.0
def test_generate_input(self):
sq = SquareInput(
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(sq, np.ndarray))
self.assertTupleEqual(sq.shape, SHAPE)
np.testing.assert_almost_equal(np.mean(sq, axis=1), np.array(2 * [self.AMPLITUDE]))
def test_start_end_input(self):
sq = SquareInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(sq[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(sq[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
sq = SquareInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
params = sq.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"start": STIM_START,
"end": STIM_END,
"dc_bias": False,
},
)
def test_set_params(self):
sq = SquareInput(
start=STIM_START,
end=STIM_END,
amplitude=self.AMPLITUDE,
frequency=self.FREQUENCY,
n=2,
seed=42,
)
UPDATE = {"amplitude": 43.0, "seed": 12, "start": "None"}
sq.update_params(UPDATE)
params = sq.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"frequency": self.FREQUENCY,
"amplitude": self.AMPLITUDE,
"end": STIM_END,
"dc_bias": False,
**UPDATE,
"start": None,
},
)
class TestLinearRampInput(unittest.TestCase):
INP_MAX = 5.0
RAMP_LENGTH = 2.0
def test_generate_input(self):
ramp = LinearRampInput(
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ramp, np.ndarray))
self.assertTupleEqual(ramp.shape, SHAPE)
np.testing.assert_equal(np.max(ramp, axis=1), np.array(2 * [self.INP_MAX]))
np.testing.assert_equal(np.min(ramp, axis=1), np.array(2 * [0.25]))
def test_start_end_input(self):
ramp = LinearRampInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(ramp[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(ramp[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
ramp = LinearRampInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
)
params = ramp.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"ramp_length": self.RAMP_LENGTH,
"start": STIM_START,
"end": STIM_END,
},
)
def test_set_params(self):
ramp = LinearRampInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
ramp_length=self.RAMP_LENGTH,
n=2,
seed=42,
)
UPDATE = {"inp_max": 41.0, "seed": 12}
ramp.update_params(UPDATE)
params = ramp.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"ramp_length": self.RAMP_LENGTH,
"start": STIM_START,
"end": STIM_END,
**UPDATE,
},
)
class TestExponentialInput(unittest.TestCase):
INP_MAX = 5.0
EXP_COEF = 30.0
EXP_TYPE = "rise"
def test_generate_input_rise(self):
exp_rise = ExponentialInput(
inp_max=self.INP_MAX,
exp_type="rise",
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(exp_rise, np.ndarray))
self.assertTupleEqual(exp_rise.shape, SHAPE)
np.testing.assert_almost_equal(np.max(exp_rise, axis=1), np.array(2 * [self.INP_MAX]))
self.assertTrue(np.all(np.diff(exp_rise) >= 0))
def test_generate_input_decay(self):
exp_decay = ExponentialInput(
inp_max=self.INP_MAX,
exp_type="decay",
n=2,
seed=42,
).generate_input(duration=DURATION, dt=DT)
self.assertTrue(isinstance(exp_decay, np.ndarray))
self.assertTupleEqual(exp_decay.shape, SHAPE)
self.assertTrue(np.all(np.diff(exp_decay) <= 0))
def test_start_end_input(self):
exp_rise = ExponentialInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
n=2,
seed=42,
).as_array(duration=DURATION, dt=DT)
np.testing.assert_allclose(exp_rise[:, : int(STIM_START / DT)], 0.0)
np.testing.assert_allclose(exp_rise[:, int(STIM_END / DT) :], 0.0)
def test_get_params(self):
exp_rise = ExponentialInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
n=2,
seed=42,
)
params = exp_rise.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"exp_coef": self.EXP_COEF,
"exp_type": self.EXP_TYPE,
"start": STIM_START,
"end": STIM_END,
},
)
def test_set_params(self):
exp_rise = ExponentialInput(
start=STIM_START,
end=STIM_END,
inp_max=self.INP_MAX,
n=2,
seed=42,
)
UPDATE = {"inp_max": 41.0, "seed": 12}
exp_rise.update_params(UPDATE)
params = exp_rise.get_params()
params.pop("type")
self.assertDictEqual(
params,
{
"n": 2,
"seed": 42,
"inp_max": self.INP_MAX,
"exp_coef": self.EXP_COEF,
"exp_type": self.EXP_TYPE,
"start": STIM_START,
"end": STIM_END,
**UPDATE,
},
)
class TestSummedStimulus(unittest.TestCase):
def _create_input(self):
ou = OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.02, tau=2.0, n=2)
sq = SquareInput(amplitude=0.2, frequency=50, n=2, start=5)
sin = SinusoidalInput(amplitude=0.1, frequency=100, n=2, start=2)
step = StepInput(step_size=0.5, n=2, start=7)
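        # the + operator combines individual inputs into a SummedStimulus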
return sq + (sin + step + ou)
def test_init(self):
summed = self._create_input()
self.assertEqual(len(summed), 4)
self.assertTrue(isinstance(summed, SummedStimulus))
self.assertEqual(summed.n, 2)
self.assertEqual(len(summed.inputs), 4)
def test_set_n(self):
summed = self._create_input()
self.assertEqual(summed.n, 2)
ts = summed.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 2)
summed.n = 5
self.assertEqual(summed.n, 5)
ts = summed.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 5)
def test_generate_input(self):
summed = self._create_input()
ts = summed.as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = summed.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
def test_get_params(self):
summed = self._create_input()
params = summed.get_params()
self.assertTrue(isinstance(params, dict))
self.assertEqual(len(params), 1 + len(summed.inputs))
for i, process in enumerate(summed):
self.assertDictEqual(process.get_params(), params[f"input_{i}"])
def test_update_params(self):
summed = self._create_input()
UPDATE_DICT = {f"input_{i}": {"n": 3} for i in range(len(summed))}
summed.update_params(UPDATE_DICT)
self.assertEqual(summed.n, 3)
class TestConcatenatedStimulus(unittest.TestCase):
def _create_input(self):
ou = OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.02, tau=2.0, n=2)
sq = SquareInput(amplitude=0.2, frequency=20.0, n=2)
sin = SinusoidalInput(amplitude=0.1, frequency=10.0, n=2)
step = StepInput(step_size=0.5, n=2)
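        # the & operator chains inputs in time into a ConcatenatedStimulus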
return ou & (sq & sin & step)
def test_init(self):
conc = self._create_input()
self.assertEqual(len(conc), 4)
self.assertTrue(isinstance(conc, ConcatenatedStimulus))
self.assertEqual(conc.n, 2)
self.assertEqual(len(conc.inputs), 4)
def test_set_n(self):
conc = self._create_input()
self.assertEqual(conc.n, 2)
ts = conc.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 2)
conc.n = 5
self.assertEqual(conc.n, 5)
ts = conc.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 5)
def test_generate_input(self):
conc = self._create_input()
ts = conc.as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = conc.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
def test_get_params(self):
conc = self._create_input()
params = conc.get_params()
self.assertTrue(isinstance(params, dict))
self.assertEqual(len(params), 1 + len(conc.inputs))
for i, process in enumerate(conc):
self.assertDictEqual(process.get_params(), params[f"input_{i}"])
def test_update_params(self):
conc = self._create_input()
UPDATE_DICT = {f"input_{i}": {"n": 3} for i in range(len(conc))}
conc.update_params(UPDATE_DICT)
self.assertEqual(conc.n, 3)
class TestBeastInput(unittest.TestCase):
def _create_input(self):
ou = OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.02, tau=2.0, n=2)
sq = SquareInput(amplitude=0.2, frequency=20.0, n=2)
sin = SinusoidalInput(amplitude=0.1, frequency=10.0, n=2)
step = StepInput(step_size=0.5, n=2)
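        # mix both operators: sum the pairs first, then concatenate the two sums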
return (sq + sin) & (step + ou)
def test_init(self):
beast = self._create_input()
self.assertEqual(len(beast), 2)
self.assertTrue(isinstance(beast, ConcatenatedStimulus))
for process in beast:
self.assertTrue(isinstance(process, SummedStimulus))
self.assertEqual(beast.n, 2)
def test_set_n(self):
beast = self._create_input()
self.assertEqual(beast.n, 2)
ts = beast.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 2)
beast.n = 5
self.assertEqual(beast.n, 5)
ts = beast.as_array(duration=DURATION, dt=DT)
self.assertEqual(ts.shape[0], 5)
def test_generate_input(self):
beast = self._create_input()
ts = beast.as_array(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = beast.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
def test_get_params(self):
beast = self._create_input()
params = beast.get_params()
self.assertTrue(isinstance(params, dict))
self.assertEqual(len(params), 1 + len(beast.inputs))
for i, process in enumerate(beast):
self.assertDictEqual(process.get_params(), params[f"input_{i}"])
class TestRectifiedInput(unittest.TestCase):
def test_init(self):
rect = RectifiedInput(0.2, n=2)
self.assertTrue(isinstance(rect, ConcatenatedStimulus))
self.assertEqual(len(rect), 5)
self.assertEqual(rect.n, 2)
def test_generate(self):
rect = RectifiedInput(0.2, n=2)
ts = rect.as_array(DURATION, DT)
self.assertTrue(isinstance(ts, np.ndarray))
self.assertTupleEqual(ts.shape, SHAPE)
ts = rect.as_cubic_splines(duration=DURATION, dt=DT)
self.assertTrue(isinstance(ts, CubicHermiteSpline))
if __name__ == "__main__":
unittest.main()
|
[
"neurolib.utils.stimulus.RectifiedInput",
"neurolib.utils.stimulus.StepInput",
"neurolib.utils.stimulus.SinusoidalInput",
"neurolib.utils.stimulus.OrnsteinUhlenbeckProcess",
"numpy.around",
"numpy.mean",
"neurolib.utils.stimulus.LinearRampInput",
"unittest.main",
"neurolib.utils.stimulus.ExponentialInput",
"numpy.max",
"neurolib.utils.stimulus.WienerProcess",
"numpy.testing.assert_allclose",
"numpy.min",
"neurolib.models.aln.ALNModel",
"neurolib.utils.stimulus.ZeroInput",
"neurolib.utils.stimulus.SquareInput",
"numpy.zeros",
"numpy.diff",
"numpy.array",
"numpy.random.rand"
] |
[((580, 612), 'numpy.array', 'np.array', (['[-0.214062, -0.215043]'], {}), '([-0.214062, -0.215043])\n', (588, 612), True, 'import numpy as np\n'), ((632, 662), 'numpy.array', 'np.array', (['[0.193429, 0.073445]'], {}), '([0.193429, 0.073445])\n', (640, 662), True, 'import numpy as np\n'), ((21803, 21818), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21816, 21818), False, 'import unittest\n'), ((1156, 1230), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['self.RESULT_ARRAY', 'dW[:, time_idx]'], {'atol': '(1e-05)'}), '(self.RESULT_ARRAY, dW[:, time_idx], atol=1e-05)\n', (1182, 1230), True, 'import numpy as np\n'), ((1698, 1708), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {}), '()\n', (1706, 1708), False, 'from neurolib.models.aln import ALNModel\n'), ((1768, 1813), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'amplitude': '(1.0)', 'frequency': '(1.0)'}), '(amplitude=1.0, frequency=1.0)\n', (1783, 1813), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((2271, 2316), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'amplitude': '(1.0)', 'frequency': '(1.0)'}), '(amplitude=1.0, frequency=1.0)\n', (2286, 2316), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((2952, 2975), 'neurolib.utils.stimulus.ZeroInput', 'ZeroInput', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (2961, 2975), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((3140, 3163), 'neurolib.utils.stimulus.ZeroInput', 'ZeroInput', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (3149, 3163), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((3664, 3691), 'neurolib.utils.stimulus.WienerProcess', 'WienerProcess', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (3677, 3691), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((3856, 3883), 'neurolib.utils.stimulus.WienerProcess', 'WienerProcess', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (3869, 3883), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((4517, 4586), 'neurolib.utils.stimulus.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'mu': '(3.0)', 'sigma': '(0.1)', 'tau': '(2 * DT)', 'n': '(2)', 'seed': '(42)'}), '(mu=3.0, sigma=0.1, tau=2 * DT, n=2, seed=42)\n', (4541, 4586), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, 
ZeroInput\n'), ((4862, 4931), 'neurolib.utils.stimulus.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'mu': '(3.0)', 'sigma': '(0.1)', 'tau': '(2 * DT)', 'n': '(2)', 'seed': '(42)'}), '(mu=3.0, sigma=0.1, tau=2 * DT, n=2, seed=42)\n', (4886, 4931), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((5608, 5656), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['step', 'self.STEP_SIZE'], {}), '(step, self.STEP_SIZE)\n', (5634, 5656), True, 'import numpy as np\n'), ((7006, 7123), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, amplitude=self.AMPLITUDE,\n frequency=self.FREQUENCY, n=2, seed=42)\n', (7021, 7123), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((7645, 7762), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, amplitude=self.AMPLITUDE,\n frequency=self.FREQUENCY, n=2, seed=42)\n', (7660, 7762), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((9325, 9438), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, amplitude=self.AMPLITUDE,\n frequency=self.FREQUENCY, n=2, seed=42)\n', (9336, 9438), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((9958, 10071), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, amplitude=self.AMPLITUDE,\n frequency=self.FREQUENCY, n=2, seed=42)\n', (9969, 10071), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((11725, 11842), 'neurolib.utils.stimulus.LinearRampInput', 'LinearRampInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'inp_max': 'self.INP_MAX', 'ramp_length': 'self.RAMP_LENGTH', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, inp_max=self.INP_MAX,\n ramp_length=self.RAMP_LENGTH, n=2, seed=42)\n', (11740, 11842), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((12332, 12449), 
'neurolib.utils.stimulus.LinearRampInput', 'LinearRampInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'inp_max': 'self.INP_MAX', 'ramp_length': 'self.RAMP_LENGTH', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, inp_max=self.INP_MAX,\n ramp_length=self.RAMP_LENGTH, n=2, seed=42)\n', (12347, 12449), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((14454, 14542), 'neurolib.utils.stimulus.ExponentialInput', 'ExponentialInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'inp_max': 'self.INP_MAX', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, inp_max=self.INP_MAX, n=2,\n seed=42)\n', (14470, 14542), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((15065, 15153), 'neurolib.utils.stimulus.ExponentialInput', 'ExponentialInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'inp_max': 'self.INP_MAX', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, inp_max=self.INP_MAX, n=2,\n seed=42)\n', (15081, 15153), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((15826, 15884), 'neurolib.utils.stimulus.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'mu': '(0.1)', 'sigma': '(0.02)', 'tau': '(2.0)', 'n': '(2)'}), '(mu=0.1, sigma=0.02, tau=2.0, n=2)\n', (15850, 15884), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((15898, 15952), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'amplitude': '(0.2)', 'frequency': '(50)', 'n': '(2)', 'start': '(5)'}), '(amplitude=0.2, frequency=50, n=2, start=5)\n', (15909, 15952), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((15967, 16026), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'amplitude': '(0.1)', 'frequency': '(100)', 'n': '(2)', 'start': '(2)'}), '(amplitude=0.1, frequency=100, n=2, start=2)\n', (15982, 16026), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((16042, 16080), 'neurolib.utils.stimulus.StepInput', 'StepInput', ([], {'step_size': '(0.5)', 'n': '(2)', 'start': '(7)'}), '(step_size=0.5, n=2, start=7)\n', (16051, 16080), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((17740, 17798), 'neurolib.utils.stimulus.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'mu': '(0.1)', 'sigma': '(0.02)', 'tau': '(2.0)', 'n': '(2)'}), '(mu=0.1, sigma=0.02, tau=2.0, n=2)\n', (17764, 17798), 
False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((17812, 17859), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'amplitude': '(0.2)', 'frequency': '(20.0)', 'n': '(2)'}), '(amplitude=0.2, frequency=20.0, n=2)\n', (17823, 17859), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((17874, 17925), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'amplitude': '(0.1)', 'frequency': '(10.0)', 'n': '(2)'}), '(amplitude=0.1, frequency=10.0, n=2)\n', (17889, 17925), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((17941, 17970), 'neurolib.utils.stimulus.StepInput', 'StepInput', ([], {'step_size': '(0.5)', 'n': '(2)'}), '(step_size=0.5, n=2)\n', (17950, 17970), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((19582, 19640), 'neurolib.utils.stimulus.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'mu': '(0.1)', 'sigma': '(0.02)', 'tau': '(2.0)', 'n': '(2)'}), '(mu=0.1, sigma=0.02, tau=2.0, n=2)\n', (19606, 19640), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((19654, 19701), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'amplitude': '(0.2)', 'frequency': '(20.0)', 'n': '(2)'}), '(amplitude=0.2, frequency=20.0, n=2)\n', (19665, 19701), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((19716, 19767), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'amplitude': '(0.1)', 'frequency': '(10.0)', 'n': '(2)'}), '(amplitude=0.1, frequency=10.0, n=2)\n', (19731, 19767), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((19783, 19812), 'neurolib.utils.stimulus.StepInput', 'StepInput', ([], {'step_size': '(0.5)', 'n': '(2)'}), '(step_size=0.5, n=2)\n', (19792, 19812), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((21274, 21298), 'neurolib.utils.stimulus.RectifiedInput', 'RectifiedInput', (['(0.2)'], {'n': '(2)'}), '(0.2, n=2)\n', (21288, 21298), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((21483, 21507), 
'neurolib.utils.stimulus.RectifiedInput', 'RectifiedInput', (['(0.2)'], {'n': '(2)'}), '(0.2, n=2)\n', (21497, 21507), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((2890, 2905), 'numpy.zeros', 'np.zeros', (['SHAPE'], {}), '(SHAPE)\n', (2898, 2905), True, 'import numpy as np\n'), ((6481, 6501), 'numpy.mean', 'np.mean', (['sin'], {'axis': '(1)'}), '(sin, axis=1)\n', (6488, 6501), True, 'import numpy as np\n'), ((6503, 6533), 'numpy.array', 'np.array', (['(2 * [self.AMPLITUDE])'], {}), '(2 * [self.AMPLITUDE])\n', (6511, 6533), True, 'import numpy as np\n'), ((8809, 8828), 'numpy.mean', 'np.mean', (['sq'], {'axis': '(1)'}), '(sq, axis=1)\n', (8816, 8828), True, 'import numpy as np\n'), ((8830, 8860), 'numpy.array', 'np.array', (['(2 * [self.AMPLITUDE])'], {}), '(2 * [self.AMPLITUDE])\n', (8838, 8860), True, 'import numpy as np\n'), ((11122, 11142), 'numpy.max', 'np.max', (['ramp'], {'axis': '(1)'}), '(ramp, axis=1)\n', (11128, 11142), True, 'import numpy as np\n'), ((11144, 11172), 'numpy.array', 'np.array', (['(2 * [self.INP_MAX])'], {}), '(2 * [self.INP_MAX])\n', (11152, 11172), True, 'import numpy as np\n'), ((11206, 11226), 'numpy.min', 'np.min', (['ramp'], {'axis': '(1)'}), '(ramp, axis=1)\n', (11212, 11226), True, 'import numpy as np\n'), ((11228, 11248), 'numpy.array', 'np.array', (['(2 * [0.25])'], {}), '(2 * [0.25])\n', (11236, 11248), True, 'import numpy as np\n'), ((13489, 13513), 'numpy.max', 'np.max', (['exp_rise'], {'axis': '(1)'}), '(exp_rise, axis=1)\n', (13495, 13513), True, 'import numpy as np\n'), ((13515, 13543), 'numpy.array', 'np.array', (['(2 * [self.INP_MAX])'], {}), '(2 * [self.INP_MAX])\n', (13523, 13543), True, 'import numpy as np\n'), ((705, 732), 'neurolib.utils.stimulus.WienerProcess', 'WienerProcess', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (718, 732), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((973, 1000), 'neurolib.utils.stimulus.WienerProcess', 'WienerProcess', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (986, 1000), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((1107, 1135), 'numpy.around', 'np.around', (['(TESTING_TIME / DT)'], {}), '(TESTING_TIME / DT)\n', (1116, 1135), True, 'import numpy as np\n'), ((1302, 1329), 'neurolib.utils.stimulus.WienerProcess', 'WienerProcess', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (1315, 1329), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((2167, 2187), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)'], {}), '(5, 5)\n', (2181, 2187), True, 'import numpy as np\n'), ((2194, 2210), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (2202, 2210), True, 'import numpy as np\n'), ((2687, 2710), 'neurolib.utils.stimulus.ZeroInput', 'ZeroInput', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (2696, 2710), False, 'from neurolib.utils.stimulus 
import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((3451, 3478), 'neurolib.utils.stimulus.WienerProcess', 'WienerProcess', ([], {'n': '(2)', 'seed': '(42)'}), '(n=2, seed=42)\n', (3464, 3478), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((4191, 4260), 'neurolib.utils.stimulus.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'mu': '(3.0)', 'sigma': '(0.1)', 'tau': '(2 * DT)', 'n': '(2)', 'seed': '(42)'}), '(mu=3.0, sigma=0.1, tau=2 * DT, n=2, seed=42)\n', (4215, 4260), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((5359, 5408), 'neurolib.utils.stimulus.StepInput', 'StepInput', ([], {'step_size': 'self.STEP_SIZE', 'n': '(2)', 'seed': '(42)'}), '(step_size=self.STEP_SIZE, n=2, seed=42)\n', (5368, 5408), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((5709, 5794), 'neurolib.utils.stimulus.StepInput', 'StepInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'step_size': 'self.STEP_SIZE', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, step_size=self.STEP_SIZE, n=2,\n seed=42)\n', (5718, 5794), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((6182, 6281), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)', 'dc_bias': '(True)'}), '(amplitude=self.AMPLITUDE, frequency=self.FREQUENCY, n=2,\n seed=42, dc_bias=True)\n', (6197, 6281), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((6586, 6703), 'neurolib.utils.stimulus.SinusoidalInput', 'SinusoidalInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, amplitude=self.AMPLITUDE,\n frequency=self.FREQUENCY, n=2, seed=42)\n', (6601, 6703), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((8493, 8570), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(amplitude=self.AMPLITUDE, frequency=self.FREQUENCY, n=2, seed=42)\n', (8504, 8570), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((8912, 
9025), 'neurolib.utils.stimulus.SquareInput', 'SquareInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'amplitude': 'self.AMPLITUDE', 'frequency': 'self.FREQUENCY', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, amplitude=self.AMPLITUDE,\n frequency=self.FREQUENCY, n=2, seed=42)\n', (8923, 9025), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((10805, 10890), 'neurolib.utils.stimulus.LinearRampInput', 'LinearRampInput', ([], {'inp_max': 'self.INP_MAX', 'ramp_length': 'self.RAMP_LENGTH', 'n': '(2)', 'seed': '(42)'}), '(inp_max=self.INP_MAX, ramp_length=self.RAMP_LENGTH, n=2,\n seed=42)\n', (10820, 10890), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((11302, 11419), 'neurolib.utils.stimulus.LinearRampInput', 'LinearRampInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'inp_max': 'self.INP_MAX', 'ramp_length': 'self.RAMP_LENGTH', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, inp_max=self.INP_MAX,\n ramp_length=self.RAMP_LENGTH, n=2, seed=42)\n', (11317, 11419), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((13169, 13238), 'neurolib.utils.stimulus.ExponentialInput', 'ExponentialInput', ([], {'inp_max': 'self.INP_MAX', 'exp_type': '"""rise"""', 'n': '(2)', 'seed': '(42)'}), "(inp_max=self.INP_MAX, exp_type='rise', n=2, seed=42)\n", (13185, 13238), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((13663, 13733), 'neurolib.utils.stimulus.ExponentialInput', 'ExponentialInput', ([], {'inp_max': 'self.INP_MAX', 'exp_type': '"""decay"""', 'n': '(2)', 'seed': '(42)'}), "(inp_max=self.INP_MAX, exp_type='decay', n=2, seed=42)\n", (13679, 13733), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((14060, 14148), 'neurolib.utils.stimulus.ExponentialInput', 'ExponentialInput', ([], {'start': 'STIM_START', 'end': 'STIM_END', 'inp_max': 'self.INP_MAX', 'n': '(2)', 'seed': '(42)'}), '(start=STIM_START, end=STIM_END, inp_max=self.INP_MAX, n=2,\n seed=42)\n', (14076, 14148), False, 'from neurolib.utils.stimulus import ConcatenatedStimulus, ExponentialInput, LinearRampInput, OrnsteinUhlenbeckProcess, RectifiedInput, SinusoidalInput, SquareInput, StepInput, SummedStimulus, WienerProcess, ZeroInput\n'), ((13576, 13593), 'numpy.diff', 'np.diff', (['exp_rise'], {}), '(exp_rise)\n', (13583, 13593), True, 'import numpy as np\n'), ((13978, 13996), 'numpy.diff', 'np.diff', (['exp_decay'], {}), '(exp_decay)\n', (13985, 13996), True, 'import numpy as np\n')]
|
import argparse
from collections import defaultdict
import base64
import requests
import uncurl
from vwh_importable_data import POST_RESP_BODY_2, POST_RESP_HEADERS_2, OPTIONS_RESP_BODY_2, \
POST_RESP_BODY_1, OPTIONS_RESP_HEADERS_2, POST_RESP_HEADERS_1, OPTIONS_RESP_BODY_1, \
OPTIONS_RESP_HEADERS_1, POST_REQ_2, OPTIONS_REQ_2, POST_REQ_1, OPTIONS_REQ_1, GAME_URL
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--credentials-file', '-c',
help='Path to a file containing base64.b64encode("Username:password")')
parser.add_argument(
'--login-url', '-l',
help='Login url of the service which needs to authenticate me',
default='https://armorgames.com/login')
parser.add_argument(
'--game-url', '-g',
help='URL of the game to be played. Would require to be authenticated by the login service')
return parser
def extract_credentials_from_file(credentials_file):
"""
    :param credentials_file: path to a file holding base64.b64encode(b"username:password")
    :return: the username (as bytes) and the hex-encoded password
"""
try:
with open(credentials_file, 'rb') as thefile:
content = thefile.read()
credentials_string = base64.b64decode(content)
user, pwd = credentials_string.split(b':', 1)
return user, pwd.hex()
except (TypeError, IOError) as err:
raise Exception(
"Something's wrong with the credentials file: {}:"
"\n{}".format(credentials_file, str(err)))
def cli_main_old():
parser = get_parser()
args = parser.parse_args()
user, encoded_password = extract_credentials_from_file(args.credentials_file)
session = requests.Session()
response = session.post(
args.login_url, data={'username': user, 'password': bytes.fromhex(encoded_password)})
game_url = GAME_URL
    # When I log in and fetch the page below, what will the src of the iframe be?
    # armorgames.com/forge-of-gods-game/17842
    response2 = session.get('https://armorgames.com/forge-of-gods-game/17842')
    start_of_auth_token = response2.text.index('auth_token=')
    # Slice from just past 'auth_token=' (11 characters) to the end of the page.
    string_from_auth_token = response2.text[start_of_auth_token + 11:]
# todo
# 1. Compare 2 request sets from get_league
class GetLeagueRequest(object):
def __init__(self, options_request_str, post_request_str):
"""Holds data regarding the `get_league` requests made. Strips out newline characters
:param str options_request_str: the curl string for the OPTIONS http request
:param str post_request_str: the curl string for the second http request, a POST
"""
self.options_request = uncurl.parse(options_request_str.replace('\n', ''))
self.post_request = uncurl.parse(post_request_str.replace('\n', ''))
class GetLeagueResponse(object):
def __init__(self, options_headers, options_body, post_headers, post_body):
"""Holds data regarding the responses to the `get_league` requests.
        Copy-paste the headers and the bodies from Google Chrome's network tab.
        :param str options_headers: the string copy-pasted from Google Chrome, from the HEADERS section
        :param str options_body: the body of the OPTIONS response
        :param str post_headers: the headers of the POST response
        :param str post_body: the body of the POST response
"""
        # Parse the copy-pasted header block into a name -> [values] multi-map
        # and keep the remaining payloads verbatim.
        self.options_headers = defaultdict(lambda: [])
        for line in options_headers.splitlines():
            if ':' in line:
                name, _, value = line.partition(':')
                self.options_headers[name.strip()].append(value.strip())
        self.options_body = options_body
        self.post_headers = post_headers
        self.post_body = post_body
def cli_main():
options_req_1 = OPTIONS_REQ_1
post_req_1 = POST_REQ_1
options_req_2 = OPTIONS_REQ_2
post_req_2 = POST_REQ_2
get_league_request_1 = GetLeagueRequest(options_req_1, post_req_1)
get_league_request_2 = GetLeagueRequest(options_req_2, post_req_2)
options_resp_headers_1 = OPTIONS_RESP_HEADERS_1
options_resp_body_1 = OPTIONS_RESP_BODY_1
post_resp_headers_1 = POST_RESP_HEADERS_1
post_resp_body_1 = POST_RESP_BODY_1
options_resp_headers_2 = OPTIONS_RESP_HEADERS_2
options_resp_body_2 = OPTIONS_RESP_BODY_2
post_resp_headers_2 = POST_RESP_HEADERS_2
post_resp_body_2 = POST_RESP_BODY_2
get_league_response_1 = GetLeagueResponse(
options_resp_headers_1, options_resp_body_1, post_resp_headers_1, post_resp_body_1)
get_league_response_2 = GetLeagueResponse(
options_resp_headers_2, options_resp_body_2, post_resp_headers_2, post_resp_body_2)
if __name__ == '__main__':
cli_main()
|
[
"collections.defaultdict",
"requests.Session",
"argparse.ArgumentParser",
"base64.b64decode"
] |
[((397, 422), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (420, 422), False, 'import argparse\n'), ((1535, 1553), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1551, 1553), False, 'import requests\n'), ((2994, 3018), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (3005, 3018), False, 'from collections import defaultdict\n'), ((1113, 1138), 'base64.b64decode', 'base64.b64decode', (['content'], {}), '(content)\n', (1129, 1138), False, 'import base64\n')]
|
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import abc
import lib.constants as c
from lib.processor import BackendClientProcessor
class Service(object):
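    """Abstract base class for a backend service.

    Concrete services must implement start_service, stop_service and
    restart_service; the constructor wires up the config, logger,
    normalizer and the backend RPC client.
    """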
def __init__(self, normalizer, svc_cfg):
self.svc_cfg = svc_cfg
self.logger = c.logger
self.normalizer = normalizer
self.status = c.SVC_INIT
self._backendp = BackendClientProcessor(exchange='', routing_key=c.AMQP_RPC_BACKEND_QUEUE)
@abc.abstractmethod
def start_service(self):
raise NotImplementedError()
@abc.abstractmethod
def stop_service(self):
raise NotImplementedError()
@abc.abstractmethod
def restart_service(self):
raise NotImplementedError()
|
[
"lib.processor.BackendClientProcessor"
] |
[((500, 573), 'lib.processor.BackendClientProcessor', 'BackendClientProcessor', ([], {'exchange': '""""""', 'routing_key': 'c.AMQP_RPC_BACKEND_QUEUE'}), "(exchange='', routing_key=c.AMQP_RPC_BACKEND_QUEUE)\n", (522, 573), False, 'from lib.processor import BackendClientProcessor\n')]
|
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr
import pdb
def fpn_rpn_reshape(pred_cls_score_list, pred_bbox_offsets_list):
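    # Flatten per-level, per-image RPN predictions from (C, H, W) feature maps
    # into (N, 2) class scores and (N, 4) bbox offsets, then concatenate
    # across FPN levels and across the batch.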
final_pred_bbox_offsets_list = []
final_pred_cls_score_list = []
batch_per_gpu = pred_cls_score_list[0].shape[0]
for bid in range(batch_per_gpu):
batch_pred_bbox_offsets_list = []
batch_pred_cls_score_list = []
for i in range(len(pred_cls_score_list)):
pred_cls_score_perlvl = pred_cls_score_list[i][bid] \
.transpose(1, 2, 0).reshape(-1, 2)
pred_bbox_offsets_perlvl = pred_bbox_offsets_list[i][bid] \
.transpose(1, 2, 0).reshape(-1, 4)
batch_pred_cls_score_list.append(pred_cls_score_perlvl)
batch_pred_bbox_offsets_list.append(pred_bbox_offsets_perlvl)
batch_pred_cls_score = F.concat(batch_pred_cls_score_list, axis=0)
batch_pred_bbox_offsets = F.concat(batch_pred_bbox_offsets_list, axis=0)
final_pred_cls_score_list.append(batch_pred_cls_score)
final_pred_bbox_offsets_list.append(batch_pred_bbox_offsets)
final_pred_cls_score = F.concat(final_pred_cls_score_list, axis=0)
final_pred_bbox_offsets = F.concat(final_pred_bbox_offsets_list, axis=0)
return final_pred_cls_score, final_pred_bbox_offsets
def fpn_anchor_target_opr_core_impl(
gt_boxes, im_info, anchors, allow_low_quality_matches=True):
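    # Assign a classification label (1 = positive, 0 = negative,
    # ignore_label = ignored) and a bbox regression target to every anchor,
    # based on IoU overlap with the valid ground-truth boxes.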
ignore_label = config.ignore_label
# get the gt boxes
gtboxes = gt_boxes[:im_info[5].astype(np.int32)]
ignore_mask = F.equal(gtboxes[:, 4], config.ignore_label)
# find the valid gtboxes
_, index = F.cond_take(1 - ignore_mask > 0, ignore_mask)
valid_gt_boxes = gtboxes[index.astype(np.int32)]
# compute the iou matrix
overlaps = box_overlap_opr(anchors, valid_gt_boxes[:, :4])
    # match each anchor to its best-overlapping gt box
a_shp0 = anchors.shape[0]
argmax_overlaps = F.argmax(overlaps, axis=1)
max_overlaps = F.nn.indexing_one_hot(overlaps, argmax_overlaps.astype(np.int32), 1)
labels = F.ones(a_shp0).astype(np.int32) * ignore_label
# set negative ones
labels = labels * (max_overlaps >= config.rpn_negative_overlap).astype(np.float32)
# set positive ones
fg_mask = (max_overlaps >= config.rpn_positive_overlap)
const_one = mge.tensor(1.0)
if allow_low_quality_matches:
# match the max gt
gt_max_overlaps = F.max(overlaps, axis=0)
gt_argmax_overlaps = F.argmax(overlaps, axis=0)
gt_argmax_overlaps = gt_argmax_overlaps.astype(np.int32)
max_overlaps[gt_argmax_overlaps] = 1.
m = gt_max_overlaps.shape[0]
argmax_overlaps[gt_argmax_overlaps] = F.linspace(0, m - 1, m).astype(np.int32)
fg_mask = (max_overlaps >= config.rpn_positive_overlap)
labels[fg_mask] = 1
# compute the bbox targets
bbox_targets = bbox_transform_opr(
anchors, valid_gt_boxes[argmax_overlaps, :4])
if config.rpn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(anchors.device)
mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(anchors.device)
minus_opr = mean_opr / std_opr
bbox_targets = bbox_targets / std_opr - minus_opr
return labels, bbox_targets
def fpn_anchor_target(boxes, im_info, all_anchors_list):
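    # Build anchor targets per image and per FPN level, concatenate them, and
    # then subsample positives/negatives down to config.num_sample_anchors.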
final_labels_list = []
final_bbox_targets_list = []
batch_per_gpu = boxes.shape[0]
for bid in range(batch_per_gpu):
batch_labels_list = []
batch_bbox_targets_list = []
for i in range(len(all_anchors_list)):
anchors_perlvl = all_anchors_list[i]
rpn_labels_perlvl, rpn_bbox_targets_perlvl = fpn_anchor_target_opr_core_impl(
boxes[bid], im_info[bid], anchors_perlvl)
batch_labels_list.append(rpn_labels_perlvl)
batch_bbox_targets_list.append(rpn_bbox_targets_perlvl)
# here we samples the rpn_labels
concated_batch_labels = F.concat(batch_labels_list, axis=0)
concated_batch_bbox_targets = F.concat(batch_bbox_targets_list, axis=0)
# sample labels
num_positive = config.num_sample_anchors * config.positive_anchor_ratio
concated_batch_labels = _bernoulli_sample_labels(concated_batch_labels,
num_positive, 1, config.ignore_label)
num_positive = F.equal(concated_batch_labels, 1).sum()
num_negative = config.num_sample_anchors - num_positive
concated_batch_labels = _bernoulli_sample_labels(concated_batch_labels,
num_negative, 0, config.ignore_label)
final_labels_list.append(concated_batch_labels)
final_bbox_targets_list.append(concated_batch_bbox_targets)
final_labels = F.concat(final_labels_list, axis=0)
final_bbox_targets = F.concat(final_bbox_targets_list, axis=0)
bbox_targets, labels = final_bbox_targets.detach(), final_labels.detach()
return labels, bbox_targets
def _bernoulli_sample_labels(
labels, num_samples, sample_value, ignore_label=-1):
""" Using the bernoulli sampling method"""
sample_label_mask = F.equal(labels, sample_value)
num_mask = sample_label_mask.sum()
num_final_samples = F.minimum(num_mask, num_samples)
# here, we use the bernoulli probability to sample the anchors
sample_prob = num_final_samples / num_mask
uniform_rng = rand.uniform(0, 1, sample_label_mask.shape)
disable_mask = (uniform_rng >= sample_prob) * sample_label_mask
#TODO check cudaerror: illegal memory access was encountered
labels = labels * (1 - disable_mask) + disable_mask * ignore_label
return labels
|
[
"megengine.functional.argmax",
"megengine.tensor",
"megengine.functional.linspace",
"megengine.functional.equal",
"megengine.random.uniform",
"megengine.functional.ones",
"megengine.functional.minimum",
"det_opr.bbox_opr.bbox_transform_opr",
"det_opr.bbox_opr.box_overlap_opr",
"megengine.functional.max",
"megengine.functional.concat",
"megengine.functional.cond_take"
] |
[((1268, 1311), 'megengine.functional.concat', 'F.concat', (['final_pred_cls_score_list'], {'axis': '(0)'}), '(final_pred_cls_score_list, axis=0)\n', (1276, 1311), True, 'import megengine.functional as F\n'), ((1342, 1388), 'megengine.functional.concat', 'F.concat', (['final_pred_bbox_offsets_list'], {'axis': '(0)'}), '(final_pred_bbox_offsets_list, axis=0)\n', (1350, 1388), True, 'import megengine.functional as F\n'), ((1696, 1739), 'megengine.functional.equal', 'F.equal', (['gtboxes[:, 4]', 'config.ignore_label'], {}), '(gtboxes[:, 4], config.ignore_label)\n', (1703, 1739), True, 'import megengine.functional as F\n'), ((1785, 1830), 'megengine.functional.cond_take', 'F.cond_take', (['(1 - ignore_mask > 0)', 'ignore_mask'], {}), '(1 - ignore_mask > 0, ignore_mask)\n', (1796, 1830), True, 'import megengine.functional as F\n'), ((1933, 1980), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['anchors', 'valid_gt_boxes[:, :4]'], {}), '(anchors, valid_gt_boxes[:, :4])\n', (1948, 1980), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((2057, 2083), 'megengine.functional.argmax', 'F.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (2065, 2083), True, 'import megengine.functional as F\n'), ((2449, 2464), 'megengine.tensor', 'mge.tensor', (['(1.0)'], {}), '(1.0)\n', (2459, 2464), True, 'import megengine as mge\n'), ((3030, 3094), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['anchors', 'valid_gt_boxes[argmax_overlaps, :4]'], {}), '(anchors, valid_gt_boxes[argmax_overlaps, :4])\n', (3048, 3094), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((4906, 4941), 'megengine.functional.concat', 'F.concat', (['final_labels_list'], {'axis': '(0)'}), '(final_labels_list, axis=0)\n', (4914, 4941), True, 'import megengine.functional as F\n'), ((4967, 5008), 'megengine.functional.concat', 'F.concat', (['final_bbox_targets_list'], {'axis': '(0)'}), '(final_bbox_targets_list, axis=0)\n', (4975, 5008), True, 'import megengine.functional as F\n'), ((5282, 5311), 'megengine.functional.equal', 'F.equal', (['labels', 'sample_value'], {}), '(labels, sample_value)\n', (5289, 5311), True, 'import megengine.functional as F\n'), ((5375, 5407), 'megengine.functional.minimum', 'F.minimum', (['num_mask', 'num_samples'], {}), '(num_mask, num_samples)\n', (5384, 5407), True, 'import megengine.functional as F\n'), ((5540, 5583), 'megengine.random.uniform', 'rand.uniform', (['(0)', '(1)', 'sample_label_mask.shape'], {}), '(0, 1, sample_label_mask.shape)\n', (5552, 5583), True, 'import megengine.random as rand\n'), ((983, 1026), 'megengine.functional.concat', 'F.concat', (['batch_pred_cls_score_list'], {'axis': '(0)'}), '(batch_pred_cls_score_list, axis=0)\n', (991, 1026), True, 'import megengine.functional as F\n'), ((1061, 1107), 'megengine.functional.concat', 'F.concat', (['batch_pred_bbox_offsets_list'], {'axis': '(0)'}), '(batch_pred_bbox_offsets_list, axis=0)\n', (1069, 1107), True, 'import megengine.functional as F\n'), ((2559, 2582), 'megengine.functional.max', 'F.max', (['overlaps'], {'axis': '(0)'}), '(overlaps, axis=0)\n', (2564, 2582), True, 'import megengine.functional as F\n'), ((2612, 2638), 'megengine.functional.argmax', 'F.argmax', (['overlaps'], {'axis': '(0)'}), '(overlaps, axis=0)\n', (2620, 2638), True, 'import megengine.functional as F\n'), ((4147, 4182), 'megengine.functional.concat', 'F.concat', (['batch_labels_list'], {'axis': '(0)'}), '(batch_labels_list, axis=0)\n', (4155, 4182), True, 'import megengine.functional as F\n'), ((4221, 4262), 'megengine.functional.concat', 'F.concat', (['batch_bbox_targets_list'], {'axis': '(0)'}), '(batch_bbox_targets_list, axis=0)\n', (4229, 4262), True, 'import megengine.functional as F\n'), ((2190, 2204), 'megengine.functional.ones', 'F.ones', (['a_shp0'], {}), '(a_shp0)\n', (2196, 2204), True, 'import megengine.functional as F\n'), ((2842, 2865), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(m - 1)', 'm'], {}), '(0, m - 1, m)\n', (2852, 2865), True, 'import megengine.functional as F\n'), ((3165, 3212), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (3175, 3212), True, 'import megengine as mge\n'), ((3251, 3299), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (3261, 3299), True, 'import megengine as mge\n'), ((4524, 4557), 'megengine.functional.equal', 'F.equal', (['concated_batch_labels', '(1)'], {}), '(concated_batch_labels, 1)\n', (4531, 4557), True, 'import megengine.functional as F\n')]
|
from os import path
import click
from .settings import NEW_BUILDS_DIRNAME
from .util import print_and_run, read_version_data
def _wait_for_build(filename):
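    # Block until the user confirms the out-of-band build finished and the
    # expected artifact (with the current version substituted in) exists.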
click.echo('Build the client in another window, and return here afterwards')
version = read_version_data()['version']
filename = path.join(NEW_BUILDS_DIRNAME, filename.format(version=version))
while True:
if click.confirm('Has the build completed? '):
if path.exists(filename):
return
click.echo(f'File {filename} not found, please try again.')
@click.command()
def do_release():
print_and_run(('python', '-m', 'make.clean'))
print_and_run(('python', '-m', 'make', '--release'))
if click.confirm('Build Docker container?', default=True):
print_and_run(('python', '-m', 'make', '--docker', '--release'))
if click.confirm('Build MacOS client? ', default=True):
_wait_for_build('Kanmail-mac-{version}.tar.gz')
if click.confirm('Build Windows client? ', default=True):
_wait_for_build('Kanmail-win-{version}.zip')
if click.confirm('Build Linux client? ', default=False):
_wait_for_build('Kanmail-nix64-{version}.tar.gz')
print_and_run(('python', '-m', 'make', '--release', '--complete'))
if __name__ == '__main__':
do_release()
|
[
"click.confirm",
"click.echo",
"os.path.exists",
"click.command"
] |
[((574, 589), 'click.command', 'click.command', ([], {}), '()\n', (587, 589), False, 'import click\n'), ((164, 240), 'click.echo', 'click.echo', (['"""Build the client in another window, and return here afterwards"""'], {}), "('Build the client in another window, and return here afterwards')\n", (174, 240), False, 'import click\n'), ((723, 777), 'click.confirm', 'click.confirm', (['"""Build Docker container?"""'], {'default': '(True)'}), "('Build Docker container?', default=True)\n", (736, 777), False, 'import click\n'), ((860, 911), 'click.confirm', 'click.confirm', (['"""Build MacOS client? """'], {'default': '(True)'}), "('Build MacOS client? ', default=True)\n", (873, 911), False, 'import click\n'), ((977, 1030), 'click.confirm', 'click.confirm', (['"""Build Windows client? """'], {'default': '(True)'}), "('Build Windows client? ', default=True)\n", (990, 1030), False, 'import click\n'), ((1093, 1145), 'click.confirm', 'click.confirm', (['"""Build Linux client? """'], {'default': '(False)'}), "('Build Linux client? ', default=False)\n", (1106, 1145), False, 'import click\n'), ((394, 436), 'click.confirm', 'click.confirm', (['"""Has the build completed? """'], {}), "('Has the build completed? ')\n", (407, 436), False, 'import click\n'), ((453, 474), 'os.path.exists', 'path.exists', (['filename'], {}), '(filename)\n', (464, 474), False, 'from os import path\n'), ((511, 570), 'click.echo', 'click.echo', (['f"""File {filename} not found, please try again."""'], {}), "(f'File {filename} not found, please try again.')\n", (521, 570), False, 'import click\n')]
|
# Download the helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body="Join Earth's mightiest heroes. Like <NAME>.",
from_='+13255150320',
to='+919992520053'
)
print(message.sid)
|
[
"twilio.rest.Client"
] |
[((352, 383), 'twilio.rest.Client', 'Client', (['account_sid', 'auth_token'], {}), '(account_sid, auth_token)\n', (358, 383), False, 'from twilio.rest import Client\n')]
|
import untangle
from src.Data_Model.State import State
from src.SCXML_Parser.State_parsor import State_parsor
class Scxml_parsor(object):
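    """Parses an SCXML file into its initial state and a list of State objects."""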
def __init__(self, file):
self._tree = untangle.parse(file).scxml
self._first = State_parsor(self._tree).obtain_Initial()
self._states = []
        for state in self._tree.state:
st_parse = State_parsor(state)
self._states.append(st_parse.obtain_state())
@property
def first(self):
return self._first
@property
def states(self):
return self._states
|
[
"src.SCXML_Parser.State_parsor.State_parsor",
"untangle.parse"
] |
[((194, 214), 'untangle.parse', 'untangle.parse', (['file'], {}), '(file)\n', (208, 214), False, 'import untangle\n'), ((376, 395), 'src.SCXML_Parser.State_parsor.State_parsor', 'State_parsor', (['state'], {}), '(state)\n', (388, 395), False, 'from src.SCXML_Parser.State_parsor import State_parsor\n'), ((244, 268), 'src.SCXML_Parser.State_parsor.State_parsor', 'State_parsor', (['self._tree'], {}), '(self._tree)\n', (256, 268), False, 'from src.SCXML_Parser.State_parsor import State_parsor\n')]
|
from .. import combinators as c
from .. import maybeerror as me
import unittest as u
m = me.MaybeError
l = c.ConsList
def good(rest, state, result):
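    # Wrap a successful parse outcome in the MaybeError success shape.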
return m.pure({'rest': rest, 'state': state, 'result': result})
iz1 = c.basic
iz2 = c.position
iz3 = c.count
class Checks(u.TestCase):
def testCheckFunction(self):
self.assertIsNone(c.checkFunction('test', lambda: 1))
self.assertRaises(Exception, lambda: c.checkFunction('test', 'abc'))
self.assertRaises(Exception, lambda: c.checkFunction('test', None))
def testCheckParser(self):
self.assertIsNone(c.checkParser('test', c.pure(4)))
self.assertRaises(Exception, lambda: c.checkParser('test', 'abc'))
self.assertRaises(Exception, lambda: c.checkParser('test', None))
self.assertRaises(Exception, lambda: c.checkParser('test', lambda: None))
class TestParser(u.TestCase):
def testPure(self):
val = c.pure(3).parse('abc', 2)
self.assertEqual(val, good('abc', 2, 3))
def testZero(self):
self.assertEqual(c.zero.parse(None, None), m.zero)
def testError(self):
v1 = c.error('uh-oh').parse('abc', 123)
self.assertEqual(v1, m.error('uh-oh'))
def testFmap(self):
f = lambda x: x + 7
v1 = c.fmap(f, c.pure(3)).parse('ab', 81)
v2 = c.fmap(f, c.zero).parse('ab', 81)
v3 = c.fmap(f, c.error('oops')).parse('ab', 81)
self.assertEqual(v1, good('ab', 81, 10))
self.assertEqual(v2, m.zero)
self.assertEqual(v3, m.error('oops'))
def testBind(self):
two = c.bind(iz1.item, iz1.literal)
self.assertEqual(two.parse(l('abcde'), {}), m.zero)
self.assertEqual(two.parse(l('aabcde'), {}), good(l('bcde'), {}, 'a'))
def testCheck(self):
val = c.check(lambda x: len(x) > 3, c.get)
self.assertEqual(val.parse('abcde', []), good('abcde', [], 'abcde'))
self.assertEqual(val.parse('abc', []), m.zero)
def testUpdate(self):
v1 = c.update(lambda x: x + 'qrs').parse('abc', 18)
self.assertEqual(v1, good('abcqrs', 18, 'abcqrs'))
def testGet(self):
self.assertEqual(c.get.parse('abc', {}), good('abc', {}, 'abc'))
def testPut(self):
val = c.put('xyz')
self.assertEqual(val.parse('abc', []), good('xyz', [], 'xyz'))
def testUpdateState(self):
v1 = c.updateState(lambda x: x * 4).parse('abc', 18)
self.assertEqual(v1, good('abc', 72, 72))
def testGetState(self):
self.assertEqual(c.getState.parse('abc', 123), good('abc', 123, 123))
def testPutState(self):
v1 = c.putState(29).parse('abc123', 2)
self.assertEqual(v1, good('abc123', 29, 29))
def testMany0(self):
val = c.many0(iz1.literal(3))
self.assertEqual(val.parse(l([4,4,4]), {}), good(l([4,4,4]), {}, []))
self.assertEqual(val.parse(l([3,3,4,5]), {}), good(l([4,5]), {}, [3,3]))
def testMany1(self):
val = c.many1(iz1.literal(3))
self.assertEqual(val.parse(l([4,4,4]), {}), m.zero)
self.assertEqual(val.parse(l([3,3,4,5]), {}), good(l([4,5]), {}, [3,3]))
def testSeq(self):
val = c.seq([iz1.item, iz1.literal(2), iz1.literal(8)])
self.assertEqual(val.parse(l([3,2,4]), {}), m.zero)
self.assertEqual(val.parse(l([3,2,8,16]), {}), good(l([16]), {}, [3,2,8]))
def testAppP(self):
parser = c.appP(c.pure(lambda x,y,z: x + y * z),
iz1.item,
iz1.satisfy(lambda x: x > 2),
iz1.item)
v1 = parser.parse(l([1,2,3,4,5]), 'hi')
v2 = parser.parse(l([5,6,7,8,9]), 'bye')
v3 = parser.parse(l([5,6]), 'goodbye')
self.assertEqual(v1, m.zero)
self.assertEqual(v2, good(l([8,9]), 'bye', 47))
self.assertEqual(v3, m.zero)
def testAppPTypeError(self):
self.assertRaises(Exception, lambda: c.appP(lambda: None, iz1.item))
def testApp(self):
parser = c.app(lambda x,y,z: x + y * z,
iz1.item,
iz1.satisfy(lambda x: x > 2),
iz1.item)
v1 = parser.parse(l([1,2,3,4,5]), 'hi')
v2 = parser.parse(l([5,6,7,8,9]), 'bye')
v3 = parser.parse(l([5,6]), 'goodbye')
self.assertEqual(v1, m.zero)
self.assertEqual(v2, good(l([8,9]), 'bye', 47))
self.assertEqual(v3, m.zero)
def testSeq2R(self):
val = c.seq2R(iz1.literal(2), iz1.literal(3))
self.assertEqual(val.parse(l([4,5]), {}), m.zero)
self.assertEqual(val.parse(l([2,4,5]), {}), m.zero)
self.assertEqual(val.parse(l([2,3,4]), {}), good(l([4]), {}, 3))
def testSeq2L(self):
val = c.seq2L(iz1.literal(2), iz1.literal(3))
self.assertEqual(val.parse(l([4,5]), {}), m.zero)
self.assertEqual(val.parse(l([2,4,5]), {}), m.zero)
self.assertEqual(val.parse(l([2,3,4]), {}), good(l([4]), {}, 2))
def testRepeatCount0(self):
val = c.repeat(0, iz1.literal(4))
self.assertEqual(val.parse(l('0123'), {}), good(l('0123'), {}, []))
self.assertEqual(val.parse(l('4012'), {}), good(l('4012'), {}, []))
def testRepeatGreaterThan0(self):
val = c.repeat(2, iz1.literal('4'))
self.assertEqual(val.parse(l('012'), {}), m.zero)
self.assertEqual(val.parse(l('4012'), {}), m.zero)
self.assertEqual(val.parse(l('44012'), {}), good(l('012'), {}, ['4', '4']))
self.assertEqual(val.parse(l('444012'), {}), good(l('4012'), {}, ['4', '4']))
def testLookahead(self):
look = c.lookahead(iz1.literal(3))
self.assertEqual(look.parse(l([3,4,5]), {}), good(l([3,4,5]), {}, 3))
self.assertEqual(look.parse(l([2, 3,4,5]), {}), m.zero)
parser = c.seq2L(iz1.literal(2), look)
self.assertEqual(parser.parse(l([2,3,4,5]), None), good(l([3,4,5]), None, 2))
self.assertEqual(parser.parse(l([2,4,5]), None), m.zero)
self.assertEqual(parser.parse(l([3,4,5]), None), m.zero)
def testNot0(self):
val = c.not0(iz1.literal(2))
self.assertEqual(val.parse(l([2,3,4]), {}), m.zero)
self.assertEqual(val.parse(l([3,4,5]), {}), good(l([3,4,5]), {}, None))
def testAltBinaryRules(self):
g1, g2, b, e, e2 = c.pure(3), c.pure('hi'), c.zero, c.error('oops'), c.error('2nd')
r1, r3, r4 = good('abc', None, 3), m.zero, m.error('oops')
self.assertEqual(c.alt([g1, g2]).parse('abc', None), r1)
self.assertEqual(c.alt([g1, b]).parse('abc', None), r1)
self.assertEqual(c.alt([g1, e]).parse('abc', None), r1)
self.assertEqual(c.alt([b , g1]).parse('abc', None), r1)
self.assertEqual(c.alt([b , b]).parse('abc', None), r3)
self.assertEqual(c.alt([b , e]).parse('abc', None), r4)
self.assertEqual(c.alt([e , g1]).parse('abc', None), r4)
self.assertEqual(c.alt([e , b]).parse('abc', None), r4)
self.assertEqual(c.alt([e , e2]).parse('abc', None), r4)
def testAltCornerCases(self):
self.assertEqual(c.alt([]).parse(l([1,2,3]), None),
m.zero)
self.assertEqual(c.alt([c.pure('h')]).parse(l([1,2,3]), None),
good(l([1,2,3]), None, 'h'))
self.assertEqual(c.alt([c.error('oops')]).parse(l([1,2,3]), None),
m.error('oops'))
self.assertEqual(c.alt([c.zero]).parse(l([1,2,3]), None),
m.zero)
p1 = c.alt([c.zero, iz1.literal(1), iz1.literal(2), c.error('d')])
self.assertEqual(p1.parse(l([1,3,4]), None), good(l([3,4]), None, 1))
self.assertEqual(p1.parse(l([2,3,4]), None), good(l([3,4]), None, 2))
self.assertEqual(p1.parse(l([3,3,4]), None), m.error('d'))
def testOptional(self):
parser = c.optional(iz1.literal(3), 'blargh')
v1 = parser.parse(l([1,2,3]), 'hi')
v2 = parser.parse(l([3,2,1]), 'bye')
self.assertEqual(v1, good(l([1,2,3]), 'hi', 'blargh'))
self.assertEqual(v2, good(l([2,1]), 'bye', 3))
def testOptionalNoValue(self):
p = c.optional(iz1.literal(3))
v1 = p.parse(l([3,2,1]), None)
v2 = p.parse(l([1,2,3]), None)
self.assertEqual(v1, good(l([2,1]), None, 3))
self.assertEqual(v2, good(l([1,2,3]), None, None))
def testCatchError(self):
f1 = lambda e: c.pure(3)
f2 = lambda e: c.error('dead again')
# error -> good -- resumes parsing with tokens and state from before the error occurred
self.assertEqual(c.catchError(c.error('dead 1'), f1).parse('123', [2, 4]),
good('123', [2,4], 3))
# good -> good (unaffected by this combinator)
self.assertEqual(c.catchError(c.pure(18), f1).parse('123', [2,4]),
good('123', [2,4], 18))
# error -> error
self.assertEqual(c.catchError(c.error('dead 1'), f2).parse('123', [2,4]),
m.error('dead again'))
# good -> error is not possible with this combinator
def testMapError(self):
f = len
v1 = c.mapError(f, c.error('abcdef')).parse('123abc', None)
v2 = c.mapError(f, c.zero).parse('123abc', None)
v3 = c.mapError(f, c.pure(82)).parse('123abc', None)
self.assertEqual(v1, m.error(6))
self.assertEqual(v2, m.zero)
self.assertEqual(v3, good('123abc', None, 82))
def testCommit(self):
val = c.commit('bag-agg', iz1.literal(2))
self.assertEqual(val.parse(l([2,3,4]), 'hi'), good(l([3,4]), 'hi', 2))
self.assertEqual(val.parse(l([3,4,5]), 'hi'), m.error('bag-agg'))
def testAddError(self):
self.assertEqual(c.addError('oops', iz1.item).parse(l('abc'), None),
good(l('bc'), None, 'a'))
self.assertEqual(c.addError('oops', c.zero).parse(l('abc'), 12),
m.zero)
self.assertEqual(c.addError('oops', c.error(['err'])).parse(l('abc'), 12),
m.error(['oops', 'err']))
def testSepBy0(self):
parser = c.sepBy0(iz1.oneOf('pq'), iz1.oneOf('st'))
val1 = parser.parse(l('abc'), {})
val2 = parser.parse(l('ppabc'), {})
val3 = parser.parse(l('psabc'), {})
val4 = parser.parse(l('psqtqabc'), {})
self.assertEqual(val1, good(l('abc'), {}, None))
self.assertEqual(val2, good(l('pabc'), {}, ('p', [])))
self.assertEqual(val3, good(l('sabc'), {}, ('p', [])))
self.assertEqual(val4, good(l('abc'), {}, ('p', [('s', 'q'), ('t', 'q')])))
def testSepBy1(self):
parser = c.sepBy1(iz1.oneOf('pq'), iz1.oneOf('st'))
val1 = parser.parse(l('abc'), {})
val2 = parser.parse(l('ppabc'), {})
val3 = parser.parse(l('psabc'), {})
val4 = parser.parse(l('psqtqabc'), {})
self.assertEqual(val1, m.zero)
self.assertEqual(val2, good(l('pabc'), {}, ('p', [])))
self.assertEqual(val3, good(l('sabc'), {}, ('p', [])))
self.assertEqual(val4, good(l('abc'), {}, ('p', [('s', 'q'), ('t', 'q')])))
class BasicTokens(u.TestCase):
def testItemBasic(self):
self.assertEqual(iz1.item.parse(l(''), None), m.zero)
self.assertEqual(iz1.item.parse(l('abcdef'), None), good(l('bcdef'), None, 'a'))
def testLiteral(self):
val = iz1.literal(3)
self.assertEqual(val.parse(l([3,4,5]), {}), good(l([4,5]), {}, 3))
self.assertEqual(val.parse(l([4,5]), {}), m.zero)
def testSatisfy(self):
v1 = iz1.satisfy(lambda x: x > 3).parse(l([1,2,3]), 'bye')
v2 = iz1.satisfy(lambda x: x < 3).parse(l([1,2,3]), 'hi')
self.assertEqual(v1, m.zero)
self.assertEqual(v2, good(l([2,3]), 'hi', 1))
def testString(self):
parser = iz1.string('abc')
v1 = parser.parse(l('abcdef'), None)
v2 = parser.parse(l('abdef'), None)
self.assertEqual(v1, good(l('def'), None, 'abc'))
self.assertEqual(v2, m.zero)
def testStringAcceptsArray(self):
parser = iz1.string([1,2,3])
self.assertEqual(parser.parse(l([1,2,3,4,5]), None), good(l([4,5]), None, [1,2,3]))
self.assertEqual(parser.parse(l([1,2,4,5]), None), m.zero)
def testNot1(self):
val = iz1.not1(iz1.literal(2))
self.assertEqual(val.parse(l([2,3,4]), {}), m.zero)
self.assertEqual(val.parse(l([3,4,5]), {}), good(l([4,5]), {}, 3))
def testOneOf(self):
p = iz1.oneOf('abc')
self.assertEqual(p.parse(l('cqrs'), None), good(l('qrs'), None, 'c'))
self.assertEqual(p.parse(l('aqrs'), None), good(l('qrs'), None, 'a'))
self.assertEqual(p.parse(l('dqrs'), None), m.zero)
class PositionTokens(u.TestCase):
def testItemPosition(self):
self.assertEqual(iz2.item.parse(l(''), (1,1)), m.zero)
self.assertEqual(iz2.item.parse(l('abcdef'), (1,1)), good(l('bcdef'), (1,2), 'a'))
self.assertEqual(iz2.item.parse(l('\nbcdef'), (1,1)), good(l('bcdef'), (2,1), '\n'))
def testLiteral(self):
val = iz2.literal('3')
self.assertEqual(val.parse(l('345'), (3,8)), good(l('45'), (3,9), '3'))
self.assertEqual(val.parse(l('45'), (3,8)), m.zero)
def testSatisfy(self):
v1 = iz2.satisfy(lambda x: int(x) > 3).parse(l('123'), (2,2))
v2 = iz2.satisfy(lambda x: int(x) < 3).parse(l('123'), (2,2))
self.assertEqual(v1, m.zero)
self.assertEqual(v2, good(l('23'), (2,3), '1'))
def testString(self):
parser = iz2.string('abc')
v1 = parser.parse(l('abcdef'), (4,3))
v2 = parser.parse(l('abdef'), (4,3))
self.assertEqual(v1, good(l('def'), (4,6), 'abc'))
self.assertEqual(v2, m.zero)
def testNot1(self):
val = iz2.not1(iz2.literal('2'))
self.assertEqual(val.parse(l('234'), (1,1)), m.zero)
self.assertEqual(val.parse(l('345'), (1,1)), good(l('45'), (1,2), '3'))
def testOneOf(self):
p = iz2.oneOf('abc')
self.assertEqual(p.parse(l('cqrs'), (3,4)), good(l('qrs'), (3,5), 'c'))
self.assertEqual(p.parse(l('aqrs'), (8,1)), good(l('qrs'), (8,2), 'a'))
self.assertEqual(p.parse(l('dqrs'), (2,2)), m.zero)
class CountTokens(u.TestCase):
def testItemPosition(self):
self.assertEqual(iz3.item.parse(l(''), 8), m.zero)
self.assertEqual(iz3.item.parse(l('abcdef'), 5), good(l('bcdef'), 6, 'a'))
self.assertEqual(iz3.item.parse(l('\nbcdef'), 100), good(l('bcdef'), 101, '\n'))
def testLiteral(self):
val = iz3.literal('3')
self.assertEqual(val.parse(l('345'), 8), good(l('45'), 9, '3'))
self.assertEqual(val.parse(l('45'), 8), m.zero)
def testSatisfy(self):
v1 = iz3.satisfy(lambda x: int(x) > 3).parse(l('123'), 22)
v2 = iz3.satisfy(lambda x: int(x) < 3).parse(l('123'), 22)
self.assertEqual(v1, m.zero)
self.assertEqual(v2, good(l('23'), 23, '1'))
def testString(self):
parser = iz3.string('abc')
v1 = parser.parse(l('abcdef'), 43)
v2 = parser.parse(l('abdef'), 43)
self.assertEqual(v1, good(l('def'), 46, 'abc'))
self.assertEqual(v2, m.zero)
def testNot1(self):
val = iz3.not1(iz3.literal('2'))
self.assertEqual(val.parse(l('234'), 61), m.zero)
self.assertEqual(val.parse(l('345'), 61), good(l('45'), 62, '3'))
def testOneOf(self):
p = iz3.oneOf('abc')
self.assertEqual(p.parse(l('cqrs'), 4), good(l('qrs'), 5, 'c'))
self.assertEqual(p.parse(l('aqrs'), 8), good(l('qrs'), 9, 'a'))
self.assertEqual(p.parse(l('dqrs'), 7), m.zero)
if __name__ == "__main__":
u.main()
|
[
"unittest.main"
] |
[((15867, 15875), 'unittest.main', 'u.main', ([], {}), '()\n', (15873, 15875), True, 'import unittest as u\n')]
|
import sys
from docxtpl import DocxTemplate
import jinja2
import json
import os
import ast
files=sys.argv[1]
# print(files)
# print(sys.argv[2])
context = ast.literal_eval(sys.argv[2])  # literal_eval is safer than eval() for CLI input
print(type(context))
doc = DocxTemplate(files + r'\Http\Controllers\Full_Auth\Full_AuthComplete.docx')
doc.render(context)
doc.save(files + r'\Http\Controllers\Full_Auth\Full_Complete.docx')
|
[
"docxtpl.DocxTemplate"
] |
[((219, 297), 'docxtpl.DocxTemplate', 'DocxTemplate', (["(files + '\\\\Http\\\\Controllers\\\\Full_Auth\\\\Full_AuthComplete.docx')"], {}), "(files + '\\\\Http\\\\Controllers\\\\Full_Auth\\\\Full_AuthComplete.docx')\n", (231, 297), False, 'from docxtpl import DocxTemplate\n')]
|
# Generated by Django 3.2.2 on 2021-05-25 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('background', '0002_auto_20210524_1900'),
]
operations = [
migrations.RenameField(
model_name='concrete_flight',
old_name='flight',
new_name='flight_number',
),
migrations.RenameField(
model_name='concrete_flight',
old_name='id',
new_name='user_id',
),
migrations.RenameField(
model_name='order',
old_name='flight',
new_name='flight_number',
),
migrations.RenameField(
model_name='order',
old_name='user',
new_name='user_id',
),
migrations.AlterField(
model_name='order',
name='flight_type',
field=models.CharField(choices=[('3', '商务舱'), ('4', '经济舱'), ('2', '高端经济舱'), ('1', '头等舱')], max_length=1, verbose_name='舱位类型'),
),
]
|
[
"django.db.models.CharField",
"django.db.migrations.RenameField"
] |
[((238, 339), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""concrete_flight"""', 'old_name': '"""flight"""', 'new_name': '"""flight_number"""'}), "(model_name='concrete_flight', old_name='flight',\n new_name='flight_number')\n", (260, 339), False, 'from django.db import migrations, models\n'), ((392, 483), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""concrete_flight"""', 'old_name': '"""id"""', 'new_name': '"""user_id"""'}), "(model_name='concrete_flight', old_name='id',\n new_name='user_id')\n", (414, 483), False, 'from django.db import migrations, models\n'), ((536, 628), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""order"""', 'old_name': '"""flight"""', 'new_name': '"""flight_number"""'}), "(model_name='order', old_name='flight', new_name=\n 'flight_number')\n", (558, 628), False, 'from django.db import migrations, models\n'), ((680, 759), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""order"""', 'old_name': '"""user"""', 'new_name': '"""user_id"""'}), "(model_name='order', old_name='user', new_name='user_id')\n", (702, 759), False, 'from django.db import migrations, models\n'), ((921, 1044), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('3', '商务舱'), ('4', '经济舱'), ('2', '高端经济舱'), ('1', '头等舱')]", 'max_length': '(1)', 'verbose_name': '"""舱位类型"""'}), "(choices=[('3', '商务舱'), ('4', '经济舱'), ('2', '高端经济舱'), ('1',\n '头等舱')], max_length=1, verbose_name='舱位类型')\n", (937, 1044), False, 'from django.db import migrations, models\n')]
|
# Generated by Django 3.2.3 on 2021-05-23 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('covid', '0011_auto_20210522_2141'),
]
operations = [
migrations.AddField(
model_name='service',
name='consent',
field=models.BooleanField(default=False, help_text='I acknowledge that the details entered by me are correct. In case of spam reports against this posting, I am bound to being banned from this platform.'),
),
]
|
[
"django.db.models.BooleanField"
] |
[((334, 542), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""I acknowledge that the details entered by me are correct. In case of spam reports against this posting, I am bound to being banned from this platform."""'}), "(default=False, help_text=\n 'I acknowledge that the details entered by me are correct. In case of spam reports against this posting, I am bound to being banned from this platform.'\n )\n", (353, 542), False, 'from django.db import migrations, models\n')]
|
import ndspy
import ndspy.rom
import ndspy.narc
import code
import io
import os
import os.path
from os import path
import json
import copy
import re
# code.interact(local=dict(globals(), **locals()))
######################### FILE SPECIFIC CONSTANTS #############################
def set_global_vars():
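    # Load the session settings plus the static reference tables (type names,
    # effect descriptions, move names, etc.) into module-level globals.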
global ROM_NAME, TYPES, CATEGORIES, EFFECT_CATEGORIES, EFFECTS, STATUSES, TARGETS, STATS, PROPERTIES, MOVE_NAMES, MOVES_NARC_FORMAT, RESULT_EFFECTS
with open(f'session_settings.json', "r") as outfile:
settings = json.load(outfile)
ROM_NAME = settings['rom_name']
TYPES = ["Normal", "Fighting", "Flying", "Poison", "Ground", "Rock", "Bug", "Ghost", "Steel", "Fire", "Water","Grass","Electric","Psychic","Ice","Dragon","Dark","Fairy"]
CATEGORIES = ["Status","Physical","Special"]
EFFECT_CATEGORIES = ["No Special Effect", "Status Inflicting","Target Stat Changing","Healing","Chance to Inflict Status","Raising Target's Stat along Attack", "Lowering Target's Stat along Attack","Raise user stats","Lifesteal","OHKO","Weather","Safeguard", "Force Switch Out", "Unique Effect"]
EFFECTS = open(f'Reference_Files/effects.txt', "r").read().splitlines()
STATUSES = ["None","Visible","Temporary","Infatuation", "Trapped"]
TARGETS = ["Any adjacent","Random (User/ Adjacent ally)","Random adjacent ally","Any adjacent opponent","All excluding user","All adjacent opponents","User's party","User","Entire Field","Random adjacent opponent","Field Itself","Opponent's side of field","User's side of field","User (Selects target automatically)"]
STATS = ["None", "Attack", "Defense", "Special Attack", "Special Defense", "Speed", "Accuracy", "Evasion", "All" ]
PROPERTIES = ["contact","requires_charge","recharge_turn","blocked_by_protect","reflected_by_magic_coat","stolen_by_snatch","copied_by_mirror_move","punch_move","sound_move","grounded_by_gravity","defrosts_targets","hits_non-adjacent_opponents","healing_move","hits_through_substitute"]
MOVE_NAMES = open(f'{ROM_NAME}/texts/moves.txt', mode="r").read().splitlines()
for i,move in enumerate(MOVE_NAMES):
MOVE_NAMES[i] = re.sub(r'[^A-Za-z0-9 \-]+', '', move)
RESULT_EFFECTS = open(f'Reference_Files/result_effects.txt', "r").read().splitlines()
MOVES_NARC_FORMAT = [[1, "type"],
[1, "effect_category"],
[1, "category"],
[1, "power"],
[1, "accuracy"],
[1, "pp"],
[1, "priority"],
[1, "hits"],
[2, "result_effect"],
[1, "effect_chance"],
[1, "status"],
[1, "min_turns"],
[1, "max_turns"],
[1, "crit"],
[1, "flinch"],
[2, "effect"],
[1, "recoil"],
[1, "healing"],
[1, "target"],
[1, "stat_1"],
[1, "stat_2"],
[1, "stat_3"],
[1, "magnitude_1"],
[1, "magnitude_2"],
[1, "magnitude_3"],
[1, "stat_chance_1"],
[1, "stat_chance_2"],
[1, "stat_chance_3"],
[2, "flag"], ## Flag is always 53 53
[2, "properties"]]
#################################################################
def output_moves_json(narc):
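    # Dump every move record in the NARC archive to its own JSON file.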
set_global_vars()
data_index = 0
for data in narc.files:
data_name = data_index
read_narc_data(data, MOVES_NARC_FORMAT, data_name)
data_index += 1
def read_narc_data(data, narc_format, file_name):
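    # Parse one move record according to the (byte-width, field-name) pairs in
    # narc_format, derive human-readable values, and write both forms to JSON.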
stream = io.BytesIO(data)
move = {"raw": {}, "readable": {} }
#USE THE FORMAT LIST TO PARSE BYTES
for entry in narc_format:
move["raw"][entry[1]] = read_bytes(stream, entry[0])
#CONVERT TO READABLE FORMAT USING CONSTANTS/TEXT BANKS
move["readable"] = to_readable(move["raw"], file_name)
#OUTPUT TO JSON
if not os.path.exists(f'{ROM_NAME}/json/moves'):
os.makedirs(f'{ROM_NAME}/json/moves')
with open(f'{ROM_NAME}/json/moves/{file_name}.json', "w") as outfile:
json.dump(move, outfile)
def to_readable(raw, file_name):
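    # Map raw integer fields onto names from the lookup tables, decode the
    # signed magnitude/recoil values, and unpack the bit-packed hits and
    # properties fields.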
readable = copy.deepcopy(raw)
readable["index"] = file_name
readable["name"] = MOVE_NAMES[file_name]
readable["type"] = TYPES[raw["type"]]
readable["effect_category"] = EFFECT_CATEGORIES[raw["effect_category"]]
readable["category"] = CATEGORIES[raw["category"]]
#special case for tri attack
if raw["result_effect"] == 65535:
readable["result_effect"] = EFFECTS[36]
else:
readable["result_effect"] = RESULT_EFFECTS[raw["result_effect"]]
readable["effect"] = EFFECTS[raw["effect"]]
readable["status"] = STATUSES[raw["status"]]
if raw["recoil"] > 0:
readable["recoil"] = 256 - raw["recoil"]
readable["target"] = TARGETS[raw["target"]]
readable["stat_1"] = STATS[raw["stat_1"]]
readable["stat_2"] = STATS[raw["stat_2"]]
readable["stat_3"] = STATS[raw["stat_3"]]
if raw["magnitude_1"] > 6:
readable["magnitude_1"] = raw["magnitude_1"] - 256
if raw["magnitude_2"] > 6:
readable["magnitude_2"] = raw["magnitude_2"] - 256
if raw["magnitude_3"] > 6:
readable["magnitude_3"] = raw["magnitude_3"] - 256
index = 8
binary_hits = bin(raw["hits"])[2:].zfill(index)
hits = ["min_hits", "max_hits"]
for hit in hits:
amount = int(binary_hits[index-4:index],2)
readable[hit] = amount
index -= 4
index = 14
binary_props = bin(raw["properties"])[2:].zfill(index)
for prop in PROPERTIES:
amount = int(binary_props[index - 1])
readable[prop] = amount
index -= 1
return readable
def read_bytes(stream, n):
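    # Read n bytes from the stream and interpret them as a little-endian int.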
return int.from_bytes(stream.read(n), 'little')
|
[
"json.dump",
"copy.deepcopy",
"io.BytesIO",
"json.load",
"os.makedirs",
"os.path.exists",
"re.sub"
] |
[((3141, 3157), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (3151, 3157), False, 'import io\n'), ((3688, 3706), 'copy.deepcopy', 'copy.deepcopy', (['raw'], {}), '(raw)\n', (3701, 3706), False, 'import copy\n'), ((528, 546), 'json.load', 'json.load', (['outfile'], {}), '(outfile)\n', (537, 546), False, 'import json\n'), ((2107, 2144), 're.sub', 're.sub', (['"""[^A-Za-z0-9 \\\\-]+"""', '""""""', 'move'], {}), "('[^A-Za-z0-9 \\\\-]+', '', move)\n", (2113, 2144), False, 'import re\n'), ((3457, 3497), 'os.path.exists', 'os.path.exists', (['f"""{ROM_NAME}/json/moves"""'], {}), "(f'{ROM_NAME}/json/moves')\n", (3471, 3497), False, 'import os\n'), ((3501, 3538), 'os.makedirs', 'os.makedirs', (['f"""{ROM_NAME}/json/moves"""'], {}), "(f'{ROM_NAME}/json/moves')\n", (3512, 3538), False, 'import os\n'), ((3615, 3639), 'json.dump', 'json.dump', (['move', 'outfile'], {}), '(move, outfile)\n', (3624, 3639), False, 'import json\n')]
|
#!/usr/bin/env python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import subprocess
import csv
import io
import os
from rules_python.python.runfiles import runfiles
# This provides some tests for built RPMs, mostly by taking the built RPM and
# running rpm queries on it.
#
# Useful reading:
#
# - RPM queryformat documentation (shortish):
# https://rpm.org/user_doc/query_format.html
#
# - In-depth RPM query documentation:
# http://ftp.rpm.org/max-rpm/s1-rpm-query-parts.html
#
# - Specifically, about the --qf/--queryformat syntax:
# http://ftp.rpm.org/max-rpm/s1-rpm-query-parts.html#S3-RPM-QUERY-QUERYFORMAT-OPTION
#
# - --queryformat tags list: http://ftp.rpm.org/max-rpm/ch-queryformat-tags.html
#
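# As a quick manual sanity check, the same style of query can be run by hand
# (a sketch; the .rpm path below is a hypothetical placeholder):
#
#   rpm -qp --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' /path/to/test.rpm
#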
class PkgRpmBasicTest(unittest.TestCase):
def setUp(self):
self.runfiles = runfiles.Create()
self.test_rpm_path = self.runfiles.Rlocation(
"rules_pkg/experimental/tests/rpm/test_rpm.rpm")
self.test_rpm_bzip2_path = self.runfiles.Rlocation(
"rules_pkg/experimental/tests/rpm/test_rpm-bzip2.rpm")
self.maxDiff = None
def test_scriptlet_content(self):
expected = b"""\
preinstall scriptlet (using /bin/sh):
echo pre
postinstall scriptlet (using /bin/sh):
echo post
preuninstall scriptlet (using /bin/sh):
echo preun
postuninstall scriptlet (using /bin/sh):
echo postun
"""
output = subprocess.check_output(
["rpm", "-qp", "--scripts", self.test_rpm_path])
self.assertEqual(output, expected)
def test_basic_headers(self):
fields = {
"NAME": b"test_rpm",
"VERSION": b"1.1.1",
"RELEASE": b"2222",
"ARCH": b"noarch",
"GROUP": b"Unspecified",
"SUMMARY": b"pkg_rpm test rpm summary",
}
for fieldname, expected in fields.items():
output = subprocess.check_output([
"rpm", "-qp", "--queryformat", "%{" + fieldname + "}",
self.test_rpm_path
])
self.assertEqual(
output, expected,
"RPM Tag {} does not match expected value".format(fieldname))
def test_contents(self):
manifest_file = self.runfiles.Rlocation(
"rules_pkg/experimental/tests/rpm/manifest.csv")
manifest_specs = {}
with open(manifest_file, "r", newline='', encoding="utf-8") as fh:
manifest_reader = csv.DictReader(fh)
manifest_specs = {r['path']: r for r in manifest_reader}
# It is not necessary to check for file sizes, as the hashes are
# sufficient for determining whether or not files are the same.
#
# This also simplifies behavior where RPM's size calculations have
# sometimes changed, e.g.:
#
# https://github.com/rpm-software-management/rpm/commit/2cf7096ba534b065feb038306c792784458ac9c7
rpm_queryformat = (
"[%{FILENAMES}"
",%{FILEDIGESTS}"
",%{FILEUSERNAME}"
",%{FILEGROUPNAME}"
",%{FILEMODES:octal}"
",%{FILEFLAGS:fflags}"
",%{FILELINKTOS}"
"\n]"
)
rpm_queryformat_fieldnames = [
"path",
"digest",
"user",
"group",
"mode",
"fflags",
"symlink",
]
rpm_output = subprocess.check_output(
["rpm", "-qp", "--queryformat", rpm_queryformat, self.test_rpm_path])
sio = io.StringIO(rpm_output.decode('utf-8'))
rpm_output_reader = csv.DictReader(
sio, fieldnames = rpm_queryformat_fieldnames)
for rpm_file_info in rpm_output_reader:
my_path = rpm_file_info['path']
self.assertIn(my_path, manifest_specs)
self.assertDictEqual(manifest_specs[my_path], rpm_file_info)
def test_preamble_metadata(self):
metadata_prefix = "rules_pkg/experimental/tests/rpm"
rpm_filename = os.path.basename(self.test_rpm_path)
rpm_basename = os.path.splitext(rpm_filename)[0]
# Tuples of:
# Metadata name, RPM Tag prefix, exclusion list (currently only support "startswith")
#
# The exclusions should probably be regexps at some point, but right
# now, our job is relatively easy. They only operate on the
# "capability" portion of the tag.
test_md = [
("conflicts", "CONFLICT", []),
# rpm packages implicitly provide themselves, something like:
# "test_rpm = 1.1.1-2222". We don't bother testing this, since we
# don't take direct action to specify it.
("provides", "PROVIDE", [rpm_basename]),
# Skip rpmlib-related requirements; they are often dependent on the
# version of `rpm` we are using.
("requires", "REQUIRE", ["rpmlib"]),
]
for (mdtype, tag, exclusions) in test_md:
md_file = self.runfiles.Rlocation(
os.path.join(metadata_prefix, mdtype + ".csv"))
with open(md_file, "r", newline='', encoding="utf-8") as fh:
md_reader = csv.DictReader(fh, delimiter=':')
# I heard you like functional programming ;)
#
# This produces a list of outputs whenever the "capability"
# attribute starts with any of the values in the "exclusions"
# list.
md_specs_unsorted = [line for line in md_reader
if not any(line['capability'].startswith(e)
for e in exclusions)]
# And this sorts it, ordering by the sorted "association list"
# form of the dictionary.
#
# The sorting of the key values is not necessary with versions
# of python3 (3.5+, I believe) that have dicts maintain
# insertion order.
md_specs = sorted(md_specs_unsorted,
key = lambda x: sorted(x.items()))
# This typically becomes the equivalent of:
#
# '[%{PROVIDENEVRS};%{PROVIDEFLAGS:deptype}\n]'
#
# as passed to `rpm --queryformat`
rpm_queryformat = (
# NEVRS = Name Epoch Version Release (plural), which look something like:
# rpmlib(CompressedFileNames) <= 3.0.4-1
# or:
# bash
"[%{{{tag}NEVRS}}"
# Flags associated with the dependency type. This used to
# evaluate in what "sense" the dependency was added.
#
# Values often include things like:
#
# - "interp" for scriptlet interpreter dependencies
# - "postun" for dependencies of the "postun" scriptlet
# - "manual" for values that are explicitly specified
":%{{{tag}FLAGS:deptype}}"
"\n]"
).format(tag = tag)
rpm_queryformat_fieldnames = [
"capability",
"sense",
]
rpm_output = subprocess.check_output(
["rpm", "-qp", "--queryformat", rpm_queryformat, self.test_rpm_path])
sio = io.StringIO(rpm_output.decode('utf-8'))
rpm_output_reader = csv.DictReader(
sio, delimiter=':', fieldnames=rpm_queryformat_fieldnames)
# Get everything in the same order as the read-in metadata file
rpm_outputs_filtered_unsorted = [line for line in rpm_output_reader
if not any(line['capability'].startswith(e)
for e in exclusions)]
rpm_outputs_filtered = sorted(rpm_outputs_filtered_unsorted, key = lambda x: sorted(x.items()))
for expected, actual in zip(md_specs, rpm_outputs_filtered):
self.assertDictEqual(expected, actual,
msg="{} metadata discrepancy".format(mdtype))
def test_compression_none_provided(self):
# Test when we don't provide "binary_payload_compression" to pkg_rpm
my_rpm = self.test_rpm_path
rpm_output = subprocess.check_output(
["rpm", "-qp", "--queryformat", "%{PAYLOADCOMPRESSOR}", my_rpm])
sio = io.StringIO(rpm_output.decode('utf-8'))
actual_compressor = sio.read()
# `bzip2` compression was, AFAICT, never the default for rpmbuild(8),
# and never will be, so this should be fine.
self.assertNotEqual(actual_compressor, 'bzip2')
def test_compression_passthrough(self):
# Test when we provide "binary_payload_compression" to pkg_rpm
my_rpm = self.test_rpm_bzip2_path
rpm_output = subprocess.check_output(
["rpm", "-qp", "--queryformat", "%{PAYLOADCOMPRESSOR}", my_rpm])
sio = io.StringIO(rpm_output.decode('utf-8'))
actual_compressor = sio.read()
self.assertEqual(actual_compressor, 'bzip2')
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"rules_python.python.runfiles.runfiles.Create",
"os.path.basename",
"csv.DictReader",
"subprocess.check_output",
"os.path.splitext",
"os.path.join"
] |
[((9800, 9815), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9813, 9815), False, 'import unittest\n'), ((1376, 1393), 'rules_python.python.runfiles.runfiles.Create', 'runfiles.Create', ([], {}), '()\n', (1391, 1393), False, 'from rules_python.python.runfiles import runfiles\n'), ((1950, 2022), 'subprocess.check_output', 'subprocess.check_output', (["['rpm', '-qp', '--scripts', self.test_rpm_path]"], {}), "(['rpm', '-qp', '--scripts', self.test_rpm_path])\n", (1973, 2022), False, 'import subprocess\n'), ((3963, 4060), 'subprocess.check_output', 'subprocess.check_output', (["['rpm', '-qp', '--queryformat', rpm_queryformat, self.test_rpm_path]"], {}), "(['rpm', '-qp', '--queryformat', rpm_queryformat,\n self.test_rpm_path])\n", (3986, 4060), False, 'import subprocess\n'), ((4153, 4211), 'csv.DictReader', 'csv.DictReader', (['sio'], {'fieldnames': 'rpm_queryformat_fieldnames'}), '(sio, fieldnames=rpm_queryformat_fieldnames)\n', (4167, 4211), False, 'import csv\n'), ((4567, 4603), 'os.path.basename', 'os.path.basename', (['self.test_rpm_path'], {}), '(self.test_rpm_path)\n', (4583, 4603), False, 'import os\n'), ((8958, 9050), 'subprocess.check_output', 'subprocess.check_output', (["['rpm', '-qp', '--queryformat', '%{PAYLOADCOMPRESSOR}', my_rpm]"], {}), "(['rpm', '-qp', '--queryformat',\n '%{PAYLOADCOMPRESSOR}', my_rpm])\n", (8981, 9050), False, 'import subprocess\n'), ((9519, 9611), 'subprocess.check_output', 'subprocess.check_output', (["['rpm', '-qp', '--queryformat', '%{PAYLOADCOMPRESSOR}', my_rpm]"], {}), "(['rpm', '-qp', '--queryformat',\n '%{PAYLOADCOMPRESSOR}', my_rpm])\n", (9542, 9611), False, 'import subprocess\n'), ((2434, 2538), 'subprocess.check_output', 'subprocess.check_output', (["['rpm', '-qp', '--queryformat', '%{' + fieldname + '}', self.test_rpm_path]"], {}), "(['rpm', '-qp', '--queryformat', '%{' + fieldname +\n '}', self.test_rpm_path])\n", (2457, 2538), False, 'import subprocess\n'), ((2997, 3015), 'csv.DictReader', 'csv.DictReader', (['fh'], {}), '(fh)\n', (3011, 3015), False, 'import csv\n'), ((4627, 4657), 'os.path.splitext', 'os.path.splitext', (['rpm_filename'], {}), '(rpm_filename)\n', (4643, 4657), False, 'import os\n'), ((7839, 7936), 'subprocess.check_output', 'subprocess.check_output', (["['rpm', '-qp', '--queryformat', rpm_queryformat, self.test_rpm_path]"], {}), "(['rpm', '-qp', '--queryformat', rpm_queryformat,\n self.test_rpm_path])\n", (7862, 7936), False, 'import subprocess\n'), ((8041, 8114), 'csv.DictReader', 'csv.DictReader', (['sio'], {'delimiter': '""":"""', 'fieldnames': 'rpm_queryformat_fieldnames'}), "(sio, delimiter=':', fieldnames=rpm_queryformat_fieldnames)\n", (8055, 8114), False, 'import csv\n'), ((5600, 5646), 'os.path.join', 'os.path.join', (['metadata_prefix', "(mdtype + '.csv')"], {}), "(metadata_prefix, mdtype + '.csv')\n", (5612, 5646), False, 'import os\n'), ((5750, 5783), 'csv.DictReader', 'csv.DictReader', (['fh'], {'delimiter': '""":"""'}), "(fh, delimiter=':')\n", (5764, 5783), False, 'import csv\n')]
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from deckhand.engine import document_validation
from deckhand import errors
from deckhand.tests.unit.engine import base as test_base
from deckhand import types
class TestDocumentValidationNegative(test_base.TestDocumentValidationBase):
"""Negative testing suite for document validation."""
# Control documents don't require layeringDefinition as none of them
# are rendered -- they are static documents. It is also not meaningful
# to encrypt control documents.
BASIC_CONTROL_PROPERTIES = (
'metadata',
'metadata.schema',
'metadata.name',
'schema',
)
BASIC_DOCUMENT_PROPERTIES = BASIC_CONTROL_PROPERTIES + (
'metadata.layeringDefinition',
'metadata.layeringDefinition.layer',
'metadata.storagePolicy',
)
CRITICAL_CONTROL_PROPERTIES = (
'schema',
'metadata',
'metadata.schema',
'metadata.name',
)
CRITICAL_DOCUMENT_PROPERTIES = CRITICAL_CONTROL_PROPERTIES + (
'metadata.layeringDefinition',
'metadata.layeringDefinition.layer',
'metadata.substitutions.0.dest',
'metadata.substitutions.0.dest.path',
'metadata.substitutions.0.src',
'metadata.substitutions.0.src.schema',
'metadata.substitutions.0.src.name',
'metadata.substitutions.0.src.path',
'metadata.storagePolicy',
)
def _do_validations(self, document_validator, expected, expected_err):
validations = document_validator.validate_all()
self.assertEqual(1, len(validations))
self.assertEqual('failure', validations[-1]['status'])
self.assertEqual({'version': '1.0', 'name': 'deckhand'},
validations[-1]['validator'])
self.assertEqual(types.DECKHAND_SCHEMA_VALIDATION,
validations[-1]['name'])
self.assertEqual(1, len(validations[-1]['errors']))
for key in ('name', 'schema', 'path', 'error_section',
'validation_schema', 'schema_path', 'message'):
self.assertIn(key, validations[-1]['errors'][-1])
self.assertEqual(expected['metadata']['name'],
validations[-1]['errors'][-1]['name'])
self.assertEqual(expected['schema'],
validations[-1]['errors'][-1]['schema'])
self.assertEqual(expected_err,
validations[-1]['errors'][-1]['message'])
def _test_missing_required_sections(self, document, properties_to_remove):
if document['metadata']['schema'].startswith(types.CONTROL):
            critical_properties = self.CRITICAL_CONTROL_PROPERTIES
elif document['metadata']['schema'].startswith(types.DOCUMENT):
            critical_properties = self.CRITICAL_DOCUMENT_PROPERTIES
else:
self.fail('Document `metadata.schema` must start with '
'"metadata/Document" or "metadata/Control".')
for idx, property_to_remove in enumerate(properties_to_remove):
missing_prop = property_to_remove.split('.')[-1]
invalid_data = self._corrupt_data(document, property_to_remove)
            exception_raised = property_to_remove in critical_properties
expected_err_msg = "'%s' is a required property" % missing_prop
payload = [invalid_data]
doc_validator = document_validation.DocumentValidation(
payload, pre_validate=False)
if exception_raised:
self.assertRaises(
errors.InvalidDocumentFormat, doc_validator.validate_all)
else:
self._do_validations(doc_validator, invalid_data,
expected_err_msg)
def test_certificate_authority_key_missing_required_sections(self):
document = self._read_data('sample_certificate_authority_key')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_certificate_authority_missing_required_sections(self):
document = self._read_data('sample_certificate_authority')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_certificate_key_missing_required_sections(self):
document = self._read_data('sample_certificate_key')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_certificate_missing_required_sections(self):
document = self._read_data('sample_certificate')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_data_schema_missing_required_sections(self):
document = self._read_data('sample_data_schema')
properties_to_remove = tuple(self.BASIC_CONTROL_PROPERTIES)
self._test_missing_required_sections(document, properties_to_remove)
def test_generic_document_missing_required_sections(self):
document = self._read_data('sample_document')
properties_to_remove = self.CRITICAL_DOCUMENT_PROPERTIES
self._test_missing_required_sections(document, properties_to_remove)
def test_generic_document_missing_multiple_required_sections(self):
"""Validates that multiple errors are reported for a document with
multiple validation errors.
"""
document = self._read_data('sample_document')
properties_to_remove = (
'metadata.layeringDefinition.actions.0.method',
'metadata.layeringDefinition.actions.0.path',
'metadata.substitutions.0.dest.path',
'metadata.substitutions.0.src.name',
'metadata.substitutions.0.src.path',
'metadata.substitutions.0.src.schema',
)
for property_to_remove in properties_to_remove:
document = self._corrupt_data(document, property_to_remove)
doc_validator = document_validation.DocumentValidation(document)
e = self.assertRaises(errors.InvalidDocumentFormat,
doc_validator.validate_all)
for idx, property_to_remove in enumerate(properties_to_remove):
parts = property_to_remove.split('.')
missing_property = parts[-1]
error_re = r"%s is a required property" % missing_property
self.assertRegex(str(e.error_list).replace("\'", ""), error_re)
def test_layering_policy_missing_required_sections(self):
properties_to_remove = tuple(self.BASIC_CONTROL_PROPERTIES) + (
'data.layerOrder',)
document = self._read_data('sample_layering_policy')
self._test_missing_required_sections(document, properties_to_remove)
def test_document_invalid_layering_definition_action(self):
document = self._read_data('sample_document')
missing_data = self._corrupt_data(
document, 'metadata.layeringDefinition.actions.0.method',
'invalid', op='replace')
error_re = (
r".*invalid is not one of \[replace, delete, merge\]")
payload = [missing_data]
doc_validator = document_validation.DocumentValidation(payload)
e = self.assertRaises(errors.InvalidDocumentFormat,
doc_validator.validate_all)
self.assertRegex(str(e.error_list[0]).replace("\'", ""), error_re)
def test_passphrase_missing_required_sections(self):
document = self._read_data('sample_passphrase')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_privatekey_missing_required_sections(self):
document = self._read_data('sample_private_key')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_publickey_missing_required_sections(self):
document = self._read_data('sample_public_key')
properties_to_remove = tuple(self.BASIC_DOCUMENT_PROPERTIES) + (
'metadata.storagePolicy',)
self._test_missing_required_sections(document, properties_to_remove)
def test_validation_policy_missing_required_sections(self):
document = self._read_data('sample_validation_policy')
properties_to_remove = tuple(self.BASIC_CONTROL_PROPERTIES) + (
'data.validations', 'data.validations.0.name')
self._test_missing_required_sections(document, properties_to_remove)
@mock.patch.object(document_validation, 'LOG', autospec=True)
def test_invalid_document_schema_generates_error(self, mock_log):
document = self._read_data('sample_document')
document['schema'] = 'foo/bar/v1'
doc_validator = document_validation.DocumentValidation(document)
doc_validator.validate_all()
self.assertRegex(
mock_log.info.mock_calls[0][1][0],
'The provided document schema %s is not registered.'
% document['schema'])
@mock.patch.object(document_validation, 'LOG', autospec=True)
def test_invalid_document_schema_version_generates_error(self, mock_log):
document = self._read_data('sample_passphrase')
document['schema'] = 'deckhand/Passphrase/v5'
doc_validator = document_validation.DocumentValidation(document)
doc_validator.validate_all()
self.assertRegex(
mock_log.info.mock_calls[0][1][0],
'The provided document schema %s is not registered.'
% document['schema'])
def test_invalid_validation_schema_raises_runtime_error(self):
document = self._read_data('sample_passphrase')
# Validate that broken built-in base schema raises RuntimeError.
doc_validator = document_validation.DocumentValidation(document)
doc_validator._validators[0].base_schema = 'fake'
with self.assertRaisesRegexp(RuntimeError, 'Unknown error'):
doc_validator.validate_all()
# Validate that broken data schema for ``DataSchemaValidator`` raises
# RuntimeError.
document = self._read_data('sample_document')
data_schema = self._read_data('sample_data_schema')
data_schema['metadata']['name'] = document['schema']
data_schema['data'] = 'fake'
doc_validator = document_validation.DocumentValidation(
[document, data_schema], pre_validate=False)
with self.assertRaisesRegexp(RuntimeError, 'Unknown error'):
doc_validator.validate_all()
def test_parent_selector_but_no_actions_raises_validation_error(self):
# Verify that an error is thrown if parentSelector is specified but
# actions is missing altogether.
document = self._read_data('sample_document')
document['metadata']['layeringDefinition']['parentSelector'] = {
'some': 'label'
}
document['metadata']['layeringDefinition'].pop('actions')
doc_validator = document_validation.DocumentValidation(
[document], pre_validate=False)
self.assertRaises(
errors.InvalidDocumentFormat, doc_validator.validate_all)
# Verify that an error is thrown if parentSelector is specified but
# at least 1 action isn't specified.
document['metadata']['layeringDefinition']['actions'] = []
doc_validator = document_validation.DocumentValidation(
[document], pre_validate=False)
self.assertRaises(
errors.InvalidDocumentFormat, doc_validator.validate_all)
def test_actions_but_no_parent_selector_raises_validation_error(self):
# Verify that an error is thrown if actions are specified but
# parentSelector is missing altogether.
document = self._read_data('sample_document')
document['metadata']['layeringDefinition'].pop('parentSelector')
doc_validator = document_validation.DocumentValidation(
[document], pre_validate=False)
self.assertRaises(
errors.InvalidDocumentFormat, doc_validator.validate_all)
# Verify that an error is thrown if actions are specified but no
# parentSelector labels are.
document['metadata']['layeringDefinition']['parentSelector'] = {}
doc_validator = document_validation.DocumentValidation(
[document], pre_validate=False)
self.assertRaises(
errors.InvalidDocumentFormat, doc_validator.validate_all)
|
[
"mock.patch.object",
"deckhand.engine.document_validation.DocumentValidation"
] |
[((9630, 9690), 'mock.patch.object', 'mock.patch.object', (['document_validation', '"""LOG"""'], {'autospec': '(True)'}), "(document_validation, 'LOG', autospec=True)\n", (9647, 9690), False, 'import mock\n'), ((10146, 10206), 'mock.patch.object', 'mock.patch.object', (['document_validation', '"""LOG"""'], {'autospec': '(True)'}), "(document_validation, 'LOG', autospec=True)\n", (10163, 10206), False, 'import mock\n'), ((6939, 6987), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['document'], {}), '(document)\n', (6977, 6987), False, 'from deckhand.engine import document_validation\n'), ((8138, 8185), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['payload'], {}), '(payload)\n', (8176, 8185), False, 'from deckhand.engine import document_validation\n'), ((9882, 9930), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['document'], {}), '(document)\n', (9920, 9930), False, 'from deckhand.engine import document_validation\n'), ((10420, 10468), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['document'], {}), '(document)\n', (10458, 10468), False, 'from deckhand.engine import document_validation\n'), ((10900, 10948), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['document'], {}), '(document)\n', (10938, 10948), False, 'from deckhand.engine import document_validation\n'), ((11456, 11543), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['[document, data_schema]'], {'pre_validate': '(False)'}), '([document, data_schema],\n pre_validate=False)\n', (11494, 11543), False, 'from deckhand.engine import document_validation\n'), ((12111, 12181), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['[document]'], {'pre_validate': '(False)'}), '([document], pre_validate=False)\n', (12149, 12181), False, 'from deckhand.engine import document_validation\n'), ((12505, 12575), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['[document]'], {'pre_validate': '(False)'}), '([document], pre_validate=False)\n', (12543, 12575), False, 'from deckhand.engine import document_validation\n'), ((13031, 13101), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['[document]'], {'pre_validate': '(False)'}), '([document], pre_validate=False)\n', (13069, 13101), False, 'from deckhand.engine import document_validation\n'), ((13421, 13491), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['[document]'], {'pre_validate': '(False)'}), '([document], pre_validate=False)\n', (13459, 13491), False, 'from deckhand.engine import document_validation\n'), ((4009, 4076), 'deckhand.engine.document_validation.DocumentValidation', 'document_validation.DocumentValidation', (['payload'], {'pre_validate': '(False)'}), '(payload, pre_validate=False)\n', (4047, 4076), False, 'from deckhand.engine import document_validation\n')]
|
import importlib
import os
import traceback
from typing import Dict, Any, List
import unittest
from bblfsh_sonar_checks.utils import (
get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir,
list_checks, run_check, get_check_description, get_methods, Method, JClass,
JClassField, Argument, hash_node, run_checks
)
import bblfsh
def _get_check_modules(lang):
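    # Collect an (imported module, source path) pair for every RSPEC-*.py
    # check shipped for the given language.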
checks_dir = get_checks_dir(lang)
check_files = os.listdir(checks_dir)
check_modules = []
for f in check_files:
if not f.startswith("RSPEC-") or not f.endswith(".py"):
continue
check_modules.append((
importlib.import_module("bblfsh_sonar_checks.checks.{}.{}".format(lang, os.path.splitext(f)[0])),
os.path.join(checks_dir, f)))
return check_modules
class Test_10_Utils(unittest.TestCase):
def _parse_source(self, code):
self.client = bblfsh.BblfshClient("0.0.0.0:9432")
path = "../bblfsh_sonar_checks/fixtures/java/%s.java" % code
return self.client.parse(path).uast
def test_0010_getlanguages(self):
self.assertListEqual(get_languages(), ["java"])
def test_0020_getchecksdir(self):
# /home/juanjux/pyenv/versions/3.6.6/lib/python3.6/site-packages/bblfsh_sonar_checks/checks/java
jcheck_dir = get_checks_dir("java")
self.assertTrue(jcheck_dir.endswith("checks/java"))
def test_0030_runchecks(self):
uast = self._parse_source("RSPEC-1764")
checks = ["RSPEC-1764", "RSPEC-2447"]
results = run_checks(checks, "java", uast)
self.assertEqual(len(results), len(checks))
self.assertEqual(len(results["RSPEC-1764"]), 5)
self.assertEqual(len(results["RSPEC-2447"]), 0)
def test_0040_hashnode(self):
uast = self._parse_source("RSPEC-1764")
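        # The two digests below differ, so hash_node appears to ignore operand
        # sideness by default (an assumption based on the keyword name, not on
        # the utils documentation).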
self.assertEqual(hash_node(uast).hexdigest(), "e3e8c1738c6a6d94276080d3c5322647")
self.assertEqual(hash_node(uast, ignore_sideness=False).hexdigest(), "63c68a8dfbb7c5c5c14efc3c0a2c65d9")
def test_0060_getmethods(self):
uast = self._parse_source("RSPEC-2447")
methods = get_methods(uast)
self.assertEqual(len(methods), 1)
self.assertIsInstance(methods[0], Method)
self.assertEqual(methods[0].name, "test")
        self.assertListEqual(methods[0].modifiers, [])
self.assertIsNotNone(methods[0].return_)
self.assertIsInstance(methods[0].return_, Argument)
self.assertIsNotNone(methods[0].body)
self.assertIsNotNone(methods[0].node)
def test_0070_listchecks(self):
for c in list_checks("java"):
self.assertTrue(c.startswith("RSPEC-"))
self.assertTrue(c[6:].isdigit())
def test_0080_getcheckdescription(self):
url = get_check_description("RSPEC-1143", "java")
self.assertEqual(url , "https://rules.sonarsource.com/java/RSPEC-1143")
class Test_20_Checks(unittest.TestCase):
def setUp(self):
self.languages = get_languages()
self.check_funcs: Dict[str, Dict[str, Any]] = {}
self.fixtures: Dict[str, List[str]] = {}
fixtures_dir = get_fixtures_dir()
for lang in self.languages:
self.check_funcs[lang] = {path: module.check for (module, path) in _get_check_modules(lang)}
self.fixtures[lang] = [os.path.join(fixtures_dir, lang, i)
for i in os.listdir(os.path.join(fixtures_dir, lang))]
self.client = bblfsh.BblfshClient("0.0.0.0:9432")
def test_1000_own_fixtures(self):
for lang, checks in self.check_funcs.items():
for check_path, check_func in checks.items():
res = run_default_fixture(check_path, check_func, silent=True)
self.assertGreater(len(res), 0)
def test_2000_other_fixtures(self):
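        # Run every check against the fixtures of *other* checks; hits are
        # irrelevant here, the point is that no check crashes on a valid UAST.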
for lang in self.fixtures:
for check_code in list_checks(lang):
for fixture in self.fixtures[lang]:
if check_code in fixture:
continue
resp = self.client.parse(fixture)
self.assertEqual(resp.status, 0)
try:
run_check(check_code, lang, resp.uast)
except Exception:
self.fail("Check for code {} ({}) raised a Exception:".format(
check_code, lang, traceback.format_exc()))
|
[
"os.listdir",
"bblfsh_sonar_checks.utils.run_check",
"bblfsh_sonar_checks.utils.get_fixtures_dir",
"bblfsh_sonar_checks.utils.get_check_description",
"bblfsh_sonar_checks.utils.list_checks",
"bblfsh_sonar_checks.utils.run_checks",
"bblfsh_sonar_checks.utils.get_languages",
"bblfsh_sonar_checks.utils.get_checks_dir",
"bblfsh_sonar_checks.utils.hash_node",
"os.path.splitext",
"bblfsh_sonar_checks.utils.run_default_fixture",
"traceback.format_exc",
"bblfsh.BblfshClient",
"os.path.join",
"bblfsh_sonar_checks.utils.get_methods"
] |
[((417, 437), 'bblfsh_sonar_checks.utils.get_checks_dir', 'get_checks_dir', (['lang'], {}), '(lang)\n', (431, 437), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((456, 478), 'os.listdir', 'os.listdir', (['checks_dir'], {}), '(checks_dir)\n', (466, 478), False, 'import os\n'), ((924, 959), 'bblfsh.BblfshClient', 'bblfsh.BblfshClient', (['"""0.0.0.0:9432"""'], {}), "('0.0.0.0:9432')\n", (943, 959), False, 'import bblfsh\n'), ((1333, 1355), 'bblfsh_sonar_checks.utils.get_checks_dir', 'get_checks_dir', (['"""java"""'], {}), "('java')\n", (1347, 1355), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((1564, 1596), 'bblfsh_sonar_checks.utils.run_checks', 'run_checks', (['checks', '"""java"""', 'uast'], {}), "(checks, 'java', uast)\n", (1574, 1596), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((2150, 2167), 'bblfsh_sonar_checks.utils.get_methods', 'get_methods', (['uast'], {}), '(uast)\n', (2161, 2167), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((2675, 2694), 'bblfsh_sonar_checks.utils.list_checks', 'list_checks', (['"""java"""'], {}), "('java')\n", (2686, 2694), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((2853, 2896), 'bblfsh_sonar_checks.utils.get_check_description', 'get_check_description', (['"""RSPEC-1143"""', '"""java"""'], {}), "('RSPEC-1143', 'java')\n", (2874, 2896), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((3067, 3082), 'bblfsh_sonar_checks.utils.get_languages', 'get_languages', ([], {}), '()\n', (3080, 3082), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((3213, 3231), 'bblfsh_sonar_checks.utils.get_fixtures_dir', 'get_fixtures_dir', ([], {}), '()\n', (3229, 3231), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((3558, 3593), 'bblfsh.BblfshClient', 'bblfsh.BblfshClient', (['"""0.0.0.0:9432"""'], {}), "('0.0.0.0:9432')\n", (3577, 3593), False, 'import bblfsh\n'), ((1141, 1156), 'bblfsh_sonar_checks.utils.get_languages', 'get_languages', ([], {}), '()\n', (1154, 1156), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((3978, 3995), 'bblfsh_sonar_checks.utils.list_checks', 'list_checks', (['lang'], {}), '(lang)\n', (3989, 3995), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((768, 795), 'os.path.join', 'os.path.join', (['checks_dir', 'f'], {}), '(checks_dir, f)\n', (780, 795), False, 'import os\n'), ((3409, 3444), 'os.path.join', 'os.path.join', (['fixtures_dir', 'lang', 'i'], {}), '(fixtures_dir, lang, i)\n', (3421, 3444), False, 'import os\n'), ((3767, 3823), 'bblfsh_sonar_checks.utils.run_default_fixture', 'run_default_fixture', (['check_path', 'check_func'], {'silent': '(True)'}), '(check_path, check_func, silent=True)\n', (3786, 3823), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((1869, 1884), 'bblfsh_sonar_checks.utils.hash_node', 'hash_node', (['uast'], {}), '(uast)\n', (1878, 1884), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((1959, 1997), 'bblfsh_sonar_checks.utils.hash_node', 'hash_node', (['uast'], {'ignore_sideness': '(False)'}), '(uast, ignore_sideness=False)\n', (1968, 1997), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((3500, 3532), 'os.path.join', 'os.path.join', (['fixtures_dir', 'lang'], {}), '(fixtures_dir, lang)\n', (3512, 3532), False, 'import os\n'), ((4285, 4323), 'bblfsh_sonar_checks.utils.run_check', 'run_check', (['check_code', 'lang', 'resp.uast'], {}), '(check_code, lang, resp.uast)\n', (4294, 4323), False, 'from bblfsh_sonar_checks.utils import get_checks_dir, get_languages, run_default_fixture, get_fixtures_dir, list_checks, run_check, get_check_description, get_methods, Method, JClass, JClassField, Argument, hash_node, run_checks\n'), ((730, 749), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (746, 749), False, 'import os\n'), ((4495, 4517), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4515, 4517), False, 'import traceback\n')]
|
import tensorflow as tf
import logging
from typing import Tuple
import tensorflow_hub as hub
import sys
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class MultiHopAttentionModel:
def __init__(
self,
images: tf.Tensor,
captions: tf.Tensor,
captions_len: tf.Tensor,
margin: float,
joint_space: int,
num_layers: int,
attn_size: int,
attn_hops: int,
learning_rate: float = 0.0,
clip_value: int = 0,
decay_steps: float = sys.maxsize,
batch_hard: bool = False,
log_dir: str = "",
name: str = "",
):
# Name of the model
self.name = name
# Get images, captions, lengths and labels
self.images = images
self.captions = captions
self.captions_len = captions_len
# Create summary writers
if log_dir != "":
self.file_writer = tf.summary.FileWriter(log_dir + self.name)
self.train_loss_ph, self.train_loss_summary = self.create_summary(
"train_loss"
)
self.val_loss_ph, self.val_loss_summary = self.create_summary("val_loss")
self.val_recall_at_k_ph, self.val_recall_at_k_summary = self.create_summary(
"val_recall_at_k"
)
self.global_step = tf.Variable(0, trainable=False, name="global_step")
# Create dropout and weight decay placeholder
self.frob_norm_pen = tf.placeholder_with_default(
0.0, None, name="frob_norm_pen"
)
self.keep_prob = tf.placeholder_with_default(1.0, None, name="keep_prob")
self.weight_decay = tf.placeholder_with_default(0.0, None, name="weight_decay")
# Build model
self.image_encoded = self.image_encoder_graph(self.images, joint_space)
logger.info("Image encoder graph created...")
self.text_encoded = self.text_encoder_graph(
self.captions, self.captions_len, joint_space, num_layers, self.keep_prob
)
logger.info("Text encoder graph created...")
self.attended_images, self.image_alphas = self.attention_graph(
attn_size, attn_hops, self.image_encoded, "siamese_attention"
)
# Reusing the same variables that were used for the images
self.attended_captions, self.text_alphas = self.attention_graph(
attn_size, attn_hops, self.text_encoded, "siamese_attention"
)
logger.info("Attention graph created...")
self.loss = self.compute_loss(margin, attn_hops, batch_hard)
self.optimize = self.apply_gradients_op(
self.loss, learning_rate, clip_value, decay_steps
)
self.saver_loader = tf.train.Saver()
logger.info("Graph creation finished...")
@staticmethod
def image_encoder_graph(images: tf.Tensor, joint_space: int) -> tf.Tensor:
"""Extract higher level features from the image using a resnet152 pretrained on
ImageNet.
Args:
images: The input images.
joint_space: The space where the encoded images and text are going to be
projected to.
Returns:
The encoded image.
"""
with tf.variable_scope("image_encoder"):
resnet = hub.Module(
"https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/3"
)
features = resnet(images, signature="image_feature_vector", as_dict=True)[
"resnet_v2_152/block4"
]
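            # block4 yields a [batch, height, width, channels] feature map;
            # flatten the spatial grid so every region is projected to the
            # joint space independently, then restore [batch, regions, space].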
flatten = tf.reshape(features, (-1, features.shape[3]))
project_layer = tf.layers.dense(
flatten, joint_space, kernel_initializer=tf.glorot_uniform_initializer()
)
return tf.reshape(
project_layer, (-1, features.shape[1] * features.shape[2], joint_space)
)
@staticmethod
def text_encoder_graph(
captions: tf.Tensor,
captions_len: tf.Tensor,
joint_space: int,
num_layers: int,
keep_prob: float,
):
"""Encodes the text it gets as input using a bidirectional rnn.
Args:
captions: The inputs.
captions_len: The length of the inputs.
joint_space: The space where the encoded images and text are going to be
projected to.
num_layers: The number of layers in the Bi-RNN.
keep_prob: The inverse dropout probability.
Returns:
The encoded text.
"""
with tf.variable_scope(name_or_scope="text_encoder"):
elmo = hub.Module("https://tfhub.dev/google/elmo/2", trainable=True)
embeddings = elmo(
inputs={"tokens": captions, "sequence_len": captions_len},
signature="tokens",
as_dict=True,
)["elmo"]
cell_fw = tf.nn.rnn_cell.MultiRNNCell(
[
tf.nn.rnn_cell.DropoutWrapper(
tf.nn.rnn_cell.GRUCell(joint_space),
state_keep_prob=keep_prob,
input_size=(tf.shape(embeddings)[0], joint_space),
variational_recurrent=True,
dtype=tf.float32,
)
for _ in range(num_layers)
]
)
cell_bw = tf.nn.rnn_cell.MultiRNNCell(
[
tf.nn.rnn_cell.DropoutWrapper(
tf.nn.rnn_cell.GRUCell(joint_space),
state_keep_prob=keep_prob,
input_size=(tf.shape(embeddings)[0], joint_space),
variational_recurrent=True,
dtype=tf.float32,
)
for _ in range(num_layers)
]
)
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
embeddings,
sequence_length=captions_len,
dtype=tf.float32,
)
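            # Average the forward and backward outputs so the encoding stays
            # in joint_space instead of doubling via concatenation.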
return tf.add(output_fw, output_bw) / 2
@staticmethod
def attention_graph(
attn_size: int, attn_hops: int, encoded_input: tf.Tensor, scope: str
):
"""Applies attention on the encoded image and the encoded text.
As per: https://arxiv.org/abs/1703.03130
The "A structured self-attentative sentence embedding" paper goes through
the attention mechanism applied here.
Args:
attn_size: The size of the attention.
attn_hops: How many hops of attention to apply.
encoded_input: The encoded input, can be both the image and the text.
scope: The scope of the graph block.
Returns:
Attended output.
"""
with tf.variable_scope(name_or_scope=scope, reuse=tf.AUTO_REUSE):
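            # tf.AUTO_REUSE makes the image and text branches share these
            # attention parameters (both callers pass the same
            # "siamese_attention" scope).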
# Shape parameters
time_steps = tf.shape(encoded_input)[1]
hidden_size = encoded_input.get_shape()[2].value
# As per: http://proceedings.mlr.press/v9/glorot10a.html
# Trainable parameters
w_omega = tf.get_variable(
name="w_omega",
shape=[hidden_size, attn_size],
initializer=tf.glorot_uniform_initializer(),
)
b_omega = tf.get_variable(
name="b_omega", shape=[attn_size], initializer=tf.zeros_initializer()
)
u_omega = tf.get_variable(
name="u_omega",
shape=[attn_size, attn_hops],
initializer=tf.glorot_uniform_initializer(),
)
# Apply attention
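            # Computes A = softmax(tanh(H·W + b)·U) over the time axis,
            # flattening the batch and time dimensions first.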
# [B * T, H]
encoded_input_reshaped = tf.reshape(encoded_input, [-1, hidden_size])
# [B * T, A_size]
v = tf.tanh(tf.matmul(encoded_input_reshaped, w_omega) + b_omega)
            # [B * T, A_hops]
vu = tf.matmul(v, u_omega)
# [B, T, A_hops]
vu = tf.reshape(vu, [-1, time_steps, attn_hops])
# [B, A_hops, T]
vu_transposed = tf.transpose(vu, [0, 2, 1])
# [B, A_hops, T]
alphas = tf.nn.softmax(vu_transposed, name="alphas", axis=2)
# [B, A_hops, H]
output = tf.matmul(alphas, encoded_input)
# [B, A_hops * H]
output = tf.layers.flatten(output)
# [B, A_hops * H] normalized output
output = tf.math.l2_normalize(output, axis=1)
return output, alphas
@staticmethod
def compute_frob_norm(attention_weights: tf.Tensor, attn_hops: int) -> tf.Tensor:
"""Computes the Frobenius norm of the attention weights tensor.
Args:
attention_weights: The attention weights.
attn_hops: The number of attention hops.
Returns:
            The batch mean of the squared Frobenius norm of AA^T - I, where A
            holds the attention weights.
"""
attn_w_dot_product = tf.matmul(
attention_weights, tf.transpose(attention_weights, [0, 2, 1])
)
identity_matrix = tf.reshape(
tf.tile(tf.eye(attn_hops), [tf.shape(attention_weights)[0], 1]),
[-1, attn_hops, attn_hops],
)
return tf.reduce_mean(
tf.square(
tf.norm(attn_w_dot_product - identity_matrix, axis=[-2, -1], ord="fro")
)
)
@staticmethod
def triplet_loss(scores: tf.Tensor, margin: float, batch_hard: bool):
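        # scores[i, j] is the similarity between image i and caption j, so
        # the diagonal holds the matching (positive) pairs.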
diagonal = tf.diag_part(scores)
# Compare every diagonal score to scores in its column
# All contrastive images for each sentence
# noinspection PyTypeChecker
cost_s = tf.maximum(0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)
# Compare every diagonal score to scores in its row
# All contrastive sentences for each image
# noinspection PyTypeChecker
cost_im = tf.maximum(0.0, margin - diagonal + scores)
# Clear diagonals
cost_s = tf.linalg.set_diag(cost_s, tf.zeros(tf.shape(cost_s)[0]))
cost_im = tf.linalg.set_diag(cost_im, tf.zeros(tf.shape(cost_im)[0]))
if batch_hard:
logger.info("Training only on the hardest negatives...")
# For each positive pair (i,s) pick the hardest contrastive image
cost_s = tf.reduce_max(cost_s, axis=1)
# For each positive pair (i,s) pick the hardest contrastive sentence
cost_im = tf.reduce_max(cost_im, axis=0)
return tf.reduce_sum(cost_s) + tf.reduce_sum(cost_im)
def compute_loss(
self, margin: float, attn_hops: int, batch_hard: bool
) -> tf.Tensor:
"""Computes the final loss of the model.
1. Computes the Triplet loss: https://arxiv.org/abs/1707.05612 (Batch hard or
batch all)
        2. Computes the Frobenius norm of AA^T - I (image embeddings).
        3. Computes the Frobenius norm of AA^T - I (text embeddings).
4. Computes the L2 loss.
5. Adds all together to compute the loss.
Args:
margin: The contrastive margin.
            attn_hops: The number of attention hops.
batch_hard: Whether to train on the hard negatives.
Returns:
The final loss to be optimized.
"""
with tf.variable_scope(name_or_scope="loss"):
scores = tf.matmul(
self.attended_images, self.attended_captions, transpose_b=True
)
triplet_loss = self.triplet_loss(scores, margin, batch_hard)
pen_image_alphas = (
self.compute_frob_norm(self.image_alphas, attn_hops)
* self.frob_norm_pen
)
pen_text_alphas = (
self.compute_frob_norm(self.text_alphas, attn_hops) * self.frob_norm_pen
)
l2_loss = (
tf.add_n(
[
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if "bias" not in v.name
]
)
* self.weight_decay
)
return triplet_loss + pen_image_alphas + pen_text_alphas + l2_loss
def apply_gradients_op(
self, loss: tf.Tensor, learning_rate: float, clip_value: int, decay_steps: float
) -> tf.Operation:
"""Applies the gradients on the variables.
Args:
loss: The computed loss.
learning_rate: The optimizer learning rate.
clip_value: The clipping value.
decay_steps: Decay the learning rate every decay_steps.
Returns:
An operation node to be executed in order to apply the computed gradients.
"""
with tf.variable_scope(name_or_scope="optimizer"):
learning_rate = tf.train.exponential_decay(
learning_rate,
self.global_step,
decay_steps,
0.5,
staircase=True,
name="lr_decay",
)
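            # Halves the learning rate every `decay_steps` steps (staircase).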
optimizer = tf.train.AdamOptimizer(learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, clip_value)
return optimizer.apply_gradients(
zip(gradients, variables), global_step=self.global_step
)
def init(self, sess: tf.Session, checkpoint_path: str = None) -> None:
"""Initializes all variables in the graph.
Args:
sess: The active session.
checkpoint_path: Path to a valid checkpoint.
Returns:
None
"""
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
if checkpoint_path is not None:
self.saver_loader.restore(sess, checkpoint_path)
def add_summary_graph(self, sess: tf.Session) -> None:
"""Adds the graph to tensorboard.
Args:
sess: The active session.
Returns:
None
"""
self.file_writer.add_graph(sess.graph)
@staticmethod
def create_summary(name: str) -> Tuple[tf.placeholder, tf.summary.scalar]:
"""Creates summary placeholder and node.
Args:
name: The name of the summary.
Returns:
            The summary placeholder and its node counterpart.
"""
input_ph = tf.placeholder(tf.float32, shape=None, name=name + "_pl")
summary = tf.summary.scalar(name, input_ph)
return input_ph, summary
def add_summary(self, sess: tf.Session, value: float) -> None:
"""Writes the summary to tensorboard.
Args:
sess: The active session.
            value: The evaluated summary to write.
Returns:
None
"""
self.file_writer.add_summary(
value, tf.train.global_step(sess, self.global_step)
)
def save_model(self, sess: tf.Session, save_path: str) -> None:
"""Dumps the model definition.
Args:
sess: The active session.
save_path: Where to save the model.
        Returns:
            None
"""
self.saver_loader.save(sess, save_path + self.name)
|
[
"tensorflow.reduce_sum",
"tensorflow.trainable_variables",
"tensorflow_hub.Module",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.diag_part",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.tables_initializer",
"tensorflow.reduce_max",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.softmax",
"tensorflow.placeholder_with_default",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.norm",
"tensorflow.train.global_step",
"tensorflow.train.Saver",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"tensorflow.eye",
"tensorflow.layers.flatten",
"tensorflow.add",
"tensorflow.transpose",
"tensorflow.zeros_initializer",
"tensorflow.train.exponential_decay",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.math.l2_normalize",
"tensorflow.glorot_uniform_initializer",
"logging.basicConfig",
"tensorflow.shape",
"tensorflow.nn.l2_loss",
"tensorflow.train.AdamOptimizer",
"logging.getLogger"
] |
[((105, 144), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (124, 144), False, 'import logging\n'), ((154, 181), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'import logging\n'), ((1368, 1419), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (1379, 1419), True, 'import tensorflow as tf\n'), ((1503, 1563), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)', 'None'], {'name': '"""frob_norm_pen"""'}), "(0.0, None, name='frob_norm_pen')\n", (1530, 1563), True, 'import tensorflow as tf\n'), ((1611, 1667), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)', 'None'], {'name': '"""keep_prob"""'}), "(1.0, None, name='keep_prob')\n", (1638, 1667), True, 'import tensorflow as tf\n'), ((1696, 1755), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)', 'None'], {'name': '"""weight_decay"""'}), "(0.0, None, name='weight_decay')\n", (1723, 1755), True, 'import tensorflow as tf\n'), ((2761, 2777), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2775, 2777), True, 'import tensorflow as tf\n'), ((9618, 9638), 'tensorflow.diag_part', 'tf.diag_part', (['scores'], {}), '(scores)\n', (9630, 9638), True, 'import tensorflow as tf\n'), ((10038, 10081), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(margin - diagonal + scores)'], {}), '(0.0, margin - diagonal + scores)\n', (10048, 10081), True, 'import tensorflow as tf\n'), ((14576, 14633), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None', 'name': "(name + '_pl')"}), "(tf.float32, shape=None, name=name + '_pl')\n", (14590, 14633), True, 'import tensorflow as tf\n'), ((14652, 14685), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'input_ph'], {}), '(name, input_ph)\n', (14669, 14685), True, 'import tensorflow as tf\n'), ((953, 995), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['(log_dir + self.name)'], {}), '(log_dir + self.name)\n', (974, 995), True, 'import tensorflow as tf\n'), ((3271, 3305), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""image_encoder"""'], {}), "('image_encoder')\n", (3288, 3305), True, 'import tensorflow as tf\n'), ((3328, 3406), 'tensorflow_hub.Module', 'hub.Module', (['"""https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/3"""'], {}), "('https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/3')\n", (3338, 3406), True, 'import tensorflow_hub as hub\n'), ((3599, 3644), 'tensorflow.reshape', 'tf.reshape', (['features', '(-1, features.shape[3])'], {}), '(features, (-1, features.shape[3]))\n', (3609, 3644), True, 'import tensorflow as tf\n'), ((3813, 3900), 'tensorflow.reshape', 'tf.reshape', (['project_layer', '(-1, features.shape[1] * features.shape[2], joint_space)'], {}), '(project_layer, (-1, features.shape[1] * features.shape[2],\n joint_space))\n', (3823, 3900), True, 'import tensorflow as tf\n'), ((4594, 4641), 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': '"""text_encoder"""'}), "(name_or_scope='text_encoder')\n", (4611, 4641), True, 'import tensorflow as tf\n'), ((4662, 4723), 'tensorflow_hub.Module', 'hub.Module', (['"""https://tfhub.dev/google/elmo/2"""'], {'trainable': '(True)'}), "('https://tfhub.dev/google/elmo/2', trainable=True)\n", (4672, 4723), True, 'import tensorflow_hub as hub\n'), ((5962, 6075), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['cell_fw', 'cell_bw', 'embeddings'], {'sequence_length': 'captions_len', 'dtype': 'tf.float32'}), '(cell_fw, cell_bw, embeddings,\n sequence_length=captions_len, dtype=tf.float32)\n', (5993, 6075), True, 'import tensorflow as tf\n'), ((6928, 6987), 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': 'scope', 'reuse': 'tf.AUTO_REUSE'}), '(name_or_scope=scope, reuse=tf.AUTO_REUSE)\n', (6945, 6987), True, 'import tensorflow as tf\n'), ((7855, 7899), 'tensorflow.reshape', 'tf.reshape', (['encoded_input', '[-1, hidden_size]'], {}), '(encoded_input, [-1, hidden_size])\n', (7865, 7899), True, 'import tensorflow as tf\n'), ((8056, 8077), 'tensorflow.matmul', 'tf.matmul', (['v', 'u_omega'], {}), '(v, u_omega)\n', (8065, 8077), True, 'import tensorflow as tf\n'), ((8124, 8167), 'tensorflow.reshape', 'tf.reshape', (['vu', '[-1, time_steps, attn_hops]'], {}), '(vu, [-1, time_steps, attn_hops])\n', (8134, 8167), True, 'import tensorflow as tf\n'), ((8225, 8252), 'tensorflow.transpose', 'tf.transpose', (['vu', '[0, 2, 1]'], {}), '(vu, [0, 2, 1])\n', (8237, 8252), True, 'import tensorflow as tf\n'), ((8303, 8354), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['vu_transposed'], {'name': '"""alphas"""', 'axis': '(2)'}), "(vu_transposed, name='alphas', axis=2)\n", (8316, 8354), True, 'import tensorflow as tf\n'), ((8405, 8437), 'tensorflow.matmul', 'tf.matmul', (['alphas', 'encoded_input'], {}), '(alphas, encoded_input)\n', (8414, 8437), True, 'import tensorflow as tf\n'), ((8489, 8514), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['output'], {}), '(output)\n', (8506, 8514), True, 'import tensorflow as tf\n'), ((8584, 8620), 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (8604, 8620), True, 'import tensorflow as tf\n'), ((9121, 9163), 'tensorflow.transpose', 'tf.transpose', (['attention_weights', '[0, 2, 1]'], {}), '(attention_weights, [0, 2, 1])\n', (9133, 9163), True, 'import tensorflow as tf\n'), ((10454, 10483), 'tensorflow.reduce_max', 'tf.reduce_max', (['cost_s'], {'axis': '(1)'}), '(cost_s, axis=1)\n', (10467, 10483), True, 'import tensorflow as tf\n'), ((10587, 10617), 'tensorflow.reduce_max', 'tf.reduce_max', (['cost_im'], {'axis': '(0)'}), '(cost_im, axis=0)\n', (10600, 10617), True, 'import tensorflow as tf\n'), ((10634, 10655), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cost_s'], {}), '(cost_s)\n', (10647, 10655), True, 'import tensorflow as tf\n'), ((10658, 10680), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cost_im'], {}), '(cost_im)\n', (10671, 10680), True, 'import tensorflow as tf\n'), ((11443, 11482), 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': '"""loss"""'}), "(name_or_scope='loss')\n", (11460, 11482), True, 'import tensorflow as tf\n'), ((11505, 11578), 'tensorflow.matmul', 'tf.matmul', (['self.attended_images', 'self.attended_captions'], {'transpose_b': '(True)'}), '(self.attended_images, self.attended_captions, transpose_b=True)\n', (11514, 11578), True, 'import tensorflow as tf\n'), ((12904, 12948), 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': '"""optimizer"""'}), "(name_or_scope='optimizer')\n", (12921, 12948), True, 'import tensorflow as tf\n'), ((12978, 13093), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate', 'self.global_step', 'decay_steps', '(0.5)'], {'staircase': '(True)', 'name': '"""lr_decay"""'}), "(learning_rate, self.global_step, decay_steps, \n 0.5, staircase=True, name='lr_decay')\n", (13004, 13093), True, 'import tensorflow as tf\n'), ((13224, 13261), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (13246, 13261), True, 'import tensorflow as tf\n'), ((13364, 13409), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'clip_value'], {}), '(gradients, clip_value)\n', (13386, 13409), True, 'import tensorflow as tf\n'), ((15031, 15075), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'self.global_step'], {}), '(sess, self.global_step)\n', (15051, 15075), True, 'import tensorflow as tf\n'), ((6187, 6215), 'tensorflow.add', 'tf.add', (['output_fw', 'output_bw'], {}), '(output_fw, output_bw)\n', (6193, 6215), True, 'import tensorflow as tf\n'), ((7045, 7068), 'tensorflow.shape', 'tf.shape', (['encoded_input'], {}), '(encoded_input)\n', (7053, 7068), True, 'import tensorflow as tf\n'), ((9232, 9249), 'tensorflow.eye', 'tf.eye', (['attn_hops'], {}), '(attn_hops)\n', (9238, 9249), True, 'import tensorflow as tf\n'), ((9410, 9481), 'tensorflow.norm', 'tf.norm', (['(attn_w_dot_product - identity_matrix)'], {'axis': '[-2, -1]', 'ord': '"""fro"""'}), "(attn_w_dot_product - identity_matrix, axis=[-2, -1], ord='fro')\n", (9417, 9481), True, 'import tensorflow as tf\n'), ((13846, 13879), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13877, 13879), True, 'import tensorflow as tf\n'), ((13881, 13904), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (13902, 13904), True, 'import tensorflow as tf\n'), ((3747, 3778), 'tensorflow.glorot_uniform_initializer', 'tf.glorot_uniform_initializer', ([], {}), '()\n', (3776, 3778), True, 'import tensorflow as tf\n'), ((7385, 7416), 'tensorflow.glorot_uniform_initializer', 'tf.glorot_uniform_initializer', ([], {}), '()\n', (7414, 7416), True, 'import tensorflow as tf\n'), ((7534, 7556), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (7554, 7556), True, 'import tensorflow as tf\n'), ((7716, 7747), 'tensorflow.glorot_uniform_initializer', 'tf.glorot_uniform_initializer', ([], {}), '()\n', (7745, 7747), True, 'import tensorflow as tf\n'), ((7954, 7996), 'tensorflow.matmul', 'tf.matmul', (['encoded_input_reshaped', 'w_omega'], {}), '(encoded_input_reshaped, w_omega)\n', (7963, 7996), True, 'import tensorflow as tf\n'), ((9832, 9861), 'tensorflow.reshape', 'tf.reshape', (['diagonal', '[-1, 1]'], {}), '(diagonal, [-1, 1])\n', (9842, 9861), True, 'import tensorflow as tf\n'), ((10162, 10178), 'tensorflow.shape', 'tf.shape', (['cost_s'], {}), '(cost_s)\n', (10170, 10178), True, 'import tensorflow as tf\n'), ((10239, 10256), 'tensorflow.shape', 'tf.shape', (['cost_im'], {}), '(cost_im)\n', (10247, 10256), True, 'import tensorflow as tf\n'), ((5062, 5097), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['joint_space'], {}), '(joint_space)\n', (5084, 5097), True, 'import tensorflow as tf\n'), ((5564, 5599), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['joint_space'], {}), '(joint_space)\n', (5586, 5599), True, 'import tensorflow as tf\n'), ((9252, 9279), 'tensorflow.shape', 'tf.shape', (['attention_weights'], {}), '(attention_weights)\n', (9260, 9279), True, 'import tensorflow as tf\n'), ((12068, 12084), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (12081, 12084), True, 'import tensorflow as tf\n'), ((12118, 12142), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (12140, 12142), True, 'import tensorflow as tf\n'), ((5186, 5206), 'tensorflow.shape', 'tf.shape', (['embeddings'], {}), '(embeddings)\n', (5194, 5206), True, 'import tensorflow as tf\n'), ((5688, 5708), 'tensorflow.shape', 'tf.shape', (['embeddings'], {}), '(embeddings)\n', (5696, 5708), True, 'import tensorflow as tf\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['InstanceAttachmentArgs', 'InstanceAttachment']
@pulumi.input_type
class InstanceAttachmentArgs:
def __init__(__self__, *,
child_instance_id: pulumi.Input[str],
child_instance_region_id: pulumi.Input[str],
child_instance_type: pulumi.Input[str],
instance_id: pulumi.Input[str],
cen_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_owner_id: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a InstanceAttachment resource.
:param pulumi.Input[str] child_instance_id: The ID of the child instance to attach.
:param pulumi.Input[str] child_instance_region_id: The region ID of the child instance to attach.
:param pulumi.Input[str] child_instance_type: The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
:param pulumi.Input[str] instance_id: The ID of the CEN.
:param pulumi.Input[int] cen_owner_id: The account ID to which the CEN instance belongs.
        :param pulumi.Input[int] child_instance_owner_id: The uid of the child instance. Only used when attaching a child instance of another account.
"""
pulumi.set(__self__, "child_instance_id", child_instance_id)
pulumi.set(__self__, "child_instance_region_id", child_instance_region_id)
pulumi.set(__self__, "child_instance_type", child_instance_type)
pulumi.set(__self__, "instance_id", instance_id)
if cen_owner_id is not None:
pulumi.set(__self__, "cen_owner_id", cen_owner_id)
if child_instance_owner_id is not None:
pulumi.set(__self__, "child_instance_owner_id", child_instance_owner_id)
@property
@pulumi.getter(name="childInstanceId")
def child_instance_id(self) -> pulumi.Input[str]:
"""
The ID of the child instance to attach.
"""
return pulumi.get(self, "child_instance_id")
@child_instance_id.setter
def child_instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "child_instance_id", value)
@property
@pulumi.getter(name="childInstanceRegionId")
def child_instance_region_id(self) -> pulumi.Input[str]:
"""
The region ID of the child instance to attach.
"""
return pulumi.get(self, "child_instance_region_id")
@child_instance_region_id.setter
def child_instance_region_id(self, value: pulumi.Input[str]):
pulumi.set(self, "child_instance_region_id", value)
@property
@pulumi.getter(name="childInstanceType")
def child_instance_type(self) -> pulumi.Input[str]:
"""
The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
"""
return pulumi.get(self, "child_instance_type")
@child_instance_type.setter
def child_instance_type(self, value: pulumi.Input[str]):
pulumi.set(self, "child_instance_type", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
The ID of the CEN.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="cenOwnerId")
def cen_owner_id(self) -> Optional[pulumi.Input[int]]:
"""
The account ID to which the CEN instance belongs.
"""
return pulumi.get(self, "cen_owner_id")
@cen_owner_id.setter
def cen_owner_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cen_owner_id", value)
@property
@pulumi.getter(name="childInstanceOwnerId")
def child_instance_owner_id(self) -> Optional[pulumi.Input[int]]:
"""
        The uid of the child instance. Only used when attaching a child instance of another account.
"""
return pulumi.get(self, "child_instance_owner_id")
@child_instance_owner_id.setter
def child_instance_owner_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "child_instance_owner_id", value)
@pulumi.input_type
class _InstanceAttachmentState:
def __init__(__self__, *,
cen_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_id: Optional[pulumi.Input[str]] = None,
child_instance_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_region_id: Optional[pulumi.Input[str]] = None,
child_instance_type: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering InstanceAttachment resources.
:param pulumi.Input[int] cen_owner_id: The account ID to which the CEN instance belongs.
:param pulumi.Input[str] child_instance_id: The ID of the child instance to attach.
        :param pulumi.Input[int] child_instance_owner_id: The uid of the child instance. Only used when attaching a child instance of another account.
:param pulumi.Input[str] child_instance_region_id: The region ID of the child instance to attach.
:param pulumi.Input[str] child_instance_type: The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
:param pulumi.Input[str] instance_id: The ID of the CEN.
:param pulumi.Input[str] status: The associating status of the network.
"""
if cen_owner_id is not None:
pulumi.set(__self__, "cen_owner_id", cen_owner_id)
if child_instance_id is not None:
pulumi.set(__self__, "child_instance_id", child_instance_id)
if child_instance_owner_id is not None:
pulumi.set(__self__, "child_instance_owner_id", child_instance_owner_id)
if child_instance_region_id is not None:
pulumi.set(__self__, "child_instance_region_id", child_instance_region_id)
if child_instance_type is not None:
pulumi.set(__self__, "child_instance_type", child_instance_type)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="cenOwnerId")
def cen_owner_id(self) -> Optional[pulumi.Input[int]]:
"""
The account ID to which the CEN instance belongs.
"""
return pulumi.get(self, "cen_owner_id")
@cen_owner_id.setter
def cen_owner_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cen_owner_id", value)
@property
@pulumi.getter(name="childInstanceId")
def child_instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the child instance to attach.
"""
return pulumi.get(self, "child_instance_id")
@child_instance_id.setter
def child_instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "child_instance_id", value)
@property
@pulumi.getter(name="childInstanceOwnerId")
def child_instance_owner_id(self) -> Optional[pulumi.Input[int]]:
"""
        The uid of the child instance. Only used when attaching a child instance of another account.
"""
return pulumi.get(self, "child_instance_owner_id")
@child_instance_owner_id.setter
def child_instance_owner_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "child_instance_owner_id", value)
@property
@pulumi.getter(name="childInstanceRegionId")
def child_instance_region_id(self) -> Optional[pulumi.Input[str]]:
"""
The region ID of the child instance to attach.
"""
return pulumi.get(self, "child_instance_region_id")
@child_instance_region_id.setter
def child_instance_region_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "child_instance_region_id", value)
@property
@pulumi.getter(name="childInstanceType")
def child_instance_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
"""
return pulumi.get(self, "child_instance_type")
@child_instance_type.setter
def child_instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "child_instance_type", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the CEN.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The associating status of the network.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
class InstanceAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cen_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_id: Optional[pulumi.Input[str]] = None,
child_instance_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_region_id: Optional[pulumi.Input[str]] = None,
child_instance_type: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides a CEN child instance attachment resource that associates the network (VPC, CCN, VBR) with the CEN instance.
->**NOTE:** Available in 1.42.0+
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
config = pulumi.Config()
name = config.get("name")
if name is None:
name = "tf-testAccCenInstanceAttachmentBasic"
cen = alicloud.cen.Instance("cen", description="terraform01")
vpc = alicloud.vpc.Network("vpc", cidr_block="192.168.0.0/16")
foo = alicloud.cen.InstanceAttachment("foo",
instance_id=cen.id,
child_instance_id=vpc.id,
child_instance_type="VPC",
child_instance_region_id="cn-beijing")
```
## Import
CEN instance can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cen/instanceAttachment:InstanceAttachment example cen-m7i7pjmkon********:vpc-2ze2w07mcy9nz********:VPC:cn-beijing
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] cen_owner_id: The account ID to which the CEN instance belongs.
:param pulumi.Input[str] child_instance_id: The ID of the child instance to attach.
        :param pulumi.Input[int] child_instance_owner_id: The uid of the child instance. Only used when attaching a child instance of another account.
:param pulumi.Input[str] child_instance_region_id: The region ID of the child instance to attach.
:param pulumi.Input[str] child_instance_type: The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
:param pulumi.Input[str] instance_id: The ID of the CEN.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InstanceAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides a CEN child instance attachment resource that associates the network (VPC, CCN, VBR) with the CEN instance.
->**NOTE:** Available in 1.42.0+
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
config = pulumi.Config()
name = config.get("name")
if name is None:
name = "tf-testAccCenInstanceAttachmentBasic"
cen = alicloud.cen.Instance("cen", description="terraform01")
vpc = alicloud.vpc.Network("vpc", cidr_block="192.168.0.0/16")
foo = alicloud.cen.InstanceAttachment("foo",
instance_id=cen.id,
child_instance_id=vpc.id,
child_instance_type="VPC",
child_instance_region_id="cn-beijing")
```
## Import
CEN instance can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cen/instanceAttachment:InstanceAttachment example cen-m7i7pjmkon********:vpc-2ze2w07mcy9nz********:VPC:cn-beijing
```
:param str resource_name: The name of the resource.
:param InstanceAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cen_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_id: Optional[pulumi.Input[str]] = None,
child_instance_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_region_id: Optional[pulumi.Input[str]] = None,
child_instance_type: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InstanceAttachmentArgs.__new__(InstanceAttachmentArgs)
__props__.__dict__["cen_owner_id"] = cen_owner_id
if child_instance_id is None and not opts.urn:
raise TypeError("Missing required property 'child_instance_id'")
__props__.__dict__["child_instance_id"] = child_instance_id
__props__.__dict__["child_instance_owner_id"] = child_instance_owner_id
if child_instance_region_id is None and not opts.urn:
raise TypeError("Missing required property 'child_instance_region_id'")
__props__.__dict__["child_instance_region_id"] = child_instance_region_id
if child_instance_type is None and not opts.urn:
raise TypeError("Missing required property 'child_instance_type'")
__props__.__dict__["child_instance_type"] = child_instance_type
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["status"] = None
super(InstanceAttachment, __self__).__init__(
'alicloud:cen/instanceAttachment:InstanceAttachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cen_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_id: Optional[pulumi.Input[str]] = None,
child_instance_owner_id: Optional[pulumi.Input[int]] = None,
child_instance_region_id: Optional[pulumi.Input[str]] = None,
child_instance_type: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'InstanceAttachment':
"""
Get an existing InstanceAttachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] cen_owner_id: The account ID to which the CEN instance belongs.
:param pulumi.Input[str] child_instance_id: The ID of the child instance to attach.
        :param pulumi.Input[int] child_instance_owner_id: The uid of the child instance. Only used when attaching a child instance of another account.
:param pulumi.Input[str] child_instance_region_id: The region ID of the child instance to attach.
:param pulumi.Input[str] child_instance_type: The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
:param pulumi.Input[str] instance_id: The ID of the CEN.
        :param pulumi.Input[str] status: The association status of the network.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InstanceAttachmentState.__new__(_InstanceAttachmentState)
__props__.__dict__["cen_owner_id"] = cen_owner_id
__props__.__dict__["child_instance_id"] = child_instance_id
__props__.__dict__["child_instance_owner_id"] = child_instance_owner_id
__props__.__dict__["child_instance_region_id"] = child_instance_region_id
__props__.__dict__["child_instance_type"] = child_instance_type
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["status"] = status
return InstanceAttachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cenOwnerId")
def cen_owner_id(self) -> pulumi.Output[Optional[int]]:
"""
The account ID to which the CEN instance belongs.
"""
return pulumi.get(self, "cen_owner_id")
@property
@pulumi.getter(name="childInstanceId")
def child_instance_id(self) -> pulumi.Output[str]:
"""
The ID of the child instance to attach.
"""
return pulumi.get(self, "child_instance_id")
@property
@pulumi.getter(name="childInstanceOwnerId")
def child_instance_owner_id(self) -> pulumi.Output[int]:
"""
The uid of the child instance. Only used when attach a child instance of other account.
"""
return pulumi.get(self, "child_instance_owner_id")
@property
@pulumi.getter(name="childInstanceRegionId")
def child_instance_region_id(self) -> pulumi.Output[str]:
"""
The region ID of the child instance to attach.
"""
return pulumi.get(self, "child_instance_region_id")
@property
@pulumi.getter(name="childInstanceType")
def child_instance_type(self) -> pulumi.Output[str]:
"""
The type of the associated network. Valid values: `VPC`, `VBR` and `CCN`.
"""
return pulumi.get(self, "child_instance_type")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
The ID of the CEN.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
        The association status of the network.
"""
return pulumi.get(self, "status")
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.ResourceOptions",
"pulumi.set"
] |
[((2111, 2148), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceId"""'}), "(name='childInstanceId')\n", (2124, 2148), False, 'import pulumi\n'), ((2491, 2534), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceRegionId"""'}), "(name='childInstanceRegionId')\n", (2504, 2534), False, 'import pulumi\n'), ((2919, 2958), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceType"""'}), "(name='childInstanceType')\n", (2932, 2958), False, 'import pulumi\n'), ((3345, 3377), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instanceId"""'}), "(name='instanceId')\n", (3358, 3377), False, 'import pulumi\n'), ((3669, 3701), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cenOwnerId"""'}), "(name='cenOwnerId')\n", (3682, 3701), False, 'import pulumi\n'), ((4049, 4091), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceOwnerId"""'}), "(name='childInstanceOwnerId')\n", (4062, 4091), False, 'import pulumi\n'), ((6724, 6756), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cenOwnerId"""'}), "(name='cenOwnerId')\n", (6737, 6756), False, 'import pulumi\n'), ((7104, 7141), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceId"""'}), "(name='childInstanceId')\n", (7117, 7141), False, 'import pulumi\n'), ((7504, 7546), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceOwnerId"""'}), "(name='childInstanceOwnerId')\n", (7517, 7546), False, 'import pulumi\n'), ((7987, 8030), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceRegionId"""'}), "(name='childInstanceRegionId')\n", (8000, 8030), False, 'import pulumi\n'), ((8435, 8474), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceType"""'}), "(name='childInstanceType')\n", (8448, 8474), False, 'import pulumi\n'), ((8881, 8913), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instanceId"""'}), "(name='instanceId')\n", (8894, 8913), False, 'import pulumi\n'), ((18886, 18918), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cenOwnerId"""'}), "(name='cenOwnerId')\n", (18899, 18918), False, 'import pulumi\n'), ((19129, 19166), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceId"""'}), "(name='childInstanceId')\n", (19142, 19166), False, 'import pulumi\n'), ((19367, 19409), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceOwnerId"""'}), "(name='childInstanceOwnerId')\n", (19380, 19409), False, 'import pulumi\n'), ((19670, 19713), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceRegionId"""'}), "(name='childInstanceRegionId')\n", (19683, 19713), False, 'import pulumi\n'), ((19935, 19974), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""childInstanceType"""'}), "(name='childInstanceType')\n", (19948, 19974), False, 'import pulumi\n'), ((20213, 20245), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instanceId"""'}), "(name='instanceId')\n", (20226, 20245), False, 'import pulumi\n'), ((1584, 1644), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_id"""', 'child_instance_id'], {}), "(__self__, 'child_instance_id', child_instance_id)\n", (1594, 1644), False, 'import pulumi\n'), ((1653, 1727), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_region_id"""', 'child_instance_region_id'], {}), "(__self__, 'child_instance_region_id', child_instance_region_id)\n", (1663, 1727), False, 'import pulumi\n'), ((1736, 1800), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_type"""', 'child_instance_type'], {}), "(__self__, 'child_instance_type', child_instance_type)\n", (1746, 1800), False, 'import pulumi\n'), ((1809, 1857), 'pulumi.set', 'pulumi.set', (['__self__', '"""instance_id"""', 'instance_id'], {}), "(__self__, 'instance_id', instance_id)\n", (1819, 1857), False, 'import pulumi\n'), ((2290, 2327), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_id"""'], {}), "(self, 'child_instance_id')\n", (2300, 2327), False, 'import pulumi\n'), ((2426, 2470), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_id"""', 'value'], {}), "(self, 'child_instance_id', value)\n", (2436, 2470), False, 'import pulumi\n'), ((2690, 2734), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_region_id"""'], {}), "(self, 'child_instance_region_id')\n", (2700, 2734), False, 'import pulumi\n'), ((2847, 2898), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_region_id"""', 'value'], {}), "(self, 'child_instance_region_id', value)\n", (2857, 2898), False, 'import pulumi\n'), ((3136, 3175), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_type"""'], {}), "(self, 'child_instance_type')\n", (3146, 3175), False, 'import pulumi\n'), ((3278, 3324), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_type"""', 'value'], {}), "(self, 'child_instance_type', value)\n", (3288, 3324), False, 'import pulumi\n'), ((3492, 3523), 'pulumi.get', 'pulumi.get', (['self', '"""instance_id"""'], {}), "(self, 'instance_id')\n", (3502, 3523), False, 'import pulumi\n'), ((3610, 3648), 'pulumi.set', 'pulumi.set', (['self', '"""instance_id"""', 'value'], {}), "(self, 'instance_id', value)\n", (3620, 3648), False, 'import pulumi\n'), ((3858, 3890), 'pulumi.get', 'pulumi.get', (['self', '"""cen_owner_id"""'], {}), "(self, 'cen_owner_id')\n", (3868, 3890), False, 'import pulumi\n'), ((3989, 4028), 'pulumi.set', 'pulumi.set', (['self', '"""cen_owner_id"""', 'value'], {}), "(self, 'cen_owner_id', value)\n", (3999, 4028), False, 'import pulumi\n'), ((4297, 4340), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_owner_id"""'], {}), "(self, 'child_instance_owner_id')\n", (4307, 4340), False, 'import pulumi\n'), ((4461, 4511), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_owner_id"""', 'value'], {}), "(self, 'child_instance_owner_id', value)\n", (4471, 4511), False, 'import pulumi\n'), ((6913, 6945), 'pulumi.get', 'pulumi.get', (['self', '"""cen_owner_id"""'], {}), "(self, 'cen_owner_id')\n", (6923, 6945), False, 'import pulumi\n'), ((7044, 7083), 'pulumi.set', 'pulumi.set', (['self', '"""cen_owner_id"""', 'value'], {}), "(self, 'cen_owner_id', value)\n", (7054, 7083), False, 'import pulumi\n'), ((7293, 7330), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_id"""'], {}), "(self, 'child_instance_id')\n", (7303, 7330), False, 'import pulumi\n'), ((7439, 7483), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_id"""', 'value'], {}), "(self, 'child_instance_id', value)\n", (7449, 7483), False, 'import pulumi\n'), ((7752, 7795), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_owner_id"""'], {}), "(self, 'child_instance_owner_id')\n", (7762, 7795), False, 'import pulumi\n'), ((7916, 7966), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_owner_id"""', 'value'], {}), "(self, 'child_instance_owner_id', value)\n", (7926, 7966), False, 'import pulumi\n'), ((8196, 8240), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_region_id"""'], {}), "(self, 'child_instance_region_id')\n", (8206, 8240), False, 'import pulumi\n'), ((8363, 8414), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_region_id"""', 'value'], {}), "(self, 'child_instance_region_id', value)\n", (8373, 8414), False, 'import pulumi\n'), ((8662, 8701), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_type"""'], {}), "(self, 'child_instance_type')\n", (8672, 8701), False, 'import pulumi\n'), ((8814, 8860), 'pulumi.set', 'pulumi.set', (['self', '"""child_instance_type"""', 'value'], {}), "(self, 'child_instance_type', value)\n", (8824, 8860), False, 'import pulumi\n'), ((9038, 9069), 'pulumi.get', 'pulumi.get', (['self', '"""instance_id"""'], {}), "(self, 'instance_id')\n", (9048, 9069), False, 'import pulumi\n'), ((9166, 9204), 'pulumi.set', 'pulumi.set', (['self', '"""instance_id"""', 'value'], {}), "(self, 'instance_id', value)\n", (9176, 9204), False, 'import pulumi\n'), ((9378, 9404), 'pulumi.get', 'pulumi.get', (['self', '"""status"""'], {}), "(self, 'status')\n", (9388, 9404), False, 'import pulumi\n'), ((9491, 9524), 'pulumi.set', 'pulumi.set', (['self', '"""status"""', 'value'], {}), "(self, 'status', value)\n", (9501, 9524), False, 'import pulumi\n'), ((19076, 19108), 'pulumi.get', 'pulumi.get', (['self', '"""cen_owner_id"""'], {}), "(self, 'cen_owner_id')\n", (19086, 19108), False, 'import pulumi\n'), ((19309, 19346), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_id"""'], {}), "(self, 'child_instance_id')\n", (19319, 19346), False, 'import pulumi\n'), ((19606, 19649), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_owner_id"""'], {}), "(self, 'child_instance_owner_id')\n", (19616, 19649), False, 'import pulumi\n'), ((19870, 19914), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_region_id"""'], {}), "(self, 'child_instance_region_id')\n", (19880, 19914), False, 'import pulumi\n'), ((20153, 20192), 'pulumi.get', 'pulumi.get', (['self', '"""child_instance_type"""'], {}), "(self, 'child_instance_type')\n", (20163, 20192), False, 'import pulumi\n'), ((20361, 20392), 'pulumi.get', 'pulumi.get', (['self', '"""instance_id"""'], {}), "(self, 'instance_id')\n", (20371, 20392), False, 'import pulumi\n'), ((20557, 20583), 'pulumi.get', 'pulumi.get', (['self', '"""status"""'], {}), "(self, 'status')\n", (20567, 20583), False, 'import pulumi\n'), ((1907, 1957), 'pulumi.set', 'pulumi.set', (['__self__', '"""cen_owner_id"""', 'cen_owner_id'], {}), "(__self__, 'cen_owner_id', cen_owner_id)\n", (1917, 1957), False, 'import pulumi\n'), ((2018, 2090), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_owner_id"""', 'child_instance_owner_id'], {}), "(__self__, 'child_instance_owner_id', child_instance_owner_id)\n", (2028, 2090), False, 'import pulumi\n'), ((5969, 6019), 'pulumi.set', 'pulumi.set', (['__self__', '"""cen_owner_id"""', 'cen_owner_id'], {}), "(__self__, 'cen_owner_id', cen_owner_id)\n", (5979, 6019), False, 'import pulumi\n'), ((6074, 6134), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_id"""', 'child_instance_id'], {}), "(__self__, 'child_instance_id', child_instance_id)\n", (6084, 6134), False, 'import pulumi\n'), ((6195, 6267), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_owner_id"""', 'child_instance_owner_id'], {}), "(__self__, 'child_instance_owner_id', child_instance_owner_id)\n", (6205, 6267), False, 'import pulumi\n'), ((6329, 6403), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_region_id"""', 'child_instance_region_id'], {}), "(__self__, 'child_instance_region_id', child_instance_region_id)\n", (6339, 6403), False, 'import pulumi\n'), ((6460, 6524), 'pulumi.set', 'pulumi.set', (['__self__', '"""child_instance_type"""', 'child_instance_type'], {}), "(__self__, 'child_instance_type', child_instance_type)\n", (6470, 6524), False, 'import pulumi\n'), ((6573, 6621), 'pulumi.set', 'pulumi.set', (['__self__', '"""instance_id"""', 'instance_id'], {}), "(__self__, 'instance_id', instance_id)\n", (6583, 6621), False, 'import pulumi\n'), ((6665, 6703), 'pulumi.set', 'pulumi.set', (['__self__', '"""status"""', 'status'], {}), "(__self__, 'status', status)\n", (6675, 6703), False, 'import pulumi\n'), ((14625, 14649), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (14647, 14649), False, 'import pulumi\n'), ((18211, 18240), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (18233, 18240), False, 'import pulumi\n')]
|
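The generated `get` method in the row above documents state lookup but has no call site anywhere in the row. A minimal sketch of adopting an existing attachment, reusing only the composite ID format shown in the Import section; everything else here is illustrative, not part of the generated module:
import pulumi
import pulumi_alicloud as alicloud

# composite ID per the Import docs: <cen_id>:<child_instance_id>:<type>:<region>
existing = alicloud.cen.InstanceAttachment.get(
    "imported",
    id="cen-m7i7pjmkon********:vpc-2ze2w07mcy9nz********:VPC:cn-beijing")
pulumi.export("attachment_status", existing.status)  # the association status string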
import time, os
from tester import counter, measure_handler
from jobs import Contextual_Job_processor
from multiprocessing import Process
manager = Contextual_Job_processor()
handler = Process(target=measure_handler, args=[manager.result_queue])
handler.daemon = True
handler.start()
for i in range(4):
manager.enqueue_measure(counter, {'N':i}, 'test', 'tester_%d'%i, 'some author')
while True:
print(os.getcwd())
job_list = manager.get_measure_queue()
for x in job_list:
pass
#print("%s\tstatus: %s\tprogress: %.2f\tResult: %s"%(x['name'],x['status'],x['progress'],str(x['result'])))
#print("\n")
time.sleep(0.5)
|
[
"os.getcwd",
"multiprocessing.Process",
"time.sleep",
"jobs.Contextual_Job_processor"
] |
[((148, 174), 'jobs.Contextual_Job_processor', 'Contextual_Job_processor', ([], {}), '()\n', (172, 174), False, 'from jobs import Contextual_Job_processor\n'), ((185, 245), 'multiprocessing.Process', 'Process', ([], {'target': 'measure_handler', 'args': '[manager.result_queue]'}), '(target=measure_handler, args=[manager.result_queue])\n', (192, 245), False, 'from multiprocessing import Process\n'), ((642, 657), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (652, 657), False, 'import time, os\n'), ((413, 424), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (422, 424), False, 'import time, os\n')]
|
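The `tester` and `jobs` modules imported in the row above are not included, so that snippet cannot run on its own. A self-contained sketch of the same queue-plus-daemon-handler pattern, with hypothetical stand-ins for `counter` and `measure_handler`:
import time
from multiprocessing import Process, Queue

def counter(N):
    # trivial stand-in workload; the real tester.counter is not shown
    return sum(range(N))

def measure_handler(result_queue):
    # daemon consumer: terminates together with the main process
    while True:
        print("result:", result_queue.get())

if __name__ == "__main__":
    results = Queue()
    handler = Process(target=measure_handler, args=[results])
    handler.daemon = True
    handler.start()
    for i in range(4):
        results.put(counter(i))
    time.sleep(1)  # give the daemon a moment to drain the queue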
# -*- coding: utf-8 -*-
#
import numpy
import pytest
import quadpy
from quadpy.nball.helpers import integrate_monomial_over_unit_nball
from helpers import check_degree
@pytest.mark.parametrize(
"scheme,tol",
[(quadpy.disk.Albrecht(k), 1.0e-14) for k in range(1, 9)]
+ [(quadpy.disk.CoolsHaegemans(k), 1.0e-14) for k in range(1, 4)]
+ [(quadpy.disk.CoolsKim(k), 1.0e-14) for k in range(1, 4)]
+ [(quadpy.disk.HaegemansPiessens(), 1.0e-14)]
+ [
(quadpy.disk.HammerStroud(k), 1.0e-14)
for k in ["11-2", "12-2", "13-2", "17", "18", "19", "20", "21"]
]
+ [(quadpy.disk.Lether(k), 1.0e-14) for k in range(1, 6)]
+ [(quadpy.disk.Peirce1957(k), 1.0e-14) for k in range(1, 6)]
+ [(quadpy.disk.RabinowitzRichter(k), 1.0e-14) for k in range(1, 7)]
+ [
(quadpy.disk.Stroud(k), 1.0e-14)
for k in [
"S2 3-1",
"S2 3-2",
"S2 4-1",
"S2 5-1",
"S2 5-2",
"S2 7-1",
"S2 7-2",
"S2 9-1",
"S2 9-2",
"S2 9-3",
"S2 9-4",
"S2 9-5",
"S2 11-1",
"S2 11-2",
"S2 11-3",
"S2 11-4",
"S2 13-1",
"S2 13-2",
"S2 15-1",
"S2 15-2",
"S2 17-1",
]
]
+ [(quadpy.disk.WissmannBecker(k), 1.0e-14) for k in ["6-1", "6-2", "8-1"]],
)
def test_scheme(scheme, tol):
assert scheme.points.dtype == numpy.float64, scheme.name
assert scheme.weights.dtype == numpy.float64, scheme.name
degree = check_degree(
lambda poly: quadpy.disk.integrate(poly, [0.0, 0.0], 1.0, scheme),
integrate_monomial_over_unit_nball,
2,
scheme.degree + 1,
tol=tol,
)
assert degree == scheme.degree, "{} -- Observed: {} expected: {}".format(
scheme.name, degree, scheme.degree
)
return
@pytest.mark.parametrize("scheme", [quadpy.disk.Lether(3)])
def test_show(scheme):
quadpy.disk.show(scheme)
return
if __name__ == "__main__":
# scheme_ = quadpy.disk.Lether(5)
scheme_ = quadpy.disk.Albrecht(8)
test_scheme(scheme_, 1.0e-14)
test_show(scheme_)
|
[
"quadpy.disk.Stroud",
"quadpy.disk.HaegemansPiessens",
"quadpy.disk.WissmannBecker",
"quadpy.disk.integrate",
"quadpy.disk.Peirce1957",
"quadpy.disk.Lether",
"quadpy.disk.CoolsHaegemans",
"quadpy.disk.CoolsKim",
"quadpy.disk.Albrecht",
"quadpy.disk.show",
"quadpy.disk.HammerStroud",
"quadpy.disk.RabinowitzRichter"
] |
[((2025, 2049), 'quadpy.disk.show', 'quadpy.disk.show', (['scheme'], {}), '(scheme)\n', (2041, 2049), False, 'import quadpy\n'), ((2142, 2165), 'quadpy.disk.Albrecht', 'quadpy.disk.Albrecht', (['(8)'], {}), '(8)\n', (2162, 2165), False, 'import quadpy\n'), ((1974, 1995), 'quadpy.disk.Lether', 'quadpy.disk.Lether', (['(3)'], {}), '(3)\n', (1992, 1995), False, 'import quadpy\n'), ((1636, 1688), 'quadpy.disk.integrate', 'quadpy.disk.integrate', (['poly', '[0.0, 0.0]', '(1.0)', 'scheme'], {}), '(poly, [0.0, 0.0], 1.0, scheme)\n', (1657, 1688), False, 'import quadpy\n'), ((1359, 1388), 'quadpy.disk.WissmannBecker', 'quadpy.disk.WissmannBecker', (['k'], {}), '(k)\n', (1385, 1388), False, 'import quadpy\n'), ((813, 834), 'quadpy.disk.Stroud', 'quadpy.disk.Stroud', (['k'], {}), '(k)\n', (831, 834), False, 'import quadpy\n'), ((731, 763), 'quadpy.disk.RabinowitzRichter', 'quadpy.disk.RabinowitzRichter', (['k'], {}), '(k)\n', (760, 763), False, 'import quadpy\n'), ((665, 690), 'quadpy.disk.Peirce1957', 'quadpy.disk.Peirce1957', (['k'], {}), '(k)\n', (687, 690), False, 'import quadpy\n'), ((603, 624), 'quadpy.disk.Lether', 'quadpy.disk.Lether', (['k'], {}), '(k)\n', (621, 624), False, 'import quadpy\n'), ((479, 506), 'quadpy.disk.HammerStroud', 'quadpy.disk.HammerStroud', (['k'], {}), '(k)\n', (503, 506), False, 'import quadpy\n'), ((419, 450), 'quadpy.disk.HaegemansPiessens', 'quadpy.disk.HaegemansPiessens', ([], {}), '()\n', (448, 450), False, 'import quadpy\n'), ((355, 378), 'quadpy.disk.CoolsKim', 'quadpy.disk.CoolsKim', (['k'], {}), '(k)\n', (375, 378), False, 'import quadpy\n'), ((221, 244), 'quadpy.disk.Albrecht', 'quadpy.disk.Albrecht', (['k'], {}), '(k)\n', (241, 244), False, 'import quadpy\n'), ((285, 314), 'quadpy.disk.CoolsHaegemans', 'quadpy.disk.CoolsHaegemans', (['k'], {}), '(k)\n', (311, 314), False, 'import quadpy\n')]
|
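Outside the pytest harness, the schemes above can be driven directly through `quadpy.disk.integrate`, the same call the test wraps. A sketch integrating f(x, y) = x^2 over the unit disk, whose exact value is pi/4; the shape convention of the integrand argument is an assumption inferred from the test's own usage:
import numpy
import quadpy

scheme = quadpy.disk.Lether(5)
# quadpy passes sample points as an array whose first axis is the coordinate
approx = quadpy.disk.integrate(lambda x: x[0] ** 2, [0.0, 0.0], 1.0, scheme)
print(approx, numpy.pi / 4)  # the two numbers should agree to high precision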
from LucidDynamodb import DynamoDb
import os
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
db = DynamoDb(region_name="us-east-1",
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
|
[
"LucidDynamodb.DynamoDb",
"os.getenv"
] |
[((65, 95), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (74, 95), False, 'import os\n'), ((120, 154), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (129, 154), False, 'import os\n'), ((160, 279), 'LucidDynamodb.DynamoDb', 'DynamoDb', ([], {'region_name': '"""us-east-1"""', 'aws_access_key_id': 'AWS_ACCESS_KEY_ID', 'aws_secret_access_key': 'AWS_SECRET_ACCESS_KEY'}), "(region_name='us-east-1', aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", (168, 279), False, 'from LucidDynamodb import DynamoDb\n')]
|
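`os.getenv` returns None when a variable is unset, so the client above would silently be constructed with null credentials in an incomplete environment. A small defensive variant, an assumption about desired behaviour rather than part of LucidDynamodb:
import os

for var in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"):
    if not os.getenv(var):
        raise RuntimeError(f"required environment variable {var} is not set")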
# NOTE: this example needs gepetto-gui to be installed
# usage: launch gepetto-gui and then run this test
import pinocchio as pin
import numpy as np
import sys
import os
from os.path import dirname, join, abspath
from pinocchio.visualize import GepettoVisualizer
# Load the URDF model.
# Conversion with str seems to be necessary when executing this file with ipython
pinocchio_model_dir = join(dirname(dirname(str(abspath(__file__)))),"models")
model_path = join(pinocchio_model_dir,"example-robot-data/robots")
mesh_dir = pinocchio_model_dir
urdf_filename = "talos_reduced.urdf"
urdf_model_path = join(join(model_path,"talos_data/robots"),urdf_filename)
model, collision_model, visual_model = pin.buildModelsFromUrdf(urdf_model_path, mesh_dir, pin.JointModelFreeFlyer())
viz = GepettoVisualizer(model, collision_model, visual_model)
# Initialize the viewer.
try:
viz.initViewer()
except ImportError as err:
print("Error while initializing the viewer. It seems you should install gepetto-viewer")
print(err)
sys.exit(0)
try:
viz.loadViewerModel("pinocchio")
except AttributeError as err:
print("Error while loading the viewer model. It seems you should start gepetto-viewer")
print(err)
sys.exit(0)
# Display a robot configuration.
q0 = pin.neutral(model)
viz.display(q0)
# Display another robot.
viz2 = GepettoVisualizer(model, collision_model, visual_model)
viz2.initViewer(viz.viewer)
viz2.loadViewerModel(rootNodeName = "pinocchio2")
q = q0.copy()
q[1] = 1.0
viz2.display(q)
|
[
"os.path.abspath",
"pinocchio.JointModelFreeFlyer",
"pinocchio.neutral",
"os.path.join",
"pinocchio.visualize.GepettoVisualizer",
"sys.exit"
] |
[((463, 517), 'os.path.join', 'join', (['pinocchio_model_dir', '"""example-robot-data/robots"""'], {}), "(pinocchio_model_dir, 'example-robot-data/robots')\n", (467, 517), False, 'from os.path import dirname, join, abspath\n'), ((784, 839), 'pinocchio.visualize.GepettoVisualizer', 'GepettoVisualizer', (['model', 'collision_model', 'visual_model'], {}), '(model, collision_model, visual_model)\n', (801, 839), False, 'from pinocchio.visualize import GepettoVisualizer\n'), ((1278, 1296), 'pinocchio.neutral', 'pin.neutral', (['model'], {}), '(model)\n', (1289, 1296), True, 'import pinocchio as pin\n'), ((1346, 1401), 'pinocchio.visualize.GepettoVisualizer', 'GepettoVisualizer', (['model', 'collision_model', 'visual_model'], {}), '(model, collision_model, visual_model)\n', (1363, 1401), False, 'from pinocchio.visualize import GepettoVisualizer\n'), ((608, 645), 'os.path.join', 'join', (['model_path', '"""talos_data/robots"""'], {}), "(model_path, 'talos_data/robots')\n", (612, 645), False, 'from os.path import dirname, join, abspath\n'), ((751, 776), 'pinocchio.JointModelFreeFlyer', 'pin.JointModelFreeFlyer', ([], {}), '()\n', (774, 776), True, 'import pinocchio as pin\n'), ((1031, 1042), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1039, 1042), False, 'import sys\n'), ((1227, 1238), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1235, 1238), False, 'import sys\n'), ((418, 435), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (425, 435), False, 'from os.path import dirname, join, abspath\n')]
|
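A possible follow-up to the row above, assuming its script has just run (so `q`, `viz2` and an open gepetto-gui are in scope): slide the second robot's free-flyer base along x to confirm the viewer updates live. Step size and count are arbitrary:
import time

for step in range(50):
    q[0] = 0.02 * step  # x-translation component of the free-flyer joint
    viz2.display(q)
    time.sleep(0.02)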
import setuptools
with open("requirements.txt") as f:
required_dependencies = f.read().strip().split("\n")
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sappears",
version="0.0.2",
author="<NAME>",
author_email="<EMAIL>",
description="A package to find strings in python modules",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/srcolinas/sappears",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=required_dependencies,
)
|
[
"setuptools.find_packages"
] |
[((506, 532), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (530, 532), False, 'import setuptools\n')]
|
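For illustration, the requirements parsing at the top of that setup.py reduces to the following; the file contents here are assumed, not taken from the repository:
text = "requests\nnumpy\n"  # hypothetical requirements.txt contents
required_dependencies = text.strip().split("\n")
assert required_dependencies == ["requests", "numpy"]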
import copy
import datetime
from dataclasses import asdict
from patient_abm import PATIENT_ABM_DIR
from patient_abm.agent.patient import (
PatientAgent,
PatientRecordEntry,
wrap_fhir_resource,
)
from patient_abm.data_handler.fhir import FHIRHandler, create_fhir_bundle
from patient_abm.utils import string_to_datetime
TEST_DATA_DIR = PATIENT_ABM_DIR / "tests" / "data"
def test_initialize_patient_agent_minimal():
patient_id = 2468
gender = "female"
birth_date = "1985-05-24"
start_time = "2020-12-01"
agent_id = 12345
created_at = "2021-03-09"
patient = PatientAgent(
patient_id=patient_id,
gender=gender,
birth_date=birth_date,
start_time=start_time,
id_=agent_id,
created_at=created_at,
)
patient_profile = {
"resource_type": "Patient",
"id": str(patient_id),
"gender": gender,
"birth_date": birth_date,
}
wrapped_entry = wrap_fhir_resource(
patient_profile,
patient_time=string_to_datetime(start_time),
environment_id=-1,
tag="patient_profile",
)
wrapped_entry["real_time"] = string_to_datetime(created_at)
wrapped_entry["record_index"] = 0
wrapped_entry["entry_id"] = patient.record[0].entry_id
initial_patient_resource = PatientRecordEntry(**wrapped_entry)
assert patient.id == agent_id
assert patient.created_at == string_to_datetime(created_at)
assert patient.__repr__() == (
f"PatientAgent(id={agent_id}, "
f"created_at={string_to_datetime(created_at)})"
)
assert patient.record == [initial_patient_resource]
def test_initialize_patient_agent_with_comorbities():
patient_id = 2468
gender = "female"
birth_date = "1985-05-24"
start_time = "2020-12-01"
agent_id = 12345
created_at = "2021-03-09"
comorbidity = {
"resource_type": "Condition",
"name": "Diabetes",
"start": "2018-10-21",
}
wrapped_comorbidity = wrap_fhir_resource(
comorbidity,
patient_time=string_to_datetime(comorbidity["start"]),
environment_id=-1,
tag="comorbidity_diabetes",
)
wrapped_comorbidity["real_time"] = string_to_datetime(created_at)
telecom = {"system": "phone", "value": "555-113-5410", "use": "home"}
age = 35
kwargs = {
"patient__telecom": telecom,
"age": age,
}
initial_record = [wrapped_comorbidity]
patient = PatientAgent(
patient_id=patient_id,
gender=gender,
birth_date=birth_date,
start_time=start_time,
id_=agent_id,
created_at=created_at,
record=initial_record,
**kwargs,
)
patient_profile = {
"resource_type": "Patient",
"id": str(patient_id),
"gender": gender,
"birth_date": birth_date,
"telecom": telecom,
}
wrapped_patient_profile = wrap_fhir_resource(
patient_profile,
patient_time=string_to_datetime(start_time),
environment_id=-1,
tag="patient_profile",
)
wrapped_patient_profile["real_time"] = string_to_datetime(created_at)
wrapped_patient_profile["record_index"] = 0
wrapped_patient_profile["entry_id"] = patient.record[0].entry_id
initial_patient_entry = PatientRecordEntry(**wrapped_patient_profile)
wrapped_comorbidity["real_time"] = string_to_datetime(created_at)
wrapped_comorbidity["record_index"] = 1
wrapped_comorbidity["entry_id"] = patient.record[1].entry_id
comorbidity_entry = PatientRecordEntry(**wrapped_comorbidity)
assert patient.id == agent_id
assert patient.created_at == string_to_datetime(created_at)
assert (
patient.__repr__() == f"PatientAgent(id={agent_id}, "
f"created_at={string_to_datetime(created_at)})"
)
assert patient.age == age
assert patient.patient__telecom == telecom
assert len(patient.record) == 2
assert patient.record[0] == initial_patient_entry
assert patient.record[1] == comorbidity_entry
assert len(patient.conditions) == 1
assert len(patient.medications) == 0
assert len(patient.actions) == 0
def test_update_patient_agent():
patient_id = 2468
gender = "female"
birth_date = "1985-05-24"
patient_start_time = "2020-12-01"
agent_id = 12345
created_at = "2021-03-09"
patient = PatientAgent(
patient_id=patient_id,
gender=gender,
birth_date=birth_date,
patient_start_time=patient_start_time,
id_=agent_id,
created_at=created_at,
)
assert len(patient.record) == 1
assert len(patient.conditions) == 0
comorbidity = {
"resource_type": "Condition",
"name": "Diabetes",
"start": "2018-10-21",
}
wrapped_comorbidity = wrap_fhir_resource(
comorbidity,
patient_time=string_to_datetime(comorbidity["start"]),
environment_id=-1,
tag="comorbidity_diabetes",
)
wrapped_comorbidity["real_time"] = string_to_datetime(created_at)
patient.update([wrapped_comorbidity])
wrapped_comorbidity["record_index"] = 1
wrapped_comorbidity["entry_id"] = patient.record[1].entry_id
assert len(patient.record) == 2
assert len(patient.conditions) == 1
assert asdict(patient.record[-1]) == wrapped_comorbidity
patient.update([wrapped_comorbidity], skip_existing=False)
assert len(patient.record) == 3
assert len(patient.conditions) == 1
assert asdict(patient.record[-1]) == wrapped_comorbidity
patient.update([wrapped_comorbidity], skip_existing=True)
assert len(patient.record) == 3
assert len(patient.conditions) == 1
assert asdict(patient.record[-1]) == wrapped_comorbidity
assert patient.conditions["active"][0]
assert patient.conditions["end"][0] is None
wrapped_comorbidity = copy.deepcopy(wrapped_comorbidity)
wrapped_comorbidity["entry"]["end"] = "2020-10-21"
patient.update([wrapped_comorbidity], skip_existing=True)
assert len(patient.record) == 4
assert len(patient.conditions) == 1
assert asdict(patient.record[-1]) == wrapped_comorbidity
assert not patient.conditions["active"][0]
assert patient.conditions["end"][0] == string_to_datetime(
wrapped_comorbidity["entry"]["end"]
)
patient_agent_path = TEST_DATA_DIR / "patient_agent.tar"
patient.save(patient_agent_path)
_patient = PatientAgent.load(patient_agent_path)
dfs = ["conditions", "medications", "actions"]
for attr_name in PatientAgent.serialisable_attributes:
if attr_name in dfs:
            # NOTE: there is some discrepancy between the two data frames:
# in one, the timestamps can appear with timezone tz="tzutc()"
# whereas in the other they are tz="UTC", and this causes the
# equality check to fail - this is why .astype(str) has been
# applied to both dataframes. We leave it to future work to
# iron out this detail
assert (
getattr(patient, attr_name)
.astype(str)
.equals(getattr(_patient, attr_name).astype(str))
)
else:
assert getattr(patient, attr_name) == getattr(_patient, attr_name)
patient_agent_path.unlink()
def test_patient_conditions_update():
patient_id = 2468
gender = "female"
birth_date = "1985-05-24"
patient = PatientAgent(
patient_id=patient_id,
gender=gender,
birth_date=birth_date,
)
condition_entry = {
"resource_type": "Condition",
"name": "Fever",
"start": "2021-03-21",
}
patient_time = datetime.datetime.now(patient.tz)
patient.update([wrap_fhir_resource(condition_entry, patient_time)])
|
[
"copy.deepcopy",
"dataclasses.asdict",
"datetime.datetime.now",
"patient_abm.agent.patient.wrap_fhir_resource",
"patient_abm.agent.patient.PatientAgent.load",
"patient_abm.agent.patient.PatientAgent",
"patient_abm.utils.string_to_datetime",
"patient_abm.agent.patient.PatientRecordEntry"
] |
[((601, 738), 'patient_abm.agent.patient.PatientAgent', 'PatientAgent', ([], {'patient_id': 'patient_id', 'gender': 'gender', 'birth_date': 'birth_date', 'start_time': 'start_time', 'id_': 'agent_id', 'created_at': 'created_at'}), '(patient_id=patient_id, gender=gender, birth_date=birth_date,\n start_time=start_time, id_=agent_id, created_at=created_at)\n', (613, 738), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((1164, 1194), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (1182, 1194), False, 'from patient_abm.utils import string_to_datetime\n'), ((1324, 1359), 'patient_abm.agent.patient.PatientRecordEntry', 'PatientRecordEntry', ([], {}), '(**wrapped_entry)\n', (1342, 1359), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((2226, 2256), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (2244, 2256), False, 'from patient_abm.utils import string_to_datetime\n'), ((2483, 2658), 'patient_abm.agent.patient.PatientAgent', 'PatientAgent', ([], {'patient_id': 'patient_id', 'gender': 'gender', 'birth_date': 'birth_date', 'start_time': 'start_time', 'id_': 'agent_id', 'created_at': 'created_at', 'record': 'initial_record'}), '(patient_id=patient_id, gender=gender, birth_date=birth_date,\n start_time=start_time, id_=agent_id, created_at=created_at, record=\n initial_record, **kwargs)\n', (2495, 2658), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((3143, 3173), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (3161, 3173), False, 'from patient_abm.utils import string_to_datetime\n'), ((3320, 3365), 'patient_abm.agent.patient.PatientRecordEntry', 'PatientRecordEntry', ([], {}), '(**wrapped_patient_profile)\n', (3338, 3365), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((3406, 3436), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (3424, 3436), False, 'from patient_abm.utils import string_to_datetime\n'), ((3571, 3612), 'patient_abm.agent.patient.PatientRecordEntry', 'PatientRecordEntry', ([], {}), '(**wrapped_comorbidity)\n', (3589, 3612), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((4398, 4551), 'patient_abm.agent.patient.PatientAgent', 'PatientAgent', ([], {'patient_id': 'patient_id', 'gender': 'gender', 'birth_date': 'birth_date', 'patient_start_time': 'patient_start_time', 'id_': 'agent_id', 'created_at': 'created_at'}), '(patient_id=patient_id, gender=gender, birth_date=birth_date,\n patient_start_time=patient_start_time, id_=agent_id, created_at=created_at)\n', (4410, 4551), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((5042, 5072), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (5060, 5072), False, 'from patient_abm.utils import string_to_datetime\n'), ((5886, 5920), 'copy.deepcopy', 'copy.deepcopy', (['wrapped_comorbidity'], {}), '(wrapped_comorbidity)\n', (5899, 5920), False, 'import copy\n'), ((6454, 6491), 'patient_abm.agent.patient.PatientAgent.load', 'PatientAgent.load', (['patient_agent_path'], {}), '(patient_agent_path)\n', (6471, 6491), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((7466, 7539), 'patient_abm.agent.patient.PatientAgent', 'PatientAgent', ([], {'patient_id': 'patient_id', 'gender': 'gender', 'birth_date': 'birth_date'}), '(patient_id=patient_id, gender=gender, birth_date=birth_date)\n', (7478, 7539), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((7716, 7749), 'datetime.datetime.now', 'datetime.datetime.now', (['patient.tz'], {}), '(patient.tz)\n', (7737, 7749), False, 'import datetime\n'), ((1428, 1458), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (1446, 1458), False, 'from patient_abm.utils import string_to_datetime\n'), ((3681, 3711), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (3699, 3711), False, 'from patient_abm.utils import string_to_datetime\n'), ((5314, 5340), 'dataclasses.asdict', 'asdict', (['patient.record[-1]'], {}), '(patient.record[-1])\n', (5320, 5340), False, 'from dataclasses import asdict\n'), ((5516, 5542), 'dataclasses.asdict', 'asdict', (['patient.record[-1]'], {}), '(patient.record[-1])\n', (5522, 5542), False, 'from dataclasses import asdict\n'), ((5717, 5743), 'dataclasses.asdict', 'asdict', (['patient.record[-1]'], {}), '(patient.record[-1])\n', (5723, 5743), False, 'from dataclasses import asdict\n'), ((6127, 6153), 'dataclasses.asdict', 'asdict', (['patient.record[-1]'], {}), '(patient.record[-1])\n', (6133, 6153), False, 'from dataclasses import asdict\n'), ((6268, 6323), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (["wrapped_comorbidity['entry']['end']"], {}), "(wrapped_comorbidity['entry']['end'])\n", (6286, 6323), False, 'from patient_abm.utils import string_to_datetime\n'), ((1034, 1064), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['start_time'], {}), '(start_time)\n', (1052, 1064), False, 'from patient_abm.utils import string_to_datetime\n'), ((2076, 2116), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (["comorbidity['start']"], {}), "(comorbidity['start'])\n", (2094, 2116), False, 'from patient_abm.utils import string_to_datetime\n'), ((3003, 3033), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['start_time'], {}), '(start_time)\n', (3021, 3033), False, 'from patient_abm.utils import string_to_datetime\n'), ((4892, 4932), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (["comorbidity['start']"], {}), "(comorbidity['start'])\n", (4910, 4932), False, 'from patient_abm.utils import string_to_datetime\n'), ((7770, 7819), 'patient_abm.agent.patient.wrap_fhir_resource', 'wrap_fhir_resource', (['condition_entry', 'patient_time'], {}), '(condition_entry, patient_time)\n', (7788, 7819), False, 'from patient_abm.agent.patient import PatientAgent, PatientRecordEntry, wrap_fhir_resource\n'), ((1556, 1586), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (1574, 1586), False, 'from patient_abm.utils import string_to_datetime\n'), ((3809, 3839), 'patient_abm.utils.string_to_datetime', 'string_to_datetime', (['created_at'], {}), '(created_at)\n', (3827, 3839), False, 'from patient_abm.utils import string_to_datetime\n')]
|
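Condensed from the tests above, the save/load round trip uses only calls that appear in the row (`PatientAgent`, `.save`, `PatientAgent.load`); the constructor arguments and the tar path are illustrative:
from pathlib import Path
from patient_abm.agent.patient import PatientAgent

agent = PatientAgent(patient_id=1, gender="female", birth_date="1985-05-24")
path = Path("patient_agent.tar")
agent.save(path)
restored = PatientAgent.load(path)
assert restored.id == agent.id  # serialisable attributes survive the round trip
path.unlink()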
#!/usr/bin/env python
import re
import os
import helix.azure_storage
import helix.event
import helix.settings
import helix.logs
log = helix.logs.get_logger()
acceptableXUnitFileNames = [
"testResults.xml",
"test-results.xml",
"test_results.xml"
]
class HelixHelper:
def __init__(self, settings):
self.settings = settings
self.event_client = helix.event.create_from_uri(settings.event_uri)
self.upload_client = helix.azure_storage.get_upload_client(settings)
def error(self, error_type, message, log_uri=None):
self.event_client.error(self.settings, error_type, message, log_uri)
def xunit(self, results_uri, test_count):
self.event_client.send(
{
'Type': 'XUnitTestResult',
'WorkItemId': self.settings.workitem_id,
'WorkItemFriendlyName': self.settings.workitem_friendly_name,
'CorrelationId': self.settings.correlation_id,
'ResultsXmlUri': results_uri,
'TestCount': test_count,
}
)
def upload_file_to_storage(self, file_path):
""" Copy file specified to azure storage account using Helix infrastructure
:param file_path: Path to file to be copied to Azure storage
        :type file_path: string
"""
try:
return self.upload_client.upload(file_path, os.path.basename(file_path))
except ValueError:
self.error("FailedUpload", "Failed to upload "+file_path+"after retry")
def findXUnitResults(search_dir):
for root, dirs, files in os.walk(search_dir):
for file_name in files:
if file_name in acceptableXUnitFileNames:
return os.path.join(root, file_name)
return None
def main():
settings = helix.settings.settings_from_env()
if settings.output_uri is None or settings.event_uri is None:
log.error("Unable to report xunit results: output_uri and/or event_uri are not set.")
return 1
helper = HelixHelper(settings)
working_dir = settings.workitem_working_dir
results_path = findXUnitResults(working_dir)
if results_path is None:
log.error("Unable to report xunit results: no test results xml file found.")
return 2
log.info("Uploading results from {}".format(results_path))
with open(results_path, encoding="utf-8") as result_file:
test_count = 0
total_regex = re.compile(r'total="(\d+)"')
for line in result_file:
if '<assembly ' in line:
match = total_regex.search(line)
if match is not None:
test_count = int(match.groups()[0])
break
result_url = helper.upload_file_to_storage(results_path)
log.info("Sending completion event")
helper.xunit(result_url, test_count)
return 0
if __name__ == '__main__':
import sys
sys.exit(main())
|
[
"os.path.basename",
"os.walk",
"os.path.join",
"re.compile"
] |
[((1600, 1619), 'os.walk', 'os.walk', (['search_dir'], {}), '(search_dir)\n', (1607, 1619), False, 'import os\n'), ((2455, 2483), 're.compile', 're.compile', (['"""total="(\\\\d+)\\""""'], {}), '(\'total="(\\\\d+)"\')\n', (2465, 2483), False, 'import re\n'), ((1396, 1423), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (1412, 1423), False, 'import os\n'), ((1730, 1759), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (1742, 1759), False, 'import os\n')]
|
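The test-count extraction in `main()` above is plain `re`; isolated here with a sample line that mimics an xunit `<assembly>` element (the attribute values are made up):
import re

total_regex = re.compile(r'total="(\d+)"')
line = '<assembly name="tests.dll" total="42" passed="40" failed="2">'
match = total_regex.search(line)
test_count = int(match.groups()[0]) if match else 0
print(test_count)  # prints 42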
from django.apps import apps
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
def explore(request):
peoples_fetch_qs = apps.get_model('star_wars_test', 'PeopleFetch').objects.all().order_by('-fetching_date')
context = {"collections": peoples_fetch_qs}
return render(request, 'explore.html', context)
def collection_download(request, collection_id):
from django.http import FileResponse
PeopleFetch = apps.get_model('star_wars_test', 'PeopleFetch')
people_fetch = get_object_or_404(PeopleFetch, id=collection_id)
path_to_file = people_fetch.file_path
return FileResponse(open(path_to_file, 'rb'))
def collection(request, collection_id):
PeopleFetch = apps.get_model('star_wars_test', 'PeopleFetch')
people_fetch = get_object_or_404(PeopleFetch, id=collection_id)
# TODO: create a form to validate the get param limit is a well formatted integer
limit = int(request.GET.get("limit", 10))
next_limit = limit + 10
html_table = people_fetch.get_html_etl_table(limit=limit)
context = {
"collection": people_fetch,
"next_limit": next_limit,
"html_table": html_table,
}
return render(request, 'collection.html', context)
def fetch(request):
apps.get_model('star_wars_test', 'PeopleFetch').objects.fetch()
return HttpResponseRedirect("/")
|
[
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.http.HttpResponseRedirect",
"django.apps.apps.get_model"
] |
[((324, 364), 'django.shortcuts.render', 'render', (['request', '"""explore.html"""', 'context'], {}), "(request, 'explore.html', context)\n", (330, 364), False, 'from django.shortcuts import render, get_object_or_404\n'), ((475, 522), 'django.apps.apps.get_model', 'apps.get_model', (['"""star_wars_test"""', '"""PeopleFetch"""'], {}), "('star_wars_test', 'PeopleFetch')\n", (489, 522), False, 'from django.apps import apps\n'), ((542, 590), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['PeopleFetch'], {'id': 'collection_id'}), '(PeopleFetch, id=collection_id)\n', (559, 590), False, 'from django.shortcuts import render, get_object_or_404\n'), ((743, 790), 'django.apps.apps.get_model', 'apps.get_model', (['"""star_wars_test"""', '"""PeopleFetch"""'], {}), "('star_wars_test', 'PeopleFetch')\n", (757, 790), False, 'from django.apps import apps\n'), ((810, 858), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['PeopleFetch'], {'id': 'collection_id'}), '(PeopleFetch, id=collection_id)\n', (827, 858), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1218, 1261), 'django.shortcuts.render', 'render', (['request', '"""collection.html"""', 'context'], {}), "(request, 'collection.html', context)\n", (1224, 1261), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1363, 1388), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/"""'], {}), "('/')\n", (1383, 1388), False, 'from django.http import HttpResponseRedirect\n'), ((1288, 1335), 'django.apps.apps.get_model', 'apps.get_model', (['"""star_wars_test"""', '"""PeopleFetch"""'], {}), "('star_wars_test', 'PeopleFetch')\n", (1302, 1335), False, 'from django.apps import apps\n'), ((176, 223), 'django.apps.apps.get_model', 'apps.get_model', (['"""star_wars_test"""', '"""PeopleFetch"""'], {}), "('star_wars_test', 'PeopleFetch')\n", (190, 223), False, 'from django.apps import apps\n')]
|
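One way to discharge the TODO in `collection` above is a small Django form for the `limit` query parameter; this is a sketch, not code from the project:
from django import forms

class LimitForm(forms.Form):
    limit = forms.IntegerField(required=False, min_value=1)

# possible usage inside collection():
#     form = LimitForm(request.GET)
#     if form.is_valid():
#         limit = form.cleaned_data["limit"] or 10
#     else:
#         limit = 10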
import chess
import chess.pgn
import math
from collections.abc import Mapping
import json
from random import randrange
import berserk
import time
import itertools
client = berserk.Client()
upgradeToBot = False
if upgradeToBot:
client.account.upgrade_to_bot()
token = ""
with open('.apitoken') as f:
token = f.read().replace("\n", "")
session = berserk.TokenSession("" + token)
client = berserk.Client(session)
gameRunning = False
isWhite = True
board = chess.Board()
legalMoves = []
maxMoves = 1000
maxGames = 1
def getLegalMoves(board):
legalMoves = []
for move in board.legal_moves:
legalMoves.append(str(move))
return legalMoves
def split(word):
return [char for char in word]
def isDict(variable):
return isinstance(variable, Mapping)
def isEmptyDict(inDict):
return not bool(inDict)
currentDepth = 0
movesMade = 0
def makeBoardMove(board, move):
move = str(move)
global currentDepth
if move != "":
# board.push(chess.Move.from_uci(move))
newMove = chess.Move.from_uci(move)
board.push(newMove)
currentDepth += 1
return board
def resetBoard(board):
global currentDepth
#print("resetting depth: ", currentDepth)
while currentDepth - movesMade != 0:
board.pop()
currentDepth -= 1
def resetBoardX(board, steps):
global currentDepth
#print("resetting depth x: ", currentDepth)
for step in range(steps):
if currentDepth - movesMade != 0:
currentDepth -= 1
board.pop()
def getScorePieces(board):
chars = split(str(board))
score = 0
    # make sure both players don't want to draw
if board.is_stalemate():
#print("HYPOTHETICAL STALEMATE")
if board.turn == chess.WHITE:
return -10
else:
return 10
if board.can_claim_draw():
        # TODO: return min/max depending on whether the score is below/above 0
# print("CAN CLAIM DRAW")
if board.turn == chess.WHITE:
return -10
else:
return 10
i = 0
for char in chars:
if char == ' ' or char == '\n':
continue
row = int(i/8)
column = i % 8
i += 1
distanceToCenter = math.sqrt((row-3.5)**2 + (column-3.5)**2)
positionValue = -(distanceToCenter/1000000.0)
#print("distance", multiplier)
# Black pieces
if char == "r":
score -= 5.63
score -= positionValue
elif char == "n":
score -= 3.05
score -= positionValue
elif char == "q":
score -= 9.5
elif char == "b":
score -= 3.33
score -= positionValue
elif char == "k":
score -= 1000
score += positionValue
elif char == "p":
score -= 1
score -= positionValue
# White pieces
elif char == "R":
score += 5.63
score += positionValue
elif char == "N":
score += 3.05
score += positionValue
elif char == "Q":
score += 9.5
elif char == "B":
score += 3.33
score += positionValue
elif char == "K":
score += 1000
score -= positionValue
elif char == "P":
score += 1
score += positionValue
# print(board)
# print(score)
return score
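# Worked example of the centering term above (explanatory comment, not in the
# original script): a piece on e4 maps to (row, column) = (4, 4), so
# distanceToCenter = sqrt((4 - 3.5)**2 + (4 - 3.5)**2) ~ 0.707 and
# positionValue ~ -7.07e-7; squares nearer the centre incur a slightly
# smaller penalty, which nudges pieces toward the middle of the board.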
def getMoveRandom(board):
legalMoves = getLegalMoves(board)
#print("legal moves: ", len(legalMoves))
moveIndex = randrange(0, len(legalMoves))
return str(legalMoves[moveIndex])
def should_accept(arg):
return not gameRunning
should_decline_if_in_game = True
for event in client.bots.stream_incoming_events():
if event['type'] == 'challenge':
print("event--------------------------------")
print(event)
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
if should_accept(event):
client.bots.accept_challenge(event['challenge']['id'])
gameID = event['challenge']['id']
# event['challenge']['color'] is opponents color
isWhite = event['challenge']['color'] == "black"
print("is white")
print(isWhite)
print("starting challange")
gameRunning = True
break
elif should_decline_if_in_game:
client.bots.decline_challenge(event['challenge']['id'])
print("declining challange")
print("done with loop")
if not gameRunning:
print("setting custom id @@@@@@@@@@@@@@@@@@@@@@")
gameID = "vztNjpfq"
print("gameid: ", gameID)
def getMoveFirst(board):
legalMoves = getLegalMoves(board)
return str(legalMoves[0])
def getBestMove1Depth(board):
legalMoves = getLegalMoves(board)
if len(legalMoves) == 0:
return ""
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
# board.push(chess.Move.from_uci(move))
makeBoardMove(board, move)
moveScore = getScorePieces(board) * colorScoreCorrection
if moveScore > bestScore:
bestScore = moveScore
bestMove = move
resetBoardX(board, 1)
return bestMove
def getScoreAfterMove(board, move):
move = str(move)
makeBoardMove(board, move)
score = getScorePieces(board)
resetBoardX(board, 1)
return score
def searchNode(board, node, moves, previousMoves, queue):
minScore = 10000
maxScore = -10000
legalMoves = getLegalMoves(board)
for move in legalMoves:
#currentMoveTree[move] = {}
score = getScoreAfterMove(board, move)
#currentMoveTree[move][SCORE] = score
#currentMoveTree[move][MOVES] = {}
maxScore = max(maxScore, score)
minScore = min(minScore, score)
# propegate score upwards (TODO?)
if board.turn == chess.WHITE:
node["score"] = maxScore
elif board.turn == chess.BLACK:
node["score"] = minScore
node["moves"][move] = {}
node["moves"][move]["score"] = score
node["moves"][move]["moves"] = {}
#print("node: ", node)
nextQueueMove = []
nextQueueMove.extend(previousMoves)
nextQueueMove.append(move)
queue.insert(0, nextQueueMove)
def getBestMoveSearchTree(board, nNodes):
bestMove = getMoveRandom(board)
moves = {}
searchedNodes = 0
queue = []
searchDepth = 0
legalMoves = getLegalMoves(board)
if len(legalMoves) == 0:
return ""
SCORE = "score"
MOVES = "moves"
for move in legalMoves:
moves[move] = {}
moves[move][SCORE] = getScoreAfterMove(board, move)
moves[move][MOVES] = {}
sortedMoves = sorted(moves.items(), key=lambda x: x[1][SCORE])
for sortedMove in sortedMoves:
if True:
queue.append([sortedMove[0]])
else:
if board.turn == chess.WHITE:
queue.append([sortedMove[0]])
elif board.turn == chess.BLACK:
queue.insert(0, [sortedMove[0]])
#print("queue", queue)
# always start from current board state
while(searchedNodes < nNodes):
searchedNodes += 1
if len(queue) == 0:
break
queueMove = queue.pop()
madeMoves = queueMove.copy()
while len(queueMove) > 0:
nextMove = queueMove.pop()
if nextMove in getLegalMoves(board):
searchDepth += 1
if board.turn == chess.WHITE:
print("WHITE making move")
else:
print("BLACK making move")
print("move:", nextMove)
makeBoardMove(board, nextMove)
print(board)
if False:
if board.turn == chess.WHITE:
#nextMove = sortedMoves[len(sortedMoves)-1][0]
nextMove = queue[len(sortedMoves)-1]
elif board.turn == chess.BLACK:
#nextMove = sortedMoves[0][0]
nextMove = queue[0]
print("current Depth", searchDepth)
print("queue popped:", madeMoves)
prevMoveNode = moves[nextMove]
#searchNode(board, prevMoveNode, moves, madeMoves, queue)
#print("queue", queue)
#moves[nextMove]["moves"] = currentMoveTree
# board is reset to current state
resetBoardX(board, searchDepth)
searchDepth = 0
#moves[move] = {}
#moves[move][SCORE] = getScoreAfterMove(board, move)
#moves[move][MOVES] = {}
#print("actual moves", json.dumps(moves, indent= 4))
#print("actual moves", json.dumps(moves, indent= 4))
sortedMoves = sorted(moves.items(), key=lambda x: x[1][SCORE])
bestMove = ""
if board.turn == chess.WHITE:
bestMove = sortedMoves[len(sortedMoves)-1][0]
elif board.turn == chess.BLACK:
bestMove = sortedMoves[0][0]
#print("___________________RETURNING BEST MOVE__________________________")
return str(bestMove)
def getBestMove2Depth(board):
originalBoard = board.copy()
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
board = originalBoard.copy()
makeBoardMove(board, move)
bestOpponentMove = getBestMove1Depth(board)
makeBoardMove(board, bestOpponentMove)
scoreD2 = getScorePieces(board) * colorScoreCorrection
if scoreD2 > bestScore:
bestScore = scoreD2
bestMove = move
board = originalBoard.copy()
return bestMove
def getBestMove3Depth(board):
originalBoard = board.copy()
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
board = originalBoard.copy()
makeBoardMove(board, move)
bestOpponentMove = getBestMove2Depth(board)
makeBoardMove(board, bestOpponentMove)
originalBoard2 = board.copy()
legalMoves2 = getLegalMoves(board)
for move2 in legalMoves2:
move2 = str(move2)
board = originalBoard2.copy()
makeBoardMove(board, move2)
scoreD2 = getScorePieces(board) * colorScoreCorrection
if scoreD2 > bestScore:
bestScore = scoreD2
bestMove = move
board = originalBoard.copy()
return bestMove
def getBestMove3Depthv0(board):
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
makeBoardMove(board, move)
bestOpponentMove = getBestMove2Depth(board)
makeBoardMove(board, bestOpponentMove)
scoreD2 = getScorePieces(board) * colorScoreCorrection * -1
if scoreD2 > bestScore:
bestScore = scoreD2
bestMove = move
resetBoard(board)
return bestMove
def getBestMove2Depthv2(board):
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
board.push(chess.Move.from_uci(move))
# opponent makes a move
bestOpponentMove = getBestMove1Depth(board)
board.push(chess.Move.from_uci(bestOpponentMove))
# responding move
legalMoves = getLegalMoves(board)
for moveDepth2 in legalMoves:
moveDepth2 = str(moveDepth2)
board.push(chess.Move.from_uci(moveDepth2))
moveScoreDepth2 = getScorePieces(board) * colorScoreCorrection
if moveScoreDepth2 > bestScore:
bestScore = moveScoreDepth2
bestMove = move
# reset until opponent move
board.pop()
# reset opponent move
board.pop()
#moveScore = getScorePieces(board) * colorScoreCorrection
# Reset board
board.pop()
return bestMove
def getBestMove2Depthv3(board):
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
board.push(chess.Move.from_uci(move))
# opponent makes a move
bestOpponentMove = getBestMove2Depthv2(board)
board.push(chess.Move.from_uci(bestOpponentMove))
# responding move
legalMoves = getLegalMoves(board)
for moveDepth2 in legalMoves:
moveDepth2 = str(moveDepth2)
board.push(chess.Move.from_uci(moveDepth2))
moveScoreDepth2 = getScorePieces(board) * colorScoreCorrection
if moveScoreDepth2 > bestScore:
bestScore = moveScoreDepth2
bestMove = move
# reset until opponent move
board.pop()
# reset opponent move
board.pop()
#moveScore = getScorePieces(board) * colorScoreCorrection
# Reset board
board.pop()
return bestMove
def getBestMoveDepth2(board):
# print("iteration@@@@@@@@@@@@@@@@@@@@@@@@@")
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
makeBoardMove(board, move)
bestOpponentMove = getBestMove1Depth(board)
makeBoardMove(board, bestOpponentMove)
bestMoveDepth2 = getBestMove1Depth(board)
makeBoardMove(board, bestMoveDepth2)
scoreD2 = getScorePieces(board) * colorScoreCorrection
resetBoard(board)
if scoreD2 > bestScore:
#print("setting new best move")
bestScore = scoreD2
bestMove = move
#print("legal moves: ", legalMoves)
#print("making move: ", bestMove)
return bestMove
def getBestMoveDepth3(board):
# print("iteration@@@@@@@@@@@@@@@@@@@@@@@@@")
legalMoves = getLegalMoves(board)
bestScore = -10000
colorScoreCorrection = 1
bestMove = ""
if board.turn == chess.BLACK:
colorScoreCorrection = -1
for move in legalMoves:
move = str(move)
makeBoardMove(board, move)
bestOpponentMove = getBestMoveDepth2(board)
makeBoardMove(board, bestOpponentMove)
bestMoveDepth2 = getBestMoveDepth2(board)
makeBoardMove(board, bestMoveDepth2)
bestOpponentMove2 = getBestMove1Depth(board)
makeBoardMove(board, bestOpponentMove2)
bestMoveDepth3 = getBestMove1Depth(board)
makeBoardMove(board, bestMoveDepth3)
scoreD3 = getScorePieces(board) * colorScoreCorrection
resetBoard(board)
if scoreD3 > bestScore:
#print("setting new best move")
bestScore = scoreD3
bestMove = move
#print("legal moves: ", legalMoves)
#print("making move: ", bestMove)
return bestMove
nWhiteWon = 0
nBlackWon = 0
nDraw = 0
getScorePieces(board)
for g in range(maxGames):
board = chess.Board()
savedGame = chess.pgn.Game()
savedGame.headers["Event"] = "Example"
node = None
for m in range(maxMoves):
print("starting stuff")
print("move: ", m)
humanMove = ""
if isWhite and m == 0:
print("passing")
client.bots.make_move(gameID, "e2e4")
makeBoardMove(board, "e2e4")
gen = client.bots.stream_game_state(gameID)
breakLoop = False
while not breakLoop:
print("looping stuff 1")
for element in gen:
print("looping stuff")
if(element):
if element['type'] == "gameState":
moves = element['moves'].split(" ")
humanMove = moves[len(moves)-1]
print("latest move: ")
print(humanMove)
makeBoardMove(board, humanMove)
print("made human move")
breakLoop = True
gen.close()
print("doing normal stuff")
if m % 10 == 0:
print("move", m)
# Check if game is over
if board.is_stalemate():
nDraw += 1
break
if board.is_game_over():
result = board.result()
if result == "1/2-1/2":
nDraw += 1
if result == "1-0":
nWhiteWon += 1
if result == "0-1":
nBlackWon += 1
savedGame.headers["Result"] = result
break
move = ""
if board.turn == chess.WHITE:
#move = getBestMoveDepth2(board)
#move = getMoveRandom(board)
#move = getBestMoveSearchTree(board, 1000)
move = getBestMove2Depth(board)
#move = getBestMove1Depth(board)
#move = getBestMoveSearchTree(board, 2000)
if board.turn == chess.BLACK:
#move = getBestMoveDepth2(board)
#move = getBestMoveSearchTree(board, 1000)
#move = getBestMove2Depth(board)
#move = getMoveRandom(board)
move = getBestMove2Depth(board)
#move = getBestMoveSearchTree(board, 2000)
#move = getBestMove1Depth(board)
print("best move: ")
print(move)
makeBoardMove(board, move)
client.bots.make_move(gameID, move)
movesMade += 1
print("made computer move")
#print("moves made: ", movesMade)
# board.push(chess.Move.from_uci(move))
#print("trying move", move)
if m == 0:
node = savedGame.add_main_variation(chess.Move.from_uci(move))
else:
node = node.add_main_variation(chess.Move.from_uci(move))
#node.comment = "value: " + str(getScorePieces(board))
print("----------------------------------------------")
print(str(savedGame))
if True:
print("white won", nWhiteWon)
print("black won", nBlackWon)
print("draw", nDraw)
|
[
"chess.Move.from_uci",
"math.sqrt",
"berserk.Client",
"chess.Board",
"chess.pgn.Game",
"berserk.TokenSession"
] |
[((174, 190), 'berserk.Client', 'berserk.Client', ([], {}), '()\n', (188, 190), False, 'import berserk\n'), ((358, 390), 'berserk.TokenSession', 'berserk.TokenSession', (["('' + token)"], {}), "('' + token)\n", (378, 390), False, 'import berserk\n'), ((400, 423), 'berserk.Client', 'berserk.Client', (['session'], {}), '(session)\n', (414, 423), False, 'import berserk\n'), ((469, 482), 'chess.Board', 'chess.Board', ([], {}), '()\n', (480, 482), False, 'import chess\n'), ((15809, 15822), 'chess.Board', 'chess.Board', ([], {}), '()\n', (15820, 15822), False, 'import chess\n'), ((15840, 15856), 'chess.pgn.Game', 'chess.pgn.Game', ([], {}), '()\n', (15854, 15856), False, 'import chess\n'), ((1043, 1068), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move'], {}), '(move)\n', (1062, 1068), False, 'import chess\n'), ((2242, 2291), 'math.sqrt', 'math.sqrt', (['((row - 3.5) ** 2 + (column - 3.5) ** 2)'], {}), '((row - 3.5) ** 2 + (column - 3.5) ** 2)\n', (2251, 2291), False, 'import math\n'), ((11831, 11856), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move'], {}), '(move)\n', (11850, 11856), False, 'import chess\n'), ((11962, 11999), 'chess.Move.from_uci', 'chess.Move.from_uci', (['bestOpponentMove'], {}), '(bestOpponentMove)\n', (11981, 11999), False, 'import chess\n'), ((12936, 12961), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move'], {}), '(move)\n', (12955, 12961), False, 'import chess\n'), ((13069, 13106), 'chess.Move.from_uci', 'chess.Move.from_uci', (['bestOpponentMove'], {}), '(bestOpponentMove)\n', (13088, 13106), False, 'import chess\n'), ((12173, 12204), 'chess.Move.from_uci', 'chess.Move.from_uci', (['moveDepth2'], {}), '(moveDepth2)\n', (12192, 12204), False, 'import chess\n'), ((13280, 13311), 'chess.Move.from_uci', 'chess.Move.from_uci', (['moveDepth2'], {}), '(moveDepth2)\n', (13299, 13311), False, 'import chess\n'), ((18496, 18521), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move'], {}), '(move)\n', (18515, 18521), False, 'import chess\n'), ((18580, 18605), 'chess.Move.from_uci', 'chess.Move.from_uci', (['move'], {}), '(move)\n', (18599, 18605), False, 'import chess\n')]
|
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-few-public-methods
"""The simple pay-api client is defined here."""
import copy
import json
from enum import Enum
from functools import wraps
import requests
from flask import current_app
from mhr_api.services.payment import TransactionTypes
MSG_CLIENT_CREDENTIALS_REQ_FAILED = 'Client credentials request failed'
MSG_INVALID_HTTP_VERB = 'Invalid HTTP verb'
# Mapping from PPR transaction to Pay API filing type
TRANSACTION_TO_FILING_TYPE = {
'SEARCH': 'MSRCH',
'SEARCH_COMBO': 'CSRCH',
'SEARCH_STAFF': 'MSRCS',
'SEARCH_STAFF_COMBO': 'CSRCS',
'CERTIFIED': 'MHRCD'
}
# Mapping from normal filing type to staff version of filing type
TO_STAFF_FILING_TYPE = {
'MSRCH': 'MSRCS',
'CSRCH': 'CSRCS'
}
PAYMENT_FILING_TYPE_TEMPLATE = {
'filingTypeCode': '',
'priority': False,
'futureEffective': False,
'quantity': 1
}
PAYMENT_REQUEST_TEMPLATE = {
'filingInfo': {
'filingIdentifier': '',
'folioNumber': '',
'filingTypes': [
{
'filingTypeCode': '',
'priority': False,
'futureEffective': False,
'quantity': 1
}
]
},
'businessInfo': {
'corpType': 'MHR'
},
'details': [
{
'label': '',
'value': ''
}
]
}
PAYMENT_REFUND_TEMPLATE = {
'reason': 'Immediate transaction rollback.'
}
PATH_PAYMENT = 'payment-requests'
PATH_REFUND = 'payment-requests/{invoice_id}/refunds'
PATH_INVOICE = 'payment-requests/{invoice_id}'
PATH_RECEIPT = 'payment-requests/{invoice_id}/receipts'
STATUS_COMPLETED = 'COMPLETED'
STATUS_CREATED = 'CREATED'
STATUS_PAID = 'PAID'
STATUS_APPROVED = 'APPROVED'
class ApiClientException(Exception):
"""Capture api request call error information."""
def __init__(self, wrapped_err=None, body=None, message='Exception', status_code=500):
"""Set up the exception."""
self.body = body
self.err = wrapped_err
if wrapped_err:
self.message = '{msg}: {desc}'.format(msg=message, desc=str(wrapped_err))
else:
self.message = message
# Map HTTP status if the wrapped error has an HTTP status code
self.status_code = wrapped_err.status if wrapped_err and hasattr(wrapped_err, 'status') else status_code
super().__init__(self.message)
class ApiRequestError(Exception):
"""Capture api request call error information."""
def __init__(self, response=None, message='API request failed'):
"""Set up the exception."""
if response is not None:
self.status_code = response.status_code
try:
self.json_data = json.loads(response.text)
except Exception: # noqa: B902; return nicer default error
current_app.logger.error('Pay api non-JSON response: ' + response.text)
self.json_data = {'message': 'Error parsing payment error response as JSON.'}
self.json_data['status_code'] = response.status_code
self.detail = self.json_data.get('detail', '')
self.title = self.json_data.get('title', '')
self.message = str(response.status_code) + ': ' + self.detail
else:
self.message = message
self.json_data = None
super().__init__(self.message)
class HttpVerbs(Enum):
"""Enumeration of HTTP verbs."""
GET = 'get'
POST = 'post'
PUT = 'put'
DELETE = 'delete'
PATCH = 'patch'
OPTIONS = 'options'
HEAD = 'head'
class BaseClient:
"""Base class for common api call properties and functions."""
def __init__(self, jwt=None, account_id=None, api_key=None, details=None):
"""Set the API URL from the env variables PAYMENT_SVC_PREFIX and PAYMENT_SVC_URL."""
service_url = current_app.config.get('PAYMENT_SVC_URL')
self.api_url = service_url + '/' if service_url[-1] != '/' else service_url
self.jwt = jwt
self.account_id = account_id
self.api_key = api_key
if details and 'label' in details and 'value' in details:
self.detail_label = details['label']
self.detail_value = details['value']
else:
self.detail_label = None
self.detail_value = None
def call_api(self, # pylint: disable=too-many-arguments
method,
relative_path,
data=None,
token=None,
include_account: bool = True):
"""Call the Pay API."""
try:
headers = {
'Authorization': 'Bearer ' + token if token is not None else 'Bearer ' + self.jwt,
'Content-Type': 'application/json'
}
if include_account and self.account_id:
headers['Account-Id'] = self.account_id
if self.api_key:
headers['x-apikey'] = self.api_key
# current_app.logger.debug(json.dumps(headers))
url = self.api_url + relative_path
# current_app.logger.debug(method.value + ' url=' + url)
if data:
# current_app.logger.debug(json.dumps(data))
response = requests.request(
method.value,
url,
params=None,
json=data,
headers=headers
)
else:
response = requests.request(
method.value,
url,
params=None,
headers=headers
)
if response is not None:
if self.account_id:
current_app.logger.info('Account ' + self.account_id + ' pay api response=' + response.text)
else:
current_app.logger.info('Pay api response=' + response.text)
if not response.ok:
raise ApiRequestError(response, str(response.status_code) + ': ' + response.text)
return json.loads(response.text)
except (ApiRequestError) as err:
current_app.logger.error('call_api error: ' + err.message)
raise err
class SBCPaymentClient(BaseClient):
"""Pay API client implementation."""
@staticmethod
def create_payment_search_data(selections, mhr_id=None, # pylint: disable=too-many-branches
client_reference_id=None,
staff_gov=False):
"""Build the payment-request body formatted as JSON."""
data = copy.deepcopy(PAYMENT_REQUEST_TEMPLATE)
mhr_count: int = 0
combo_count: int = 0
for match in selections:
if match.get('includeLienInfo', False):
combo_count += 1
else:
mhr_count += 1
if mhr_count > 0:
filing_type = TRANSACTION_TO_FILING_TYPE[TransactionTypes.SEARCH.value]
if staff_gov:
filing_type = TO_STAFF_FILING_TYPE[filing_type]
data['filingInfo']['filingTypes'][0]['filingTypeCode'] = filing_type
data['filingInfo']['filingTypes'][0]['quantity'] = mhr_count
else:
filing_type = TRANSACTION_TO_FILING_TYPE[TransactionTypes.SEARCH_COMBO.value]
if staff_gov:
filing_type = TO_STAFF_FILING_TYPE[filing_type]
data['filingInfo']['filingTypes'][0]['filingTypeCode'] = filing_type
data['filingInfo']['filingTypes'][0]['quantity'] = combo_count
if mhr_count > 0 and combo_count > 0:
filing_type = TRANSACTION_TO_FILING_TYPE[TransactionTypes.SEARCH_COMBO.value]
if staff_gov:
filing_type = TO_STAFF_FILING_TYPE[filing_type]
extra_filing_data = copy.deepcopy(PAYMENT_FILING_TYPE_TEMPLATE)
extra_filing_data['filingTypeCode'] = filing_type
extra_filing_data['quantity'] = combo_count
data['filingInfo']['filingTypes'].append(extra_filing_data)
if mhr_id:
data['filingInfo']['filingIdentifier'] = mhr_id
else:
del data['filingInfo']['filingIdentifier']
if client_reference_id:
data['filingInfo']['folioNumber'] = client_reference_id
else:
del data['filingInfo']['folioNumber']
return data
@staticmethod
def create_payment_staff_search_data(selections, transaction_info, mhr_id, client_reference_id=None):
"""Build the staff search payment-request body formatted as JSON."""
data = SBCPaymentClient.create_payment_search_data(selections, mhr_id, client_reference_id, False)
if transaction_info.get('waiveFees'):
for filing_type in data['filingInfo']['filingTypes']:
filing_type['waiveFees'] = True
else:
# set up FAS payment
if 'routingSlipNumber' in transaction_info:
account_info = {
'routingSlip': transaction_info['routingSlipNumber']
}
data['accountInfo'] = account_info
# setup BCOL account payment
elif 'bcolAccountNumber' in transaction_info:
account_info = {
'bcolAccountNumber': transaction_info['bcolAccountNumber']
}
if 'datNumber' in transaction_info:
account_info['datNumber'] = transaction_info['datNumber']
data['accountInfo'] = account_info
return data
@staticmethod
def create_payment_data(transaction_type, quantity=1, ppr_id=None, client_reference_id=None, processing_fee=None):
"""Build the payment-request body formatted as JSON."""
data = copy.deepcopy(PAYMENT_REQUEST_TEMPLATE)
filing_type = TRANSACTION_TO_FILING_TYPE[transaction_type]
data['filingInfo']['filingTypes'][0]['filingTypeCode'] = filing_type
if quantity != 1:
data['filingInfo']['filingTypes'][0]['quantity'] = quantity
if ppr_id:
data['filingInfo']['filingIdentifier'] = ppr_id
else:
del data['filingInfo']['filingIdentifier']
if processing_fee:
# alter fee code to staff fee code
if filing_type in TO_STAFF_FILING_TYPE:
data['filingInfo']['filingTypes'][0]['filingTypeCode'] = TO_STAFF_FILING_TYPE[filing_type]
# add processing fee item
processing_filing_type = TRANSACTION_TO_FILING_TYPE[processing_fee]
data['filingInfo']['filingTypes'].append({
'filingTypeCode': processing_filing_type,
'priority': False,
'futureEffective': False,
'quantity': 1
})
if client_reference_id:
data['filingInfo']['folioNumber'] = client_reference_id
else:
del data['filingInfo']['folioNumber']
return data
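    # Illustrative result (a sketch) of create_payment_data(TransactionTypes.SEARCH.value, 1, '123456', 'F-001'):
    #   {'filingInfo': {'filingIdentifier': '123456', 'folioNumber': 'F-001',
    #                   'filingTypes': [{'filingTypeCode': 'MSRCH', 'priority': False,
    #                                    'futureEffective': False, 'quantity': 1}]},
    #    'businessInfo': {'corpType': 'MHR'}, 'details': [{'label': '', 'value': ''}]}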
@staticmethod
def create_payment_staff_registration_data(transaction_info, client_reference_id=None, processing_fee=None):
"""Build the payment-request body formatted as JSON."""
data = copy.deepcopy(PAYMENT_REQUEST_TEMPLATE)
filing_type = TRANSACTION_TO_FILING_TYPE[transaction_info['transactionType']]
data['filingInfo']['filingTypes'][0]['filingTypeCode'] = filing_type
if transaction_info['feeQuantity'] != 1:
data['filingInfo']['filingTypes'][0]['quantity'] = transaction_info['feeQuantity']
if 'transaction_id' in transaction_info:
data['filingInfo']['filingIdentifier'] = transaction_info['transaction_id']
else:
del data['filingInfo']['filingIdentifier']
if processing_fee:
# alter fee code to staff fee code
if filing_type in TO_STAFF_FILING_TYPE:
data['filingInfo']['filingTypes'][0]['filingTypeCode'] = TO_STAFF_FILING_TYPE[filing_type]
# add processing fee item
processing_filing_type = TRANSACTION_TO_FILING_TYPE[processing_fee]
data['filingInfo']['filingTypes'].append({
'filingTypeCode': processing_filing_type,
'priority': False,
'futureEffective': False,
'quantity': 1
})
if client_reference_id:
data['filingInfo']['folioNumber'] = client_reference_id
else:
del data['filingInfo']['folioNumber']
if 'waiveFees' in transaction_info and transaction_info['waiveFees']:
data['filingInfo']['filingTypes'][0]['waiveFees'] = True
# set up FAS payment
elif 'routingSlipNumber' in transaction_info:
account_info = {
'routingSlip': transaction_info['routingSlipNumber']
}
data['accountInfo'] = account_info
# setup BCOL account payment
elif 'bcolAccountNumber' in transaction_info:
account_info = {
'bcolAccountNumber': transaction_info['bcolAccountNumber']
}
if 'datNumber' in transaction_info:
account_info['datNumber'] = transaction_info['datNumber']
data['accountInfo'] = account_info
return data
def create_payment( # pylint: disable=too-many-arguments
self,
transaction_type,
quantity=1,
ppr_id=None,
client_reference_id=None,
processing_fee=None
):
"""Submit a payment request for the PPR API transaction."""
data = SBCPaymentClient.create_payment_data(
transaction_type, quantity, ppr_id, client_reference_id, processing_fee)
if self.detail_label and self.detail_value:
data['details'][0]['label'] = self.detail_label
data['details'][0]['value'] = self.detail_value
else:
del data['details']
        # current_app.logger.debug('create payment payload:')
# current_app.logger.debug(json.dumps(data))
invoice_data = self.call_api(HttpVerbs.POST, PATH_PAYMENT, data)
invoice_id = str(invoice_data['id'])
receipt_path = self.api_url.replace('https://', '')
receipt_path = receipt_path[receipt_path.find('/'): None] + PATH_RECEIPT.format(invoice_id=invoice_id)
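        # e.g. (a sketch with a hypothetical URL) 'https://pay.example.com/api/v1/' yields
        # '/api/v1/payment-requests/<invoice_id>/receipts'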
# Return the pay reference to include in the API response.
pay_reference = {
'invoiceId': invoice_id,
'receipt': receipt_path
}
return pay_reference
def create_payment_search(self, selections, mhr_id=None, client_reference_id=None, staff_gov=False):
"""Submit a non-staff payment search request for the MHR API transaction."""
data = SBCPaymentClient.create_payment_search_data(selections, mhr_id, client_reference_id, staff_gov)
if self.detail_label and self.detail_value:
data['details'][0]['label'] = self.detail_label
data['details'][0]['value'] = self.detail_value
else:
del data['details']
        # current_app.logger.debug('create non-staff search payment payload:')
# current_app.logger.debug(json.dumps(data))
invoice_data = self.call_api(HttpVerbs.POST, PATH_PAYMENT, data)
invoice_id = str(invoice_data['id'])
receipt_path = self.api_url.replace('https://', '')
receipt_path = receipt_path[receipt_path.find('/'): None] + PATH_RECEIPT.format(invoice_id=invoice_id)
# Return the pay reference to include in the API response.
pay_reference = {
'invoiceId': invoice_id,
'receipt': receipt_path
}
return pay_reference
def create_payment_staff_search(self, selections, transaction_info, mhr_id=None, client_reference_id=None):
"""Submit a staff search payment request for the PPR API transaction."""
current_app.logger.debug('Setting up staff search data.')
data = SBCPaymentClient.create_payment_staff_search_data(selections,
transaction_info,
mhr_id,
client_reference_id)
if self.detail_label and self.detail_value:
data['details'][0]['label'] = self.detail_label
data['details'][0]['value'] = self.detail_value
else:
del data['details']
current_app.logger.debug('staff search create payment payload for account: ' + self.account_id)
current_app.logger.debug(json.dumps(data))
invoice_data = self.call_api(HttpVerbs.POST, PATH_PAYMENT, data, include_account=False)
return SBCPaymentClient.build_pay_reference(invoice_data, self.api_url)
def create_payment_staff_registration(self, transaction_info, client_reference_id=None, processing_fee=None):
"""Submit a staff registration payment request for the PPR API transaction."""
data = SBCPaymentClient.create_payment_staff_registration_data(
transaction_info, client_reference_id, processing_fee)
if self.detail_label and self.detail_value:
data['details'][0]['label'] = self.detail_label
data['details'][0]['value'] = self.detail_value
else:
del data['details']
current_app.logger.debug('staff registration create payment payload: ')
current_app.logger.debug(json.dumps(data))
invoice_data = self.call_api(HttpVerbs.POST, PATH_PAYMENT, data, include_account=False)
return SBCPaymentClient.build_pay_reference(invoice_data, self.api_url)
def cancel_payment(self, invoice_id):
"""Immediately cancel or refund the transaction payment as a state rollback."""
# Payment status does not matter with immediate refunds: always use the refund endpoint.
current_app.logger.info('Calling pay api to refund payment')
request_path = PATH_REFUND.format(invoice_id=invoice_id)
return self.call_api(HttpVerbs.POST,
request_path,
data=PAYMENT_REFUND_TEMPLATE,
token=SBCPaymentClient.get_sa_token())
def get_payment(self, invoice_id):
"""Fetch the current state of the payment invoice."""
request_path = PATH_INVOICE.format(invoice_id=invoice_id)
return self.call_api(HttpVerbs.GET, request_path)
@staticmethod
def get_sa_token():
"""Refunds must be submitted with a PPR service account token. Request one from the OIDC service."""
oidc_token_url = current_app.config.get('JWT_OIDC_TOKEN_URL')
client_id = current_app.config.get('ACCOUNT_SVC_CLIENT_ID')
client_secret = current_app.config.get('ACCOUNT_SVC_CLIENT_SECRET')
current_app.logger.info(f'Calling OIDC api to get token: URL = {oidc_token_url}, client_id={client_id}.')
try:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
data = f'grant_type=client_credentials&scope=openid&client_id={client_id}&client_secret={client_secret}'
response = requests.request(
HttpVerbs.POST.value,
oidc_token_url,
data=data,
params=None,
headers=headers
)
if not response or not response.ok:
raise ApiRequestError(response)
response_json = json.loads(response.text)
token = response_json['access_token']
current_app.logger.info('Have new sa token from OIDC.')
return token
except (ApiRequestError) as err:
current_app.logger.error(err.message)
raise err
@staticmethod
def build_pay_reference(invoice_data, api_url: str):
"""Build a payment reference from the pay api response invoice info."""
invoice_id = str(invoice_data['id'])
receipt_path = api_url.replace('https://', '')
receipt_path = receipt_path[receipt_path.find('/'): None] + PATH_RECEIPT.format(invoice_id=invoice_id)
# Return the pay reference to include in the API response.
pay_reference = {
'invoiceId': invoice_id,
'receipt': receipt_path
}
return pay_reference
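# Usage sketch (illustrative; the token, account id, and detail values are placeholders):
#   client = SBCPaymentClient(jwt=jwt_token, account_id='PS12345',
#                             details={'label': 'MHR Number', 'value': '123456'})
#   pay_ref = client.create_payment(TransactionTypes.SEARCH.value, quantity=1)
#   client.cancel_payment(pay_ref['invoiceId'])  # immediate rollback/refund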
|
[
"copy.deepcopy",
"json.loads",
"flask.current_app.config.get",
"json.dumps",
"flask.current_app.logger.debug",
"requests.request",
"flask.current_app.logger.info",
"flask.current_app.logger.error"
] |
[((4459, 4500), 'flask.current_app.config.get', 'current_app.config.get', (['"""PAYMENT_SVC_URL"""'], {}), "('PAYMENT_SVC_URL')\n", (4481, 4500), False, 'from flask import current_app\n'), ((7252, 7291), 'copy.deepcopy', 'copy.deepcopy', (['PAYMENT_REQUEST_TEMPLATE'], {}), '(PAYMENT_REQUEST_TEMPLATE)\n', (7265, 7291), False, 'import copy\n'), ((10439, 10478), 'copy.deepcopy', 'copy.deepcopy', (['PAYMENT_REQUEST_TEMPLATE'], {}), '(PAYMENT_REQUEST_TEMPLATE)\n', (10452, 10478), False, 'import copy\n'), ((11853, 11892), 'copy.deepcopy', 'copy.deepcopy', (['PAYMENT_REQUEST_TEMPLATE'], {}), '(PAYMENT_REQUEST_TEMPLATE)\n', (11866, 11892), False, 'import copy\n'), ((16568, 16625), 'flask.current_app.logger.debug', 'current_app.logger.debug', (['"""Setting up staff search data."""'], {}), "('Setting up staff search data.')\n", (16592, 16625), False, 'from flask import current_app\n'), ((17171, 17271), 'flask.current_app.logger.debug', 'current_app.logger.debug', (["('staff search create payment payload for account: ' + self.account_id)"], {}), "(\n 'staff search create payment payload for account: ' + self.account_id)\n", (17195, 17271), False, 'from flask import current_app\n'), ((18061, 18132), 'flask.current_app.logger.debug', 'current_app.logger.debug', (['"""staff registration create payment payload: """'], {}), "('staff registration create payment payload: ')\n", (18085, 18132), False, 'from flask import current_app\n'), ((18596, 18656), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""Calling pay api to refund payment"""'], {}), "('Calling pay api to refund payment')\n", (18619, 18656), False, 'from flask import current_app\n'), ((19340, 19384), 'flask.current_app.config.get', 'current_app.config.get', (['"""JWT_OIDC_TOKEN_URL"""'], {}), "('JWT_OIDC_TOKEN_URL')\n", (19362, 19384), False, 'from flask import current_app\n'), ((19405, 19452), 'flask.current_app.config.get', 'current_app.config.get', (['"""ACCOUNT_SVC_CLIENT_ID"""'], {}), "('ACCOUNT_SVC_CLIENT_ID')\n", (19427, 19452), False, 'from flask import current_app\n'), ((19477, 19528), 'flask.current_app.config.get', 'current_app.config.get', (['"""ACCOUNT_SVC_CLIENT_SECRET"""'], {}), "('ACCOUNT_SVC_CLIENT_SECRET')\n", (19499, 19528), False, 'from flask import current_app\n'), ((19537, 19652), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Calling OIDC api to get token: URL = {oidc_token_url}, client_id={client_id}."""'], {}), "(\n f'Calling OIDC api to get token: URL = {oidc_token_url}, client_id={client_id}.'\n )\n", (19560, 19652), False, 'from flask import current_app\n'), ((6703, 6728), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (6713, 6728), False, 'import json\n'), ((8477, 8520), 'copy.deepcopy', 'copy.deepcopy', (['PAYMENT_FILING_TYPE_TEMPLATE'], {}), '(PAYMENT_FILING_TYPE_TEMPLATE)\n', (8490, 8520), False, 'import copy\n'), ((17300, 17316), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (17310, 17316), False, 'import json\n'), ((18166, 18182), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (18176, 18182), False, 'import json\n'), ((19948, 20048), 'requests.request', 'requests.request', (['HttpVerbs.POST.value', 'oidc_token_url'], {'data': 'data', 'params': 'None', 'headers': 'headers'}), '(HttpVerbs.POST.value, oidc_token_url, data=data, params=\n None, headers=headers)\n', (19964, 20048), False, 'import requests\n'), ((20264, 20289), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (20274, 20289), False, 'import json\n'), ((20352, 20407), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""Have new sa token from OIDC."""'], {}), "('Have new sa token from OIDC.')\n", (20375, 20407), False, 'from flask import current_app\n'), ((3321, 3346), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (3331, 3346), False, 'import json\n'), ((5859, 5935), 'requests.request', 'requests.request', (['method.value', 'url'], {'params': 'None', 'json': 'data', 'headers': 'headers'}), '(method.value, url, params=None, json=data, headers=headers)\n', (5875, 5935), False, 'import requests\n'), ((6099, 6164), 'requests.request', 'requests.request', (['method.value', 'url'], {'params': 'None', 'headers': 'headers'}), '(method.value, url, params=None, headers=headers)\n', (6115, 6164), False, 'import requests\n'), ((6783, 6841), 'flask.current_app.logger.error', 'current_app.logger.error', (["('call_api error: ' + err.message)"], {}), "('call_api error: ' + err.message)\n", (6807, 6841), False, 'from flask import current_app\n'), ((20487, 20524), 'flask.current_app.logger.error', 'current_app.logger.error', (['err.message'], {}), '(err.message)\n', (20511, 20524), False, 'from flask import current_app\n'), ((3436, 3507), 'flask.current_app.logger.error', 'current_app.logger.error', (["('Pay api non-JSON response: ' + response.text)"], {}), "('Pay api non-JSON response: ' + response.text)\n", (3460, 3507), False, 'from flask import current_app\n'), ((6357, 6453), 'flask.current_app.logger.info', 'current_app.logger.info', (["('Account ' + self.account_id + ' pay api response=' + response.text)"], {}), "('Account ' + self.account_id + ' pay api response=' +\n response.text)\n", (6380, 6453), False, 'from flask import current_app\n'), ((6492, 6552), 'flask.current_app.logger.info', 'current_app.logger.info', (["('Pay api response=' + response.text)"], {}), "('Pay api response=' + response.text)\n", (6515, 6552), False, 'from flask import current_app\n')]
|
import os
import cv2
import sys
import time
import collections
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from config import *
from torch.autograd import Variable
from torch.utils import data
from dataLoader import TestLoader
import fpn_resnet as models
#import fpn_resnet_dcn as models
# c++ version pse based on opencv 3+
from pse import pse
# python pse
# from pypse import pse as pypse
import glob
import shutil
#import onnx
USE_TF = False
class Detector():
def __init__(self, model_path):
# init Model
#self._model = torch.jit.load(model_path)
#self._model = onnx.load(model_path)
self._model = models.resnet50(pretrained=True, num_classes=7, scale=1)
print(len(list(self._model.parameters())))
print(self._model.conv1.weight)
        #return  # debug short-circuit disabled so the checkpoint loading below actually runs
for param in self._model.parameters():
param.requires_grad = False
if torch.cuda.is_available() and GPU:
self._model = self._model.cuda()
else:
self._model = self._model.cpu()
if os.path.isfile(model_path):
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
d = collections.OrderedDict()
for key, value in checkpoint['state_dict'].items():
tmp = key[7:]
d[tmp] = value
self._model.load_state_dict(d)
else:
print("No checkpoint found at '{}'".format(model_path))
self._model.eval()
example = torch.rand(1, 3, 800, 800)
example = Variable(example.cuda())
#torch.onnx.export(self._model, example, "model.proto", verbose=True)
traced_script_module = torch.jit.trace(self._model, (example))
traced_script_module.save("./model.pt")
def detect(self, img):
startTime0 = time.time()
self.bboxes = []
data_loader = TestLoader(img, long_size=DETE_IMG_SIZE)
test_loader = torch.utils.data.DataLoader(
data_loader,
batch_size=1,
shuffle=False,
num_workers=2,
drop_last=True)
for idx, (scale, img) in enumerate(test_loader):
if torch.cuda.is_available() and GPU:
#img = Variable(img.cuda(), volatile=True)
img = Variable(img.cuda())
else:
#img = Variable(img.cpu(), volatile=True)
img = Variable(img.cpu())
#org_img = org_img.numpy().astype('uint8')[0]
#text_box = org_img.copy()
outputs = self._model(img)
score = torch.sigmoid(outputs[:, 0, :, :])
outputs = (torch.sign(outputs - DETE_BINARY_TH) + 1) / 2
text = outputs[:, 0, :, :]
kernels = outputs[:, 0:7, :, :] * text
score = score.data.cpu().numpy()[0].astype(np.float32)
text = text.data.cpu().numpy()[0].astype(np.uint8)
kernels = kernels.data.cpu().numpy()[0].astype(np.uint8)
cv2.imwrite("./7.jpg", kernels[0]*255)
#cv2.imwrite("./6.jpg", kernels[1]*255)
#cv2.imwrite("./5.jpg", kernels[2]*255)
#cv2.imwrite("./4.jpg", kernels[3]*255)
#cv2.imwrite("./3.jpg", kernels[4]*255)
#cv2.imwrite("./2.jpg", kernels[5]*255)
#cv2.imwrite("./1.jpg", kernels[6]*255)
if USE_TF:
mask_res, label_values = pse(kernels, 5.0)
mask_res = np.array(mask_res)
mask_res_resized = cv2.resize(mask_res, (mask_res.shape[1], mask_res.shape[0]), interpolation=cv2.INTER_NEAREST)
boxes = []
for label_value in label_values:
#(y,x)
points = np.argwhere(mask_res_resized==label_value)
points = points[:, (1,0)]
rect = cv2.minAreaRect(points)
box = cv2.boxPoints(rect) / (scale, scale)
box = box.astype('int32')
self.bboxes.append(box.reshape(-1))
return
# c++ version pse
pred = pse(kernels, 5.0)
# python version pse
# pred = pypse(kernels, 5.0)
if(len(pred) == 0):
continue
self.bboxes = pred
#print(self.bboxes, scale)
#self.bboxes = self.bboxes / scale
#self.bboxes = self.bboxes.astype('int32').tolist()
# label = pred
# label_num = np.max(label) + 1
# whereup = 0
# startTime = time.time()
# for i in range(1, label_num):
# startTime1 = time.time()
# points = np.array(np.where(label == i)).transpose((1, 0))[:, ::-1]
# whereup = whereup + time.time() - startTime1
# if points.shape[0] < DETE_MIN_AREA:
# continue
# score_i = np.mean(score[label == i])
# if score_i < DETE_MIN_SCORE:
# continue
# #if i == 2:
# #print(points)
# rect = cv2.minAreaRect(points)
# bbox = cv2.boxPoints(rect) / (scale/2, scale/2)
# bbox = bbox.astype('int32')
# self.bboxes.append(bbox.reshape(-1))
print("Later:", time.time() - startTime)
print("Total:", time.time() - startTime0)
#print(bboxes)
def draw_bbox(img, bboxes, output_path):
for bbox in bboxes:
cv2.drawContours(img, [bbox.reshape(4, 2)], -1, (0, 255, 0), 2)
cv2.imwrite(output_path, img)
if __name__ == '__main__':
output_Path = "./TestResult/"
if os.path.exists(output_Path):
shutil.rmtree(output_Path)
os.makedirs(output_Path)
model_path = "./checkpoints/checkpoint.pth.tar"
#model_path = "./model.pt"
dete_line = Detector(model_path)
imgList = glob.glob(os.path.join("./TestImgs/", "*.jpg"))
startTime = time.time()
for img_path in imgList:
print(img_path)
img = cv2.imread(img_path)
if torch.cuda.is_available() and GPU:
torch.cuda.synchronize()
        start = time.time()  # start timing regardless of whether CUDA synchronized above
dete_line.detect(img)
fileSave = output_Path + os.path.split(img_path)[1]
draw_bbox(img, dete_line.bboxes, fileSave)
if torch.cuda.is_available() and GPU:
torch.cuda.synchronize()
        end = time.time()  # stop timing regardless of whether CUDA synchronized above
print("Time is {0}s".format(end - start))
print("Total is {0}s".format(time.time() - startTime))
|
[
"torch.cuda.synchronize",
"os.path.isfile",
"cv2.boxPoints",
"cv2.minAreaRect",
"shutil.rmtree",
"os.path.join",
"torch.utils.data.DataLoader",
"cv2.imwrite",
"torch.load",
"os.path.exists",
"torch.sign",
"cv2.resize",
"torch.jit.trace",
"fpn_resnet.resnet50",
"torch.cuda.is_available",
"torch.rand",
"numpy.argwhere",
"os.makedirs",
"dataLoader.TestLoader",
"time.time",
"cv2.imread",
"torch.sigmoid",
"numpy.array",
"pse.pse",
"collections.OrderedDict",
"os.path.split"
] |
[((5724, 5753), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'img'], {}), '(output_path, img)\n', (5735, 5753), False, 'import cv2\n'), ((5827, 5854), 'os.path.exists', 'os.path.exists', (['output_Path'], {}), '(output_Path)\n', (5841, 5854), False, 'import os\n'), ((5895, 5919), 'os.makedirs', 'os.makedirs', (['output_Path'], {}), '(output_Path)\n', (5906, 5919), False, 'import os\n'), ((6128, 6139), 'time.time', 'time.time', ([], {}), '()\n', (6137, 6139), False, 'import time\n'), ((698, 754), 'fpn_resnet.resnet50', 'models.resnet50', ([], {'pretrained': '(True)', 'num_classes': '(7)', 'scale': '(1)'}), '(pretrained=True, num_classes=7, scale=1)\n', (713, 754), True, 'import fpn_resnet as models\n'), ((1110, 1136), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (1124, 1136), False, 'import os\n'), ((1575, 1601), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(800)', '(800)'], {}), '(1, 3, 800, 800)\n', (1585, 1601), False, 'import torch\n'), ((1754, 1791), 'torch.jit.trace', 'torch.jit.trace', (['self._model', 'example'], {}), '(self._model, example)\n', (1769, 1791), False, 'import torch\n'), ((1904, 1915), 'time.time', 'time.time', ([], {}), '()\n', (1913, 1915), False, 'import time\n'), ((1963, 2003), 'dataLoader.TestLoader', 'TestLoader', (['img'], {'long_size': 'DETE_IMG_SIZE'}), '(img, long_size=DETE_IMG_SIZE)\n', (1973, 2003), False, 'from dataLoader import TestLoader\n'), ((2026, 2130), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data_loader'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(2)', 'drop_last': '(True)'}), '(data_loader, batch_size=1, shuffle=False,\n num_workers=2, drop_last=True)\n', (2053, 2130), False, 'import torch\n'), ((5864, 5890), 'shutil.rmtree', 'shutil.rmtree', (['output_Path'], {}), '(output_Path)\n', (5877, 5890), False, 'import shutil\n'), ((6074, 6110), 'os.path.join', 'os.path.join', (['"""./TestImgs/"""', '"""*.jpg"""'], {}), "('./TestImgs/', '*.jpg')\n", (6086, 6110), False, 'import os\n'), ((6207, 6227), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (6217, 6227), False, 'import cv2\n'), ((6340, 6351), 'time.time', 'time.time', ([], {}), '()\n', (6349, 6351), False, 'import time\n'), ((6596, 6607), 'time.time', 'time.time', ([], {}), '()\n', (6605, 6607), False, 'import time\n'), ((960, 985), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (983, 985), False, 'import torch\n'), ((1163, 1228), 'torch.load', 'torch.load', (['model_path'], {'map_location': '(lambda storage, loc: storage)'}), '(model_path, map_location=lambda storage, loc: storage)\n', (1173, 1228), False, 'import torch\n'), ((1245, 1270), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1268, 1270), False, 'import collections\n'), ((2674, 2708), 'torch.sigmoid', 'torch.sigmoid', (['outputs[:, 0, :, :]'], {}), '(outputs[:, 0, :, :])\n', (2687, 2708), False, 'import torch\n'), ((3081, 3121), 'cv2.imwrite', 'cv2.imwrite', (['"""./7.jpg"""', '(kernels[0] * 255)'], {}), "('./7.jpg', kernels[0] * 255)\n", (3092, 3121), False, 'import cv2\n'), ((4219, 4236), 'pse.pse', 'pse', (['kernels', '(5.0)'], {}), '(kernels, 5.0)\n', (4222, 4236), False, 'from pse import pse\n'), ((6248, 6273), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6271, 6273), False, 'import torch\n'), ((6299, 6323), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6321, 6323), False, 'import torch\n'), ((6506, 6531), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (6529, 6531), False, 'import torch\n'), ((6557, 6581), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6579, 6581), False, 'import torch\n'), ((2261, 2286), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2284, 2286), False, 'import torch\n'), ((3496, 3513), 'pse.pse', 'pse', (['kernels', '(5.0)'], {}), '(kernels, 5.0)\n', (3499, 3513), False, 'from pse import pse\n'), ((3541, 3559), 'numpy.array', 'np.array', (['mask_res'], {}), '(mask_res)\n', (3549, 3559), True, 'import numpy as np\n'), ((3595, 3693), 'cv2.resize', 'cv2.resize', (['mask_res', '(mask_res.shape[1], mask_res.shape[0])'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask_res, (mask_res.shape[1], mask_res.shape[0]), interpolation=\n cv2.INTER_NEAREST)\n', (3605, 3693), False, 'import cv2\n'), ((6416, 6439), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (6429, 6439), False, 'import os\n'), ((6691, 6702), 'time.time', 'time.time', ([], {}), '()\n', (6700, 6702), False, 'import time\n'), ((2732, 2768), 'torch.sign', 'torch.sign', (['(outputs - DETE_BINARY_TH)'], {}), '(outputs - DETE_BINARY_TH)\n', (2742, 2768), False, 'import torch\n'), ((3821, 3865), 'numpy.argwhere', 'np.argwhere', (['(mask_res_resized == label_value)'], {}), '(mask_res_resized == label_value)\n', (3832, 3865), True, 'import numpy as np\n'), ((3937, 3960), 'cv2.minAreaRect', 'cv2.minAreaRect', (['points'], {}), '(points)\n', (3952, 3960), False, 'import cv2\n'), ((5466, 5477), 'time.time', 'time.time', ([], {}), '()\n', (5475, 5477), False, 'import time\n'), ((5519, 5530), 'time.time', 'time.time', ([], {}), '()\n', (5528, 5530), False, 'import time\n'), ((3987, 4006), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (4000, 4006), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
import re
from django import template
from django.core.urlresolvers import reverse, NoReverseMatch
from django.contrib.humanize.templatetags.humanize import intcomma
register = template.Library()
@register.simple_tag(takes_context=True)
def active(context, pattern_or_urlname):
try:
pattern = '^' + reverse(pattern_or_urlname) + '$'
except NoReverseMatch:
pattern = pattern_or_urlname
path = context['request'].path
if re.match(pattern, path):
return 'active'
return ''
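# Template usage sketch (illustrative): highlight the current nav item.
#   <li class="{% active 'home' %}">Home</li>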
@register.filter
def currency_format(value):
if value is None or value == '':
return ""
value = intcomma(str("%.2f" % float(value)))
return value[:-3] if value.endswith('.00') else value
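# Filter usage sketch (illustrative): {{ invoice.total|currency_format }}
#   e.g. 1234.5 -> '1,234.50' and 1234.0 -> '1,234'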
|
[
"django.template.Library",
"re.match",
"django.core.urlresolvers.reverse"
] |
[((256, 274), 'django.template.Library', 'template.Library', ([], {}), '()\n', (272, 274), False, 'from django import template\n'), ((531, 554), 're.match', 're.match', (['pattern', 'path'], {}), '(pattern, path)\n', (539, 554), False, 'import re\n'), ((391, 418), 'django.core.urlresolvers.reverse', 'reverse', (['pattern_or_urlname'], {}), '(pattern_or_urlname)\n', (398, 418), False, 'from django.core.urlresolvers import reverse, NoReverseMatch\n')]
|
"""
qm8 dataset loader.
"""
import os
import deepchem
import logging
logger = logging.getLogger(__name__)
DEFAULT_DIR = deepchem.utils.get_data_dir()
GDB8_URL = 'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/gdb8.tar.gz'
QM8_CSV_URL = 'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/qm8.csv'
def load_qm8(featurizer='CoulombMatrix',
split='random',
reload=True,
move_mean=True,
data_dir=None,
save_dir=None,
**kwargs):
"""Load QM8 Datasets
The QM8 is the dataset used in a study on modeling quantum
mechanical calculations of electronic spectra and excited
state energy of small molecules. Multiple methods, including
time-dependent density functional theories (TDDFT) and
second-order approximate coupled-cluster (CC2), are applied to
a collection of molecules that include up to eight heavy atoms
(also a subset of the GDB-17 database). In our collection,
there are four excited state properties calculated by four
different methods on 22 thousand samples:
S_0 -> S_1 transition energy E_1 and the corresponding oscillator strength f_1
S_0 -> S_2 transition energy E_2 and the corresponding oscillator strength f_2
The source data files (downloadable from moleculenet.ai):
qm8.sdf: molecular structures
qm8.sdf.csv: tables for molecular properties
Column 1: Molecule ID (gdb9 index) mapping to the .sdf file
Columns 2-5: RI-CC2/def2TZVP; E1, E2, f1, f2 in atomic units. f1, f2 in length representation
Columns 6-9: LR-TDPBE0/def2SVP; E1, E2, f1, f2 in atomic units. f1, f2 in length representation
Columns 10-13: LR-TDPBE0/def2TZVP; E1, E2, f1, f2 in atomic units. f1, f2 in length representation
Columns 14-17: LR-TDCAM-B3LYP/def2TZVP; E1, E2, f1, f2 in atomic units. f1, f2 in length representation
Reference:
Blum, <NAME>., and <NAME>. "970 million druglike small molecules for virtual screening in the chemical universe database GDB-13." Journal of the American Chemical Society 131.25 (2009): 8732-8733.
<NAME>, et al. "Electronic spectra from TDDFT and machine learning in chemical space." The Journal of chemical physics 143.8 (2015): 084111.
"""
qm8_tasks = [
"E1-CC2", "E2-CC2", "f1-CC2", "f2-CC2", "E1-PBE0", "E2-PBE0", "f1-PBE0",
"f2-PBE0", "E1-PBE0", "E2-PBE0", "f1-PBE0", "f2-PBE0", "E1-CAM", "E2-CAM",
"f1-CAM", "f2-CAM"
]
if data_dir is None:
data_dir = DEFAULT_DIR
if save_dir is None:
save_dir = DEFAULT_DIR
if reload:
save_folder = os.path.join(save_dir, "qm8-featurized")
if not move_mean:
save_folder = os.path.join(save_folder, str(featurizer) + "_mean_unmoved")
else:
save_folder = os.path.join(save_folder, str(featurizer))
if featurizer == "smiles2img":
img_spec = kwargs.get("img_spec", "std")
save_folder = os.path.join(save_folder, img_spec)
save_folder = os.path.join(save_folder, str(split))
loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(
save_folder)
if loaded:
return qm8_tasks, all_dataset, transformers
if featurizer in ['CoulombMatrix', 'BPSymmetryFunctionInput', 'MP', 'Raw']:
dataset_file = os.path.join(data_dir, "qm8.sdf")
if not os.path.exists(dataset_file):
deepchem.utils.download_url(url=GDB8_URL, dest_dir=data_dir)
deepchem.utils.untargz_file(
os.path.join(data_dir, 'gdb8.tar.gz'), data_dir)
else:
dataset_file = os.path.join(data_dir, "qm8.csv")
if not os.path.exists(dataset_file):
deepchem.utils.download_url(url=QM8_CSV_URL, dest_dir=data_dir)
if featurizer in ['CoulombMatrix', 'BPSymmetryFunctionInput', 'MP', 'Raw']:
if featurizer == 'CoulombMatrix':
featurizer = deepchem.feat.CoulombMatrix(26)
elif featurizer == 'BPSymmetryFunctionInput':
featurizer = deepchem.feat.BPSymmetryFunctionInput(26)
elif featurizer == 'Raw':
featurizer = deepchem.feat.RawFeaturizer()
elif featurizer == 'MP':
featurizer = deepchem.feat.WeaveFeaturizer(
graph_distance=False, explicit_H=True)
loader = deepchem.data.SDFLoader(tasks=qm8_tasks, featurizer=featurizer)
else:
if featurizer == 'ECFP':
featurizer = deepchem.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = deepchem.feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = deepchem.feat.WeaveFeaturizer()
elif featurizer == "smiles2img":
img_spec = kwargs.get("img_spec", "std")
img_size = kwargs.get("img_size", 80)
featurizer = deepchem.feat.SmilesToImage(
img_size=img_size, img_spec=img_spec)
loader = deepchem.data.CSVLoader(
tasks=qm8_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file)
if split == None:
raise ValueError()
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'stratified': deepchem.splits.SingletaskStratifiedSplitter(task_number=0)
}
splitter = splitters[split]
frac_train = kwargs.get("frac_train", 0.8)
frac_valid = kwargs.get('frac_valid', 0.1)
frac_test = kwargs.get('frac_test', 0.1)
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset,
frac_train=frac_train,
frac_valid=frac_valid,
frac_test=frac_test)
transformers = [
deepchem.trans.NormalizationTransformer(
transform_y=True, dataset=train_dataset, move_mean=move_mean)
]
for transformer in transformers:
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
if reload:
deepchem.utils.save.save_dataset_to_disk(
save_folder, train_dataset, valid_dataset, test_dataset, transformers)
return qm8_tasks, (train_dataset, valid_dataset, test_dataset), transformers
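# Usage sketch (illustrative):
#   tasks, (train, valid, test), transformers = load_qm8(featurizer='GraphConv', split='random')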
|
[
"deepchem.utils.get_data_dir",
"deepchem.feat.WeaveFeaturizer",
"deepchem.data.SDFLoader",
"deepchem.splits.RandomSplitter",
"os.path.join",
"deepchem.utils.download_url",
"deepchem.utils.save.load_dataset_from_disk",
"deepchem.feat.RawFeaturizer",
"deepchem.feat.CircularFingerprint",
"os.path.exists",
"deepchem.data.CSVLoader",
"deepchem.feat.BPSymmetryFunctionInput",
"deepchem.splits.IndexSplitter",
"deepchem.utils.save.save_dataset_to_disk",
"deepchem.feat.CoulombMatrix",
"deepchem.feat.SmilesToImage",
"deepchem.feat.ConvMolFeaturizer",
"deepchem.trans.NormalizationTransformer",
"deepchem.splits.SingletaskStratifiedSplitter",
"logging.getLogger"
] |
[((86, 113), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (103, 113), False, 'import logging\n'), ((131, 160), 'deepchem.utils.get_data_dir', 'deepchem.utils.get_data_dir', ([], {}), '()\n', (158, 160), False, 'import deepchem\n'), ((2625, 2665), 'os.path.join', 'os.path.join', (['save_dir', '"""qm8-featurized"""'], {}), "(save_dir, 'qm8-featurized')\n", (2637, 2665), False, 'import os\n'), ((3089, 3144), 'deepchem.utils.save.load_dataset_from_disk', 'deepchem.utils.save.load_dataset_from_disk', (['save_folder'], {}), '(save_folder)\n', (3131, 3144), False, 'import deepchem\n'), ((3323, 3356), 'os.path.join', 'os.path.join', (['data_dir', '"""qm8.sdf"""'], {}), "(data_dir, 'qm8.sdf')\n", (3335, 3356), False, 'import os\n'), ((3592, 3625), 'os.path.join', 'os.path.join', (['data_dir', '"""qm8.csv"""'], {}), "(data_dir, 'qm8.csv')\n", (3604, 3625), False, 'import os\n'), ((4250, 4313), 'deepchem.data.SDFLoader', 'deepchem.data.SDFLoader', ([], {'tasks': 'qm8_tasks', 'featurizer': 'featurizer'}), '(tasks=qm8_tasks, featurizer=featurizer)\n', (4273, 4313), False, 'import deepchem\n'), ((4837, 4928), 'deepchem.data.CSVLoader', 'deepchem.data.CSVLoader', ([], {'tasks': 'qm8_tasks', 'smiles_field': '"""smiles"""', 'featurizer': 'featurizer'}), "(tasks=qm8_tasks, smiles_field='smiles', featurizer=\n featurizer)\n", (4860, 4928), False, 'import deepchem\n'), ((5060, 5091), 'deepchem.splits.IndexSplitter', 'deepchem.splits.IndexSplitter', ([], {}), '()\n', (5089, 5091), False, 'import deepchem\n'), ((5110, 5142), 'deepchem.splits.RandomSplitter', 'deepchem.splits.RandomSplitter', ([], {}), '()\n', (5140, 5142), False, 'import deepchem\n'), ((5165, 5224), 'deepchem.splits.SingletaskStratifiedSplitter', 'deepchem.splits.SingletaskStratifiedSplitter', ([], {'task_number': '(0)'}), '(task_number=0)\n', (5209, 5224), False, 'import deepchem\n'), ((5611, 5717), 'deepchem.trans.NormalizationTransformer', 'deepchem.trans.NormalizationTransformer', ([], {'transform_y': '(True)', 'dataset': 'train_dataset', 'move_mean': 'move_mean'}), '(transform_y=True, dataset=\n train_dataset, move_mean=move_mean)\n', (5650, 5717), False, 'import deepchem\n'), ((5957, 6072), 'deepchem.utils.save.save_dataset_to_disk', 'deepchem.utils.save.save_dataset_to_disk', (['save_folder', 'train_dataset', 'valid_dataset', 'test_dataset', 'transformers'], {}), '(save_folder, train_dataset,\n valid_dataset, test_dataset, transformers)\n', (5997, 6072), False, 'import deepchem\n'), ((2953, 2988), 'os.path.join', 'os.path.join', (['save_folder', 'img_spec'], {}), '(save_folder, img_spec)\n', (2965, 2988), False, 'import os\n'), ((3369, 3397), 'os.path.exists', 'os.path.exists', (['dataset_file'], {}), '(dataset_file)\n', (3383, 3397), False, 'import os\n'), ((3406, 3466), 'deepchem.utils.download_url', 'deepchem.utils.download_url', ([], {'url': 'GDB8_URL', 'dest_dir': 'data_dir'}), '(url=GDB8_URL, dest_dir=data_dir)\n', (3433, 3466), False, 'import deepchem\n'), ((3638, 3666), 'os.path.exists', 'os.path.exists', (['dataset_file'], {}), '(dataset_file)\n', (3652, 3666), False, 'import os\n'), ((3675, 3738), 'deepchem.utils.download_url', 'deepchem.utils.download_url', ([], {'url': 'QM8_CSV_URL', 'dest_dir': 'data_dir'}), '(url=QM8_CSV_URL, dest_dir=data_dir)\n', (3702, 3738), False, 'import deepchem\n'), ((3879, 3910), 'deepchem.feat.CoulombMatrix', 'deepchem.feat.CoulombMatrix', (['(26)'], {}), '(26)\n', (3906, 3910), False, 'import deepchem\n'), ((4373, 4417), 'deepchem.feat.CircularFingerprint', 
'deepchem.feat.CircularFingerprint', ([], {'size': '(1024)'}), '(size=1024)\n', (4406, 4417), False, 'import deepchem\n'), ((3514, 3551), 'os.path.join', 'os.path.join', (['data_dir', '"""gdb8.tar.gz"""'], {}), "(data_dir, 'gdb8.tar.gz')\n", (3526, 3551), False, 'import os\n'), ((3982, 4023), 'deepchem.feat.BPSymmetryFunctionInput', 'deepchem.feat.BPSymmetryFunctionInput', (['(26)'], {}), '(26)\n', (4019, 4023), False, 'import deepchem\n'), ((4475, 4508), 'deepchem.feat.ConvMolFeaturizer', 'deepchem.feat.ConvMolFeaturizer', ([], {}), '()\n', (4506, 4508), False, 'import deepchem\n'), ((4075, 4104), 'deepchem.feat.RawFeaturizer', 'deepchem.feat.RawFeaturizer', ([], {}), '()\n', (4102, 4104), False, 'import deepchem\n'), ((4562, 4593), 'deepchem.feat.WeaveFeaturizer', 'deepchem.feat.WeaveFeaturizer', ([], {}), '()\n', (4591, 4593), False, 'import deepchem\n'), ((4155, 4223), 'deepchem.feat.WeaveFeaturizer', 'deepchem.feat.WeaveFeaturizer', ([], {'graph_distance': '(False)', 'explicit_H': '(True)'}), '(graph_distance=False, explicit_H=True)\n', (4184, 4223), False, 'import deepchem\n'), ((4745, 4810), 'deepchem.feat.SmilesToImage', 'deepchem.feat.SmilesToImage', ([], {'img_size': 'img_size', 'img_spec': 'img_spec'}), '(img_size=img_size, img_spec=img_spec)\n', (4772, 4810), False, 'import deepchem\n')]
|
# ===================================================================================== #
# Module for solving Ising models exactly.
#
# Distributed with ConIII.
#
# NOTE: This code needs cleanup.
#
# Author : <NAME>, <EMAIL>
# ===================================================================================== #
#
# MIT License
#
# Copyright (c) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import mpmath as mp
import scipy.special as ss
from itertools import combinations
import sys
np.set_printoptions(threshold=sys.maxsize)
def write_eqns(n, sym, corrTermsIx, suffix='', high_prec=False):
"""Create strings for writing out the equations and then write them to file.
TODO: This code needs some cleanup.
Parameters
----------
n : int
number of spins
sym : int
value of 1 will use {-1,1} formulation, 0 means {0,1}
corrTermsIx : list of ndarrays
Allows specification of arbitrary correlations to constrain using an index based
structure. These should be index arrays as would be returned by np.where that
specify which correlations to write down. Each consecutive array should specify
a matrix of sequentially increasing dimension.
[Nx1, NxN, NxNxN, ...]
suffix : str, ''
high_prec : bool, False
"""
import re
assert sym in [0,1], "sym argument must be 0 or 1."
abc = '<KEY>'
expterms = [] # 2**N exponential corrTermsIx
binstates = [] # all binary states as strings
signs = [] # coefficient for all numerator terms when computing correlations
br = "[]"
ix0 = 0
# default suffix for high precision files
if high_prec:
suffix += '_hp'
# Collect all corrTermsIx in the partition function.
for state in range(2**n):
binstates.append("{0:b}".format(state))
if len(binstates[state])<n:
binstates[state] = "0"*(n-len(binstates[state])) + binstates[state]
expterms.append( '' )
# Get corrTermsIx corresponding to each of the ith order term.
if sym:
for i in range(len(corrTermsIx)):
expterms[state] += get_terms11(corrTermsIx[i], abc[i], binstates[state], br, ix0)
else:
for i in range(len(corrTermsIx)):
expterms[state] += get_terms01(corrTermsIx[i], abc[i], binstates[state], br, ix0)
expterms[state] = re.sub(r'\+0\+','+', expterms[state])
expterms[state] = re.sub(r'\)\+0',')', expterms[state])
expterms[state] += ', '
# Collect all terms with corresponding prefix in the equation to solve.
for state in range(2**n):
for i in range(len(corrTermsIx)):
if state==0:
signs.append([])
# Get corrTermsIx corresponding to each of the ith order term.
if sym:
signs_ = _compute_signs(corrTermsIx[i], expterms[state], binstates[state])
else:
signs_ = _compute_signs(corrTermsIx[i], expterms[state], binstates[state], False)
# expand the length of signs if we haven't reached those constraints yet before
if len(signs[i])<signs_.size:
for j in range(signs_.size-len(signs[i])):
signs[i].append(np.zeros(0, dtype=int))
for j in range(signs_.size):
signs[i][j] = np.append(signs[i][j], signs_[j])
Z = ''.join(expterms)
    # Account for the fact that the symmetric version has inverted the order of the states.
if sym:
extra = '\n Pout = Pout[::-1]'
else:
extra = ''
# write to files
write_py(n, sym, corrTermsIx, signs, expterms, Z,
extra=extra,
suffix=suffix,
high_prec=high_prec)
def write_py(n, sym, contraintTermsIx, signs, expterms, Z,
extra='',
suffix='',
high_prec=False):
"""
Write out Ising equations for Python.
Parameters
----------
n : int
System size.
contraintTermsIx : list of str
signs : list of ndarray
Sign for each term in the numerator when computing correlations.
expterms : list of str
Every single energy term.
Z : str
Energies for all states that will be put into partition function.
extra : str, ''
any extra lines to add at the end
suffix : str, ''
high_prec : bool, False
If True, write version that uses mpmath for high precision calculations.
"""
import time
import os
abc = 'HJKLMNOPQRSTUVWXYZABCDE'
fname = 'ising_eqn/ising_eqn_%d%s.py'%(n,suffix)
print("Generating file ./%s"%fname)
if not os.path.isdir('./ising_eqn'):
os.makedirs('./ising_eqn')
f = open(fname,'w')
# insert license
try:
license = open('LICENSE.txt','r').readlines()
for el in license:
el = '# '+el
f.write(el)
f.write('\n')
except FileNotFoundError:
print("License file not found...")
f.write("# Equations for %d-spin Ising model.\n\n"%n)
f.write("# ")
f.write(time.strftime("Written on %Y/%m/%d.")+"\n")
if high_prec:
f.write("from numpy import zeros, array, prod\n")
f.write("from ..enumerate import mp_fast_logsumexp as fast_logsumexp\n")
f.write("from mpmath import exp, isnan\n\n")
else:
f.write("from numpy import zeros, exp, array, prod, isnan\n")
f.write("from ..enumerate import fast_logsumexp\n\n")
# Keep these as string because they need to grow in the loop and then can just be
# added all at once at the end.
fargs = "def calc_observables(params):\n"
if high_prec:
vardec = ' Cout = zeros(('+str(sum([len(i) for i in signs]))+'), dtype=object)\n' # string of variable declarations
else:
vardec = ' Cout = zeros(('+str(sum([len(i) for i in signs]))+'))\n' # string of variable declarations
eqns = '' # string of equations to compute
ix = np.hstack(( 0, np.cumsum([len(i) for i in signs]) ))
for i in range(len(contraintTermsIx)):
vardec += ' '+abc[i]+' = params['+str(ix[i])+':'+str(ix[i+1])+']\n'
if sym:
k = 0
for i in range(len(contraintTermsIx)):
for j in range(len(signs[i])):
eqns += (" num = fast_logsumexp(energyTerms, "+
str(signs[i][j]).replace('1 ','1,').replace('1\n','1,\n')+
")\n Cout["+str(k)+"] = exp( num[0] - logZ ) * num[1]\n")
k += 1
else:
k = 0
for i in range(len(contraintTermsIx)):
for j in range(len(signs[i])):
eqns += (" num = fast_logsumexp(energyTerms, "+
str(signs[i][j]).replace('0 ','0,').replace('1 ','1,').replace('0\n','0,\n').replace('1\n','1,\n')+
")\n Cout["+str(k)+"] = exp( num[0] - logZ ) * num[1]\n")
k += 1
# Write out correlation terms
f.write(fargs)
f.write((" \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n"+
" Returns all correlations.\n \"\"\"\n"))
f.write(vardec)
_write_energy_terms(f, Z)
f.write(eqns)
if high_prec:
f.write(" for i in range(Cout.size):\n if isnan(Cout[i]):\n Cout[i] = 0.\n")
else:
f.write(" Cout[isnan(Cout)] = 0.\n")
f.write(" return(Cout)\n\n")
# Write equations for probabilities of all states.
#f.write("def p("+string.join([i+"," for i in abc[:len(contraintTermsIx)]])+"):\n")
f.write("def p(params):\n")
f.write((" \"\"\"\n Give all parameters concatenated into one array from lowest to highest order.\n"+
" Returns probabilities of all configurations.\n \"\"\"\n"))
f.write(vardec)
# Output variable decs and put params into explicit parameters.
ix = np.hstack(( 0, np.cumsum([len(i) for i in signs]) ))
vardec = ''
for i in range(len(contraintTermsIx)):
vardec += ' '+abc[i]+' = params['+str(ix[i])+':'+str(ix[i+1])+']\n'
if high_prec:
vardec += ' Pout = zeros(('+str(2**n)+'), dtype=object)\n' # string of variable declarations
else:
vardec += ' Pout = zeros(('+str(2**n)+'))\n' # string of variable declarations
f.write(vardec)
_write_energy_terms(f, Z)
# each probability equation
for i in range(len(expterms)):
f.write(' Pout['+str(i)+'] = exp( '+expterms[i][:-2]+' - logZ )\n')
f.write(extra)
f.write("\n return(Pout)\n")
f.close()
def _write_energy_terms(f, Z):
"""Split expression for energy terms for each term in Z into multiple lines and write
out nicely into file.
Parameters
----------
f : file
Z : list of str
Energy terms to write out.
"""
f.write(' energyTerms = array([')
i=0
while i<len(Z):
iend=i+100
# end line on a +
while iend<len(Z) and Z[iend-1]!='+':
iend+=1
if iend>=len(Z):
# ignore comma at end of line
f.write(' '+Z[i:-1]+'])\n logZ = fast_logsumexp(energyTerms)[0]\n')
else:
f.write(' '+Z[i:iend]+'\n')
i=iend
def _compute_signs(subix, expterm, binstate, sym=True):
"""Iterate through terms that belong in the numerator for each constraint and keep
track of the sign of those terms.
Parameters
----------
subix : list
expterm : list of str
binstate : list of str
sym : bool, True
Returns
-------
ndarray
Sign of each exponential term in numerator.
"""
if len(subix)==0:
return
if sym:
downSpin = -1
signs = np.ones(len(subix[0]), dtype=int)
for i in range(len(subix[0])):
if np.mod( sum([binstate[k[i]]=="1" for k in subix]),2 ):
signs[i] = downSpin
else:
downSpin = 0
signs = np.ones(len(subix[0]), dtype=int)
for i in range(len(subix[0])):
if np.mod( any([binstate[k[i]]=="0" for k in subix]),2 ):
signs[i] = downSpin
return signs
def get_terms11(subix, prefix, binstate, br, ix0):
"""
Specific to {-1,1}.
"""
j = 0
s = ''
if len(subix)==0:
return s
for i in range(len(subix[0])):
if np.mod( sum([binstate[k[j]]=="1" for k in subix]),2 ):
s += '-'
else:
s += '+'
s += prefix+br[0]+str(j+ix0)+br[1]
j += 1
return s
def get_terms01(subix, prefix, binstate, br, ix0):
"""
Specific to {0,1}.
"""
j = 0
s = ''
if len(subix)==0:
return s
for i in range(len(subix[0])):
if np.all( [binstate[k[j]]=="1" for k in subix] ):
s += '+'+prefix+br[0]+str(j+ix0)+br[1]
j += 1
if s=='':
s = '+0'
return s
def get_terms(subix, prefix, binstate, br, ix0):
"""
Spins are put in explicitly
"""
j = 0
s = ''
if len(subix)==0:
return s
for i in range(len(subix[0])):
s += '+'+prefix+br[0]+str(j+ix0)+br[1]
for k in range(len(subix)):
s += '*s'+br[0]+str(subix[k][i])+br[1]
j += 1
if s=='':
s = '+0'
return s
def get_3idx(n):
"""Get binary 3D matrix with truth values where index values correspond to the index
of all possible ijk parameters. We can do this by recognizing that the pattern along
each plane in the third dimension is like the upper triangle pattern that just moves
up and over by one block each cut lower into the box.
"""
b = np.zeros((n,n,n))
c = np.triu(np.ones((n-1,n-1))==1,1)
for i in range(n-1):
# shunt this diagonal matrix over every descent into a lower plane in the box
# the plane xz
if i==0:
b[i,(1+i):,(1+i):] = c
else:
b[i,(1+i):,(1+i):] = c[:-i,:-i]
return b
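# Worked example: for n = 4 the truth values of get_3idx mark every ordered
# triple i < j < k, so np.where(get_3idx(4)) returns the indices of
# (0,1,2), (0,1,3), (0,2,3) and (1,2,3).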
def get_nidx(k, n):
"""
Get the kth order indices corresponding to all the states in which k elements
are firing up out of n spins. The ordering correspond to that returned by
bin_states().
One can check this code for correctness by comparing with get_3idx()
    >>> print(np.where(get_3idx(4)))
    >>> print(np.where(get_nidx(3, 4)))
"""
if k==n:
return np.reshape(list(range(n)),(n,1))
elif k<n:
allStates = bin_states(n)
statesix = np.sum(allStates,1)==k
ix = []
for s in allStates[statesix,:]:
j = 0
for i in np.argwhere(s==1).flatten():
if len(ix)<(j+1):
ix.append([])
ix[j].append(i)
j += 1
return np.array(ix)[:,::-1] # make sure last idx increases first
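# NOTE: get_nidx above calls bin_states, which is not defined in this file and
# is presumably imported elsewhere in the original package. A minimal sketch,
# assuming it enumerates all 2**n binary states as rows of a (2**n, n) array:
def bin_states(n, sym=False):
    states = np.vstack([list(np.binary_repr(i, width=n))
                       for i in range(2**n)]).astype(int)
    if sym:
        return states*2 - 1  # map {0,1} -> {-1,1}
    return states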
def pairwise(n, sym=0, **kwargs):
"""Wrapper for writing pairwise maxent model (Ising) files.
Parameters
----------
n : int
System size.
sym : int, 0
Can be 0 or 1.
**kwargs
Returns
-------
None
"""
assert sym==0 or sym==1
print("Writing equations for pairwise Ising model with %d spins."%n)
if sym:
write_eqns(n, sym, [np.where(np.ones((n))==1),
np.where(np.triu(np.ones((n,n)),k=1)==1)],
suffix='_sym',
**kwargs)
else:
write_eqns(n, sym, [np.where(np.ones((n))==1),
np.where(np.triu(np.ones((n,n)),k=1)==1)],
**kwargs)
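# Example: pairwise(3, sym=1) writes ./ising_eqn/ising_eqn_3_sym.py containing
# calc_observables and p for the 3-spin pairwise model in the {-1,1} basis.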
def triplet(n, sym=0, **kwargs):
"""Wrapper for writing triplet-order maxent model.
Parameters
----------
n : int
System size.
sym : int, 0
Can be 0 or 1.
**kwargs
Returns
-------
None
"""
assert sym==0 or sym==1
print("Writing equations for Ising model with triplet interactions and %d spins."%n)
if sym:
write_eqns(n,sym,[(range(n),),
list(zip(*list(combinations(range(n),2)))),
list(zip(*list(combinations(range(n),3))))],
suffix='_sym_triplet',
**kwargs)
else:
write_eqns(n,sym,[(range(n),),
list(zip(*list(combinations(range(n),2)))),
list(zip(*list(combinations(range(n),3))))],
suffix='_triplet',
**kwargs)
def _write_matlab(n, terms, fitterms, expterms, Z, suffix=''):
"""
    DEPRECATED: kept here for future reference.
Write out equations to solve for matlab.
"""
import time
abc = 'HJKLMNOPQRSTUVWXYZABCDE'
vardec = ''
# Write function to solve to file.
f = open('ising_eqn_%d%s.m'%(n,suffix),'w')
f.write("% Equations of %d-spin Ising model.\n\n"%n)
f.write(time.strftime("%Y/%m/%d")+"\n")
f.write("% Give each set of parameters concatenated into one array.\n\n")
# Keep these as string because they need to grow in the loop and then can just be
# added all at once at the end.
f.write("function Cout = calc_observables(params)\n")
f.write('\tCout = zeros('+str(sum([len(i) for i in fitterms]))+',1);\n') # string of variable declarations
eqns = '' # string of equations to compute
ix = np.hstack(( 0,np.cumsum([len(i) for i in fitterms]) ))+1
for i in range(len(terms)):
vardec += '\t'+abc[i]+' = params('+str(ix[i])+':'+str(ix[i+1]-1)+');\n'
k = 0
for i in range(len(terms)):
for j in range(len(fitterms[i])):
eqns += "\tCout("+str(k+1)+") = ("+fitterms[i][j]+")/Z;\n"
k += 1
f.write(vardec)
f.write("\tZ = "+Z+";\n")
f.write(eqns)
f.close()
g = open('probs'+str(n)+'.m','w')
g.write("% File for getting the probabilities of Ising model.\n% ")
g.write(time.strftime("%Y/%m/%d")+"\n")
# Write equations for probabilities of all states.
g.write("function Pout = p(params)\n")
g.write(vardec)
g.write(' Pout = zeros('+str(2**n)+',1);\n') # string of variable declarations
g.write(' Z = '+Z+';\n')
for i in range(len(expterms)):
g.write(' Pout('+str(i+1)+') = '+expterms[i]+'/Z;\n')
g.close()
def fast_logsumexp(X, coeffs=None):
"""Simplified version of logsumexp to do correlation calculation in Ising equation
files. Scipy's logsumexp can be around 10x slower in comparison.
Parameters
----------
X : ndarray
Terms inside logs.
coeffs : ndarray
Factors in front of exponentials.
Returns
-------
float
Value of magnitude of quantity inside log (the sum of exponentials).
float
Sign.
"""
Xmx = max(X)
if coeffs is None:
y = np.exp(X-Xmx).sum()
else:
y = np.exp(X-Xmx).dot(coeffs)
if y<0:
return np.log(np.abs(y))+Xmx, -1.
return np.log(y)+Xmx, 1.
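# Worked example: fast_logsumexp returns (log|sum_i c_i exp(X_i)|, sign).
# For X = [log 2, log 3] and coeffs = [1, -1] the sum of exponentials is
# 2 - 3 = -1, so the function returns (log 1, -1.) = (0., -1.).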
def mp_fast_logsumexp(X, coeffs=None):
"""fast_logsumexp for high precision numbers using mpmath.
Parameters
----------
X : ndarray
Terms inside logs.
coeffs : ndarray
Factors in front of exponentials.
Returns
-------
float
Value of magnitude of quantity inside log (the sum of exponentials).
float
Sign.
"""
Xmx = max(X)
if coeffs is None:
y = sum(map(mp.exp, X-Xmx))
else:
y = np.array(coeffs).dot(list(map(mp.exp, X-Xmx)))
if y<0:
return mp.log(abs(y))+Xmx, -1.
return mp.log(y)+Xmx, 1.
if __name__=='__main__':
"""When run with Python, this will write the equations for the Ising model
into file ising_eqn_[n][_sym] where n will be replaced by the system size
and the suffix '_sym' is included if the equations are written in the
{-1,+1} basis.
To write the Ising model equations for a system of size 3 in the {0,1} basis, call
>>> python enumerate.py 3
For the {-1,1} basis, call
>>> python enumerate.py 3 1
To include triplet order interactions, include a 3 at the very end
>>> python enumerate.py 3 0 3
To write high precision, include an '-hp=true' as the last argument.
>>> python enumerate.py 3 0 3 -hp=true
"""
import sys
args = [i for i in sys.argv if '-'!=i[0]]
kwargs = [i for i in sys.argv if '-'==i[0]]
n = int(args[1])
if len(args)==2:
sym = 0
order = 2
elif len(args)==3:
sym = int(args[2])
assert sym==0 or sym==1
order = 2
elif len(args)==4:
sym = int(args[2])
order = int(args[3])
else:
raise Exception("Unrecognized arguments.")
# parse kwargs
if len(kwargs):
if '-hp='==kwargs[0][:4]:
if kwargs[0][4:].lower()=='true':
high_prec = True
elif kwargs[0][4:].lower()=='false':
high_prec = False
else:
raise Exception("Unrecognized value for hp.")
else:
high_prec = False
else:
# default kwargs
high_prec = False
if order==2:
pairwise(n, sym, high_prec=high_prec)
elif order==3:
triplet(n, sym, high_prec=high_prec)
else:
raise NotImplementedError("Only order up to 3 implemented for this convenient interface.")
|
[
"numpy.set_printoptions",
"numpy.sum",
"os.makedirs",
"numpy.log",
"numpy.abs",
"os.path.isdir",
"mpmath.log",
"numpy.zeros",
"time.strftime",
"numpy.ones",
"numpy.append",
"numpy.array",
"numpy.exp",
"numpy.argwhere",
"re.sub",
"numpy.all"
] |
[((1545, 1587), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (1564, 1587), True, 'import numpy as np\n'), ((12751, 12770), 'numpy.zeros', 'np.zeros', (['(n, n, n)'], {}), '((n, n, n))\n', (12759, 12770), True, 'import numpy as np\n'), ((3451, 3490), 're.sub', 're.sub', (['"""\\\\+0\\\\+"""', '"""+"""', 'expterms[state]'], {}), "('\\\\+0\\\\+', '+', expterms[state])\n", (3457, 3490), False, 'import re\n'), ((3515, 3554), 're.sub', 're.sub', (['"""\\\\)\\\\+0"""', '""")"""', 'expterms[state]'], {}), "('\\\\)\\\\+0', ')', expterms[state])\n", (3521, 3554), False, 'import re\n'), ((5717, 5745), 'os.path.isdir', 'os.path.isdir', (['"""./ising_eqn"""'], {}), "('./ising_eqn')\n", (5730, 5745), False, 'import os\n'), ((5755, 5781), 'os.makedirs', 'os.makedirs', (['"""./ising_eqn"""'], {}), "('./ising_eqn')\n", (5766, 5781), False, 'import os\n'), ((11838, 11886), 'numpy.all', 'np.all', (["[(binstate[k[j]] == '1') for k in subix]"], {}), "([(binstate[k[j]] == '1') for k in subix])\n", (11844, 11886), True, 'import numpy as np\n'), ((6150, 6187), 'time.strftime', 'time.strftime', (['"""Written on %Y/%m/%d."""'], {}), "('Written on %Y/%m/%d.')\n", (6163, 6187), False, 'import time\n'), ((12785, 12808), 'numpy.ones', 'np.ones', (['(n - 1, n - 1)'], {}), '((n - 1, n - 1))\n', (12792, 12808), True, 'import numpy as np\n'), ((15954, 15979), 'time.strftime', 'time.strftime', (['"""%Y/%m/%d"""'], {}), "('%Y/%m/%d')\n", (15967, 15979), False, 'import time\n'), ((16962, 16987), 'time.strftime', 'time.strftime', (['"""%Y/%m/%d"""'], {}), "('%Y/%m/%d')\n", (16975, 16987), False, 'import time\n'), ((18012, 18021), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (18018, 18021), True, 'import numpy as np\n'), ((18628, 18637), 'mpmath.log', 'mp.log', (['y'], {}), '(y)\n', (18634, 18637), True, 'import mpmath as mp\n'), ((4419, 4452), 'numpy.append', 'np.append', (['signs[i][j]', 'signs_[j]'], {}), '(signs[i][j], signs_[j])\n', (4428, 4452), True, 'import numpy as np\n'), ((13581, 13601), 'numpy.sum', 'np.sum', (['allStates', '(1)'], {}), '(allStates, 1)\n', (13587, 13601), True, 'import numpy as np\n'), ((13866, 13878), 'numpy.array', 'np.array', (['ix'], {}), '(ix)\n', (13874, 13878), True, 'import numpy as np\n'), ((17878, 17893), 'numpy.exp', 'np.exp', (['(X - Xmx)'], {}), '(X - Xmx)\n', (17884, 17893), True, 'import numpy as np\n'), ((17920, 17935), 'numpy.exp', 'np.exp', (['(X - Xmx)'], {}), '(X - Xmx)\n', (17926, 17935), True, 'import numpy as np\n'), ((18518, 18534), 'numpy.array', 'np.array', (['coeffs'], {}), '(coeffs)\n', (18526, 18534), True, 'import numpy as np\n'), ((17981, 17990), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (17987, 17990), True, 'import numpy as np\n'), ((4324, 4346), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (4332, 4346), True, 'import numpy as np\n'), ((13699, 13718), 'numpy.argwhere', 'np.argwhere', (['(s == 1)'], {}), '(s == 1)\n', (13710, 13718), True, 'import numpy as np\n'), ((14342, 14352), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (14349, 14352), True, 'import numpy as np\n'), ((14541, 14551), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (14548, 14551), True, 'import numpy as np\n'), ((14405, 14420), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (14412, 14420), True, 'import numpy as np\n'), ((14604, 14619), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (14611, 14619), True, 'import numpy as np\n')]
|
from validation.value_validation import ValidateValue
def risk_classification(patient):
classification = RiskFactorAssessment(patient).result_risk_classification()
return classification
def total_risk_factors(patient):
total = RiskFactorAssessment(patient).net_risk_factors()
return total
class Patient(object):
def __init__(self, sex, age, smoker, sedentary, bmi, waist_girth, male_family_death_before_55, female_family_death_before_65, systolic, diastolic, hypertensive, ldl, hdl, using_lipid_lowering_medication, cholesterol, fasting_glucose, oral_glucose_tolerance):
self._sex = ValidateValue(sex).valueIsMaleFemale()
self._age = ValidateValue(age).valueIsPositiveInteger()
self._smoker = ValidateValue(smoker).valueIsYesNo()
self._sedentary = ValidateValue(sedentary).valueIsYesNo()
self._bmi = ValidateValue(bmi).valueIsPositiveInteger()
self._waist_girth = ValidateValue(waist_girth).valueIsPositiveInteger()
self._male_family_death_before_55 = ValidateValue(male_family_death_before_55).valueIsYesNo()
self._female_family_death_before_65 = ValidateValue(female_family_death_before_65).valueIsYesNo()
self._systolic = ValidateValue(systolic).valueIsPositiveInteger()
self._diastolic = ValidateValue(diastolic).valueIsPositiveInteger()
self._hypertensive = ValidateValue(hypertensive).valueIsYesNo()
self._ldl = ValidateValue(ldl).valueIsPositiveInteger()
self._hdl = ValidateValue(hdl).valueIsPositiveInteger()
self._using_lipid_lowering_medication = ValidateValue(using_lipid_lowering_medication).valueIsYesNo()
self._cholesterol = ValidateValue(cholesterol).valueIsPositiveInteger()
self._fasting_glucose = ValidateValue(fasting_glucose).valueIsPositiveInteger()
self._oral_glucose_tolerance = ValidateValue(oral_glucose_tolerance).valueIsPositiveInteger()
class RiskFactorAssessment(object):
def __init__(self, patient):
self._patient = patient
def result_risk_classification(self):
classification = RiskFactorAssessmentClassification().patient_risk_classification(self.net_risk_factors())
return classification
def net_risk_factors(self):
total = self._get_risk_factor_count() - self._get_negative_risk_factor_count()
return total
def _get_risk_factor_count(self):
_count_risk_factors = [
self._is_age_risk(),
self._is_obesity_risk(),
self._patient._smoker,
self._patient._sedentary,
self._is_familial_risk(),
self._is_systolic_risk(),
self._is_diastolic_risk(),
self._patient._hypertensive,
self._is_dyslipidemia_risk(),
self._is_pre_diabetes_risk()
]
return _count_risk_factors.count(True)
def _get_negative_risk_factor_count(self):
_count_negative_risk_factor = [ self._is_hdl_negative_risk() ]
return _count_negative_risk_factor.count(True)
def _is_obesity_risk(self):
        if self._patient._bmi == 0 and self._patient._waist_girth == 0:  # no obesity data available
return False
elif self._patient._bmi > 30:
return True
elif self._patient._waist_girth > 40 and self._patient._sex == 'male' or self._patient._waist_girth > 35 and self._patient._sex == 'female':
return True
else:
return False
def _is_age_risk(self):
if (self._patient._sex == "male" and self._patient._age >= 45) or (self._patient._sex == "female" and self._patient._age >=55):
return True
else:
return False
def _is_familial_risk(self):
if self._patient._male_family_death_before_55 == True or self._patient._female_family_death_before_65 == True:
return True
else:
return False
def _is_systolic_risk(self):
if self._patient._systolic >= 120:
return True
else:
return False
def _is_diastolic_risk(self):
if self._patient._diastolic >= 80:
return True
else:
return False
def _is_dyslipidemia_risk(self):
if self._patient._ldl > 130 or self._patient._hdl < 40 or self._patient._cholesterol > 200:
return True
else:
return False
def _is_pre_diabetes_risk(self):
if self._patient._fasting_glucose >= 100 and self._patient._fasting_glucose <= 126 or self._patient._oral_glucose_tolerance >= 140 and self._patient._oral_glucose_tolerance < 200:
return True
else:
return False
def _is_hdl_negative_risk(self):
if self._patient._hdl > 60:
return True
else:
return False
class RiskFactorAssessmentClassification(object):
def __init__(self):
pass
def patient_risk_classification(self, value):
if value <= 1:
return self._low_risk_category(value)
if value == 2:
return self._moderate_risk_category(value)
if value > 2:
return self._high_risk_category(value)
def _low_risk_category(self, value):
return("Your risk total is %s. You are at a low risk for cardiovascular disease. Medical check-up no necessary for participation in physical activity." % value)
def _moderate_risk_category(self, value):
return("Your risk total is %s. You are at a moderate risk for cardiovascular disease. Medical check-up recommended for participation in vigorous physical activity." % value)
def _high_risk_category(self, value):
return("Your risk total is %s. You are at a high risk for cardiovascular disease. Medical check-up highly recommended before any physical activity." % value)
|
[
"validation.value_validation.ValidateValue"
] |
[((614, 632), 'validation.value_validation.ValidateValue', 'ValidateValue', (['sex'], {}), '(sex)\n', (627, 632), False, 'from validation.value_validation import ValidateValue\n'), ((673, 691), 'validation.value_validation.ValidateValue', 'ValidateValue', (['age'], {}), '(age)\n', (686, 691), False, 'from validation.value_validation import ValidateValue\n'), ((740, 761), 'validation.value_validation.ValidateValue', 'ValidateValue', (['smoker'], {}), '(smoker)\n', (753, 761), False, 'from validation.value_validation import ValidateValue\n'), ((803, 827), 'validation.value_validation.ValidateValue', 'ValidateValue', (['sedentary'], {}), '(sedentary)\n', (816, 827), False, 'from validation.value_validation import ValidateValue\n'), ((863, 881), 'validation.value_validation.ValidateValue', 'ValidateValue', (['bmi'], {}), '(bmi)\n', (876, 881), False, 'from validation.value_validation import ValidateValue\n'), ((935, 961), 'validation.value_validation.ValidateValue', 'ValidateValue', (['waist_girth'], {}), '(waist_girth)\n', (948, 961), False, 'from validation.value_validation import ValidateValue\n'), ((1031, 1073), 'validation.value_validation.ValidateValue', 'ValidateValue', (['male_family_death_before_55'], {}), '(male_family_death_before_55)\n', (1044, 1073), False, 'from validation.value_validation import ValidateValue\n'), ((1135, 1179), 'validation.value_validation.ValidateValue', 'ValidateValue', (['female_family_death_before_65'], {}), '(female_family_death_before_65)\n', (1148, 1179), False, 'from validation.value_validation import ValidateValue\n'), ((1220, 1243), 'validation.value_validation.ValidateValue', 'ValidateValue', (['systolic'], {}), '(systolic)\n', (1233, 1243), False, 'from validation.value_validation import ValidateValue\n'), ((1295, 1319), 'validation.value_validation.ValidateValue', 'ValidateValue', (['diastolic'], {}), '(diastolic)\n', (1308, 1319), False, 'from validation.value_validation import ValidateValue\n'), ((1374, 1401), 'validation.value_validation.ValidateValue', 'ValidateValue', (['hypertensive'], {}), '(hypertensive)\n', (1387, 1401), False, 'from validation.value_validation import ValidateValue\n'), ((1437, 1455), 'validation.value_validation.ValidateValue', 'ValidateValue', (['ldl'], {}), '(ldl)\n', (1450, 1455), False, 'from validation.value_validation import ValidateValue\n'), ((1501, 1519), 'validation.value_validation.ValidateValue', 'ValidateValue', (['hdl'], {}), '(hdl)\n', (1514, 1519), False, 'from validation.value_validation import ValidateValue\n'), ((1593, 1639), 'validation.value_validation.ValidateValue', 'ValidateValue', (['using_lipid_lowering_medication'], {}), '(using_lipid_lowering_medication)\n', (1606, 1639), False, 'from validation.value_validation import ValidateValue\n'), ((1683, 1709), 'validation.value_validation.ValidateValue', 'ValidateValue', (['cholesterol'], {}), '(cholesterol)\n', (1696, 1709), False, 'from validation.value_validation import ValidateValue\n'), ((1767, 1797), 'validation.value_validation.ValidateValue', 'ValidateValue', (['fasting_glucose'], {}), '(fasting_glucose)\n', (1780, 1797), False, 'from validation.value_validation import ValidateValue\n'), ((1862, 1899), 'validation.value_validation.ValidateValue', 'ValidateValue', (['oral_glucose_tolerance'], {}), '(oral_glucose_tolerance)\n', (1875, 1899), False, 'from validation.value_validation import ValidateValue\n')]
|
import pandas as pd
from riskiq.api import Client
from urllib.parse import urlparse
import requests
import base64
import json
API_SECRET="<KEY>"
API_KEY="/<KEY>"
client = Client(API_SECRET, API_KEY)
df= pd.read_csv('./LengthOfDomains.csv')
domains = df['0']
errors = 0
results = []
# the request URL is built per-domain inside the loop below
headers = {'Accept': 'application/json', 'Authorization': 'Basic OGMxY2YxYTczN2MyMDRiOTovWE9YMW11SnEvM1d5OWFLU1FTdXRWRllGdGwyWFZURQ=='}
for index, domain in enumerate(domains):
new_domain = urlparse(domain).netloc
print(f'Run {index}/{len(domains)}')
url = f'https://api.riskiq.net/v1/ssl/cert/host?host={new_domain}'
res = requests.get(url, headers=headers)
    data = json.loads(res.text)
content = data['content']
final=0
if content != [] :
        first_seen = int(content[0]['firstSeen'])
        last_seen = int(content[0]['lastSeen'])
        final = last_seen - first_seen
results.append(final)
df['6'] = results
df.drop('Unnamed: 0', inplace=True, axis=1)
try:
df.to_csv('./CollectSSL.csv')
except Exception:
    textfile = open("CollectSSL.txt", "w")
    for element in results:
        textfile.write(str(element) + "\n")  # results holds ints; convert before writing
textfile.close()
|
[
"json.loads",
"pandas.read_csv",
"riskiq.api.Client",
"requests.get",
"urllib.parse.urlparse"
] |
[((173, 200), 'riskiq.api.Client', 'Client', (['API_SECRET', 'API_KEY'], {}), '(API_SECRET, API_KEY)\n', (179, 200), False, 'from riskiq.api import Client\n'), ((206, 242), 'pandas.read_csv', 'pd.read_csv', (['"""./LengthOfDomains.csv"""'], {}), "('./LengthOfDomains.csv')\n", (217, 242), True, 'import pandas as pd\n'), ((713, 747), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (725, 747), False, 'import requests\n'), ((758, 778), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (768, 778), False, 'import json\n'), ((567, 583), 'urllib.parse.urlparse', 'urlparse', (['domain'], {}), '(domain)\n', (575, 583), False, 'from urllib.parse import urlparse\n')]
|
from __future__ import unicode_literals
from actstream.models import Action
from mayan.apps.common.tests import GenericViewTestCase
from ..events import event_tag_created, event_tag_edited
from ..models import Tag
from ..permissions import permission_tag_create, permission_tag_edit
from .mixins import TagTestMixin, TagViewTestMixin
class TagEventsTestCase(TagTestMixin, TagViewTestMixin, GenericViewTestCase):
def test_tag_create_event_no_permissions(self):
action_count = Action.objects.count()
response = self._request_test_tag_create_view()
self.assertEqual(response.status_code, 403)
self.assertEqual(Action.objects.count(), action_count)
def test_tag_create_event_with_permissions(self):
self.grant_permission(permission=permission_tag_create)
action_count = Action.objects.count()
response = self._request_test_tag_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(Action.objects.count(), action_count + 1)
event = Action.objects.first()
tag = Tag.objects.first()
self.assertEqual(event.verb, event_tag_created.id)
self.assertEqual(event.target, tag)
self.assertEqual(event.actor, self._test_case_user)
def test_tag_edit_event_no_permissions(self):
self._create_test_tag()
action_count = Action.objects.count()
response = self._request_test_tag_edit_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Action.objects.count(), action_count)
def test_tag_edit_event_with_access(self):
self._create_test_tag()
self.grant_access(
obj=self.test_tag, permission=permission_tag_edit
)
action_count = Action.objects.count()
response = self._request_test_tag_edit_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(Action.objects.count(), action_count + 1)
event = Action.objects.first()
self.assertEqual(event.verb, event_tag_edited.id)
self.assertEqual(event.target, self.test_tag)
self.assertEqual(event.actor, self._test_case_user)
|
[
"actstream.models.Action.objects.count",
"actstream.models.Action.objects.first"
] |
[((493, 515), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (513, 515), False, 'from actstream.models import Action\n'), ((832, 854), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (852, 854), False, 'from actstream.models import Action\n'), ((1049, 1071), 'actstream.models.Action.objects.first', 'Action.objects.first', ([], {}), '()\n', (1069, 1071), False, 'from actstream.models import Action\n'), ((1378, 1400), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (1398, 1400), False, 'from actstream.models import Action\n'), ((1776, 1798), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (1796, 1798), False, 'from actstream.models import Action\n'), ((1991, 2013), 'actstream.models.Action.objects.first', 'Action.objects.first', ([], {}), '()\n', (2011, 2013), False, 'from actstream.models import Action\n'), ((651, 673), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (671, 673), False, 'from actstream.models import Action\n'), ((990, 1012), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (1010, 1012), False, 'from actstream.models import Action\n'), ((1534, 1556), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (1554, 1556), False, 'from actstream.models import Action\n'), ((1932, 1954), 'actstream.models.Action.objects.count', 'Action.objects.count', ([], {}), '()\n', (1952, 1954), False, 'from actstream.models import Action\n')]
|
import argparse
import os
import shutil
import time
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.models.resnet import resnet18
from pytorch_nndct import Pruner
from pytorch_nndct import InputSpec
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
default='/scratch/workspace/dataset/imagenet/pytorch',
help='Data set directory.')
parser.add_argument(
'--pretrained',
default='/scratch/workspace/wangyu/nndct_test_data/models/resnet18.pth',
help='Trained model file path.')
parser.add_argument(
'--ratio',
default=0.1,
type=float,
help='Desired pruning ratio. The larger this value, the smaller'
'the model after pruning.')
parser.add_argument(
    '--ana',
    default=False,
    # argparse's type=bool treats any non-empty string (even 'False') as True,
    # so parse the flag value explicitly
    type=lambda s: str(s).lower() in ('true', '1', 'yes'),
    help='Whether to perform model analysis.')
args, _ = parser.parse_known_args()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions
for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by every 2 epochs"""
lr = lr * (0.1**(epoch // 2))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
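# Worked example: with lr = 1e-4, epochs 0-1 train at 1e-4, epochs 2-3 at 1e-5,
# epochs 4-5 at 1e-6, and so on (a tenfold decay every 2 epochs).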
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader), [batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
model = model.cuda()
images = images.cuda()
target = target.cuda()
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 10 == 0:
progress.display(i)
def evaluate(val_loader, model, criterion):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
model = model.cuda()
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 50 == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(
top1=top1, top5=top5))
return top1.avg, top5.avg
# Evaluation function provided to pruner must use model as the first argument.
def ana_eval_fn(model, val_loader, loss_fn):
return evaluate(val_loader, model, loss_fn)[1]
if __name__ == '__main__':
model = resnet18().cpu()
model.load_state_dict(torch.load(args.pretrained))
batch_size = 128
workers = 4
traindir = os.path.join(args.data_dir, 'train')
valdir = os.path.join(args.data_dir, 'validation')
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=workers,
pin_memory=True)
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True)
criterion = torch.nn.CrossEntropyLoss().cuda()
pruner = Pruner(model, InputSpec(shape=(3, 224, 224), dtype=torch.float32))
if args.ana:
pruner.ana(ana_eval_fn, args=(val_loader, criterion), gpus=[0, 1, 2, 3])
model = pruner.prune(ratio=args.ratio)
pruner.summary(model)
lr = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=1e-4)
best_acc5 = 0
epochs = 1
for epoch in range(epochs):
adjust_learning_rate(optimizer, epoch, lr)
train(train_loader, model, criterion, optimizer, epoch)
acc1, acc5 = evaluate(val_loader, model, criterion)
# remember best acc@1 and save checkpoint
is_best = acc5 > best_acc5
best_acc5 = max(acc5, best_acc5)
if is_best:
model.save('resnet18_sparse.pth.tar')
torch.save(model.state_dict(), 'resnet18_final.pth.tar')
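# Example invocation (script name and paths are placeholders; see the argparse
# flags defined above):
#   python prune_resnet18.py --data_dir /path/to/imagenet \
#       --pretrained /path/to/resnet18.pth --ratio 0.2 --ana true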
|
[
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Resize",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torchvision.transforms.RandomResizedCrop",
"time.time",
"torch.save",
"torchvision.models.resnet.resnet18",
"pytorch_nndct.InputSpec",
"shutil.copyfile",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"torchvision.transforms.ToTensor"
] |
[((277, 302), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (300, 302), False, 'import argparse\n'), ((1516, 1543), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (1526, 1543), False, 'import torch\n'), ((3363, 3374), 'time.time', 'time.time', ([], {}), '()\n', (3372, 3374), False, 'import time\n'), ((5701, 5737), 'os.path.join', 'os.path.join', (['args.data_dir', '"""train"""'], {}), "(args.data_dir, 'train')\n", (5713, 5737), False, 'import os\n'), ((5749, 5790), 'os.path.join', 'os.path.join', (['args.data_dir', '"""validation"""'], {}), "(args.data_dir, 'validation')\n", (5761, 5790), False, 'import os\n'), ((5806, 5881), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (5826, 5881), True, 'import torchvision.transforms as transforms\n'), ((6145, 6267), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'workers', 'pin_memory': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, num_workers=workers, pin_memory=True)\n', (6172, 6267), False, 'import torch\n'), ((6526, 6647), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'workers', 'pin_memory': '(True)'}), '(val_dataset, batch_size=batch_size, shuffle=\n False, num_workers=workers, pin_memory=True)\n', (6553, 6647), False, 'import torch\n'), ((1562, 1609), 'shutil.copyfile', 'shutil.copyfile', (['filename', '"""model_best.pth.tar"""'], {}), "(filename, 'model_best.pth.tar')\n", (1577, 1609), False, 'import shutil\n'), ((2307, 2322), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2320, 2322), False, 'import torch\n'), ((4077, 4088), 'time.time', 'time.time', ([], {}), '()\n', (4086, 4088), False, 'import time\n'), ((4500, 4515), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4513, 4515), False, 'import torch\n'), ((4527, 4538), 'time.time', 'time.time', ([], {}), '()\n', (4536, 4538), False, 'import time\n'), ((5625, 5652), 'torch.load', 'torch.load', (['args.pretrained'], {}), '(args.pretrained)\n', (5635, 5652), False, 'import torch\n'), ((6750, 6801), 'pytorch_nndct.InputSpec', 'InputSpec', ([], {'shape': '(3, 224, 224)', 'dtype': 'torch.float32'}), '(shape=(3, 224, 224), dtype=torch.float32)\n', (6759, 6801), False, 'from pytorch_nndct import InputSpec\n'), ((5123, 5134), 'time.time', 'time.time', ([], {}), '()\n', (5132, 5134), False, 'import time\n'), ((5584, 5594), 'torchvision.models.resnet.resnet18', 'resnet18', ([], {}), '()\n', (5592, 5594), False, 'from torchvision.models.resnet import resnet18\n'), ((6689, 6716), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (6714, 6716), False, 'import torch\n'), ((3482, 3493), 'time.time', 'time.time', ([], {}), '()\n', (3491, 3493), False, 'import time\n'), ((4048, 4059), 'time.time', 'time.time', ([], {}), '()\n', (4057, 4059), False, 'import time\n'), ((5983, 6016), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (6011, 6016), True, 'import torchvision.transforms as transforms\n'), ((6028, 6061), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6059, 6061), True, 'import torchvision.transforms as transforms\n'), ((6073, 6094), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6092, 6094), True, 'import torchvision.transforms as transforms\n'), ((6384, 6406), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (6401, 6406), True, 'import torchvision.transforms as transforms\n'), ((6418, 6444), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (6439, 6444), True, 'import torchvision.transforms as transforms\n'), ((6456, 6477), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6475, 6477), True, 'import torchvision.transforms as transforms\n'), ((5092, 5103), 'time.time', 'time.time', ([], {}), '()\n', (5101, 5103), False, 'import time\n')]
|
#------------------------------------------------------------------------------
# Modeling backend setups
__all__ = []
# Preload functions:
# Some backends depend on more complicated modules and thus on a preload
# function.
def __numcosmo_preload():
import gi
gi.require_version("NumCosmoMath", "1.0")
gi.require_version("NumCosmo", "1.0")
# Backend dictionary __backends:
# Dictionary controlling the backends; it must test if the backend is available
# and loadable.
# - name: The backend name;
# - module: The actual module name, must be a .py file inside the modbackend
# directory;
# - prereqs: modules that need to be loadable to allow the backend to work;
# - preload: an optional function that must be called before the modules in
# prereqs are tested;
#  - available: must always start False;
__backends = {'ct': {'name': 'cluster_toolkit+astropy', 'available': False,
'module': 'cluster_toolkit',
'prereqs': ['cluster_toolkit', 'astropy']},
'nc': {'name': 'NumCosmo', 'available': False,
'module': 'numcosmo',
'prereqs': ['gi.repository.NumCosmoMath', 'gi.repository.NumCosmo'],
'preload': __numcosmo_preload},
'ccl': {'name': 'ccl', 'available': False,
'module': 'ccl',
'prereqs': ['pyccl']},
'notabackend': {'name': 'notaname', 'available': False,
'module': 'notamodule',
'prereqs': ['notaprerq']}}
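# A minimal sketch (not part of the original module) of how the entries above
# could be probed at import time; the helper name `_check_backends` is
# hypothetical.
import importlib

def _check_backends(backends):
    for cfg in backends.values():
        try:
            if 'preload' in cfg:
                cfg['preload']()  # e.g. gi.require_version calls for NumCosmo
            for prereq in cfg['prereqs']:
                importlib.import_module(prereq)
            cfg['available'] = True
        except Exception:
            cfg['available'] = False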
|
[
"gi.require_version"
] |
[((279, 320), 'gi.require_version', 'gi.require_version', (['"""NumCosmoMath"""', '"""1.0"""'], {}), "('NumCosmoMath', '1.0')\n", (297, 320), False, 'import gi\n'), ((325, 362), 'gi.require_version', 'gi.require_version', (['"""NumCosmo"""', '"""1.0"""'], {}), "('NumCosmo', '1.0')\n", (343, 362), False, 'import gi\n')]
|
import logging
import theano
from theano.gradient import disconnected_grad
from theano import tensor
from blocks.graph import ComputationGraph
from blocks.filter import VariableFilter
from blocks.bricks import Linear, NDimensionalSoftmax
from blocks.bricks.base import application
from blocks.roles import OUTPUT, add_role, WEIGHT
from blocks.utils import dict_subset, shared_floatx_nans
from blocks_extras.bricks.sequence_generator2 import SoftmaxReadout, MergeReadout
logger = logging.getLogger(__name__)
class ReinforceReadout(SoftmaxReadout):
def __init__(self, reward_brick, entropy=None, **kwargs):
super(ReinforceReadout, self).__init__(**kwargs)
self.reward_brick = reward_brick
self.entropy_coof = entropy
self.value_prediction = Linear(output_dim=1, name='value_prediction')
self.children += [
reward_brick, self.value_prediction]
self.costs.inputs += ['attended', 'attended_mask']
def _push_allocation_config(self):
super(ReinforceReadout, self)._push_allocation_config()
self.value_prediction.input_dim = self.get_dim('states')
@application
def costs(self, application_call, prediction, prediction_mask,
groundtruth, groundtruth_mask,
**inputs):
states = disconnected_grad(inputs['states'])
merged = self.merge(**dict_subset(inputs, self.merge_names))
# Compute log-probabilities for the predicted tokens
log_probs = -self.all_scores(prediction, merged) * prediction_mask
# Compute per-token rewards
rewards = self.reward_brick.apply(prediction, prediction_mask,
groundtruth, groundtruth_mask).sum(axis=-1)
        application_call.add_auxiliary_variable(log_probs, name='log_probs')
        # Encourage entropy by adding negated log-probs to the rewards
        if self.entropy_coof:
            rewards += self.entropy_coof * disconnected_grad(-log_probs)
future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
baselines = self.value_prediction.apply(states)[:, :, 0]
application_call.add_auxiliary_variable(
baselines, name='baselines')
# Compute baseline error
centered_future_rewards = future_rewards - baselines
baseline_errors = (
(centered_future_rewards *
disconnected_grad(prediction_mask)) ** 2).sum(axis=0)
application_call.add_auxiliary_variable(
baseline_errors, name='baseline_errors')
# The gradient of this will be the REINFORCE 1-sample
# gradient estimate
costs = (disconnected_grad(centered_future_rewards)
* log_probs
* prediction_mask).sum(axis=0)
# Add auxiliary variables for intermediate steps of the computation
application_call.add_auxiliary_variable(
rewards, name='rewards')
application_call.add_auxiliary_variable(
log_probs.copy(), name='prediction_log_probs')
return costs
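# The cost returned above is a REINFORCE surrogate: because disconnected_grad
# stops gradients through the centered returns, its gradient is the 1-sample
# estimate E[(R_t - b_t) * grad log pi(a_t | s_t)]. The baseline network is
# fit through the auxiliary 'baseline_errors' variable exposed above.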
class CriticReadout(MergeReadout):
def __init__(self, num_tokens,
value_softmax=False, same_value_for_wrong=False,
groundtruth_word_bonus=False, dueling_outputs=False, **kwargs):
self.value_softmax = value_softmax
self.same_value_for_wrong = same_value_for_wrong
self.groundtruth_word_bonus = groundtruth_word_bonus
self.dueling_outputs = dueling_outputs
super(CriticReadout, self).__init__(post_merge_dim=num_tokens, **kwargs)
self.costs.inputs = ([
'prediction', 'prediction_mask',
'groundtruth', 'groundtruth_mask']
+ self.input_names)
def _allocate(self):
w = shared_floatx_nans((self.get_dim('states'),), name='add_weights')
add_role(w, WEIGHT)
self.parameters.append(w)
def _initialize(self):
self.weights_init.initialize(self.parameters[0], self.rng)
# For compatibility with Blocks-extras
def sample(self):
raise NotImplementedError()
# For compatibility with Blocks-extras
def scores(self):
pass
@application
def costs(self, prediction, prediction_mask,
groundtruth, groundtruth_mask, **inputs):
outputs = self.all_outputs(groundtruth, groundtruth_mask, **inputs)
# It does not matter what we return here, as long as it contains
# the values in the computation graph.
return outputs.sum()
@application
def all_outputs(self, application_call, groundtruth, groundtruth_mask, **inputs):
outputs = self.merge(**dict_subset(inputs, self.merge_names))
indices = tensor.repeat(
tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])
if self.value_softmax:
logger.debug('Applying value softmax')
outputs = (tensor.addbroadcast(outputs[:, :, :1], 2)
+ self.softmax.apply(outputs[:, :, 1:], extra_ndim=1))
if self.same_value_for_wrong:
            logger.debug('Same value for a priori wrong actions')
wrong_output = outputs[:, :, 0]
outputs = outputs[:, :, 1:]
wrong_mask = tensor.ones_like(outputs[0])
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
outputs = (outputs * (1 - wrong_mask)
+ wrong_output[:, :, None] * wrong_mask)
application_call.add_auxiliary_variable(wrong_mask, name='wrong_mask')
if self.groundtruth_word_bonus:
            logger.debug('Bonus for groundtruth words')
wrong_mask = tensor.ones_like(outputs[0])
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
w, = self.parameters
bonuses = inputs['states'].dot(w)
outputs += bonuses[:, :, None] * (1 - wrong_mask)[None, :, :]
if self.dueling_outputs:
logger.debug('Dueling outputs a-la dueling networks')
base_output = outputs[:, :, [0]]
dueling_outputs = outputs[:, :, 1:]
outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=2, keepdims=True)
return outputs
@application
def outputs(self, groundtruth, groundtruth_mask, **inputs):
# Copy-pasted from all_outputs, because Theano does not support ellipsis
outputs = self.merge(**dict_subset(inputs, self.merge_names))
indices = tensor.repeat(
tensor.arange(groundtruth.shape[1]), groundtruth.shape[0])
if self.value_softmax:
logger.debug('Applying value softmax')
outputs = (tensor.addbroadcast(outputs[:, :1], 1)
+ self.softmax.apply(outputs[:, 1:]))
if self.same_value_for_wrong:
            logger.debug('Same value for a priori wrong actions')
wrong_output = outputs[:, 0]
outputs = outputs[:, 1:]
wrong_mask = tensor.ones_like(outputs)
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
outputs = (outputs * (1 - wrong_mask)
+ wrong_output[:, None] * wrong_mask)
if self.groundtruth_word_bonus:
            logger.debug('Bonus for groundtruth words')
wrong_mask = tensor.ones_like(outputs)
wrong_mask = tensor.set_subtensor(
wrong_mask[indices, groundtruth.T.flatten()], 0)
w, = self.parameters
bonuses = inputs['states'].dot(w)
outputs = outputs + bonuses[:, None] * (1 - wrong_mask)
if self.dueling_outputs:
logger.debug('Dueling outputs a-la dueling networks')
base_output = outputs[:, [0]]
dueling_outputs = outputs[:, 1:]
outputs = base_output + dueling_outputs - dueling_outputs.mean(axis=1, keepdims=True)
return outputs
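# The dueling branch above mirrors the dueling-network decomposition
# Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a'): column 0 of the merged outputs
# plays the role of the state value V and the remaining columns the advantages.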
class ActorCriticReadout(SoftmaxReadout):
"""Actor-critic
Params
------
bos_token : int
The token used to pad critic input. Critic needs to do
at least one extra step compared to the actor in order
to get the first glimpse of the ground-truth sequence
before predicting the actual values.
"""
def __init__(self, reward_brick,
compute_targets, solve_bellman,
freeze_actor, freeze_critic, critic_uses_actor_states,
critic_uses_groundtruth,
critic=None, critic_burnin_steps=None,
critic_loss=None,
critic_policy_t=None,
entropy_reward_coof=None, cross_entropy_reward_coof=None,
trpo_coef=None,
discount=None,
value_penalty=None, value_penalty_type=None,
accumulate_outputs=False, use_value_biases=None,
actor_grad_estimate=None,
bos_token=None,
**kwargs):
super(ActorCriticReadout, self).__init__(**kwargs)
self.reward_brick = reward_brick
self.critic = critic
self.freeze_actor = freeze_actor
self.freeze_critic = freeze_critic
self.critic_uses_actor_states = critic_uses_actor_states
self.critic_uses_groundtruth = (
critic_uses_groundtruth if critic_uses_groundtruth is not None else True)
self.critic_burnin_steps = (
critic_burnin_steps if critic_burnin_steps is not None else 0)
self.critic_loss = (
critic_loss if critic_loss is not None else "L2")
self.value_summand = Linear(output_dim=1, name='summand')
self.softmax_t = 1.
self.critic_policy_t = (
critic_policy_t if critic_policy_t is not None else 1.0)
self.epsilon = 0.
self.discount = (
discount if discount is not None else 1.)
self.entropy_reward_coof = (
entropy_reward_coof if entropy_reward_coof is not None else 0.)
self.cross_entropy_reward_coof = (
cross_entropy_reward_coof if cross_entropy_reward_coof is not None else 0.)
self.trpo_coef = (
trpo_coef if trpo_coef is not None else 0.)
self.value_penalty = value_penalty
self.value_penalty_type = (
value_penalty_type if value_penalty_type is not None else "L2")
self.compute_targets = compute_targets
self.solve_bellman = solve_bellman
self.accumulate_outputs = accumulate_outputs
self.use_value_biases = (
use_value_biases if use_value_biases is not None else True)
self.actor_grad_estimate = (
actor_grad_estimate if actor_grad_estimate else 'all_actions')
self.bos_token = bos_token
self.softmax = NDimensionalSoftmax()
self.children += [reward_brick, self.value_summand, self.softmax]
if self.critic:
self.children.append(self.critic)
self.costs.inputs += ['attended', 'attended_mask']
def _push_allocation_config(self):
super(ActorCriticReadout, self)._push_allocation_config()
self.value_summand.input_dim = self.get_dim('attended')
@application
def scores(self, **inputs):
merged = self.merge(**dict_subset(inputs, self.merge_names))
return self.softmax.log_probabilities(
merged * self.softmax_t, extra_ndim=merged.ndim - 2)
@application
def costs(self, application_call, prediction, prediction_mask,
groundtruth, groundtruth_mask,
**inputs):
def _prediction_subtensor(data):
if data.ndim != 3:
raise ValueError
flat_data = data.reshape((
data.shape[0] * data.shape[1],
data.shape[2]))
flat_data = flat_data[
tensor.arange(flat_data.shape[0]), prediction.flatten()]
return flat_data.reshape((
prediction.shape[0], prediction.shape[1]))
attended = disconnected_grad(inputs.pop('attended'))
attended_mask = disconnected_grad(inputs.pop('attended_mask'))
# Compute the rewards
rewards = self.reward_brick.apply(
prediction, prediction_mask,
groundtruth, groundtruth_mask)[:, :, 0]
future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
# Compute the critic outputs
if self.critic:
padding = tensor.repeat(
tensor.fill(prediction[0:1], self.bos_token), 1, axis=0)
mask_padding = tensor.repeat(
tensor.fill(prediction_mask[0:1], 1.), 1, axis=0)
padded_prediction = tensor.concatenate([padding, prediction])
padded_prediction_mask = tensor.concatenate([mask_padding, prediction_mask])
if self.critic_uses_groundtruth:
critic_context = groundtruth
critic_context_mask = groundtruth_mask
else:
critic_context = tensor.zeros_like(groundtruth[0:1])
critic_context_mask = tensor.zeros_like(groundtruth_mask[0:1])
critic_kwargs = dict(
prediction=padded_prediction, prediction_mask=padded_prediction_mask,
groundtruth=critic_context, groundtruth_mask=critic_context_mask,
inputs=critic_context, inputs_mask=critic_context_mask)
if self.critic_uses_actor_states:
extra_inputs = disconnected_grad(inputs['states'])
# We don't need the very last hidden state of the actor
# in extra_inputs. We have to add something instead for the shapes
# to match. It doesn't matter at all, what exactly we add.
critic_kwargs['extra_inputs'] = tensor.concatenate(
[extra_inputs, tensor.zeros_like(extra_inputs[0:1])])
critic_cg = ComputationGraph(self.critic.costs(**critic_kwargs))
outputs, = VariableFilter(
applications=[self.critic.generator.readout.all_outputs],
roles=[OUTPUT])(critic_cg)
# The first subtensor should be discarded, because it was outputted
# for the padding. In addition to that Q-values from the first
# 'critic_burnin_steps' will be ignored, see later in the code.
outputs = outputs[1:]
else:
outputs = self.merge(**dict_subset(inputs, self.merge_names))
prediction_outputs = _prediction_subtensor(outputs)
# Compute Q adjustments
adjustments = outputs
prediction_adjustments = prediction_outputs
if self.accumulate_outputs:
prediction_adjustments = prediction_outputs.cumsum(axis=0)
adjustments = tensor.inc_subtensor(
adjustments[1:], prediction_adjustments[:-1][:, :, None])
# Compute shared additive biases for all Q values
if self.use_value_biases:
value_biases = (
self.value_summand.apply(attended)[:, :, 0]
* attended_mask).sum(axis=0)
else:
value_biases = tensor.zeros_like(adjustments[0, :, 0])
values = adjustments + value_biases[None, :, None]
prediction_values = prediction_adjustments + value_biases[None, :]
rolled_prediction_mask = tensor.roll(prediction_mask, -1, axis=0)
rolled_prediction_mask = tensor.set_subtensor(
rolled_prediction_mask[-1], 0)
# Compute probabilities
logs = self.scores(use_epsilon=False, **inputs)
probs = tensor.exp(logs)
if self.trpo_coef:
logger.debug("Using TRPO coefficient of {}".format(self.trpo_coef))
old_probs = tensor.tensor3('probs')
else:
old_probs = tensor.zeros_like(probs)
prediction_logs = _prediction_subtensor(logs)
# Compute value targets
        value_targets = (disconnected_grad(probs) * values).sum(axis=-1)
        value_targets = tensor.roll(value_targets, -1, axis=0)
        value_targets = (self.discount * value_targets * rolled_prediction_mask
                         + rewards)
        value_targets = value_targets.astype(theano.config.floatX)
        total_costs = 0
        # Compute critic cost
        if not self.compute_targets:
            logger.debug("Using given targets")
            value_targets = tensor.matrix('value_targets')
        if self.solve_bellman == 'no':
            logger.debug("Not solving Bellman, just predicting the rewards")
            value_targets = rewards.copy(name='value_targets')
        elif self.solve_bellman == 'without_dp':
            future_rewards = rewards[::-1].cumsum(axis=0)[::-1]
            logger.debug("Solving Bellman, but without DP")
            value_targets = future_rewards
        elif self.solve_bellman is not True:
            raise ValueError()
        critic_errors = prediction_values - value_targets
        if self.critic_loss == 'L2':
            logger.debug("L2 loss for the critic")
            critic_costs_per_char = critic_errors ** 2 * prediction_mask
        elif self.critic_loss == 'huber':
            logger.debug("Huber loss for the critic")
            use_L2 = tensor.lt(abs(critic_errors), 0.5)
            critic_costs_per_char = (use_L2 * critic_errors ** 2 +
                                     (1 - use_L2) * abs(critic_errors)) * prediction_mask
        else:
            raise ValueError()
        critic_costs = critic_costs_per_char[self.critic_burnin_steps:].sum(axis=0)
        if not self.freeze_critic:
            total_costs += critic_costs
        # Compute critic Monte-Carlo cost
        critic_monte_carlo_costs = (
            (((prediction_values - future_rewards) ** 2) * prediction_mask)
            [self.critic_burnin_steps:].sum(axis=0))
        # Value penalty
        if self.value_penalty:
            logger.debug("Use value penalty")
            if self.value_penalty_type == 'L2':
                value_deviations = (values - values.mean(axis=-1, keepdims=True)) ** 2
            elif self.value_penalty_type == 'L1':
                value_deviations = abs(values - values.mean(axis=-1, keepdims=True))
            else:
                raise ValueError("unknown value penalty type {}".format(
                    self.value_penalty_type))
            if not self.freeze_critic:
                total_costs += (
                    self.value_penalty *
                    (value_deviations.sum(axis=-1) * prediction_mask)
                    [self.critic_burnin_steps:].sum(axis=0))
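            # The penalty regularizes each step's action-values toward their
            # mean, limiting how far apart the Q-estimates can drift.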
        # Compute actor cost
        if self.critic:
            # The actor cost will be minimized, that's why values
            # must be negated.
            est_name = self.actor_grad_estimate
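            # Two gradient estimates are supported: 'all_actions'
            # differentiates the expected disadvantage over the whole output
            # vocabulary, while '1_action*' reweights only the sampled
            # action's log-probability, REINFORCE-style.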
            if est_name == 'all_actions':
                disadvantages = disconnected_grad(
                    values.max(axis=-1)[:, :, None] - values)
                actor_costs = ((probs * disadvantages).sum(axis=-1)
                               * prediction_mask)
                actor_costs = actor_costs[self.critic_burnin_steps:]
            elif est_name.startswith('1_action'):
                # We do not provide a target for the first step because we
                # lack an estimate of the value of the initial state; this is
                # how our critic works. Hopefully the network won't unlearn
                # to produce a BOS first.
                future_reward_estimate = (future_rewards
                                          if est_name.endswith('unbiased')
                                          else prediction_values)
                weights = -disconnected_grad(
                    future_reward_estimate[1:] + rewards[:-1] - prediction_values[:-1])
                actor_costs = ((prediction_logs[1:] * weights) * prediction_mask[1:])
                actor_costs = actor_costs[self.critic_burnin_steps + 1:]
            else:
                raise ValueError
            actor_costs = actor_costs.sum(axis=0)
            actor_entropies = (probs * -logs).sum(axis=-1) * prediction_mask
            actor_entropies = actor_entropies[
                self.critic_burnin_steps:].sum(axis=0)
            old_actor_cross_entropies = (
                (old_probs * -logs).sum(axis=-1) * prediction_mask)
            old_actor_cross_entropies = old_actor_cross_entropies[
                self.critic_burnin_steps:].sum(axis=0)
            critic_policy = disconnected_grad(
                self.softmax.apply(self.critic_policy_t * values, extra_ndim=1))
            critic_cross_entropies = (
                (critic_policy * -logs).sum(axis=-1)
                * prediction_mask)
            critic_cross_entropies = critic_cross_entropies[
                self.critic_burnin_steps:].sum(axis=0)
            actor_costs_with_penalties = (
                actor_costs
                - self.entropy_reward_coof * actor_entropies
                # But really, should it be minus here, below?
                - self.cross_entropy_reward_coof * critic_cross_entropies
                + self.trpo_coef * old_actor_cross_entropies)
            if not self.freeze_actor:
                total_costs += actor_costs_with_penalties
            else:
                total_costs += disconnected_grad(actor_costs_with_penalties)
        # Add auxiliary variables for intermediate steps of the computation
        application_call.add_auxiliary_variable(
            rewards, name='rewards')
        application_call.add_auxiliary_variable(
            value_biases, name='value_biases')
        application_call.add_auxiliary_variable(
            values.copy(), name='values')
        application_call.add_auxiliary_variable(
            outputs.copy(), name='outputs')
        application_call.add_auxiliary_variable(
            prediction_values, name='prediction_values')
        application_call.add_auxiliary_variable(
            prediction_outputs, name='prediction_outputs')
        application_call.add_auxiliary_variable(
            value_targets.copy(), name='value_targets')
        application_call.add_auxiliary_variable(
            probs.copy(), name='probs')
        application_call.add_auxiliary_variable(
            prediction_logs, name='prediction_log_probs')
        # Compute some statistics for debugging
        last_character_mask = prediction_mask - rolled_prediction_mask
        last_character_costs = (
            critic_costs_per_char * last_character_mask).sum(axis=0)
        mean2_output = (
            ((prediction_outputs ** 2) * prediction_mask).sum()
            / prediction_mask.sum()) ** 0.5
        max_output = abs(prediction_outputs * prediction_mask).max()
        expected_reward = (probs[0] * values[0]).sum(axis=-1)
        application_call.add_auxiliary_variable(
            last_character_costs, name='last_character_costs')
        application_call.add_auxiliary_variable(
            critic_costs.mean(), name='mean_critic_cost')
        application_call.add_auxiliary_variable(
            critic_monte_carlo_costs.mean(), name='mean_critic_monte_carlo_cost')
        if self.critic:
            application_call.add_auxiliary_variable(
                actor_costs.mean(), name='mean_actor_cost')
            application_call.add_auxiliary_variable(
                actor_entropies.mean(), name='mean_actor_entropy')
        application_call.add_auxiliary_variable(
            expected_reward.mean(), name='mean_expected_reward')
        application_call.add_auxiliary_variable(
            mean2_output, name='mean2_output')
        application_call.add_auxiliary_variable(
            max_output, name='max_output')
        return total_costs
|
[
"theano.tensor.tensor3",
"blocks.bricks.Linear",
"theano.tensor.ones_like",
"theano.tensor.addbroadcast",
"theano.tensor.concatenate",
"theano.tensor.inc_subtensor",
"theano.tensor.set_subtensor",
"theano.tensor.zeros_like",
"theano.tensor.arange",
"theano.gradient.disconnected_grad",
"blocks.roles.add_role",
"blocks.utils.dict_subset",
"theano.tensor.fill",
"theano.tensor.matrix",
"theano.tensor.roll",
"blocks.filter.VariableFilter",
"theano.tensor.exp",
"blocks.bricks.NDimensionalSoftmax",
"logging.getLogger"
] |
[((481, 508), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (498, 508), False, 'import logging\n'), ((781, 826), 'blocks.bricks.Linear', 'Linear', ([], {'output_dim': '(1)', 'name': '"""value_prediction"""'}), "(output_dim=1, name='value_prediction')\n", (787, 826), False, 'from blocks.bricks import Linear, NDimensionalSoftmax\n'), ((1305, 1340), 'theano.gradient.disconnected_grad', 'disconnected_grad', (["inputs['states']"], {}), "(inputs['states'])\n", (1322, 1340), False, 'from theano.gradient import disconnected_grad\n'), ((3834, 3853), 'blocks.roles.add_role', 'add_role', (['w', 'WEIGHT'], {}), '(w, WEIGHT)\n', (3842, 3853), False, 'from blocks.roles import OUTPUT, add_role, WEIGHT\n'), ((9677, 9713), 'blocks.bricks.Linear', 'Linear', ([], {'output_dim': '(1)', 'name': '"""summand"""'}), "(output_dim=1, name='summand')\n", (9683, 9713), False, 'from blocks.bricks import Linear, NDimensionalSoftmax\n'), ((10851, 10872), 'blocks.bricks.NDimensionalSoftmax', 'NDimensionalSoftmax', ([], {}), '()\n', (10870, 10872), False, 'from blocks.bricks import Linear, NDimensionalSoftmax\n'), ((15406, 15446), 'theano.tensor.roll', 'tensor.roll', (['prediction_mask', '(-1)'], {'axis': '(0)'}), '(prediction_mask, -1, axis=0)\n', (15417, 15446), False, 'from theano import tensor\n'), ((15480, 15531), 'theano.tensor.set_subtensor', 'tensor.set_subtensor', (['rolled_prediction_mask[-1]', '(0)'], {}), '(rolled_prediction_mask[-1], 0)\n', (15500, 15531), False, 'from theano import tensor\n'), ((15650, 15666), 'theano.tensor.exp', 'tensor.exp', (['logs'], {}), '(logs)\n', (15660, 15666), False, 'from theano import tensor\n'), ((16069, 16107), 'theano.tensor.roll', 'tensor.roll', (['value_targets', '(-1)'], {'axis': '(0)'}), '(value_targets, -1, axis=0)\n', (16080, 16107), False, 'from theano import tensor\n'), ((4731, 4766), 'theano.tensor.arange', 'tensor.arange', (['groundtruth.shape[1]'], {}), '(groundtruth.shape[1])\n', (4744, 4766), False, 'from theano import tensor\n'), ((5227, 5255), 'theano.tensor.ones_like', 'tensor.ones_like', (['outputs[0]'], {}), '(outputs[0])\n', (5243, 5255), False, 'from theano import tensor\n'), ((5686, 5714), 'theano.tensor.ones_like', 'tensor.ones_like', (['outputs[0]'], {}), '(outputs[0])\n', (5702, 5714), False, 'from theano import tensor\n'), ((6571, 6606), 'theano.tensor.arange', 'tensor.arange', (['groundtruth.shape[1]'], {}), '(groundtruth.shape[1])\n', (6584, 6606), False, 'from theano import tensor\n'), ((7041, 7066), 'theano.tensor.ones_like', 'tensor.ones_like', (['outputs'], {}), '(outputs)\n', (7057, 7066), False, 'from theano import tensor\n'), ((7411, 7436), 'theano.tensor.ones_like', 'tensor.ones_like', (['outputs'], {}), '(outputs)\n', (7427, 7436), False, 'from theano import tensor\n'), ((12737, 12778), 'theano.tensor.concatenate', 'tensor.concatenate', (['[padding, prediction]'], {}), '([padding, prediction])\n', (12755, 12778), False, 'from theano import tensor\n'), ((12816, 12867), 'theano.tensor.concatenate', 'tensor.concatenate', (['[mask_padding, prediction_mask]'], {}), '([mask_padding, prediction_mask])\n', (12834, 12867), False, 'from theano import tensor\n'), ((14833, 14911), 'theano.tensor.inc_subtensor', 'tensor.inc_subtensor', (['adjustments[1:]', 'prediction_adjustments[:-1][:, :, None]'], {}), '(adjustments[1:], prediction_adjustments[:-1][:, :, None])\n', (14853, 14911), False, 'from theano import tensor\n'), ((15198, 15237), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['adjustments[0, :, 0]'], {}), '(adjustments[0, :, 0])\n', (15215, 15237), False, 'from theano import tensor\n'), ((15798, 15821), 'theano.tensor.tensor3', 'tensor.tensor3', (['"""probs"""'], {}), "('probs')\n", (15812, 15821), False, 'from theano import tensor\n'), ((15860, 15884), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['probs'], {}), '(probs)\n', (15877, 15884), False, 'from theano import tensor\n'), ((16460, 16490), 'theano.tensor.matrix', 'tensor.matrix', (['"""value_targets"""'], {}), "('value_targets')\n", (16473, 16490), False, 'from theano import tensor\n'), ((1372, 1409), 'blocks.utils.dict_subset', 'dict_subset', (['inputs', 'self.merge_names'], {}), '(inputs, self.merge_names)\n', (1383, 1409), False, 'from blocks.utils import dict_subset, shared_floatx_nans\n'), ((1961, 1990), 'theano.gradient.disconnected_grad', 'disconnected_grad', (['(-log_probs)'], {}), '(-log_probs)\n', (1978, 1990), False, 'from theano.gradient import disconnected_grad\n'), ((4647, 4684), 'blocks.utils.dict_subset', 'dict_subset', (['inputs', 'self.merge_names'], {}), '(inputs, self.merge_names)\n', (4658, 4684), False, 'from blocks.utils import dict_subset, shared_floatx_nans\n'), ((4895, 4936), 'theano.tensor.addbroadcast', 'tensor.addbroadcast', (['outputs[:, :, :1]', '(2)'], {}), '(outputs[:, :, :1], 2)\n', (4914, 4936), False, 'from theano import tensor\n'), ((6487, 6524), 'blocks.utils.dict_subset', 'dict_subset', (['inputs', 'self.merge_names'], {}), '(inputs, self.merge_names)\n', (6498, 6524), False, 'from blocks.utils import dict_subset, shared_floatx_nans\n'), ((6735, 6773), 'theano.tensor.addbroadcast', 'tensor.addbroadcast', (['outputs[:, :1]', '(1)'], {}), '(outputs[:, :1], 1)\n', (6754, 6773), False, 'from theano import tensor\n'), ((11326, 11363), 'blocks.utils.dict_subset', 'dict_subset', (['inputs', 'self.merge_names'], {}), '(inputs, self.merge_names)\n', (11337, 11363), False, 'from blocks.utils import dict_subset, shared_floatx_nans\n'), ((12540, 12584), 'theano.tensor.fill', 'tensor.fill', (['prediction[0:1]', 'self.bos_token'], {}), '(prediction[0:1], self.bos_token)\n', (12551, 12584), False, 'from theano import tensor\n'), ((12655, 12693), 'theano.tensor.fill', 'tensor.fill', (['prediction_mask[0:1]', '(1.0)'], {}), '(prediction_mask[0:1], 1.0)\n', (12666, 12693), False, 'from theano import tensor\n'), ((13064, 13099), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['groundtruth[0:1]'], {}), '(groundtruth[0:1])\n', (13081, 13099), False, 'from theano import tensor\n'), ((13138, 13178), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['groundtruth_mask[0:1]'], {}), '(groundtruth_mask[0:1])\n', (13155, 13178), False, 'from theano import tensor\n'), ((13531, 13566), 'theano.gradient.disconnected_grad', 'disconnected_grad', (["inputs['states']"], {}), "(inputs['states'])\n", (13548, 13566), False, 'from theano.gradient import disconnected_grad\n'), ((14039, 14131), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'applications': '[self.critic.generator.readout.all_outputs]', 'roles': '[OUTPUT]'}), '(applications=[self.critic.generator.readout.all_outputs],\n roles=[OUTPUT])\n', (14053, 14131), False, 'from blocks.filter import VariableFilter\n'), ((21308, 21353), 'theano.gradient.disconnected_grad', 'disconnected_grad', (['actor_costs_with_penalties'], {}), '(actor_costs_with_penalties)\n', (21325, 21353), False, 'from theano.gradient import disconnected_grad\n'), ((11910, 11943), 'theano.tensor.arange', 'tensor.arange', (['flat_data.shape[0]'], {}), '(flat_data.shape[0])\n', (11923, 11943), False, 'from theano import tensor\n'), ((14486, 14523), 'blocks.utils.dict_subset', 'dict_subset', (['inputs', 'self.merge_names'], {}), '(inputs, self.merge_names)\n', (14497, 14523), False, 'from blocks.utils import dict_subset, shared_floatx_nans\n'), ((15997, 16021), 'theano.gradient.disconnected_grad', 'disconnected_grad', (['probs'], {}), '(probs)\n', (16014, 16021), False, 'from theano.gradient import disconnected_grad\n'), ((2381, 2415), 'theano.gradient.disconnected_grad', 'disconnected_grad', (['prediction_mask'], {}), '(prediction_mask)\n', (2398, 2415), False, 'from theano.gradient import disconnected_grad\n'), ((2645, 2687), 'theano.gradient.disconnected_grad', 'disconnected_grad', (['centered_future_rewards'], {}), '(centered_future_rewards)\n', (2662, 2687), False, 'from theano.gradient import disconnected_grad\n'), ((13900, 13936), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['extra_inputs[0:1]'], {}), '(extra_inputs[0:1])\n', (13917, 13936), False, 'from theano import tensor\n'), ((19752, 19841), 'theano.gradient.disconnected_grad', 'disconnected_grad', (['(future_reward_estimate[1:] + rewards[:-1] - prediction_values[:-1])'], {}), '(future_reward_estimate[1:] + rewards[:-1] -\n prediction_values[:-1])\n', (19769, 19841), False, 'from theano.gradient import disconnected_grad\n')]
|
from abc import ABC

import numpy as np

from pydyn.base.expr import Expression, Expr, Manifold
from pydyn.operations.transpose import Transpose
from pydyn.utils.errors import UndefinedCaseError, ExpressionMismatchError


class MatrixExpr(Expr, ABC):
    def __init__(self):
        super().__init__()
        self.type = Expression.MATRIX

    def __str__(self):
        raise NotImplementedError

    def __add__(self, other):
        from pydyn.operations.addition import MAdd
        return MAdd(self, other)

    def __iadd__(self, other):
        from pydyn.operations.addition import MAdd
        return MAdd(self, other)

    def __mul__(self, other):
        from pydyn.operations.multiplication import SMMul, MVMul, MMMul
        from pydyn.base.scalars import Scalar
        if type(other) == float or type(other) == int:
            other = Scalar('(' + str(other) + ')', value=other, attr=['Constant'])
        if other.type == Expression.SCALAR:
            return SMMul(self, other)
        elif other.type == Expression.VECTOR:
            if type(other) == type(Transpose(None)):
                raise ExpressionMismatchError
            else:
                return MVMul(self, other)
        elif other.type == Expression.MATRIX:
            return MMMul(self, other)
        else:
            raise UndefinedCaseError


class Matrix(MatrixExpr, ABC):
    def __init__(self, s=None, size=(3, 3), value=None, attr=None):
        super().__init__()
        self.name = s
        self.size = size
        if value is None:
            self.value = np.empty(size, dtype='object')
        else:
            self.value = value
        if attr is None:
            self.attr = []
        else:
            self.attr = attr
        if 'SymmetricMatrix' in self.attr:
            self.isSymmetric = True
        else:
            self.isSymmetric = False

    def __str__(self):
        return self.name

    def delta(self):
        if self.isOnes or self.isZero or self.isConstant:
            return Matrix('O', attr=['Constant', 'Zero'])
        else:
            from pydyn.operations.geometry import Delta
            return Delta(self)

    def variation_vector(self):
        return self.delta()

    def diff(self):
        if self.isConstant:
            return Matrix(s='0', size=self.size, attr=['Constant', 'Zero'])
        else:
            return Matrix(s='dot_' + self.name, size=self.size)

    def integrate(self):
        if self.isConstant:
            raise NotImplementedError
        else:
            s = self.name
            if 'dot_' in s:
                # str.replace returns a new string; rebind it so the
                # 'dot_' prefix is actually stripped.
                s = s.replace('dot_', '')
                return Matrix(s=s, size=self.size)
            else:
                return Matrix(s='int_' + s, size=self.size)


class SkewSymmMatrix(Matrix, ABC):
    def __init__(self):
        super().__init__()
        self.attr.append('SkewSymmetry')


class SO3(Matrix, Manifold, ABC):
    def __init__(self, s=None, size=(3, 3), value=None, attr=None):
        super().__init__(s, size, value, attr)
        super(Manifold, self).__init__()
        self.tangent_vector = '\\Omega_{' + self.name + '}'
        self.variation_vector = '\\eta_{' + self.name + '}'
        if attr is None:
            attr = []
        attr.append('Manifold')
        self.attr = attr

    def delta(self):
        from pydyn.operations.multiplication import MMMul
        from pydyn.operations.geometry import Hat
        return MMMul(self, Hat(self.get_variation_vector()))

    def get_tangent_vector(self):
        from pydyn.base.vectors import TSO3
        return TSO3(self.tangent_vector, SO3=self)

    def get_variation_vector(self):
        from pydyn.base.vectors import Vector
        return Vector(self.variation_vector)

    def diff(self):
        from pydyn.operations.multiplication import MMMul
        from pydyn.operations.geometry import Hat
        return MMMul(self, Hat(self.get_tangent_vector()))


ZeroMatrix = Matrix('0', attr=['Constant', 'Zero'])
IdentityMatrix = Matrix('I', attr=['Constant', 'Identity'])
O = ZeroMatrix
I = IdentityMatrix


def getMatrices(x):
    if isinstance(x, list):
        vars_ = x
    elif isinstance(x, str):
        vars_ = x.split()
    else:
        return None
    s = []
    for v in vars_:
        s.append(Matrix(v))
    return tuple(s)
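
# Hedged usage sketch (assumes the pydyn operation nodes referenced above,
# e.g. MAdd/SMMul/MMMul, provide their own __str__ implementations outside
# this module):
#
#     A, B = getMatrices('A B')
#     expr = (A + B) * 2.0      # MAdd node scaled via SMMul
#     R = SO3('R')
#     dR = R.delta()            # MMMul(R, Hat(eta_R)), the SO(3) variation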
|
[
"pydyn.operations.transpose.Transpose",
"pydyn.base.vectors.TSO3",
"numpy.empty",
"pydyn.operations.geometry.Delta",
"pydyn.base.vectors.Vector",
"pydyn.operations.multiplication.MVMul",
"pydyn.operations.multiplication.MMMul",
"pydyn.operations.multiplication.SMMul",
"pydyn.operations.addition.MAdd"
] |
[((494, 511), 'pydyn.operations.addition.MAdd', 'MAdd', (['self', 'other'], {}), '(self, other)\n', (498, 511), False, 'from pydyn.operations.addition import MAdd\n'), ((610, 627), 'pydyn.operations.addition.MAdd', 'MAdd', (['self', 'other'], {}), '(self, other)\n', (614, 627), False, 'from pydyn.operations.addition import MAdd\n'), ((3573, 3608), 'pydyn.base.vectors.TSO3', 'TSO3', (['self.tangent_vector'], {'SO3': 'self'}), '(self.tangent_vector, SO3=self)\n', (3577, 3608), False, 'from pydyn.base.vectors import TSO3\n'), ((3707, 3736), 'pydyn.base.vectors.Vector', 'Vector', (['self.variation_vector'], {}), '(self.variation_vector)\n', (3713, 3736), False, 'from pydyn.base.vectors import Vector\n'), ((978, 996), 'pydyn.operations.multiplication.SMMul', 'SMMul', (['self', 'other'], {}), '(self, other)\n', (983, 996), False, 'from pydyn.operations.multiplication import SMMul, MVMul, MMMul\n'), ((1563, 1593), 'numpy.empty', 'np.empty', (['size'], {'dtype': '"""object"""'}), "(size, dtype='object')\n", (1571, 1593), True, 'import numpy as np\n'), ((2140, 2151), 'pydyn.operations.geometry.Delta', 'Delta', (['self'], {}), '(self)\n', (2145, 2151), False, 'from pydyn.operations.geometry import Delta\n'), ((1183, 1201), 'pydyn.operations.multiplication.MVMul', 'MVMul', (['self', 'other'], {}), '(self, other)\n', (1188, 1201), False, 'from pydyn.operations.multiplication import SMMul, MVMul, MMMul\n'), ((1267, 1285), 'pydyn.operations.multiplication.MMMul', 'MMMul', (['self', 'other'], {}), '(self, other)\n', (1272, 1285), False, 'from pydyn.operations.multiplication import MMMul\n'), ((1078, 1093), 'pydyn.operations.transpose.Transpose', 'Transpose', (['None'], {}), '(None)\n', (1087, 1093), False, 'from pydyn.operations.transpose import Transpose\n')]
|
#!/usr/bin/env python
from __future__ import print_function
from pyomo.environ import *
from pyomo.core.base import Constraint, Objective, Suffix, minimize
from pyomo.opt import ProblemFormat, SolverFactory
from nmpc_mhe.dync.DynGenv2 import DynGen
from sample_mods.bfb.nob5_hi_t import bfb_dae
from snapshots.snap_shot import snap
import sys, os
import itertools
from pyomo.opt import ReaderFactory, ResultsFormat
from numpy.random import normal as npm
import random
from shutil import copyfile

states = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
# x_noisy = ["Ngb", "Hgb", "Ngc", "Hgc", "Nsc", "Hsc", "Nge", "Hge", "Nse", "Hse", "mom"]
# x_noisy = ["Hse"]
x_noisy = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
u = ["u1"]
u_bounds = {"u1": (162.183495794 * 0.0005, 162.183495794 * 10000)}
ref_state = {("c_capture", ((),)): 0.50}
# ref_state = {("c_capture", ((),)): 0.66} nominal
# Known targets 0.38, 0.4, 0.5
nfe_mhe = 10
y = ["Tgb", "vg"]
nfet = 10
ncpx = 3
nfex = 5
tfe = [i for i in range(1, nfe_mhe + 1)]
lfe = [i for i in range(1, nfex + 1)]
lcp = [i for i in range(1, ncpx + 1)]
lc = ['c', 'h', 'n']
y_vars = {
    "Tgb": [i for i in itertools.product(lfe, lcp)],
    "vg": [i for i in itertools.product(lfe, lcp)]
}
# x_vars = dict()
x_vars = {
    # "Nge": [i for i in itertools.product(lfe, lcp, lc)],
    # "Hge": [i for i in itertools.product(lfe, lcp)],
    "Nsc": [i for i in itertools.product(lfe, lcp, lc)],
    "Hsc": [i for i in itertools.product(lfe, lcp)],
    "Nse": [i for i in itertools.product(lfe, lcp, lc)],
    "Hse": [i for i in itertools.product(lfe, lcp)],
    "Hgc": [i for i in itertools.product(lfe, lcp)],
    "Hge": [i for i in itertools.product(lfe, lcp)],
    # "mom": [i for i in itertools.product(lfe, lcp)]
}
# States -- (5 * 3 + 6) * fe_x * cp_x.
# For fe_x = 5 and cp_x = 3 we will have 315 differential-states.
s = DynGen(bfb_dae, 800/nfe_mhe, states, u, k_aug_executable="/home/dav0/k2/KKT_matrix/src/k_aug/k_aug")
# 10 fe & _t=1000 definitely degenerate
# 10 fe & _t=900 definitely degenerate
# 10 fe & _t=120 sort-of degenerate
# 10 fe & _t=50 sort-of degenerate
# 10 fe & _t=50 eventually sort-of degenerate
# 10 fe & _t=1 eventually sort-of degenerate
s.SteadyRef.dref = snap
s.load_iguess_steady()
s.SteadyRef.create_bounds()
s.get_state_vars()
s.SteadyRef.report_zL(filename="mult_ss")
s.load_d_s(s.PlantSample)
s.ipopt.solve(s.SteadyRef, keepfiles=True)
finame = s.ipopt._soln_file
cwd = os.getcwd()
filename = "/home/dav0/nmpc_mhe_q/testing/ref_ss.sol"
# copyfile(finame, cwd + "/ref_ss.sol")
with open("file_a", "w") as file:
    # Zero every variable and record the (zeroed) values; the with-block
    # closes the file on exit, so no explicit close() is needed.
    for var in s.SteadyRef.component_data_objects(Var):
        var.set_value(0)
        val = var.value
        file.write(str(val))
        file.write('\n')
reader = ReaderFactory(ResultsFormat.sol)
results = reader(filename)
_, smapid = s.SteadyRef.write("whathevs.nl", format=ProblemFormat.nl)
smap = s.SteadyRef.solutions.symbol_map[smapid]
results._smap = smap
s.SteadyRef.solutions.load_from(results)
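# The block above demonstrates pyomo's .sol round-trip: parse the file with
# ReaderFactory(ResultsFormat.sol), attach the symbol map produced while
# writing the .nl file, and load the results back into the model, restoring
# the variable values that were zeroed out before.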
with open("file_b", "w") as file:
    # Values reloaded from the .sol file; compare against file_a.
    for var in s.SteadyRef.component_data_objects(Var):
        val = var.value
        file.write(str(val))
        file.write('\n')
s.ipopt.solve(s.SteadyRef, tee=True, load_solutions=False, report_timing=True)
s.param_writer(s.SteadyRef, "gimmemyparams.json")
with open("params_a", "w") as file:
    for param in s.SteadyRef.component_data_objects(Param):
        file.write(str(param) + "\n")
for param in s.SteadyRef.component_data_objects(Param):
    # Assigning to .value (rather than rebinding the loop variable, which
    # has no effect on the model) is presumably what was intended here for
    # mutable Params.
    param.value = 0
with open("params_b", "w") as file:
    for param in s.SteadyRef.component_data_objects(Param):
        file.write(str(param) + "\n")
s.param_reader(s.SteadyRef, "gimmemyparams.json")
with open("params_c", "w") as file:
    for param in s.SteadyRef.component_data_objects(Param):
        file.write(str(param) + "\n")
# example
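# The three params_* files bracket the Param round-trip: params_a before
# zeroing, params_b after the attempted zeroing, params_c after restoring
# from gimmemyparams.json.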
|
[
"nmpc_mhe.dync.DynGenv2.DynGen",
"pyomo.opt.ReaderFactory",
"os.getcwd",
"itertools.product"
] |
[((1953, 2060), 'nmpc_mhe.dync.DynGenv2.DynGen', 'DynGen', (['bfb_dae', '(800 / nfe_mhe)', 'states', 'u'], {'k_aug_executable': '"""/home/dav0/k2/KKT_matrix/src/k_aug/k_aug"""'}), "(bfb_dae, 800 / nfe_mhe, states, u, k_aug_executable=\n '/home/dav0/k2/KKT_matrix/src/k_aug/k_aug')\n", (1959, 2060), False, 'from nmpc_mhe.dync.DynGenv2 import DynGen\n'), ((2536, 2547), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2545, 2547), False, 'import sys, os\n'), ((2861, 2893), 'pyomo.opt.ReaderFactory', 'ReaderFactory', (['ResultsFormat.sol'], {}), '(ResultsFormat.sol)\n', (2874, 2893), False, 'from pyomo.opt import ReaderFactory, ResultsFormat, ProblemFormat\n'), ((1167, 1194), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1184, 1194), False, 'import itertools, sys\n'), ((1219, 1246), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1236, 1246), False, 'import itertools, sys\n'), ((1438, 1469), 'itertools.product', 'itertools.product', (['lfe', 'lcp', 'lc'], {}), '(lfe, lcp, lc)\n', (1455, 1469), False, 'import itertools, sys\n'), ((1501, 1528), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1518, 1528), False, 'import itertools, sys\n'), ((1560, 1591), 'itertools.product', 'itertools.product', (['lfe', 'lcp', 'lc'], {}), '(lfe, lcp, lc)\n', (1577, 1591), False, 'import itertools, sys\n'), ((1623, 1650), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1640, 1650), False, 'import itertools, sys\n'), ((1682, 1709), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1699, 1709), False, 'import itertools, sys\n'), ((1741, 1768), 'itertools.product', 'itertools.product', (['lfe', 'lcp'], {}), '(lfe, lcp)\n', (1758, 1768), False, 'import itertools, sys\n')]
|
#!/usr/bin/env python
from obfsproxy.transports.base import BaseTransport
import obfsproxy.common.log as logging
from obfsproxy.network.buffer import Buffer
from bananaphone import rh_build_encoder_factory, rh_decoder

log = logging.get_obfslogger()


class BananaphoneTransport(BaseTransport):

    def __init__(self, transport_config):
        super(BananaphoneTransport, self).__init__()
        if transport_config.is_managed_mode and self.initiator:
            # managed client configured in handle_socks_args()
            return
        self.encode = self.encoder_factory()
        self.decode = self.decoder_factory()

    @classmethod
    def get_codec_factories(cls, encodingSpec, modelName, corpus, order, abridged):
        if modelName == 'markov':
            args = [corpus, order, abridged]
        elif modelName == 'random':
            args = [corpus]
        else:
            log.error("BananaphoneTransport: unsupported model type")
            return
        # expensive model building operation
        log.warning("Bananaphone: building encoder %s model" % modelName)
        encoder_factory = rh_build_encoder_factory(encodingSpec, modelName, *args)
        decoder_factory = lambda: rh_decoder(encodingSpec)
        return (encoder_factory, decoder_factory)

    def handle_socks_args(self, args):
        # client case, managed mode
        if not args:
            log.error("BananaphoneTransport: must specify server transport options")
            return
        for arg in args:
            key, value = arg.split('=')
            setattr(self, key, value)
        # BUG: modify bananaphone.py to
        # accept the abridged arg as boolean?
        if hasattr(self, 'abridged'):
            self.abridged = '--abridged'
        else:
            # this is the only transport option which has a default value
            self.abridged = None
        encoder_factory, decoder_factory = self.get_codec_factories(
            self.encodingSpec, self.modelName, self.corpus, self.order,
            self.abridged)
        self.encode = encoder_factory()
        self.decode = decoder_factory()

    @classmethod
    def setup(cls, transport_config):
        if transport_config.is_managed_mode and cls.initiator:
            # managed client configured in handle_socks_args()
            return
        if transport_config.is_managed_mode:
            transport_options = transport_config.getServerTransportOptions()
            if not transport_options:
                log.error("BananaphoneTransport: must specify server transport options")
                return
            # XXX server case, managed mode is used
            for key in transport_options.keys():
                setattr(cls, key, transport_options[key])
        # BUG: modify bananaphone.py to
        # accept the abridged arg as boolean?
        if hasattr(cls, 'abridged'):
            cls.abridged = '--abridged'
        else:
            # this is the only transport option which has a default value
            cls.abridged = None
        encoder_factory, decoder_factory = cls.get_codec_factories(
            cls.encodingSpec, cls.modelName, cls.corpus, cls.order, cls.abridged)
        cls.encoder_factory = staticmethod(encoder_factory)
        cls.decoder_factory = staticmethod(decoder_factory)

    @classmethod
    def get_public_server_options(cls, transport_options):
        """Only tell BridgeDB about our encodingSpec transport option."""
        return dict(encodingSpec=transport_options['encodingSpec'])

    def circuitConnected(self):
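        # bananaphone codec coroutines overload '>' to attach a sink:
        # everything sent into the codec is forwarded to the write callable.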
        self.encoder = self.encode > self.circuit.downstream.write
        self.decoder = self.decode > self.circuit.upstream.write

    def receivedDownstream(self, data):
        self.decoder.send(data.read())

    def receivedUpstream(self, data):
        self.encoder.send(data.read())

    # XXX these options are mandatory
    @classmethod
    def register_external_mode_cli(cls, subparser):
        subparser.add_argument('--corpus', type=str, help='Corpus file of words')
        subparser.add_argument('--encoding_spec', type=str, dest='encodingSpec',
                               help='reverse hash encoding specification')
        subparser.add_argument('--model', type=str, dest='modelName')
        subparser.add_argument('--order', type=int)
        subparser.add_argument('--abridged', action='store_true', default=False)
        super(BananaphoneTransport, cls).register_external_mode_cli(subparser)

    @classmethod
    def validate_external_mode_cli(cls, args):
        # XXX client/server case, external mode
        cls.corpus = args.corpus
        cls.encodingSpec = args.encodingSpec
        cls.modelName = args.modelName
        cls.order = args.order
        cls.abridged = args.abridged
        super(BananaphoneTransport, cls).validate_external_mode_cli(args)


class BananaphoneClient(BananaphoneTransport):
    initiator = True


class BananaphoneServer(BananaphoneTransport):
    initiator = False
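
# Hedged usage note (not part of the original module): in external mode the
# transport would be launched with flags along the lines of
#   --corpus /usr/share/dict/words --encoding_spec words,sha1,4 \
#   --model markov --order 1
# where the exact encodingSpec grammar is defined by bananaphone itself.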
|
[
"bananaphone.rh_decoder",
"obfsproxy.common.log.get_obfslogger",
"bananaphone.rh_build_encoder_factory"
] |
[((228, 252), 'obfsproxy.common.log.get_obfslogger', 'logging.get_obfslogger', ([], {}), '()\n', (250, 252), True, 'import obfsproxy.common.log as logging\n'), ((1130, 1186), 'bananaphone.rh_build_encoder_factory', 'rh_build_encoder_factory', (['encodingSpec', 'modelName', '*args'], {}), '(encodingSpec, modelName, *args)\n', (1154, 1186), False, 'from bananaphone import rh_build_encoder_factory, rh_decoder\n'), ((1221, 1245), 'bananaphone.rh_decoder', 'rh_decoder', (['encodingSpec'], {}), '(encodingSpec)\n', (1231, 1245), False, 'from bananaphone import rh_build_encoder_factory, rh_decoder\n')]
|
import statistics
power_arr = [4, 5, 2, 6, 3, 7, 8, 9, 6, 5, 2]
median = statistics.median(power_arr)
print("The median is", median)
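# Sorted, the 11 values are [2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9]; the middle
# (6th) element is 5, so the script prints: The median is 5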
|
[
"statistics.median"
] |
[((73, 101), 'statistics.median', 'statistics.median', (['power_arr'], {}), '(power_arr)\n', (90, 101), False, 'import statistics\n')]
|
from __future__ import unicode_literals, print_function
import pickle
import random
from pathlib import Path

import spacy
from spacy.util import minibatch, compounding


def ner_trainig():
    with open(r'C:\Users\Akash\Desktop\be_proj\Data\NER_training_data.pickle', 'rb') as fp:
        TRAIN_DATA = pickle.load(fp)
    LABEL = ['BRANCH']
    nlp = spacy.blank('en')  # created a blank model
    ner = nlp.create_pipe('ner')  # created a pipeline ner
    nlp.add_pipe(ner)  # added NER to the spaCy pipeline
    for l in LABEL:
        ner.add_label(l)  # added labels for the component
    optimizer = nlp.begin_training()
    n_iter = 10
    pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    with nlp.disable_pipes(*other_pipes):  # only train NER
        for itn in range(n_iter):
            random.shuffle(TRAIN_DATA)  # shuffled the dataset
            losses = {}
            batches = minibatch(TRAIN_DATA, size=compounding(1.0, 4.0, 1.001))
            for batch in batches:
                texts, annotations = zip(*batch)
                nlp.update(texts, annotations, sgd=optimizer,
                           drop=0.35,
                           losses=losses)
            print('Losses', losses)
    # test_text = "how many electronics students are there in IT branch ?"
    # doc = nlp(test_text)
    # print("Entities in '%s'" % test_text)
    # for ent in doc.ents:
    #     print(ent.label_, ent.text)
    nlp.meta["name"] = "NER_mod"  # rename model
    nlp.to_disk(r"D:\projects\col_chatbot - Copy\college_bot\NERdata")


# text = "how many electronics students are there in IT branch ?"
# doc = model(text)
# print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
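# Hedged follow-up (not part of the original): the directory written by
# to_disk() can be loaded back with spaCy's standard loader, e.g.
#   model = spacy.load(r"D:\projects\col_chatbot - Copy\college_bot\NERdata")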
|
[
"pickle.load",
"random.shuffle",
"spacy.blank",
"spacy.util.compounding"
] |
[((351, 368), 'spacy.blank', 'spacy.blank', (['"""en"""'], {}), "('en')\n", (362, 368), False, 'import spacy\n'), ((301, 316), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (312, 316), False, 'import pickle\n'), ((887, 913), 'random.shuffle', 'random.shuffle', (['TRAIN_DATA'], {}), '(TRAIN_DATA)\n', (901, 913), False, 'import random\n'), ((1017, 1045), 'spacy.util.compounding', 'compounding', (['(1.0)', '(4.0)', '(1.001)'], {}), '(1.0, 4.0, 1.001)\n', (1028, 1045), False, 'from spacy.util import minibatch, compounding\n')]
|