code | apis | extract_api
|---|---|---|
import sqlite3
X = sqlite3.connect('NeDB.db')
Y = X.cursor()
Y.execute('''CREATE TABLE IF NOT EXISTS EMPLOYEE (
ID integer,
Name text NOT NULL,
Date_Join text,
Place text,
Age integer,
Salary real);''')
Y.execute('''INSERT INTO Employee VALUES (1,'John','2020-03-01','Kerala',32,25000),(2,'Adam','2020-01-01','TN',22,30000),(3,'Mary','2022-01-01','Karnataka',24,120000)
,(4,'Jacob','2022-01-01','Maharashtra',24,430000),(5,'Johny','2022-01-01','Karnataka',24,34000),(6,'Lynda','2022-01-01','Delhi',24,56700),
(7,'Smith','2022-01-01','Kerala',24,234000),(8,'Gem','2022-01-01','Karnataka',24,120000)''')
data = Y.execute("SELECT * FROM Employee LIMIT 2 OFFSET 4")
for k in data:
    print(k)
X.commit()
Y.close()
| [
"sqlite3.connect"
] | [((25, 51), 'sqlite3.connect', 'sqlite3.connect', (['"""NeDB.db"""'], {}), "('NeDB.db')\n", (40, 51), False, 'import sqlite3\n')] |
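The row above splices literal values into the INSERT statement; a hedged variant of the same example using placeholder binding (`executemany` with `?`) is sketched below. The table and database names are reused from the example, while the sample rows are made up.

```python
import sqlite3

# Sketch only: same EMPLOYEE table as above, but rows are bound as parameters
# instead of being interpolated into the SQL string.
conn = sqlite3.connect('NeDB.db')
cur = conn.cursor()
rows = [
    (9, 'Alice', '2022-02-01', 'Goa', 29, 45000.0),
    (10, 'Bob', '2022-02-01', 'Kerala', 31, 52000.0),
]
cur.executemany("INSERT INTO EMPLOYEE VALUES (?,?,?,?,?,?)", rows)
conn.commit()
# Same LIMIT/OFFSET pagination as in the example: rows 5-6 of the table.
for row in cur.execute("SELECT * FROM EMPLOYEE LIMIT 2 OFFSET 4"):
    print(row)
conn.close()
```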
import os
import sys
from gensim.scripts.glove2word2vec import glove2word2vec
from kge.misc import kge_base_dir
def _convert_to_word2vec(
filename: str
):
"""
Convert file of pretrained embeddings in GloVe format to word2vec format.
"""
folder = os.path.join(kge_base_dir(), "pretrained")
input_file = os.path.join(folder, filename)
index = filename.rindex(".")
output_file = filename[0:index] + "_word2vec" + filename[index:len(filename)]
output_file = os.path.join(folder, output_file)
glove2word2vec(input_file, output_file)
# give file name in folder 'pretrained' as first command line argument
if __name__ == '__main__':
_convert_to_word2vec(sys.argv[1])
| [
"os.path.join",
"gensim.scripts.glove2word2vec.glove2word2vec",
"kge.misc.kge_base_dir"
] | [((334, 364), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (346, 364), False, 'import os\n'), ((498, 531), 'os.path.join', 'os.path.join', (['folder', 'output_file'], {}), '(folder, output_file)\n', (510, 531), False, 'import os\n'), ((536, 575), 'gensim.scripts.glove2word2vec.glove2word2vec', 'glove2word2vec', (['input_file', 'output_file'], {}), '(input_file, output_file)\n', (550, 575), False, 'from gensim.scripts.glove2word2vec import glove2word2vec\n'), ((287, 301), 'kge.misc.kge_base_dir', 'kge_base_dir', ([], {}), '()\n', (299, 301), False, 'from kge.misc import kge_base_dir\n')] |
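After conversion, the word2vec-format file can be loaded back with gensim's KeyedVectors. A small sketch follows; the input/output file names and the local `pretrained` folder are assumptions for illustration, matching the `_word2vec` suffix the function above inserts before the extension.

```python
# Sketch under assumed file names; only the gensim loading call is essential.
import os
from gensim.models import KeyedVectors

folder = "pretrained"  # stands in for kge_base_dir()/pretrained
vectors = KeyedVectors.load_word2vec_format(
    os.path.join(folder, "glove.6B.50d_word2vec.txt"), binary=False
)
print(vectors.most_similar("king", topn=3))
```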
# Generated by Django 2.1.7 on 2019-02-14 13:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('active', models.BooleanField(default=True, verbose_name='ativo')),
('status', models.IntegerField(blank=True, choices=[(0, 'Aberto'), (1, 'Enviado'), (2, 'Finalizado'), (3, 'Cancelado')], default=0, verbose_name='Situação')),
('parceiro', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='parceiro')),
],
options={
'verbose_name': 'pedido',
'verbose_name_plural': 'pedidos',
},
),
migrations.CreateModel(
name='PedidoItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((435, 528), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (451, 528), False, 'from django.db import migrations, models\n'), ((555, 620), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""criado em"""'}), "(auto_now_add=True, verbose_name='criado em')\n", (575, 620), False, 'from django.db import migrations, models\n'), ((652, 717), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""modificado em"""'}), "(auto_now=True, verbose_name='modificado em')\n", (672, 717), False, 'from django.db import migrations, models\n'), ((747, 802), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""ativo"""'}), "(default=True, verbose_name='ativo')\n", (766, 802), False, 'from django.db import migrations, models\n'), ((832, 981), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'choices': "[(0, 'Aberto'), (1, 'Enviado'), (2, 'Finalizado'), (3, 'Cancelado')]", 'default': '(0)', 'verbose_name': '"""Situação"""'}), "(blank=True, choices=[(0, 'Aberto'), (1, 'Enviado'), (2,\n 'Finalizado'), (3, 'Cancelado')], default=0, verbose_name='Situação')\n", (851, 981), False, 'from django.db import migrations, models\n'), ((1009, 1130), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""parceiro"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='parceiro')\n", (1026, 1130), False, 'from django.db import migrations, models\n'), ((1390, 1483), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1406, 1483), False, 'from django.db import migrations, models\n')] |
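For context, a models.py that would plausibly produce this initial migration is sketched below; the application's actual models are not part of the source, so the class bodies are reconstructed from the migration's fields.

```python
# Hypothetical models.py reconstructed from the migration above.
from django.conf import settings
from django.db import models


class Pedido(models.Model):
    STATUS_CHOICES = [(0, 'Aberto'), (1, 'Enviado'), (2, 'Finalizado'), (3, 'Cancelado')]

    created = models.DateTimeField('criado em', auto_now_add=True)
    modified = models.DateTimeField('modificado em', auto_now=True)
    active = models.BooleanField('ativo', default=True)
    status = models.IntegerField('Situação', blank=True, choices=STATUS_CHOICES, default=0)
    parceiro = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                 verbose_name='parceiro')

    class Meta:
        verbose_name = 'pedido'
        verbose_name_plural = 'pedidos'


class PedidoItem(models.Model):
    pass
```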
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 <NAME>
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Debug utilities"""
import inspect
import traceback
import time
def log_time(fd):
timestr = "Logging time: %s" % time.ctime(time.time())
print >>fd, "="*len(timestr)
print >>fd, timestr
print >>fd, "="*len(timestr)
print >>fd, ""
def log_last_error(fname, context=None):
"""Log last error in filename *fname* -- *context*: string (optional)"""
fd = open(fname, 'a')
log_time(fd)
if context:
print >>fd, "Context"
print >>fd, "-------"
print >>fd, ""
print >>fd, context
print >>fd, ""
print >>fd, "Traceback"
print >>fd, "---------"
print >>fd, ""
traceback.print_exc(file=fd)
print >>fd, ""
print >>fd, ""
def log_dt(fname, context, t0):
fd = open(fname, 'a')
log_time(fd)
print >>fd, "%s: %d ms" % (context, 10*round(1e2*(time.time()-t0)))
print >>fd, ""
print >>fd, ""
def caller_name(skip=2):
"""Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
"""
stack = inspect.stack()
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start][0]
name = []
module = inspect.getmodule(parentframe)
# `modname` can be None when frame is executed directly in console
# TODO(techtonik): consider using __main__
if module:
name.append(module.__name__)
# detect classname
if 'self' in parentframe.f_locals:
# I don't know any way to detect call from the object method
# XXX: there seems to be no way to detect static method call - it will
# be just a function call
name.append(parentframe.f_locals['self'].__class__.__name__)
codename = parentframe.f_code.co_name
if codename != '<module>': # top level usually
name.append( codename ) # function or a method
del parentframe
return ".".join(name)
| [
"inspect.getmodule",
"traceback.print_exc",
"time.time",
"inspect.stack"
] | [((837, 865), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'fd'}), '(file=fd)\n', (856, 865), False, 'import traceback\n'), ((1459, 1474), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1472, 1474), False, 'import inspect\n'), ((1620, 1650), 'inspect.getmodule', 'inspect.getmodule', (['parentframe'], {}), '(parentframe)\n', (1637, 1650), False, 'import inspect\n'), ((293, 304), 'time.time', 'time.time', ([], {}), '()\n', (302, 304), False, 'import time\n'), ((1041, 1052), 'time.time', 'time.time', ([], {}), '()\n', (1050, 1052), False, 'import time\n')] |
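As an illustration of the skip semantics described in the `caller_name` docstring, here is a small hedged driver; the module name `debug` is an assumption for the file above.

```python
# Hypothetical driver for caller_name(); the module name "debug" is assumed.
from debug import caller_name


class Worker:
    def run(self):
        print(caller_name(skip=1))  # who calls caller_name -> "__main__.Worker.run"
        print(caller_name(skip=2))  # who calls Worker.run  -> "__main__.main"


def main():
    Worker().run()


if __name__ == '__main__':
    main()
```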
import smtplib
import sys
import time
from datetime import datetime
# variable
trigger = 0
myName = "<NAME>"
myEmail = "<EMAIL>"
myPass = "<PASSWORD>"
myEmailSMTP = "smtp.yourEmailProvider.com" #for gmail: smtp.gmail.com for outlook: smtp.office365.com
mySMTPPort = 587
receivers = {"receiver name": "<EMAIL>"}
emailSubject = "I'm Pytomation Mail"
emailBody = """
Hello there,
Feel free to use this Pytomation Mail and modify it \
based on your needs
Thanks and Regards,
<NAME>
"""
# function
def initial_setup():
try:
broad_caster = smtplib.SMTP(myEmailSMTP, mySMTPPort)
broad_caster.ehlo()
broad_caster.starttls()
broad_caster.login(myEmail, myPass)
except IOError as err:
print(str(err))
time.sleep(1.0)
sys.exit()
return broad_caster
def get_date_time():
date_and_time = datetime.now()
str_date_time = date_and_time.strftime('%b %-d,%Y, %-I:%M%p')
return str_date_time
def get_sender(sender_name, sender_email):
from_sender = "from:" + " " + sender_name + " " + "<" + sender_email + ">"
return from_sender
def get_receiver(receiver_name, receiver_email):
to_receiver = "to:" + " " + receiver_name + " " + "<" + receiver_email + ">"
return to_receiver
def get_email_message(email_subject, email_body):
email_message = "subject:" + " " + email_subject + "\n" + email_body
return email_message
if __name__ == "__main__":
broadCaster = initial_setup()
if trigger >= len(receivers):
print("Enter receiver name and email next time")
else:
for receiverName, receiverEmail in receivers.items():
fromSender = get_sender(myName, myEmail)
toReceiver = get_receiver(receiverName, receiverEmail)
emailMessage = get_email_message(emailSubject, emailBody)
messenger = fromSender + "\n" + toReceiver + "\n" + emailMessage
broadCaster.sendmail(myEmail, receiverEmail, messenger)
sendDateTime = get_date_time()
print("e-mail sent successfully to {} at {} \n".format(receiverName, sendDateTime))
broadCaster.quit() | [
"datetime.datetime.now",
"smtplib.SMTP",
"time.sleep",
"sys.exit"
] | [((855, 869), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (867, 869), False, 'from datetime import datetime\n'), ((552, 589), 'smtplib.SMTP', 'smtplib.SMTP', (['myEmailSMTP', 'mySMTPPort'], {}), '(myEmailSMTP, mySMTPPort)\n', (564, 589), False, 'import smtplib\n'), ((753, 768), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (763, 768), False, 'import time\n'), ((777, 787), 'sys.exit', 'sys.exit', ([], {}), '()\n', (785, 787), False, 'import sys\n')] |
import Tkinter
parent_widget = Tkinter.Tk()
scale_widget = Tkinter.Scale(parent_widget, from_=0, to=100,
orient=Tkinter.HORIZONTAL)
scale_widget.set(25)
scale_widget.pack()
Tkinter.mainloop() | [
"Tkinter.Tk",
"Tkinter.mainloop",
"Tkinter.Scale"
] | [((31, 43), 'Tkinter.Tk', 'Tkinter.Tk', ([], {}), '()\n', (41, 43), False, 'import Tkinter\n'), ((59, 131), 'Tkinter.Scale', 'Tkinter.Scale', (['parent_widget'], {'from_': '(0)', 'to': '(100)', 'orient': 'Tkinter.HORIZONTAL'}), '(parent_widget, from_=0, to=100, orient=Tkinter.HORIZONTAL)\n', (72, 131), False, 'import Tkinter\n'), ((202, 220), 'Tkinter.mainloop', 'Tkinter.mainloop', ([], {}), '()\n', (218, 220), False, 'import Tkinter\n')] |
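A hedged extension of the same snippet, reading the slider back through the Scale widget's `command` callback and `get()` method; the callback name `on_move` is illustrative.

```python
import Tkinter

# Sketch: echo the slider value as it moves; on_move is a hypothetical name.
def on_move(value):
    print("scale is now %s" % value)

root = Tkinter.Tk()
scale = Tkinter.Scale(root, from_=0, to=100,
                      orient=Tkinter.HORIZONTAL, command=on_move)
scale.set(25)
scale.pack()
print(scale.get())  # read the current position directly
Tkinter.mainloop()
```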
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 24 14:12:38 2022
@author: j64280
"""
import alluvial
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm
list_2017 = pd.read_csv('parrainagestotal_2017.csv',sep=';')
list_2022 = pd.read_csv('parrainagestotal_2022.csv',sep=';')
list_2017['Candidat 2022']='Pas de parrainage en 2022'
n_2017,_=list_2017.shape
n_2022,_=list_2022.shape
n=0
for i in range(n_2017):
ind = ((list_2022.Nom == list_2017.Nom.iloc[i]) & (list_2022.Prénom == list_2017.Prénom.iloc[i])
& (list_2022.Département == list_2017.Département.iloc[i]) & (list_2022.Circonscription == list_2017.Circonscription.iloc[i]))
if ind.any():
list_2017['Candidat 2022'].iloc[i] = list_2022[ind].Candidat.values[0]
list_2017['Candidat-e parrainé-e'].iloc[i] = list_2017['Candidat-e parrainé-e'].iloc[i] + ' '
conserve_2017 = ['<NAME> ','<NAME> ', '<NAME> ','<NAME> ',
'<NAME> ','<NAME> ',
'<NAME> ','<NAME> ','<NAME> ','ASSELINEAU François ',
'<NAME> ','<NAME> ','<NAME> ','<NAME> ','<NAME> ',
'ALLIOT-<NAME> ','<NAME> ','<NAME> ','<NAME> ',
'<NAME> ','<NAME> ','<NAME> ']
conserve_2022 = ['<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','ASSELINEAU François','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>']
list_2017_filtered = list_2017[list_2017['Candidat 2022']!='Pas de parrainage en 2022']
n_2017_filtered,_=list_2017_filtered.shape
couple_par = []
for i in range(n_2017_filtered):
if list_2017_filtered['Candidat-e parrainé-e'].iloc[i] in conserve_2017:
if list_2017_filtered['Candidat 2022'].iloc[i] in conserve_2022:
couple_par.append([list_2017_filtered['Candidat-e parrainé-e'].iloc[i], list_2017_filtered['Candidat 2022'].iloc[i]])
#%%
cmap = matplotlib.cm.get_cmap('jet')
ax = alluvial.plot(
couple_par, alpha=0.8, color_side=0, rand_seed=4, figsize=(10,15),
disp_width=True, wdisp_sep=' '*2, fontname='Monospace',
colors = cmap(np.linspace(0,8,len(conserve_2017)) % 1),
a_sort=conserve_2017[::-1],b_sort=conserve_2022[::-1])
ax.set_title('Transferts de parrainage entre 2017 et 2022', fontsize=14, fontname='Monospace')
plt.text(1.1,-150,'@Alexandre_Goupy')
plt.savefig('report_signatures.png',bbox_inches='tight',dpi=200)#,transparent=True) | [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"pandas.read_csv"
] | [((232, 281), 'pandas.read_csv', 'pd.read_csv', (['"""parrainagestotal_2017.csv"""'], {'sep': '""";"""'}), "('parrainagestotal_2017.csv', sep=';')\n", (243, 281), True, 'import pandas as pd\n'), ((293, 342), 'pandas.read_csv', 'pd.read_csv', (['"""parrainagestotal_2022.csv"""'], {'sep': '""";"""'}), "('parrainagestotal_2022.csv', sep=';')\n", (304, 342), True, 'import pandas as pd\n'), ((2393, 2432), 'matplotlib.pyplot.text', 'plt.text', (['(1.1)', '(-150)', '"""@Alexandre_Goupy"""'], {}), "(1.1, -150, '@Alexandre_Goupy')\n", (2401, 2432), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2497), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""report_signatures.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('report_signatures.png', bbox_inches='tight', dpi=200)\n", (2442, 2497), True, 'import matplotlib.pyplot as plt\n')] |
"""
Icegrams: A trigrams library for Icelandic
CFFI builder for _trie module
Copyright (C) 2020 <NAME>.
Original author: <NAME>
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This module only runs at setup/installation time. It is invoked
from setup.py as requested by the cffi_modules=[] parameter of the
setup() function. It causes the _trie.*.so CFFI wrapper library
to be built from its source in trie.cpp.
"""
import os
import platform
import cffi
# Don't change the name of this variable unless you
# change it in setup.py as well
ffibuilder = cffi.FFI()
_PATH = os.path.dirname(__file__) or "."
WINDOWS = platform.system() == "Windows"
MACOS = platform.system() == "Darwin"
# What follows is the actual Python-wrapped C interface to trie.*.so
# It must be kept in sync with trie.h
declarations = """
typedef unsigned int UINT;
typedef uint8_t BYTE;
typedef uint32_t UINT32;
typedef uint64_t UINT64;
typedef void VOID;
UINT mapping(const BYTE* pbMap, const BYTE* pbWord);
UINT bitselect(const BYTE* pb, UINT n);
UINT retrieve(const BYTE* pb, UINT nStart, UINT n);
UINT lookupFrequency(const BYTE* pb,
UINT nQuantumSize, UINT nIndex);
UINT64 lookupMonotonic(const BYTE* pb,
UINT nQuantumSize, UINT nIndex);
VOID lookupPairMonotonic(const BYTE* pb,
UINT nQuantumSize, UINT nIndex,
UINT64* pn1, UINT64* pn2);
UINT64 lookupPartition(const BYTE* pb,
UINT nOuterQuantum, UINT nInnerQuantum, UINT nIndex);
VOID lookupPairPartition(const BYTE* pb,
UINT nQuantumSize, UINT nInnerQuantum, UINT nIndex,
UINT64* pn1, UINT64* pn2);
UINT searchMonotonic(const BYTE* pb,
UINT nQuantumSize, UINT nP1, UINT nP2, UINT64 n);
UINT searchMonotonicPrefix(const BYTE* pb,
UINT nQuantumSize, UINT nP1, UINT nP2, UINT64 n);
UINT searchPartition(const BYTE* pb,
UINT nOuterQuantum, UINT nInnerQuantum,
UINT nP1, UINT nP2, UINT64 n);
UINT searchPartitionPrefix(const BYTE* pb,
UINT nOuterQuantum, UINT nInnerQuantum,
UINT nP1, UINT nP2, UINT64 n);
"""
# Do the magic CFFI incantations necessary to get CFFI and setuptools
# to compile trie.cpp at setup time, generate a .so library and
# wrap it so that it is callable from Python and PyPy as _trie
if WINDOWS:
extra_compile_args = ["/Zc:offsetof-"]
elif MACOS:
os.environ["CFLAGS"] = "-stdlib=libc++" # Fixes PyPy build on macOS 10.15.6+
extra_compile_args = ["-mmacosx-version-min=10.7", "-stdlib=libc++"]
else:
# Adding -O3 to the compiler arguments doesn't seem to make
# any discernible difference in lookup speed
extra_compile_args = ["-std=c++11"]
ffibuilder.set_source(
"icegrams._trie",
# trie.cpp is written in C++ but must export a pure C interface.
# This is the reason for the "extern 'C' { ... }" wrapper.
'extern "C" {\n' + declarations + "\n}\n",
source_extension=".cpp",
sources=["src/icegrams/trie.cpp"],
extra_compile_args=extra_compile_args,
)
ffibuilder.cdef(declarations)
if __name__ == "__main__":
ffibuilder.compile(verbose=False)
| [
"os.path.dirname",
"platform.system",
"cffi.FFI"
] | [((1749, 1759), 'cffi.FFI', 'cffi.FFI', ([], {}), '()\n', (1757, 1759), False, 'import cffi\n'), ((1769, 1794), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1784, 1794), False, 'import os\n'), ((1812, 1829), 'platform.system', 'platform.system', ([], {}), '()\n', (1827, 1829), False, 'import platform\n'), ((1851, 1868), 'platform.system', 'platform.system', ([], {}), '()\n', (1866, 1868), False, 'import platform\n')] |
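The module docstring above says the builder is driven from setup.py through the cffi_modules parameter; a sketch of what that wiring typically looks like is below. The file path `src/icegrams/build_trie.py`, the package metadata, and the version pins are assumptions for illustration — only the `"file.py:ffi_variable"` form of the cffi_modules entry is fixed by cffi, and the variable name must match the `ffibuilder` defined above.

```python
# Hypothetical setup.py excerpt; paths, name and pins are illustrative only.
from setuptools import setup, find_packages

setup(
    name="icegrams",
    packages=find_packages("src"),
    package_dir={"": "src"},
    # cffi_modules entries use the "path/to/module.py:ffi_variable" form.
    cffi_modules=["src/icegrams/build_trie.py:ffibuilder"],
    setup_requires=["cffi>=1.12"],
    install_requires=["cffi>=1.12"],
)
```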
import json
import platform
import gym
import six.moves.urllib as urlparse
from six import iteritems
from environments.mujoco.rand_param_envs.gym import error, version
from environments.mujoco.rand_param_envs.gym.scoreboard.client import http_client
verify_ssl_certs = True # [SECURITY CRITICAL] only turn this off while debugging
http_client = http_client.RequestsClient(verify_ssl_certs=verify_ssl_certs)
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def _strip_nulls(params):
if isinstance(params, dict):
stripped = {}
for key, value in iteritems(params):
value = _strip_nulls(value)
if value is not None:
stripped[key] = value
return stripped
else:
return params
class APIRequestor(object):
def __init__(self, key=None, api_base=None):
self.api_base = api_base or gym.scoreboard.api_base
self.api_key = key
self._client = http_client
def request(self, method, url, params=None, headers=None):
rbody, rcode, rheaders, my_api_key = self.request_raw(
method.lower(), url, params, headers
)
resp = self.interpret_response(rbody, rcode, rheaders)
return resp, my_api_key
def handle_api_error(self, rbody, rcode, resp, rheaders):
# Rate limits were previously coded as 400's with code 'rate_limit'
if rcode == 429:
raise error.RateLimitError(resp.get("detail"), rbody, rcode, resp, rheaders)
elif rcode in [400, 404]:
type = resp.get("type")
if type == "about:blank":
type = None
raise error.InvalidRequestError(
resp.get("detail"), type, rbody, rcode, resp, rheaders
)
elif rcode == 401:
raise error.AuthenticationError(
resp.get("detail"), rbody, rcode, resp, rheaders
)
else:
detail = resp.get("detail")
# This information will only be returned to developers of
# the OpenAI Gym Scoreboard.
dev_info = resp.get("dev_info")
if dev_info:
detail = "{}\n\n<dev_info>\n{}\n</dev_info>".format(
detail, dev_info["traceback"]
)
raise error.APIError(detail, rbody, rcode, resp, rheaders)
def request_raw(self, method, url, params=None, supplied_headers=None):
"""
Mechanism for issuing an API call
"""
if self.api_key:
my_api_key = self.api_key
else:
my_api_key = gym.scoreboard.api_key
if my_api_key is None:
raise error.AuthenticationError(
"""You must provide an OpenAI Gym API key.
(HINT: Set your API key using "gym.scoreboard.api_key = .." or "export OPENAI_GYM_API_KEY=..."). You can find your API key in the OpenAI Gym web interface: https://gym.openai.com/settings/profile."""
)
abs_url = "%s%s" % (self.api_base, url)
if params:
encoded_params = json.dumps(_strip_nulls(params))
else:
encoded_params = None
if method == "get" or method == "delete":
if params:
abs_url = _build_api_url(abs_url, encoded_params)
post_data = None
elif method == "post":
post_data = encoded_params
else:
raise error.APIConnectionError(
"Unrecognized HTTP method %r. This may indicate a bug in the "
"OpenAI Gym bindings. Please contact <EMAIL> for "
"assistance." % (method,)
)
ua = {
"bindings_version": version.VERSION,
"lang": "python",
"publisher": "openai",
"httplib": self._client.name,
}
for attr, func in [
["lang_version", platform.python_version],
["platform", platform.platform],
]:
try:
val = func()
except Exception as e:
val = "!! %s" % (e,)
ua[attr] = val
headers = {
"Openai-Gym-User-Agent": json.dumps(ua),
"User-Agent": "Openai-Gym/v1 PythonBindings/%s" % (version.VERSION,),
"Authorization": "Bearer %s" % (my_api_key,),
}
if method == "post":
headers["Content-Type"] = "application/json"
if supplied_headers is not None:
for key, value in supplied_headers.items():
headers[key] = value
rbody, rcode, rheaders = self._client.request(
method, abs_url, headers, post_data
)
return rbody, rcode, rheaders, my_api_key
def interpret_response(self, rbody, rcode, rheaders):
content_type = rheaders.get("Content-Type", "")
if content_type.startswith("text/plain"):
# Pass through plain text
resp = rbody
if not (200 <= rcode < 300):
self.handle_api_error(rbody, rcode, {}, rheaders)
else:
# TODO: Be strict about other Content-Types
try:
if hasattr(rbody, "decode"):
rbody = rbody.decode("utf-8")
resp = json.loads(rbody)
except Exception:
raise error.APIError(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody,
rcode,
rheaders,
)
if not (200 <= rcode < 300):
self.handle_api_error(rbody, rcode, resp, rheaders)
return resp
| [
"json.loads",
"six.moves.urllib.urlsplit",
"environments.mujoco.rand_param_envs.gym.scoreboard.client.http_client.RequestsClient",
"json.dumps",
"environments.mujoco.rand_param_envs.gym.error.APIConnectionError",
"environments.mujoco.rand_param_envs.gym.error.APIError",
"environments.mujoco.rand_param_e... | [((349, 410), 'environments.mujoco.rand_param_envs.gym.scoreboard.client.http_client.RequestsClient', 'http_client.RequestsClient', ([], {'verify_ssl_certs': 'verify_ssl_certs'}), '(verify_ssl_certs=verify_ssl_certs)\n', (375, 410), False, 'from environments.mujoco.rand_param_envs.gym.scoreboard.client import http_client\n'), ((494, 516), 'six.moves.urllib.urlsplit', 'urlparse.urlsplit', (['url'], {}), '(url)\n', (511, 516), True, 'import six.moves.urllib as urlparse\n'), ((595, 655), 'six.moves.urllib.urlunsplit', 'urlparse.urlunsplit', (['(scheme, netloc, path, query, fragment)'], {}), '((scheme, netloc, path, query, fragment))\n', (614, 655), True, 'import six.moves.urllib as urlparse\n'), ((765, 782), 'six.iteritems', 'iteritems', (['params'], {}), '(params)\n', (774, 782), False, 'from six import iteritems\n'), ((2865, 3145), 'environments.mujoco.rand_param_envs.gym.error.AuthenticationError', 'error.AuthenticationError', (['"""You must provide an OpenAI Gym API key.\n\n(HINT: Set your API key using "gym.scoreboard.api_key = .." or "export OPENAI_GYM_API_KEY=..."). You can find your API key in the OpenAI Gym web interface: https://gym.openai.com/settings/profile."""'], {}), '(\n """You must provide an OpenAI Gym API key.\n\n(HINT: Set your API key using "gym.scoreboard.api_key = .." or "export OPENAI_GYM_API_KEY=..."). You can find your API key in the OpenAI Gym web interface: https://gym.openai.com/settings/profile."""\n )\n', (2890, 3145), False, 'from environments.mujoco.rand_param_envs.gym import error, version\n'), ((4370, 4384), 'json.dumps', 'json.dumps', (['ua'], {}), '(ua)\n', (4380, 4384), False, 'import json\n'), ((3616, 3787), 'environments.mujoco.rand_param_envs.gym.error.APIConnectionError', 'error.APIConnectionError', (["('Unrecognized HTTP method %r. This may indicate a bug in the OpenAI Gym bindings. Please contact <EMAIL> for assistance.'\n % (method,))"], {}), "(\n 'Unrecognized HTTP method %r. This may indicate a bug in the OpenAI Gym bindings. Please contact <EMAIL> for assistance.'\n % (method,))\n", (3640, 3787), False, 'from environments.mujoco.rand_param_envs.gym import error, version\n'), ((5464, 5481), 'json.loads', 'json.loads', (['rbody'], {}), '(rbody)\n', (5474, 5481), False, 'import json\n'), ((2494, 2546), 'environments.mujoco.rand_param_envs.gym.error.APIError', 'error.APIError', (['detail', 'rbody', 'rcode', 'resp', 'rheaders'], {}), '(detail, rbody, rcode, resp, rheaders)\n', (2508, 2546), False, 'from environments.mujoco.rand_param_envs.gym import error, version\n'), ((5534, 5665), 'environments.mujoco.rand_param_envs.gym.error.APIError', 'error.APIError', (["('Invalid response body from API: %s (HTTP response code was %d)' % (rbody,\n rcode))", 'rbody', 'rcode', 'rheaders'], {}), "(\n 'Invalid response body from API: %s (HTTP response code was %d)' % (\n rbody, rcode), rbody, rcode, rheaders)\n", (5548, 5665), False, 'from environments.mujoco.rand_param_envs.gym import error, version\n')] |
# -*- coding: UTF-8 -*-
from pyhanlp import *
from DataBaseOperator import DBConnector
class DicBuilder:
def __init__(self, id=-1):
"""
        Build a finance-domain dictionary.
        :param id: select text from the database by index; the default -1 selects everything
"""
self.id = id
self.stopWords_path = r'EmotionBasedDic/stopwords.txt'
self.negativeWords_path = r'EmotionBasedDic/TsingHua/tsinghua.negative.gb.txt'
self.positiveWords_path = r'EmotionBasedDic/TsingHua/tsinghua.positive.gb.txt'
self.financeDic_path = r'EmotionBasedDic/FinanceWordDic.txt'
def loadContent(self):
"""
        Load the texts from the database.
        :return: ((text,),)
"""
dbConnector = DBConnector.DBConnector()
data = dbConnector.selectContent(self.id)
return data
def split_word(self, data):
"""
        Segment the text into words with HanLP.
        :param data: input text
:return:
"""
splitWords = HanLP.segment(data)
tmp = []
for i in splitWords:
tmp.append(str(i).split('/'))
return tmp
def remove_attribute(self, data):
"""
        Strip the part-of-speech tags.
        :param data: list of [text, POS] pairs
        :return: [text, ...]
"""
wordList = []
for i in data:
wordList.append(i[0])
return wordList
def rubbish_dic(self):
"""
        Build the list of words that need to be removed (stop words, sentiment words, punctuation).
        :return: [useless word, ...]
"""
with open(self.stopWords_path, encoding='utf8') as file:
stopWords = file.readlines()
for i in range(len(stopWords)):
stopWords[i] = stopWords[i].strip()
with open(self.negativeWords_path, encoding='gbk') as file:
negativeWords = file.readlines()
for i in range(len(negativeWords)):
negativeWords[i] = negativeWords[i].strip()
with open(self.positiveWords_path, encoding='gbk') as file:
positiveWords = file.readlines()
for i in range(len(positiveWords)):
positiveWords[i] = positiveWords[i].strip()
punctuationList = list(r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~“”?,!【】()、。:;’‘……¥·""")
emptyList = ['']
return stopWords + negativeWords + positiveWords + punctuationList + emptyList
def remove_rubbish(self, data, rubbishList):
"""
        Remove rubbish words from the text list.
        :param data: word list [text, ...] to clean of rubbish words
        :param rubbishList: list of rubbish words
        :return: text list with rubbish words removed
"""
tmp = data
for i in tmp:
if i.strip() in rubbishList or self.is_number(i.strip()):
tmp.remove(i)
return tmp
def is_number(self, n):
"""
        Check whether the argument is a number.
        :param n: value to check
        :return: True if it is a number, otherwise False
"""
try:
float(n)
except:
return False
return True
def remove_duplicate(self, data):
"""
        Remove duplicate values from the list.
        :param data: list to deduplicate
        :return: the deduplicated list
"""
tmp = set(data)
return list(tmp)
def write_dic(self, data):
"""
        Write the finance dictionary to FinanceWordDic.txt.
        :param data: word list after rubbish words have been removed
"""
with open(self.financeDic_path, 'w', encoding='utf8') as file:
for i in data:
file.write(i + '\n')
def build_dic(self):
"""
        Build the finance-domain dictionary.
        :return: the dictionary (word list)
"""
data = self.loadContent()
rubbishDic = self.rubbish_dic()
wordList = []
for d in data:
print(d)
wordList += self.split_word(d[0])
wordList = self.remove_attribute(wordList)
wordList = self.remove_duplicate(wordList)
self.remove_rubbish(wordList, rubbishDic)
self.write_dic(wordList)
        print('Dictionary build complete!')
return wordList
# if __name__ == '__main__':
# test = DicBuilder()
# test.build_dic()
| [
"DataBaseOperator.DBConnector.DBConnector"
] | [((670, 695), 'DataBaseOperator.DBConnector.DBConnector', 'DBConnector.DBConnector', ([], {}), '()\n', (693, 695), False, 'from DataBaseOperator import DBConnector\n')] |
from .utils import draw_first_k_couples, batch_2x2_inv, batch_2x2_ellipse, arange_sequence, piecewise_arange
import torch
def stable_sort_residuals(residuals, ransidx):
logres = torch.log(residuals + 1e-10)
minlogres = torch.min(logres)
maxlogres = torch.max(logres)
sorting_score = ransidx.unsqueeze(0).float() + 0.99 * (logres - minlogres) / (maxlogres - minlogres)
sorting_idxes = torch.argsort(sorting_score, dim=-1) # (niters, numsamples)
iters_range = torch.arange(residuals.shape[0], device=residuals.device)
return residuals[iters_range.unsqueeze(-1), sorting_idxes], sorting_idxes
def group_sum_and_cumsum(scores_mat, end_group_idx, group_idx=None):
cumulative_scores = torch.cumsum(scores_mat, dim=1)
ending_cumusums = cumulative_scores[:, end_group_idx]
shifted_ending_cumusums = torch.cat(
[torch.zeros(size=(ending_cumusums.shape[0], 1), dtype=ending_cumusums.dtype, device=scores_mat.device),
ending_cumusums[:, :-1]], dim=1)
grouped_sums = ending_cumusums - shifted_ending_cumusums
if group_idx is not None:
grouped_cumsums = cumulative_scores - shifted_ending_cumusums[:, group_idx]
return grouped_sums, grouped_cumsums
return grouped_sums, None
def confidence_based_inlier_selection(residuals, ransidx, rdims, idxoffsets, dv, min_confidence):
numransacs = rdims.shape[0]
numiters = residuals.shape[0]
sorted_res, sorting_idxes = stable_sort_residuals(residuals, ransidx)
sorted_res_sqr = sorted_res ** 2
too_perfect_fits = sorted_res_sqr <= 1e-8
end_rans_indexing = torch.cumsum(rdims, dim=0)-1
_, inv_indices, res_dup_counts = torch.unique_consecutive(sorted_res_sqr.half().float(), dim=1, return_counts=True, return_inverse=True)
duplicates_per_sample = res_dup_counts[inv_indices]
inlier_weights = (1./duplicates_per_sample).repeat(numiters, 1)
inlier_weights[too_perfect_fits] = 0.
balanced_rdims, weights_cumsums = group_sum_and_cumsum(inlier_weights, end_rans_indexing, ransidx)
progressive_inl_rates = weights_cumsums.float() / (balanced_rdims.repeat_interleave(rdims, dim=1)).float()
good_inl_mask = (sorted_res_sqr * min_confidence <= progressive_inl_rates) | too_perfect_fits
inlier_weights[~good_inl_mask] = 0.
inlier_counts_matrix, _ = group_sum_and_cumsum(inlier_weights, end_rans_indexing)
inl_counts, inl_iters = torch.max(inlier_counts_matrix.long(), dim=0)
relative_inl_idxes = arange_sequence(inl_counts)
inl_ransidx = torch.arange(numransacs, device=dv).repeat_interleave(inl_counts)
inl_sampleidx = sorting_idxes[inl_iters.repeat_interleave(inl_counts),
idxoffsets[inl_ransidx] + relative_inl_idxes]
highest_accepted_sqr_residuals = sorted_res_sqr[inl_iters, idxoffsets + inl_counts - 1]
expected_extra_inl = balanced_rdims[inl_iters, torch.arange(numransacs, device=dv)].float() * highest_accepted_sqr_residuals
return inl_ransidx, inl_sampleidx, inl_counts, inl_iters, inl_counts.float()/expected_extra_inl
def sample_padded_inliers(xsamples, ysamples, inlier_counts, inl_ransidx, inl_sampleidx, numransacs, dv):
maxinliers = torch.max(inlier_counts).item()
padded_inlier_x = torch.zeros(size=(numransacs, maxinliers, 2), device=dv)
padded_inlier_y = torch.zeros(size=(numransacs, maxinliers, 2), device=dv)
padded_inlier_x[inl_ransidx, piecewise_arange(inl_ransidx)] = xsamples[inl_sampleidx]
padded_inlier_y[inl_ransidx, piecewise_arange(inl_ransidx)] = ysamples[inl_sampleidx]
return padded_inlier_x, padded_inlier_y
def ransac(xsamples, ysamples, rdims, config, iters=128, refit=True):
DET_THR = config['detected_scale_rate_threshold']
MIN_CONFIDENCE = config['min_confidence']
dv = config['device']
numransacs = rdims.shape[0]
numsamples = xsamples.shape[0]
ransidx = torch.arange(numransacs, device=dv).repeat_interleave(rdims)
idxoffsets = torch.cat([torch.tensor([0], device=dv), torch.cumsum(rdims[:-1], dim=0)], dim=0)
rand_samples_rel = draw_first_k_couples(iters, rdims, dv)
rand_samples_abs = rand_samples_rel + idxoffsets
sampled_x = torch.transpose(xsamples[rand_samples_abs], dim0=1,
dim1=2) # (niters, 2, numransacs, 2) -> (niters, numransacs, 2, 2)
sampled_y = torch.transpose(ysamples[rand_samples_abs], dim0=1, dim1=2)
# minimal fit for sampled_x @ A^T = sampled_y
affinities_fit = torch.transpose(batch_2x2_inv(sampled_x, check_dets=True) @ sampled_y, -1, -2)
if not refit:
eigenvals, eigenvecs = batch_2x2_ellipse(affinities_fit)
bad_ones = (eigenvals[..., 1] < 1/DET_THR**2) | (eigenvals[..., 0] > DET_THR**2)
affinities_fit[bad_ones] = torch.eye(2, device=dv)
y_pred = (affinities_fit[:, ransidx] @ xsamples.unsqueeze(-1)).squeeze(-1)
residuals = torch.norm(y_pred - ysamples, dim=-1) # (niters, numsamples)
inl_ransidx, inl_sampleidx, \
inl_counts, inl_iters, \
inl_confidence = confidence_based_inlier_selection(residuals, ransidx,
rdims, idxoffsets, dv=dv, min_confidence=MIN_CONFIDENCE)
if len(inl_sampleidx) == 0:
# If no inliers have been found, there is nothing to re-fit!
refit = False
if not refit:
return inl_sampleidx, \
affinities_fit[inl_iters, torch.arange(inl_iters.shape[0], device=dv)], \
inl_confidence, inl_counts
# Organize inliers found into a matrix for efficient GPU re-fitting.
# Cope with the irregular number of inliers per sample by padding with zeros
padded_inlier_x, padded_inlier_y = sample_padded_inliers(xsamples, ysamples, inl_counts, inl_ransidx, inl_sampleidx,
numransacs, dv)
# A @ pad_x.T = pad_y.T
# A = pad_y.T @ pad_x @ (pad_x.T @ pad_x)^-1
refit_affinity = padded_inlier_y.transpose(-2, -1) @ padded_inlier_x @ batch_2x2_inv(
padded_inlier_x.transpose(-2, -1) @ padded_inlier_x, check_dets=True)
# Filter out degenerate affinities with large scale changes
eigenvals, eigenvecs = batch_2x2_ellipse(refit_affinity)
bad_ones = (eigenvals[..., 1] < 1/DET_THR**2) | (eigenvals[..., 0] > DET_THR**2)
refit_affinity[bad_ones] = torch.eye(2, device=dv)
y_pred = (refit_affinity[ransidx] @ xsamples.unsqueeze(-1)).squeeze(-1)
residuals = torch.norm(y_pred - ysamples, dim=-1)
inl_ransidx, inl_sampleidx, \
inl_counts, inl_iters, inl_confidence = confidence_based_inlier_selection(residuals.unsqueeze(0), ransidx,
rdims, idxoffsets, dv=dv, min_confidence=MIN_CONFIDENCE)
return inl_sampleidx, refit_affinity, inl_confidence, inl_counts
| [
"torch.log",
"torch.eye",
"torch.max",
"torch.min",
"torch.transpose",
"torch.tensor",
"torch.argsort",
"torch.norm",
"torch.zeros",
"torch.cumsum",
"torch.arange"
] | [((184, 212), 'torch.log', 'torch.log', (['(residuals + 1e-10)'], {}), '(residuals + 1e-10)\n', (193, 212), False, 'import torch\n'), ((229, 246), 'torch.min', 'torch.min', (['logres'], {}), '(logres)\n', (238, 246), False, 'import torch\n'), ((263, 280), 'torch.max', 'torch.max', (['logres'], {}), '(logres)\n', (272, 280), False, 'import torch\n'), ((408, 444), 'torch.argsort', 'torch.argsort', (['sorting_score'], {'dim': '(-1)'}), '(sorting_score, dim=-1)\n', (421, 444), False, 'import torch\n'), ((488, 545), 'torch.arange', 'torch.arange', (['residuals.shape[0]'], {'device': 'residuals.device'}), '(residuals.shape[0], device=residuals.device)\n', (500, 545), False, 'import torch\n'), ((720, 751), 'torch.cumsum', 'torch.cumsum', (['scores_mat'], {'dim': '(1)'}), '(scores_mat, dim=1)\n', (732, 751), False, 'import torch\n'), ((3253, 3309), 'torch.zeros', 'torch.zeros', ([], {'size': '(numransacs, maxinliers, 2)', 'device': 'dv'}), '(size=(numransacs, maxinliers, 2), device=dv)\n', (3264, 3309), False, 'import torch\n'), ((3332, 3388), 'torch.zeros', 'torch.zeros', ([], {'size': '(numransacs, maxinliers, 2)', 'device': 'dv'}), '(size=(numransacs, maxinliers, 2), device=dv)\n', (3343, 3388), False, 'import torch\n'), ((4187, 4246), 'torch.transpose', 'torch.transpose', (['xsamples[rand_samples_abs]'], {'dim0': '(1)', 'dim1': '(2)'}), '(xsamples[rand_samples_abs], dim0=1, dim1=2)\n', (4202, 4246), False, 'import torch\n'), ((4355, 4414), 'torch.transpose', 'torch.transpose', (['ysamples[rand_samples_abs]'], {'dim0': '(1)', 'dim1': '(2)'}), '(ysamples[rand_samples_abs], dim0=1, dim1=2)\n', (4370, 4414), False, 'import torch\n'), ((4893, 4930), 'torch.norm', 'torch.norm', (['(y_pred - ysamples)'], {'dim': '(-1)'}), '(y_pred - ysamples, dim=-1)\n', (4903, 4930), False, 'import torch\n'), ((6326, 6349), 'torch.eye', 'torch.eye', (['(2)'], {'device': 'dv'}), '(2, device=dv)\n', (6335, 6349), False, 'import torch\n'), ((6443, 6480), 'torch.norm', 'torch.norm', (['(y_pred - ysamples)'], {'dim': '(-1)'}), '(y_pred - ysamples, dim=-1)\n', (6453, 6480), False, 'import torch\n'), ((1606, 1632), 'torch.cumsum', 'torch.cumsum', (['rdims'], {'dim': '(0)'}), '(rdims, dim=0)\n', (1618, 1632), False, 'import torch\n'), ((4773, 4796), 'torch.eye', 'torch.eye', (['(2)'], {'device': 'dv'}), '(2, device=dv)\n', (4782, 4796), False, 'import torch\n'), ((860, 966), 'torch.zeros', 'torch.zeros', ([], {'size': '(ending_cumusums.shape[0], 1)', 'dtype': 'ending_cumusums.dtype', 'device': 'scores_mat.device'}), '(size=(ending_cumusums.shape[0], 1), dtype=ending_cumusums.dtype,\n device=scores_mat.device)\n', (871, 966), False, 'import torch\n'), ((2532, 2567), 'torch.arange', 'torch.arange', (['numransacs'], {'device': 'dv'}), '(numransacs, device=dv)\n', (2544, 2567), False, 'import torch\n'), ((3199, 3223), 'torch.max', 'torch.max', (['inlier_counts'], {}), '(inlier_counts)\n', (3208, 3223), False, 'import torch\n'), ((3895, 3930), 'torch.arange', 'torch.arange', (['numransacs'], {'device': 'dv'}), '(numransacs, device=dv)\n', (3907, 3930), False, 'import torch\n'), ((3984, 4012), 'torch.tensor', 'torch.tensor', (['[0]'], {'device': 'dv'}), '([0], device=dv)\n', (3996, 4012), False, 'import torch\n'), ((4014, 4045), 'torch.cumsum', 'torch.cumsum', (['rdims[:-1]'], {'dim': '(0)'}), '(rdims[:-1], dim=0)\n', (4026, 4045), False, 'import torch\n'), ((5395, 5438), 'torch.arange', 'torch.arange', (['inl_iters.shape[0]'], {'device': 'dv'}), '(inl_iters.shape[0], device=dv)\n', (5407, 5438), False, 'import torch\n'), 
((2896, 2931), 'torch.arange', 'torch.arange', (['numransacs'], {'device': 'dv'}), '(numransacs, device=dv)\n', (2908, 2931), False, 'import torch\n')] |
import os
import datetime
import h5py
import numpy as np
DEFAULT_DTYPE = np.dtype([
('datetime', np.int64),
('open', np.float),
('close', np.float),
('high', np.float),
('low', np.float),
('limit_up', np.float),
('limit_down', np.float),
('volume', np.float),
('total_turnover', np.float),
('settlement', np.float),
('prev_settlement', np.float),
])
class Kline2HDF5:
def __init__(self, fo_name):
self._timeformat = "%Y-%m-%d %H:%M:%S"
self._create_output_file(fo_name)
def _create_output_file(self, fo_name):
self._fo = h5py.File(fo_name, "w")
def finished(self):
self._fo.close()
def translate(self, fi_name, symbol=None):
print(fi_name, symbol)
fi = open(fi_name, 'r')
if not symbol:
symbol = os.path.basename(fi_name).split('.')[0]
res = []
lines = fi.readlines()
last_dt = None
for line in lines:
vars = line.strip('\n').split(',')
datetime_array = datetime.datetime.fromtimestamp(int(vars[0]))
if last_dt:
delta = datetime_array - last_dt
if delta.days >= 1 and 20 <= datetime_array.hour <= 24:
# datetime_array = datetime_array - datetime.timedelta(days=1)
datetime_array = datetime_array.replace(day=last_dt.day, month=last_dt.month)
if datetime_array <= last_dt:
print(line)
print(datetime_array)
print(datetime_array.second)
print(last_dt)
print(last_dt.hour)
assert False
datetime_str = datetime_array.strftime("%Y%m%d%H%M%S")
# t = int(vars[0])
o = float(vars[1])
h = float(vars[2])
l = float(vars[3])
c = float(vars[4])
v = float(vars[5])
res.append((datetime_str, o, c, h, l, o * 1.1, o * 0.9, v, -1, -1, -1))
last_dt = datetime_array
fi.close()
res_array = np.asarray(res, dtype=DEFAULT_DTYPE)
self._fo.create_dataset(symbol, data=res_array)
return True
if __name__ == '__main__':
rq2h5 = Kline2HDF5("futures_min_test.h5")
rq2h5.translate("/Users/zhifeng/rqalpha/data/rqdata/I88-4.csv", "I88")
rq2h5.finished()
| [
"numpy.dtype",
"numpy.asarray",
"os.path.basename",
"h5py.File"
] | [((74, 361), 'numpy.dtype', 'np.dtype', (["[('datetime', np.int64), ('open', np.float), ('close', np.float), ('high',\n np.float), ('low', np.float), ('limit_up', np.float), ('limit_down', np\n .float), ('volume', np.float), ('total_turnover', np.float), (\n 'settlement', np.float), ('prev_settlement', np.float)]"], {}), "([('datetime', np.int64), ('open', np.float), ('close', np.float),\n ('high', np.float), ('low', np.float), ('limit_up', np.float), (\n 'limit_down', np.float), ('volume', np.float), ('total_turnover', np.\n float), ('settlement', np.float), ('prev_settlement', np.float)])\n", (82, 361), True, 'import numpy as np\n'), ((601, 624), 'h5py.File', 'h5py.File', (['fo_name', '"""w"""'], {}), "(fo_name, 'w')\n", (610, 624), False, 'import h5py\n'), ((2106, 2142), 'numpy.asarray', 'np.asarray', (['res'], {'dtype': 'DEFAULT_DTYPE'}), '(res, dtype=DEFAULT_DTYPE)\n', (2116, 2142), True, 'import numpy as np\n'), ((830, 855), 'os.path.basename', 'os.path.basename', (['fi_name'], {}), '(fi_name)\n', (846, 855), False, 'import os\n')] |
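Reading the generated HDF5 back is symmetric; a small sketch, assuming the file and the `I88` dataset written in the `__main__` block above:

```python
import h5py

# Sketch: inspect the structured records written by Kline2HDF5.
with h5py.File("futures_min_test.h5", "r") as f:
    bars = f["I88"][:]
    print(bars.dtype.names)   # ('datetime', 'open', 'close', 'high', ...)
    print(bars[:3])
```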
""" StackedMail management command tests """
from django.test import TestCase
from django.test import Client
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.utils.six import StringIO
from app.logic.mailing.models.StackedMailModel import StackedMailEntry
from app.logic.httpcommon import res
import os
import shutil
class ManagementStackedMailTestCase(TestCase):
def setUp(self):
self.client = Client()
self.tmp_folder = os.path.join(settings.TMP_ROOT)
if not os.path.exists(self.tmp_folder):
os.makedirs(self.tmp_folder)
def tearDown(self):
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
def create_stacked_email(self, sender, receiver, title, msg, sent):
StackedMailEntry.objects.create(
receiver=receiver,
sender=sender,
title=title,
content=msg,
is_sent=sent
)
def test_command_output(self):
self.create_stacked_email('<EMAIL>', '<EMAIL>', 'Title1', 'Body1', True)
self.create_stacked_email('<EMAIL>', '<EMAIL>', 'Title2', 'Body2', False)
self.create_stacked_email('<EMAIL>', '<EMAIL>', 'Title3', 'Body3', False)
self.create_stacked_email('<EMAIL>', '<EMAIL>', 'Title4', 'Body4', False)
out = StringIO()
call_command('sendmail', stdout=out)
self.assertIn('', out.getvalue())
mail.outbox.sort(key=lambda x: x.to[0])
self.assertEqual('<EMAIL>', mail.outbox[0].to[0])
self.assertEqual('<EMAIL>', mail.outbox[0].from_email)
self.assertEqual('<EMAIL>', mail.outbox[1].to[0])
self.assertEqual('<EMAIL>', mail.outbox[1].from_email)
self.assertEqual('<EMAIL>', mail.outbox[2].to[0])
self.assertEqual('<EMAIL>', mail.outbox[2].from_email)
emails = StackedMailEntry.objects.all()
for email in emails:
self.assertEqual(True, email.is_sent)
| [
"os.path.exists",
"os.makedirs",
"django.core.management.call_command",
"app.logic.mailing.models.StackedMailModel.StackedMailEntry.objects.create",
"os.path.join",
"shutil.rmtree",
"django.core.mail.outbox.sort",
"django.utils.six.StringIO",
"app.logic.mailing.models.StackedMailModel.StackedMailEnt... | [((483, 491), 'django.test.Client', 'Client', ([], {}), '()\n', (489, 491), False, 'from django.test import Client\n'), ((518, 549), 'os.path.join', 'os.path.join', (['settings.TMP_ROOT'], {}), '(settings.TMP_ROOT)\n', (530, 549), False, 'import os\n'), ((676, 707), 'os.path.exists', 'os.path.exists', (['self.tmp_folder'], {}), '(self.tmp_folder)\n', (690, 707), False, 'import os\n'), ((833, 943), 'app.logic.mailing.models.StackedMailModel.StackedMailEntry.objects.create', 'StackedMailEntry.objects.create', ([], {'receiver': 'receiver', 'sender': 'sender', 'title': 'title', 'content': 'msg', 'is_sent': 'sent'}), '(receiver=receiver, sender=sender, title=\n title, content=msg, is_sent=sent)\n', (864, 943), False, 'from app.logic.mailing.models.StackedMailModel import StackedMailEntry\n'), ((1387, 1397), 'django.utils.six.StringIO', 'StringIO', ([], {}), '()\n', (1395, 1397), False, 'from django.utils.six import StringIO\n'), ((1406, 1442), 'django.core.management.call_command', 'call_command', (['"""sendmail"""'], {'stdout': 'out'}), "('sendmail', stdout=out)\n", (1418, 1442), False, 'from django.core.management import call_command\n'), ((1494, 1533), 'django.core.mail.outbox.sort', 'mail.outbox.sort', ([], {'key': '(lambda x: x.to[0])'}), '(key=lambda x: x.to[0])\n', (1510, 1533), False, 'from django.core import mail\n'), ((1918, 1948), 'app.logic.mailing.models.StackedMailModel.StackedMailEntry.objects.all', 'StackedMailEntry.objects.all', ([], {}), '()\n', (1946, 1948), False, 'from app.logic.mailing.models.StackedMailModel import StackedMailEntry\n'), ((566, 597), 'os.path.exists', 'os.path.exists', (['self.tmp_folder'], {}), '(self.tmp_folder)\n', (580, 597), False, 'import os\n'), ((611, 639), 'os.makedirs', 'os.makedirs', (['self.tmp_folder'], {}), '(self.tmp_folder)\n', (622, 639), False, 'import os\n'), ((721, 751), 'shutil.rmtree', 'shutil.rmtree', (['self.tmp_folder'], {}), '(self.tmp_folder)\n', (734, 751), False, 'import shutil\n')] |
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class PolicyFirewallRuleDetailsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def describe_firewall_rule_on_policy(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a firewall rule # noqa: E501
Describe a firewall rule including policy-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_firewall_rule_on_policy(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
return data
def describe_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a firewall rule # noqa: E501
Describe a firewall rule including policy-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'firewall_rule_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_firewall_rule_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `describe_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule_id' is set
if ('firewall_rule_id' not in params or
params['firewall_rule_id'] is None):
raise ValueError("Missing the required parameter `firewall_rule_id` when calling `describe_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_firewall_rule_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `describe_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `describe_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'firewall_rule_id' in params:
path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules/{firewallRuleID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRule', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_firewall_rules_on_policy(self, policy_id, api_version, **kwargs): # noqa: E501
"""List firewall rules # noqa: E501
Lists all firewall rules assigned to a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_firewall_rules_on_policy(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only rules assigned to the current policy.
:return: FirewallRules
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
return data
def list_firewall_rules_on_policy_with_http_info(self, policy_id, api_version, **kwargs): # noqa: E501
"""List firewall rules # noqa: E501
Lists all firewall rules assigned to a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only rules assigned to the current policy.
:return: FirewallRules
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_firewall_rules_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `list_firewall_rules_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_firewall_rules_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `list_firewall_rules_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRules', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_firewall_rule_on_policy(self, policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs): # noqa: E501
"""Modify a firewall rule # noqa: E501
Modify a firewall rule assigned to a policy. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_firewall_rule_on_policy(policy_id, firewall_rule_id, firewall_rule, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to modify. (required)
:param FirewallRule firewall_rule: The settings of the firewall rule to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs) # noqa: E501
return data
def modify_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs): # noqa: E501
"""Modify a firewall rule # noqa: E501
Modify a firewall rule assigned to a policy. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to modify. (required)
:param FirewallRule firewall_rule: The settings of the firewall rule to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'firewall_rule_id', 'firewall_rule', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_firewall_rule_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `modify_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule_id' is set
if ('firewall_rule_id' not in params or
params['firewall_rule_id'] is None):
raise ValueError("Missing the required parameter `firewall_rule_id` when calling `modify_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule' is set
if ('firewall_rule' not in params or
params['firewall_rule'] is None):
raise ValueError("Missing the required parameter `firewall_rule` when calling `modify_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_firewall_rule_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `modify_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `modify_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'firewall_rule_id' in params:
path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'firewall_rule' in params:
body_params = params['firewall_rule']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules/{firewallRuleID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRule', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def reset_firewall_rule_on_policy(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Reset firewall rule overrides # noqa: E501
Remove all overrides for a firewall rule from a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_firewall_rule_on_policy(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
return data
def reset_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Reset firewall rule overrides # noqa: E501
Remove all overrides for a firewall rule from a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'firewall_rule_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method reset_firewall_rule_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `reset_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule_id' is set
if ('firewall_rule_id' not in params or
params['firewall_rule_id'] is None):
raise ValueError("Missing the required parameter `firewall_rule_id` when calling `reset_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `reset_firewall_rule_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `reset_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `reset_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'firewall_rule_id' in params:
path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules/{firewallRuleID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRule', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"six.iteritems",
"deepsecurity.api_client.ApiClient"
] | [((3929, 3960), 'six.iteritems', 'six.iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (3942, 3960), False, 'import six\n'), ((10063, 10094), 'six.iteritems', 'six.iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (10076, 10094), False, 'import six\n'), ((16156, 16187), 'six.iteritems', 'six.iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (16169, 16187), False, 'import six\n'), ((23030, 23061), 'six.iteritems', 'six.iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (23043, 23061), False, 'import six\n'), ((1010, 1021), 'deepsecurity.api_client.ApiClient', 'ApiClient', ([], {}), '()\n', (1019, 1021), False, 'from deepsecurity.api_client import ApiClient\n')] |
"""
Log requests middleware
"""
import time
from fastapi import Request
import geoip2.database
from geoip2.errors import AddressNotFoundError
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from server.config import app_logger
from server.config import CITY_DB
logger = app_logger(__name__, 'requests.log')
geo_location = geoip2.database.Reader(CITY_DB)
class LogRequests(BaseHTTPMiddleware):
async def dispatch(self,
request: Request,
call_next: RequestResponseEndpoint,
):
start_time = time.time()
response = await call_next(request)
process_time = (time.time() - start_time) * 1000
formatted_process_time = '{0:.2f}'.format(process_time)
remote = ":".join(map(str, request.client))
x_forwarded_for = request.headers.get('x-forwarded-for', None)
x_real_ip = request.headers.get('x-real-ip', None)
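        # 'cf-ipcountry' is Cloudflare's country-code header; it is read below but
        # not currently included in the log record.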
cc = request.headers.get('cf-ipcountry', None)
if x_forwarded_for:
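            # X-Forwarded-For may carry a comma-separated proxy chain; keep only the
            # first (original client) address.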
x_forwarded_for = x_forwarded_for.split(',')[0]
if "None" in remote:
if x_forwarded_for:
remote = x_forwarded_for
elif x_real_ip:
remote = x_real_ip
try:
remote_country = geo_location.city(remote.rsplit(':', 1)[0])
remote_country = remote_country.country.name
except AddressNotFoundError:
remote_country = 'Unknown'
logger.info({
"remote_ip": remote,
"remote_country": remote_country,
"user_agent": request.headers.get('user-agent', None),
'method': request.method,
"path": request.url.path,
"completed in (ms)": formatted_process_time,
"response code": response.status_code
})
response.headers["X-Process-Time"] = str(formatted_process_time)
return response
| [
"server.config.app_logger",
"time.time"
] | [((306, 342), 'server.config.app_logger', 'app_logger', (['__name__', '"""requests.log"""'], {}), "(__name__, 'requests.log')\n", (316, 342), False, 'from server.config import app_logger\n'), ((607, 618), 'time.time', 'time.time', ([], {}), '()\n', (616, 618), False, 'import time\n'), ((688, 699), 'time.time', 'time.time', ([], {}), '()\n', (697, 699), False, 'import time\n')] |
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="Search Products on Supported Platforms")
parser.add_argument(
"--platform", "-p", required=True, help="Online Retail Platform", type=str
)
parser.add_argument(
"--keyword", "-k", required=True, help="Product Keyword", type=str
)
args = parser.parse_args()
return args
def main():
args = parse_args()
platform = args.platform
if platform == "amazon":
from product_scrapper.amazon.amazon_search import AmazonSearch as Search
else:
raise NotImplementedError
search = Search(keyword=args.keyword)
search.scrape()
print(search)
if __name__ == "__main__":
main() | [
"product_scrapper.amazon.amazon_search.AmazonSearch",
"argparse.ArgumentParser"
] | [((49, 126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Search Products on Supported Platforms"""'}), "(description='Search Products on Supported Platforms')\n", (72, 126), False, 'import argparse\n'), ((629, 657), 'product_scrapper.amazon.amazon_search.AmazonSearch', 'Search', ([], {'keyword': 'args.keyword'}), '(keyword=args.keyword)\n', (635, 657), True, 'from product_scrapper.amazon.amazon_search import AmazonSearch as Search\n')] |
import sys, os
import time
import getopt
import pprint
try:
# doesn't exist on macos
from shmem import PyShmemClient
except:
pass
from psana import dgram
from psana.event import Event
from psana.detector import detectors
from psana.psexp.event_manager import TransitionId
import numpy as np
def dumpDict(dict,indent):
for k in sorted(dict.keys()):
if hasattr(dict[k],'__dict__'):
print(' '*indent,k)
dumpDict(dict[k].__dict__,indent+2)
else:
print(' '*indent,k,dict[k])
# method to dump dgrams to stdout. ideally this would move into dgram.cc
def dumpDgram(d):
dumpDict(d.__dict__,0)
FN_L = 200
# Warning: If XtcData::Dgram ever changes, this function will likely need to change
def _service(view):
iSvc = 2 # Index of service field, in units of uint32_t
return (np.array(view, copy=False).view(dtype=np.uint32)[iSvc] >> 24) & 0x0f
# Warning: If XtcData::Dgram ever changes, this function will likely need to change
def _dgSize(view):
iExt = 5 # Index of extent field, in units of uint32_t
txSize = 3 * 4 # sizeof(XtcData::TransitionBase)
return txSize + np.array(view, copy=False).view(dtype=np.uint32)[iExt]
class DgramManager(object):
def __init__(self, xtc_files, configs=[], fds=[],
tag=None, run=None, max_retries=0,
found_xtc2_callback=None):
""" Opens xtc_files and stores configs.
If file descriptors (fds) is given, reuse the given file descriptors.
"""
self.xtc_files = []
self.shmem_cli = None
self.shmem_kwargs = {'index':-1,'size':0,'cli_cptr':None}
self.configs = []
self._timestamps = [] # built when iterating
self._run = run
self.found_endrun = True
self.buffered_beginruns = []
self.max_retries = max_retries
self.chunk_ids = []
# Add ability for dgrammanager to check if xtc2 files exist (in case
# .inprogress file is use).
if found_xtc2_callback:
setattr(self, 'found_xtc2', found_xtc2_callback)
if isinstance(xtc_files, (str)):
self.xtc_files = np.array([xtc_files], dtype='U%s'%FN_L)
elif isinstance(xtc_files, (list, np.ndarray)):
if len(xtc_files) > 0: # handles smalldata-only case
if xtc_files[0] == 'shmem':
self.shmem_cli = PyShmemClient()
#establish connection to available server - blocking
status = int(self.shmem_cli.connect(tag,0))
assert not status,'shmem connect failure %d' % status
#wait for first configure datagram - blocking
view = self.shmem_cli.get(self.shmem_kwargs)
assert view
# Release shmem buffer after copying Transition data
# cpo: copy L1Accepts too because some shmem
# applications like AMI's pickN can hold references
# to dgrams for a long time, consuming the shmem buffers
# and creating a deadlock situation. could revisit this
# later and only deep-copy arrays inside pickN, for example
# but would be more fragile.
barray = bytes(view[:_dgSize(view)])
self.shmem_cli.freeByIndex(self.shmem_kwargs['index'], self.shmem_kwargs['size'])
view = memoryview(barray)
d = dgram.Dgram(view=view)
self.configs += [d]
else:
self.xtc_files = np.asarray(xtc_files, dtype='U%s'%FN_L)
self.given_fds = True if len(fds) > 0 else False
if self.given_fds:
self.fds = np.asarray(fds, dtype=np.int32)
else:
self.fds = np.array([os.open(xtc_file, os.O_RDONLY) for xtc_file in self.xtc_files], dtype=np.int32)
self.fds_map = {}
for fd, xtc_file in zip(self.fds, self.xtc_files):
self.fds_map[fd] = xtc_file
given_configs = True if len(configs) > 0 else False
if given_configs:
self.configs = configs
elif xtc_files[0] != 'shmem':
self.configs = [dgram.Dgram(file_descriptor=fd, max_retries=self.max_retries) for fd in self.fds]
self.calibconst = {} # initialize to empty dict - will be populated by run class
self.n_files = len(self.xtc_files)
self.set_chunk_ids()
def set_chunk_ids(self):
if len(self.xtc_files) == 0: return
if self.xtc_files[0] == 'shmem': return
for xtc_file in self.xtc_files:
filename = os.path.basename(xtc_file)
found = filename.find('-c')
if found >= 0:
found_e = filename.find('.xtc2')
self.chunk_ids.append(int(filename[found+2:found_e]))
def get_chunk_id(self, ind):
if not self.chunk_ids: return None
return self.chunk_ids[ind]
def set_chunk_id(self, ind, new_chunk_id):
self.chunk_ids[ind] = new_chunk_id
def close(self):
if not self.given_fds:
for fd in self.fds:
os.close(fd)
def __iter__(self):
return self
def _check_missing_endrun(self, beginruns=None):
fake_endruns = None
if not self.found_endrun: # there's no previous EndRun
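            # Fabricate an EndRun time just after the last seen event: seconds live in
            # the upper 32 bits of the timestamp, the lower bits hold the sub-second part.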
sec = (self._timestamps[-1] >> 32) & 0xffffffff
usec = int((self._timestamps[-1] & 0xffffffff) * 1e3 + 1)
if beginruns:
self.buffered_beginruns = [dgram.Dgram(config=config,
view=d, offset=0, size=d._size)
for d, config in zip(beginruns, self.configs)]
fake_endruns = [dgram.Dgram(config=config, fake_endrun=1, \
fake_endrun_sec=sec, fake_endrun_usec=usec) \
for config in self.configs]
self.found_endrun = True
else:
self.found_endrun = False
return fake_endruns
def __next__(self):
""" only support sequential read - no event building"""
if self.buffered_beginruns:
self.found_endrun = False
            evt = Event(self.buffered_beginruns, run=self.get_run())
self._timestamps += [evt.timestamp]
self.buffered_beginruns = []
return evt
if self.shmem_cli:
view = self.shmem_cli.get(self.shmem_kwargs)
if view:
# Release shmem buffer after copying Transition data
# cpo: copy L1Accepts too because some shmem
# applications like AMI's pickN can hold references
# to dgrams for a long time, consuming the shmem buffers
# and creating a deadlock situation. could revisit this
# later and only deep-copy arrays inside pickN, for example
# but would be more fragile.
barray = bytes(view[:_dgSize(view)])
self.shmem_cli.freeByIndex(self.shmem_kwargs['index'], self.shmem_kwargs['size'])
view = memoryview(barray)
# use the most recent configure datagram
config = self.configs[len(self.configs)-1]
d = dgram.Dgram(config=config,view=view)
dgrams = [d]
else:
raise StopIteration
else:
try:
dgrams = [dgram.Dgram(config=config, max_retries=self.max_retries) for config in self.configs]
except StopIteration as err:
fake_endruns = self._check_missing_endrun()
if fake_endruns:
dgrams = fake_endruns
else:
print(err)
raise StopIteration
# Check BeginRun - EndRun pairing
service = dgrams[0].service()
if service == TransitionId.BeginRun:
fake_endruns = self._check_missing_endrun(beginruns=dgrams)
if fake_endruns:
dgrams = fake_endruns
if service == TransitionId.EndRun:
self.found_endrun = True
evt = Event(dgrams, run=self.get_run())
self._timestamps += [evt.timestamp]
return evt
def jumps(self, dgram_i, offset, size):
if offset == 0 and size == 0:
d = None
else:
try:
d = dgram.Dgram(file_descriptor=self.fds[dgram_i],
config=self.configs[dgram_i],
offset=offset,
size=size,
max_retries=self.max_retries)
except StopIteration:
d = None
return d
def jump(self, offsets, sizes):
""" Jumps to the offset and reads out dgram on each xtc file.
This is used in normal mode (multiple detectors with MPI).
"""
assert len(offsets) > 0 and len(sizes) > 0
dgrams = [self.jumps(dgram_i, offset, size) for dgram_i, (offset, size)
in enumerate(zip(offsets, sizes))]
evt = Event(dgrams, run=self._run)
return evt
def get_timestamps(self):
return np.asarray(self._timestamps, dtype=np.uint64) # return numpy array for easy search later
def set_run(self, run):
self._run = run
def get_run(self):
return self._run
def parse_command_line():
opts, args_proper = getopt.getopt(sys.argv[1:], 'hvd:f:')
xtcdata_filename="data.xtc"
for option, parameter in opts:
if option=='-h': usage_error()
if option=='-f': xtcdata_filename = parameter
if xtcdata_filename is None:
xtcdata_filename="data.xtc"
return (args_proper, xtcdata_filename)
def getMemUsage():
pid=os.getpid()
ppid=os.getppid()
cmd="/usr/bin/ps -q %d --no-headers -eo size" % pid
p=os.popen(cmd)
size=int(p.read())
return size
def main():
args_proper, xtcdata_filename = parse_command_line()
ds=DgramManager(xtcdata_filename)
print("vars(ds):")
for var_name in sorted(vars(ds)):
print(" %s:" % var_name)
e=getattr(ds, var_name)
if not isinstance(e, (tuple, list, int, float, str)):
for key in sorted(e.__dict__.keys()):
print("%s: %s" % (key, e.__dict__[key]))
print()
count=0
for evt in ds:
print("evt:", count)
for dgram in evt:
for var_name in sorted(vars(dgram)):
val=getattr(dgram, var_name)
print(" %s: %s" % (var_name, type(val)))
a=dgram.xpphsd.raw.array0Pgp
try:
a[0][0]=999
except ValueError:
print("The dgram.xpphsd.raw.array0Pgp is read-only, as it should be.")
else:
print("Warning: the evt.array0_pgp array is writable")
print()
count+=1
return
def usage_error():
s="usage: python %s" % os.path.basename(sys.argv[0])
sys.stdout.write("%s [-h]\n" % s)
sys.stdout.write("%s [-f xtcdata_filename]\n" % (" "*len(s)))
sys.exit(1)
if __name__=='__main__':
main()
| [
"getopt.getopt",
"shmem.PyShmemClient",
"psana.dgram.Dgram",
"os.close",
"os.open",
"numpy.asarray",
"os.getppid",
"numpy.array",
"os.popen",
"os.path.basename",
"os.getpid",
"sys.exit",
"psana.event.Event",
"sys.stdout.write"
] | [((9560, 9597), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hvd:f:"""'], {}), "(sys.argv[1:], 'hvd:f:')\n", (9573, 9597), False, 'import getopt\n'), ((9898, 9909), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9907, 9909), False, 'import sys, os\n'), ((9919, 9931), 'os.getppid', 'os.getppid', ([], {}), '()\n', (9929, 9931), False, 'import sys, os\n'), ((9994, 10007), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (10002, 10007), False, 'import sys, os\n'), ((11124, 11157), 'sys.stdout.write', 'sys.stdout.write', (["('%s [-h]\\n' % s)"], {}), "('%s [-h]\\n' % s)\n", (11140, 11157), False, 'import sys, os\n'), ((11228, 11239), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11236, 11239), False, 'import sys, os\n'), ((9220, 9248), 'psana.event.Event', 'Event', (['dgrams'], {'run': 'self._run'}), '(dgrams, run=self._run)\n', (9225, 9248), False, 'from psana.event import Event\n'), ((9314, 9359), 'numpy.asarray', 'np.asarray', (['self._timestamps'], {'dtype': 'np.uint64'}), '(self._timestamps, dtype=np.uint64)\n', (9324, 9359), True, 'import numpy as np\n'), ((11090, 11119), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (11106, 11119), False, 'import sys, os\n'), ((2221, 2262), 'numpy.array', 'np.array', (['[xtc_files]'], {'dtype': "('U%s' % FN_L)"}), "([xtc_files], dtype='U%s' % FN_L)\n", (2229, 2262), True, 'import numpy as np\n'), ((3845, 3876), 'numpy.asarray', 'np.asarray', (['fds'], {'dtype': 'np.int32'}), '(fds, dtype=np.int32)\n', (3855, 3876), True, 'import numpy as np\n'), ((4755, 4781), 'os.path.basename', 'os.path.basename', (['xtc_file'], {}), '(xtc_file)\n', (4771, 4781), False, 'import sys, os\n'), ((5276, 5288), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (5284, 5288), False, 'import sys, os\n'), ((5866, 5955), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'fake_endrun': '(1)', 'fake_endrun_sec': 'sec', 'fake_endrun_usec': 'usec'}), '(config=config, fake_endrun=1, fake_endrun_sec=sec,\n fake_endrun_usec=usec)\n', (5877, 5955), False, 'from psana import dgram\n'), ((7380, 7417), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'view': 'view'}), '(config=config, view=view)\n', (7391, 7417), False, 'from psana import dgram\n'), ((8549, 8685), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'file_descriptor': 'self.fds[dgram_i]', 'config': 'self.configs[dgram_i]', 'offset': 'offset', 'size': 'size', 'max_retries': 'self.max_retries'}), '(file_descriptor=self.fds[dgram_i], config=self.configs[dgram_i],\n offset=offset, size=size, max_retries=self.max_retries)\n', (8560, 8685), False, 'from psana import dgram\n'), ((1205, 1231), 'numpy.array', 'np.array', (['view'], {'copy': '(False)'}), '(view, copy=False)\n', (1213, 1231), True, 'import numpy as np\n'), ((3924, 3954), 'os.open', 'os.open', (['xtc_file', 'os.O_RDONLY'], {}), '(xtc_file, os.O_RDONLY)\n', (3931, 3954), False, 'import sys, os\n'), ((4326, 4387), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'file_descriptor': 'fd', 'max_retries': 'self.max_retries'}), '(file_descriptor=fd, max_retries=self.max_retries)\n', (4337, 4387), False, 'from psana import dgram\n'), ((5678, 5736), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'view': 'd', 'offset': '(0)', 'size': 'd._size'}), '(config=config, view=d, offset=0, size=d._size)\n', (5689, 5736), False, 'from psana import dgram\n'), ((7557, 7613), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'max_retries': 'self.max_retries'}), '(config=config, 
max_retries=self.max_retries)\n', (7568, 7613), False, 'from psana import dgram\n'), ((868, 894), 'numpy.array', 'np.array', (['view'], {'copy': '(False)'}), '(view, copy=False)\n', (876, 894), True, 'import numpy as np\n'), ((2463, 2478), 'shmem.PyShmemClient', 'PyShmemClient', ([], {}), '()\n', (2476, 2478), False, 'from shmem import PyShmemClient\n'), ((3574, 3596), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'view': 'view'}), '(view=view)\n', (3585, 3596), False, 'from psana import dgram\n'), ((3696, 3737), 'numpy.asarray', 'np.asarray', (['xtc_files'], {'dtype': "('U%s' % FN_L)"}), "(xtc_files, dtype='U%s' % FN_L)\n", (3706, 3737), True, 'import numpy as np\n')] |
from flair.models import TARSTagger
from flair.data import Sentence
# 1. Load zero-shot NER tagger
tars = TARSTagger.load('tars-ner')
# 2. Prepare some test sentences
sentences = [
Sentence("kill la kill is an anime"),
Sentence("The Humboldt University of Berlin is situated near the Spree in Berlin, Germany"),
Sentence("Bayern Munich played against Real Madrid"),
Sentence("I flew with an Airbus A380 to Peru to pick up my Porsche Cayenne"),
Sentence("Game of Thrones is my favorite series"),
]
# 3. Define some classes of named entities such as "soccer teams", "TV shows" and "rivers"
labels = ["Soccer Team", "University", "Vehicle", "River", "City", "Country", "Person", 'Anime', "Movie", "TV Show"]
tars.add_and_switch_to_new_task('task 1', labels, label_type='ner')
# 4. Predict for these classes and print results
for sentence in sentences:
tars.predict(sentence)
print(sentence.to_tagged_string("ner"))
| [
"flair.models.TARSTagger.load",
"flair.data.Sentence"
] | [((107, 134), 'flair.models.TARSTagger.load', 'TARSTagger.load', (['"""tars-ner"""'], {}), "('tars-ner')\n", (122, 134), False, 'from flair.models import TARSTagger\n'), ((187, 223), 'flair.data.Sentence', 'Sentence', (['"""kill la kill is an anime"""'], {}), "('kill la kill is an anime')\n", (195, 223), False, 'from flair.data import Sentence\n'), ((229, 330), 'flair.data.Sentence', 'Sentence', (['"""The Humboldt University of Berlin is situated near the Spree in Berlin, Germany"""'], {}), "(\n 'The Humboldt University of Berlin is situated near the Spree in Berlin, Germany'\n )\n", (237, 330), False, 'from flair.data import Sentence\n'), ((326, 378), 'flair.data.Sentence', 'Sentence', (['"""Bayern Munich played against Real Madrid"""'], {}), "('Bayern Munich played against Real Madrid')\n", (334, 378), False, 'from flair.data import Sentence\n'), ((384, 460), 'flair.data.Sentence', 'Sentence', (['"""I flew with an Airbus A380 to Peru to pick up my Porsche Cayenne"""'], {}), "('I flew with an Airbus A380 to Peru to pick up my Porsche Cayenne')\n", (392, 460), False, 'from flair.data import Sentence\n'), ((466, 515), 'flair.data.Sentence', 'Sentence', (['"""Game of Thrones is my favorite series"""'], {}), "('Game of Thrones is my favorite series')\n", (474, 515), False, 'from flair.data import Sentence\n')] |
def _init():
from som.vm.universe import create_universe
return create_universe()
current_universe = _init()
| [
"som.vm.universe.create_universe"
] | [((73, 90), 'som.vm.universe.create_universe', 'create_universe', ([], {}), '()\n', (88, 90), False, 'from som.vm.universe import create_universe\n')] |
# author: <<NAME>>
# Client side CLI
# Necessary imports.
import os
import json
from pathlib import Path
import requests
import click
from server import run
# The Click package is used to turn this Python script into a CLI.
@click.command()
@click.option('--path',prompt="Enter the path", help='Specify the path, that you want to display', required=True)
@click.option('--port',prompt="Enter the PORT number that is used to start the server", help='Specify the PORT number, that you already used to start server', required=True)
# The values of the @click.option decorators (path and port) are passed to the function getfiles.
def getfiles(path, port):
"""Simple program that displays a list of files in the directory."""
    # Initialize an empty list, store_list, which will hold the details of every file found.
store_list = []
    # Check whether the path exists.
if os.path.exists(path):
        # os.walk traverses the directory tree rooted at the provided path.
for root, directories, files in os.walk(path):
            # Iterate over every file found in the current directory.
for take_file in files:
# Collecting files other than hidden files or hidden directories.
if not take_file.startswith('.') and not os.path.basename(root).startswith('.'):
# To get the path of the current file.
pathname = os.path.join(root,take_file)
# To get the information of that particular file.
stat = os.stat(pathname)
                    # Create a dictionary to store the file's details.
dicts = {
'name': take_file,
'path': pathname,
'size': str(round(stat.st_size/ (1024*1024), 2))+'MB',
'extension': Path(take_file).suffix
}
store_list.append(dicts)
        # Check whether any files were found in the directory.
        if len(store_list) == 0:
            print('\nThere are no files inside the directory')
        # Otherwise, send the collected details to the server through the given PORT number.
else:
try:
datas = json.dumps(store_list)
url = 'http://localhost:{}/'.format(port)
x = requests.post(url, data = datas)
print("Please wait...")
print('Sending data to server...')
print("Success!")
            except requests.exceptions.RequestException:
                print("Could not send data to the server - check the PORT number!")
else:
print('\nInvalid path!')
| [
"os.path.exists",
"requests.post",
"pathlib.Path",
"click.option",
"json.dumps",
"os.path.join",
"os.path.basename",
"os.stat",
"click.command",
"os.walk"
] | [((220, 235), 'click.command', 'click.command', ([], {}), '()\n', (233, 235), False, 'import click\n'), ((237, 355), 'click.option', 'click.option', (['"""--path"""'], {'prompt': '"""Enter the path"""', 'help': '"""Specify the path, that you want to display"""', 'required': '(True)'}), "('--path', prompt='Enter the path', help=\n 'Specify the path, that you want to display', required=True)\n", (249, 355), False, 'import click\n'), ((351, 538), 'click.option', 'click.option', (['"""--port"""'], {'prompt': '"""Enter the PORT number that is used to start the server"""', 'help': '"""Specify the PORT number, that you already used to start server"""', 'required': '(True)'}), "('--port', prompt=\n 'Enter the PORT number that is used to start the server', help=\n 'Specify the PORT number, that you already used to start server',\n required=True)\n", (363, 538), False, 'import click\n'), ((902, 922), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (916, 922), False, 'import os\n'), ((1054, 1067), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1061, 1067), False, 'import os\n'), ((2338, 2360), 'json.dumps', 'json.dumps', (['store_list'], {}), '(store_list)\n', (2348, 2360), False, 'import json\n'), ((2439, 2469), 'requests.post', 'requests.post', (['url'], {'data': 'datas'}), '(url, data=datas)\n', (2452, 2469), False, 'import requests\n'), ((1485, 1514), 'os.path.join', 'os.path.join', (['root', 'take_file'], {}), '(root, take_file)\n', (1497, 1514), False, 'import os\n'), ((1613, 1630), 'os.stat', 'os.stat', (['pathname'], {}), '(pathname)\n', (1620, 1630), False, 'import os\n'), ((1939, 1954), 'pathlib.Path', 'Path', (['take_file'], {}), '(take_file)\n', (1943, 1954), False, 'from pathlib import Path\n'), ((1353, 1375), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (1369, 1375), False, 'import os\n')] |
import numpy as np
from scipy.integrate.odepack import odeint
import matplotlib.pyplot as plt
import functions
from chondrocyte import Voltage_clamp
from params import params_dict
import matplotlib as mpl
"""
The code is used to create Figure 4B of the submitted paper
"Probing the putative role of KATP channels and biological variability in a mathematical model of chondrocyte electrophysiology".
"""
mpl.rcParams['font.family'] = 'Avenir'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 2
# define time span
params_dict.update(t_final=180)
t_final = params_dict["t_final"]
dt = params_dict["dt"]
t = np.linspace(0, t_final, int(t_final/dt))
params_dict.update(Mg_i=1)
# Define initial condition vector
y0 = (params_dict["V_0"], params_dict["Na_i_0"], params_dict["K_i_0"], params_dict["Ca_i_0"], params_dict["H_i_0"],
params_dict["Cl_i_0"], params_dict["a_ur_0"], params_dict["i_ur_0"], params_dict["vol_i_0"],
params_dict["cal_0"])
fig, ax = plt.subplots()
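# Solve the model for three intracellular Mg2+ concentrations (0.1, 1.0 and 10 mM)
# and overlay the resulting membrane-potential traces.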
params_dict.update(K_o_0=7, Mg_i=0.1)
solution1 = odeint(functions.rhs, y0, t, args=(params_dict,))
ax.plot(t, solution1[:,0], label="$\mathrm{[Mg^{2+}]_i}$=0.1 mM", color="k")
params_dict.update(Mg_i=1.0)
solution2 = odeint(functions.rhs, y0, t, args=(params_dict,))
ax.plot(t, solution2[:,0], label="$\mathrm{[Mg^{2+}]_i}$=1.0 mM", color="b")
params_dict.update(Mg_i=10)
solution3 = odeint(functions.rhs, y0, t, args=(params_dict,))
ax.plot(t, solution3[:,0], label="$\mathrm{[Mg^{2+}]_i}$=10 mM", color="r")
ax.set_xlabel("Time [s]", fontsize=16)
ax.set_ylabel("Membrane Potential [mV]", fontsize=16)
ax.xaxis.set_tick_params(which='major', size=14, width=2, direction='out')
ax.yaxis.set_tick_params(which='major', size=14, width=2, direction='out')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.legend(loc='upper right')
# plt.savefig("Fig4_B.png", bbox_inches='tight')
plt.show()
| [
"scipy.integrate.odepack.odeint",
"params.params_dict.update",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((529, 560), 'params.params_dict.update', 'params_dict.update', ([], {'t_final': '(180)'}), '(t_final=180)\n', (547, 560), False, 'from params import params_dict\n'), ((663, 689), 'params.params_dict.update', 'params_dict.update', ([], {'Mg_i': '(1)'}), '(Mg_i=1)\n', (681, 689), False, 'from params import params_dict\n'), ((981, 995), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (993, 995), True, 'import matplotlib.pyplot as plt\n'), ((997, 1034), 'params.params_dict.update', 'params_dict.update', ([], {'K_o_0': '(7)', 'Mg_i': '(0.1)'}), '(K_o_0=7, Mg_i=0.1)\n', (1015, 1034), False, 'from params import params_dict\n'), ((1048, 1097), 'scipy.integrate.odepack.odeint', 'odeint', (['functions.rhs', 'y0', 't'], {'args': '(params_dict,)'}), '(functions.rhs, y0, t, args=(params_dict,))\n', (1054, 1097), False, 'from scipy.integrate.odepack import odeint\n'), ((1176, 1204), 'params.params_dict.update', 'params_dict.update', ([], {'Mg_i': '(1.0)'}), '(Mg_i=1.0)\n', (1194, 1204), False, 'from params import params_dict\n'), ((1217, 1266), 'scipy.integrate.odepack.odeint', 'odeint', (['functions.rhs', 'y0', 't'], {'args': '(params_dict,)'}), '(functions.rhs, y0, t, args=(params_dict,))\n', (1223, 1266), False, 'from scipy.integrate.odepack import odeint\n'), ((1345, 1372), 'params.params_dict.update', 'params_dict.update', ([], {'Mg_i': '(10)'}), '(Mg_i=10)\n', (1363, 1372), False, 'from params import params_dict\n'), ((1385, 1434), 'scipy.integrate.odepack.odeint', 'odeint', (['functions.rhs', 'y0', 't'], {'args': '(params_dict,)'}), '(functions.rhs, y0, t, args=(params_dict,))\n', (1391, 1434), False, 'from scipy.integrate.odepack import odeint\n'), ((1829, 1858), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1839, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1916, 1918), True, 'import matplotlib.pyplot as plt\n')] |
import enum
import pdb
import functools
import pathlib
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, cast, Union
from xml.etree import ElementTree
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Filter,
Demultiplexer,
IterKeyZipper,
LineReader,
)
from torchvision.datasets import VOCDetection
from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset
from Dataset4EO.datasets.utils._internal import (
path_accessor,
getitem,
INFINITE_BUFFER_SIZE,
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from Dataset4EO.features import BoundingBox, Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "voc"
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor')
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class VOC(Dataset):
"""
- **homepage**: http://host.robots.ox.ac.uk/pascal/VOC/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2012",
task: str = "segmentation",
skip_integrity_check: bool = False,
) -> None:
self._year = self._verify_str_arg(year, "year", ("2007", "2008", "2009", "2010", "2011", "2012"))
if split == "test" and year != "2007":
raise ValueError("`split='test'` is only available for `year='2007'`")
else:
self._split = self._verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._task = self._verify_str_arg(task, "task", ("detection", "segmentation"))
self._anns_folder = "Annotations" if task == "detection" else "SegmentationClass"
self._split_folder = "Main" if task == "detection" else "Segmentation"
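        # VOC stores split files under ImageSets/Main (detection) and
        # ImageSets/Segmentation (segmentation); annotations live in
        # Annotations/ (XML) or SegmentationClass/ (PNG masks).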
self._categories = _info()["categories"]
self.CLASSES = CLASSES
self.PALETTE = PALETTE
super().__init__(root, skip_integrity_check=skip_integrity_check)
_TRAIN_VAL_ARCHIVES = {
"2007": ("VOCtrainval_06-Nov-2007.tar", "7d8cd951101b0957ddfd7a530bdc8a94f06121cfc1e511bb5937e973020c7508"),
"2008": ("VOCtrainval_14-Jul-2008.tar", "7f0ca53c1b5a838fbe946965fc106c6e86832183240af5c88e3f6c306318d42e"),
"2009": ("VOCtrainval_11-May-2009.tar", "11cbe1741fb5bdadbbca3c08e9ec62cd95c14884845527d50847bc2cf57e7fd6"),
"2010": ("VOCtrainval_03-May-2010.tar", "1af4189cbe44323ab212bff7afbc7d0f55a267cc191eb3aac911037887e5c7d4"),
"2011": ("VOCtrainval_25-May-2011.tar", "0a7f5f5d154f7290ec65ec3f78b72ef72c6d93ff6d79acd40dc222a9ee5248ba"),
"2012": ("VOCtrainval_11-May-2012.tar", "e14f763270cf193d0b5f74b169f44157a4b0c6efa708f4dd0ff78ee691763bcb"),
}
_TEST_ARCHIVES = {
"2007": ("VOCtest_06-Nov-2007.tar", "6836888e2e01dca84577a849d339fa4f73e1e4f135d312430c4856b5609b4892")
}
def _resources(self) -> List[OnlineResource]:
file_name, sha256 = (self._TEST_ARCHIVES if self._split == "test" else self._TRAIN_VAL_ARCHIVES)[self._year]
archive = HttpResource(f"http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}", sha256=sha256)
return [archive]
def _is_in_folder(self, data: Tuple[str, Any], *, name: str, depth: int = 1) -> bool:
path = pathlib.Path(data[0])
return name in path.parent.parts[-depth:]
class _Demux(enum.IntEnum):
SPLIT = 0
IMAGES = 1
ANNS = 2
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
if self._is_in_folder(data, name="ImageSets", depth=2):
return self._Demux.SPLIT
elif self._is_in_folder(data, name="JPEGImages"):
return self._Demux.IMAGES
elif self._is_in_folder(data, name=self._anns_folder):
return self._Demux.ANNS
else:
return None
def _parse_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return cast(Dict[str, Any], VOCDetection.parse_voc_xml(ElementTree.parse(buffer).getroot())["annotation"])
def _prepare_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
anns = self._parse_detection_ann(buffer)
instances = anns["object"]
return dict(
bounding_boxes=BoundingBox(
[
[int(instance["bndbox"][part]) for part in ("xmin", "ymin", "xmax", "ymax")]
for instance in instances
],
format="xyxy",
image_size=cast(Tuple[int, int], tuple(int(anns["size"][dim]) for dim in ("height", "width"))),
),
labels=Label(
[self._categories.index(instance["name"]) for instance in instances], categories=self._categories
),
)
def _prepare_segmentation_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return dict(segmentation=EncodedImage.from_file(buffer))
def _prepare_sample(
self,
data: Tuple[Tuple[Tuple[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, image_data = split_and_image_data
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
image_path = pathlib.PosixPath(image_path).name
ann_path = pathlib.PosixPath(ann_path).name
#{'img_info': {'filename': '2009_000801.jpg', 'ann': {'seg_map': '2009_000801.png'}}, 'ann_info': {'seg_map': '2009_000801.png'}}
img_info = dict({'filename':image_path, 'ann':dict({'seg_map':ann_path})})
return img_info
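        # NOTE: the code below this return is unreachable; the full sample dict is
        # kept only for reference.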
return dict(
(self._prepare_detection_ann if self._task == "detection" else self._prepare_segmentation_ann)(ann_buffer),
image_path=image_path,
image=EncodedImage.from_file(image_buffer),
ann_path=ann_path,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
split_dp, images_dp, anns_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
("train", "2007", "detection"): 2_501,
("train", "2007", "segmentation"): 209,
("train", "2008", "detection"): 2_111,
("train", "2008", "segmentation"): 511,
("train", "2009", "detection"): 3_473,
("train", "2009", "segmentation"): 749,
("train", "2010", "detection"): 4_998,
("train", "2010", "segmentation"): 964,
("train", "2011", "detection"): 5_717,
("train", "2011", "segmentation"): 1_112,
("train", "2012", "detection"): 5_717,
("train", "2012", "segmentation"): 1_464,
("val", "2007", "detection"): 2_510,
("val", "2007", "segmentation"): 213,
("val", "2008", "detection"): 2_221,
("val", "2008", "segmentation"): 512,
("val", "2009", "detection"): 3_581,
("val", "2009", "segmentation"): 750,
("val", "2010", "detection"): 5_105,
("val", "2010", "segmentation"): 964,
("val", "2011", "detection"): 5_823,
("val", "2011", "segmentation"): 1_111,
("val", "2012", "detection"): 5_823,
("val", "2012", "segmentation"): 1_449,
("trainval", "2007", "detection"): 5_011,
("trainval", "2007", "segmentation"): 422,
("trainval", "2008", "detection"): 4_332,
("trainval", "2008", "segmentation"): 1_023,
("trainval", "2009", "detection"): 7_054,
("trainval", "2009", "segmentation"): 1_499,
("trainval", "2010", "detection"): 10_103,
("trainval", "2010", "segmentation"): 1_928,
("trainval", "2011", "detection"): 11_540,
("trainval", "2011", "segmentation"): 2_223,
("trainval", "2012", "detection"): 11_540,
("trainval", "2012", "segmentation"): 2_913,
("test", "2007", "detection"): 4_952,
("test", "2007", "segmentation"): 210,
}[(self._split, self._year, self._task)]
def _filter_anns(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == self._Demux.ANNS
def _generate_categories(self) -> List[str]:
self._task = "detection"
resources = self._resources()
archive_dp = resources[0].load(self._root)
dp = Filter(archive_dp, self._filter_anns)
dp = Mapper(dp, self._parse_detection_ann, input_col=1)
categories = sorted({instance["name"] for _, anns in dp for instance in anns["object"]})
# We add a background category to be used during segmentation
categories.insert(0, "__background__")
return categories
| [
"torchdata.datapipes.iter.Mapper",
"Dataset4EO.datasets.utils._internal.getitem",
"xml.etree.ElementTree.parse",
"pathlib.Path",
"pathlib.PosixPath",
"Dataset4EO.datasets.utils._internal.read_categories_file",
"torchdata.datapipes.iter.LineReader",
"Dataset4EO.datasets.utils._internal.path_comparator"... | [((3711, 3818), 'Dataset4EO.datasets.utils.HttpResource', 'HttpResource', (['f"""http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}"""'], {'sha256': 'sha256'}), "(\n f'http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}',\n sha256=sha256)\n", (3723, 3818), False, 'from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset\n'), ((3941, 3962), 'pathlib.Path', 'pathlib.Path', (['data[0]'], {}), '(data[0])\n', (3953, 3962), False, 'import pathlib\n'), ((6688, 6794), 'torchdata.datapipes.iter.Demultiplexer', 'Demultiplexer', (['archive_dp', '(3)', 'self._classify_archive'], {'drop_none': '(True)', 'buffer_size': 'INFINITE_BUFFER_SIZE'}), '(archive_dp, 3, self._classify_archive, drop_none=True,\n buffer_size=INFINITE_BUFFER_SIZE)\n', (6701, 6794), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((7065, 7098), 'torchdata.datapipes.iter.LineReader', 'LineReader', (['split_dp'], {'decode': '(True)'}), '(split_dp, decode=True)\n', (7075, 7098), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((7118, 7142), 'Dataset4EO.datasets.utils._internal.hint_shuffling', 'hint_shuffling', (['split_dp'], {}), '(split_dp)\n', (7132, 7142), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7162, 7185), 'Dataset4EO.datasets.utils._internal.hint_sharding', 'hint_sharding', (['split_dp'], {}), '(split_dp)\n', (7175, 7185), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7527, 7559), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._prepare_sample'], {}), '(dp, self._prepare_sample)\n', (7533, 7559), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((9955, 9992), 'torchdata.datapipes.iter.Filter', 'Filter', (['archive_dp', 'self._filter_anns'], {}), '(archive_dp, self._filter_anns)\n', (9961, 9992), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((10006, 10056), 'torchdata.datapipes.iter.Mapper', 'Mapper', (['dp', 'self._parse_detection_ann'], {'input_col': '(1)'}), '(dp, self._parse_detection_ann, input_col=1)\n', (10012, 10056), False, 'from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter, Demultiplexer, IterKeyZipper, LineReader\n'), ((1465, 1491), 'Dataset4EO.datasets.utils._internal.read_categories_file', 'read_categories_file', (['NAME'], {}), '(NAME)\n', (1485, 1491), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5914, 5943), 'pathlib.PosixPath', 'pathlib.PosixPath', (['image_path'], {}), '(image_path)\n', (5931, 5943), False, 'import pathlib\n'), ((5968, 5995), 'pathlib.PosixPath', 'pathlib.PosixPath', (['ann_path'], {}), '(ann_path)\n', (5985, 5995), False, 'import pathlib\n'), ((6899, 6961), 'functools.partial', 'functools.partial', (['self._is_in_folder'], {'name': 'self._split_folder'}), '(self._is_in_folder, name=self._split_folder)\n', (6916, 6961), False, 'import functools\n'), ((6999, 7044), 
'Dataset4EO.datasets.utils._internal.path_comparator', 'path_comparator', (['"""name"""', 'f"""{self._split}.txt"""'], {}), "('name', f'{self._split}.txt')\n", (7014, 7044), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((5530, 5560), 'Dataset4EO.features.EncodedImage.from_file', 'EncodedImage.from_file', (['buffer'], {}), '(buffer)\n', (5552, 5560), False, 'from Dataset4EO.features import BoundingBox, Label, EncodedImage\n'), ((6441, 6477), 'Dataset4EO.features.EncodedImage.from_file', 'EncodedImage.from_file', (['image_buffer'], {}), '(image_buffer)\n', (6463, 6477), False, 'from Dataset4EO.features import BoundingBox, Label, EncodedImage\n'), ((7372, 7398), 'Dataset4EO.datasets.utils._internal.getitem', 'getitem', (['*([0] * level)', '(1)'], {}), '(*([0] * level), 1)\n', (7379, 7398), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((7425, 7446), 'Dataset4EO.datasets.utils._internal.path_accessor', 'path_accessor', (['"""stem"""'], {}), "('stem')\n", (7438, 7446), False, 'from Dataset4EO.datasets.utils._internal import path_accessor, getitem, INFINITE_BUFFER_SIZE, path_comparator, hint_sharding, hint_shuffling, read_categories_file\n'), ((4644, 4669), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['buffer'], {}), '(buffer)\n', (4661, 4669), False, 'from xml.etree import ElementTree\n')] |
# Generated by Django 2.2.9 on 2020-01-29 16:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0008_cartitem_user'),
]
operations = [
migrations.RemoveField(
model_name='cartitem',
name='user',
),
]
| [
"django.db.migrations.RemoveField"
] | [((219, 277), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""cartitem"""', 'name': '"""user"""'}), "(model_name='cartitem', name='user')\n", (241, 277), False, 'from django.db import migrations\n')] |
from django.db import models
class Health(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
class Meta:
abstract = True
class HealthActivity(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
AGE_16_TO_24 = '16-24'
AGE_25_TO_34 = '25-34'
AGE_35_TO_44 = '35-44'
AGE_45_TO_54 = '45-54'
AGE_55_TO_64 = '55-64'
AGE_65_TO_74 = '65-74'
AGE_75_PLUS = '75+'
AGE_ALL = 'ALL'
AGE_CHOICES = (
(AGE_16_TO_24, '16-24'),
(AGE_25_TO_34, '25-34'),
(AGE_35_TO_44, '35-44'),
(AGE_45_TO_54, '45-54'),
(AGE_55_TO_64, '55-64'),
(AGE_65_TO_74, '65-74'),
(AGE_75_PLUS, '75+'),
(AGE_ALL, 'All Ages'),
)
ACTIVITY_MEETS = 'Meets'
ACTIVITY_SOME = 'Some'
ACTIVITY_LOW = 'Low'
ACTIVITY_BASES = 'Bases'
ACTIVITY_CHOICES = (
(ACTIVITY_MEETS, 'Meets Activity'),
(ACTIVITY_SOME, 'Some Activity'),
(ACTIVITY_LOW, 'Low Activity'),
(ACTIVITY_BASES, 'Bases'),
)
year = models.IntegerField()
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
age = models.CharField(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)
activity = models.CharField(max_length=5, choices=ACTIVITY_CHOICES, default=ACTIVITY_MEETS)
percentage = models.FloatField(default=0.0)
class HealthWeight(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
year = models.IntegerField()
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
weight_mean = models.FloatField()
weight_stderr = models.FloatField()
base = models.IntegerField()
class HealthBMI(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
AGE_16_TO_24 = '16-24'
AGE_25_TO_34 = '25-34'
AGE_35_TO_44 = '35-44'
AGE_45_TO_54 = '45-54'
AGE_55_TO_64 = '55-64'
AGE_65_TO_74 = '65-74'
AGE_75_PLUS = '75+'
AGE_ALL = 'ALL'
AGE_CHOICES = (
(AGE_16_TO_24, '16-24'),
(AGE_25_TO_34, '25-34'),
(AGE_35_TO_44, '35-44'),
(AGE_45_TO_54, '45-54'),
(AGE_55_TO_64, '55-64'),
(AGE_65_TO_74, '65-74'),
(AGE_75_PLUS, '75+'),
(AGE_ALL, 'All Ages'),
)
BMI_UNDERWEIGHT = 'U'
BMI_NORMAL = 'N'
BMI_OVERWEIGHT = 'O'
BMI_OBESE = 'B'
BMI_MORBIDLY_OBESE = 'M'
BMI_OVERWEIGHT_OBESE = 'W'
BMI_MEAN = 'E'
BMI_STDERR = 'S'
BMI_BASE = 'A'
BMI_ALL = 'L'
BMI_CHOICES = (
(BMI_UNDERWEIGHT, 'Underweight'),
(BMI_NORMAL, 'Normal'),
(BMI_OVERWEIGHT, 'Overweight'),
(BMI_OBESE, 'Obese'),
(BMI_MORBIDLY_OBESE, 'Morbidly Obese'),
(BMI_OVERWEIGHT_OBESE, 'Overweight including obese'),
(BMI_MEAN, 'Mean'),
(BMI_STDERR, 'Std error of the mean'),
(BMI_BASE, 'Base'),
(BMI_ALL, 'All'),
)
year = models.IntegerField()
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
age = models.CharField(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)
bmi = models.CharField(max_length=1, choices=BMI_CHOICES, default=BMI_NORMAL)
percentage = models.FloatField(default=0.0)
# Create your models here.
class HealthFruitVeg(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
AGE_16_TO_24 = '16-24'
AGE_25_TO_34 = '25-34'
AGE_35_TO_44 = '35-44'
AGE_45_TO_54 = '45-54'
AGE_55_TO_64 = '55-64'
AGE_65_TO_74 = '65-74'
AGE_75_PLUS = '75+'
AGE_ALL = 'ALL'
AGE_CHOICES = (
(AGE_16_TO_24, '16-24'),
(AGE_25_TO_34, '25-34'),
(AGE_35_TO_44, '35-44'),
(AGE_45_TO_54, '45-54'),
(AGE_55_TO_64, '55-64'),
(AGE_65_TO_74, '65-74'),
(AGE_75_PLUS, '75+'),
(AGE_ALL, 'All Ages'),
)
FRUITVEG_NONE = 'N'
FRUITVEG_LESS_1 = '1'
FRUITVEG_LESS_2 = '2'
FRUITVEG_LESS_3 = '3'
FRUITVEG_LESS_4 = '4'
FRUITVEG_LESS_5 = '5'
FRUITVEG_MORE_5 = '6'
FRUITVEG_MEAN = 'M'
FRUITVEG_STDERR = 'S'
FRUITVEG_MEDIAN = 'D'
FRUITVEG_BASE = 'B'
FRUITVEG_CHOICES = (
(FRUITVEG_NONE, 'No Fruit & Veg'),
(FRUITVEG_LESS_1, 'Under 1 portion'),
(FRUITVEG_LESS_2, '1-2 Portions'),
(FRUITVEG_LESS_3, '2-3 Portions'),
(FRUITVEG_LESS_4, '3-4 Portions'),
(FRUITVEG_LESS_5, '4-5 Portions'),
(FRUITVEG_MORE_5, '5+ Portions'),
(FRUITVEG_MEAN, 'Mean Portions'),
(FRUITVEG_STDERR, 'Standard error of the mean'),
(FRUITVEG_MEDIAN, 'Median Portions'),
        (FRUITVEG_BASE, 'Base'),
)
year = models.IntegerField()
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
age = models.CharField(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)
fruitveg = models.CharField(max_length=1, choices=FRUITVEG_CHOICES, default=FRUITVEG_NONE)
percentage = models.FloatField(default=0.0)
# Create your models here.
class HealthHealth(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
HEALTH_VG = 'VG'
HEALTH_VB = 'VB'
HEALTH_ILL = 'ILL'
HEALTH_SICK = 'SICK'
HEALTH_ALL = 'ALL'
HEALTH_BASE = 'BASE'
HEALTH_CHOICES = (
(HEALTH_VG, 'Very good/good health'),
(HEALTH_VB, 'Very bad/bad health'),
(HEALTH_ILL, 'At least one longstanding illness'),
(HEALTH_SICK, 'Acute sickness'),
(HEALTH_ALL, 'All'),
(HEALTH_BASE, 'Bases'),
)
year = models.IntegerField()
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
health = models.CharField(max_length=4, choices=HEALTH_CHOICES, default=HEALTH_VG)
percentage = models.FloatField(default=0.0)
# Create your models here.
| [
"django.db.models.FloatField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((1245, 1266), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1264, 1266), False, 'from django.db import models\n'), ((1280, 1348), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'GENDER_CHOICES', 'default': 'MALE'}), '(max_length=1, choices=GENDER_CHOICES, default=MALE)\n', (1296, 1348), False, 'from django.db import models\n'), ((1359, 1432), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'choices': 'AGE_CHOICES', 'default': 'AGE_16_TO_24'}), '(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)\n', (1375, 1432), False, 'from django.db import models\n'), ((1448, 1533), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5)', 'choices': 'ACTIVITY_CHOICES', 'default': 'ACTIVITY_MEETS'}), '(max_length=5, choices=ACTIVITY_CHOICES, default=ACTIVITY_MEETS\n )\n', (1464, 1533), False, 'from django.db import models\n'), ((1547, 1577), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1564, 1577), False, 'from django.db import models\n'), ((1775, 1796), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1794, 1796), False, 'from django.db import models\n'), ((1810, 1878), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'GENDER_CHOICES', 'default': 'MALE'}), '(max_length=1, choices=GENDER_CHOICES, default=MALE)\n', (1826, 1878), False, 'from django.db import models\n'), ((1898, 1917), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1915, 1917), False, 'from django.db import models\n'), ((1938, 1957), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1955, 1957), False, 'from django.db import models\n'), ((1969, 1990), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1988, 1990), False, 'from django.db import models\n'), ((3318, 3339), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3337, 3339), False, 'from django.db import models\n'), ((3353, 3421), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'GENDER_CHOICES', 'default': 'MALE'}), '(max_length=1, choices=GENDER_CHOICES, default=MALE)\n', (3369, 3421), False, 'from django.db import models\n'), ((3432, 3505), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'choices': 'AGE_CHOICES', 'default': 'AGE_16_TO_24'}), '(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)\n', (3448, 3505), False, 'from django.db import models\n'), ((3516, 3587), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'BMI_CHOICES', 'default': 'BMI_NORMAL'}), '(max_length=1, choices=BMI_CHOICES, default=BMI_NORMAL)\n', (3532, 3587), False, 'from django.db import models\n'), ((3606, 3636), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (3623, 3636), False, 'from django.db import models\n'), ((5172, 5193), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5191, 5193), False, 'from django.db import models\n'), ((5207, 5275), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'GENDER_CHOICES', 'default': 'MALE'}), '(max_length=1, choices=GENDER_CHOICES, default=MALE)\n', (5223, 5275), False, 'from django.db import models\n'), ((5286, 5359), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 
'choices': 'AGE_CHOICES', 'default': 'AGE_16_TO_24'}), '(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)\n', (5302, 5359), False, 'from django.db import models\n'), ((5375, 5454), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'FRUITVEG_CHOICES', 'default': 'FRUITVEG_NONE'}), '(max_length=1, choices=FRUITVEG_CHOICES, default=FRUITVEG_NONE)\n', (5391, 5454), False, 'from django.db import models\n'), ((5473, 5503), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (5490, 5503), False, 'from django.db import models\n'), ((6149, 6170), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (6168, 6170), False, 'from django.db import models\n'), ((6184, 6252), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'GENDER_CHOICES', 'default': 'MALE'}), '(max_length=1, choices=GENDER_CHOICES, default=MALE)\n', (6200, 6252), False, 'from django.db import models\n'), ((6266, 6339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'HEALTH_CHOICES', 'default': 'HEALTH_VG'}), '(max_length=4, choices=HEALTH_CHOICES, default=HEALTH_VG)\n', (6282, 6339), False, 'from django.db import models\n'), ((6358, 6388), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (6375, 6388), False, 'from django.db import models\n')] |
# Copyright (c) 2020, <NAME>, University of Washington
# This file is part of rcwa_tf
# Written by <NAME> (Email: <EMAIL>)
import tensorflow as tf
import numpy as np
def convmat(A, P, Q):
'''
This function computes a convolution matrix for a real space matrix `A` that
represents either a relative permittivity or permeability distribution for a
set of pixels, layers, and batch.
Args:
A: A `tf.Tensor` of dtype `complex` and shape `(batchSize, pixelsX,
pixelsY, Nlayers, Nx, Ny)` specifying real space values on a Cartesian
grid.
P: A positive and odd `int` specifying the number of spatial harmonics
along `T1`.
Q: A positive and odd `int` specifying the number of spatial harmonics
along `T2`.
Returns:
A `tf.Tensor` of dtype `complex` and shape `(batchSize, pixelsX,
pixelsY, Nlayers, P * Q, P * Q)` representing a stack of convolution
matrices based on `A`.
'''
# Determine the shape of A.
batchSize, pixelsX, pixelsY, Nlayers, Nx, Ny = A.shape
# Compute indices of spatial harmonics.
NH = P * Q # total number of harmonics.
p_max = np.floor(P / 2.0)
    q_max = np.floor(Q / 2.0)
# Indices along T1 and T2.
p = np.linspace(-p_max, p_max, P)
q = np.linspace(-q_max, q_max, Q)
# Compute array indices of the center harmonic.
p0 = int(np.floor(Nx / 2))
q0 = int(np.floor(Ny / 2))
# Fourier transform the real space distributions.
A = tf.signal.fftshift(tf.signal.fft2d(A), axes = (4, 5)) / (Nx * Ny)
# Build the matrix.
firstCoeff = True
for qrow in range(Q):
for prow in range(P):
for qcol in range(Q):
for pcol in range(P):
pfft = int(p[prow] - p[pcol])
qfft = int(q[qrow] - q[qcol])
# Sequentially concatenate Fourier coefficients.
value = A[:, :, :, :, p0 + pfft, q0 + qfft]
value = value[:, :, :, :, tf.newaxis, tf.newaxis]
if firstCoeff:
firstCoeff = False
C = value
else:
C = tf.concat([C, value], axis = 5)
# Reshape the coefficients tensor into a stack of convolution matrices.
convMatrixShape = (batchSize, pixelsX, pixelsY, Nlayers, P * Q, P * Q)
matrixStack = tf.reshape(C, shape = convMatrixShape)
return matrixStack
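# Illustrative use of convmat (variable name assumed): for a permittivity grid
# `er_t` of shape (batchSize, pixelsX, pixelsY, Nlayers, Nx, Ny), 3x3 spatial
# harmonics yield convolution matrices of shape (..., Nlayers, 9, 9).
# er_c = convmat(er_t, P=3, Q=3)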
def redheffer_star_product(SA, SB):
'''
This function computes the redheffer star product of two block matrices,
which is the result of combining the S-parameter of two systems.
Args:
SA: A `dict` of `tf.Tensor` values specifying the block matrix
corresponding to the S-parameters of a system. `SA` needs to have the
keys ('S11', 'S12', 'S21', 'S22'), where each key maps to a `tf.Tensor`
of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`, where NH is the
total number of spatial harmonics.
SB: A `dict` of `tf.Tensor` values specifying the block matrix
corresponding to the S-parameters of a second system. `SB` needs to have
the keys ('S11', 'S12', 'S21', 'S22'), where each key maps to a
`tf.Tensor` of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`, where
NH is the total number of spatial harmonics.
Returns:
A `dict` of `tf.Tensor` values specifying the block matrix
        corresponding to the S-parameters of the combined system. The returned
        `dict` has the keys ('S11', 'S12', 'S21', 'S22'), where each key maps to
        a `tf.Tensor` of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`,
where NH is the total number of spatial harmonics.
'''
# Define the identity matrix.
batchSize, pixelsX, pixelsY, dim, _ = SA['S11'].shape
I = tf.eye(num_rows = dim, dtype = tf.complex64)
I = I[tf.newaxis, tf.newaxis, tf.newaxis, :, :]
I = tf.tile(I, multiples = (batchSize, pixelsX, pixelsY, 1, 1))
# Calculate S11.
S11 = tf.linalg.inv(I - tf.linalg.matmul(SB['S11'], SA['S22']))
S11 = tf.linalg.matmul(S11, SB['S11'])
S11 = tf.linalg.matmul(SA['S12'], S11)
S11 = SA['S11'] + tf.linalg.matmul(S11, SA['S21'])
# Calculate S12.
S12 = tf.linalg.inv(I - tf.linalg.matmul(SB['S11'], SA['S22']))
S12 = tf.linalg.matmul(S12, SB['S12'])
S12 = tf.linalg.matmul(SA['S12'], S12)
# Calculate S21.
S21 = tf.linalg.inv(I - tf.linalg.matmul(SA['S22'], SB['S11']))
S21 = tf.linalg.matmul(S21, SA['S21'])
S21 = tf.linalg.matmul(SB['S21'], S21)
# Calculate S22.
S22 = tf.linalg.inv(I - tf.linalg.matmul(SA['S22'], SB['S11']))
S22 = tf.linalg.matmul(S22, SA['S22'])
S22 = tf.linalg.matmul(SB['S21'], S22)
S22 = SB['S22'] + tf.linalg.matmul(S22, SB['S12'])
# Store S parameters in an output dictionary.
S = dict({})
S['S11'] = S11
S['S12'] = S12
S['S21'] = S21
S['S22'] = S22
return S
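# Illustrative use (variable names assumed): combine per-layer scattering
# matrices sequentially into a global S-matrix.
# S_global = layer_s_params[0]
# for S_layer in layer_s_params[1:]:
#     S_global = redheffer_star_product(S_global, S_layer)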
| [
"tensorflow.eye",
"tensorflow.tile",
"numpy.floor",
"tensorflow.concat",
"numpy.linspace",
"tensorflow.reshape",
"tensorflow.linalg.matmul",
"tensorflow.signal.fft2d"
] | [((1189, 1206), 'numpy.floor', 'np.floor', (['(P / 2.0)'], {}), '(P / 2.0)\n', (1197, 1206), True, 'import numpy as np\n'), ((1219, 1236), 'numpy.floor', 'np.floor', (['(P / 2.0)'], {}), '(P / 2.0)\n', (1227, 1236), True, 'import numpy as np\n'), ((1277, 1306), 'numpy.linspace', 'np.linspace', (['(-p_max)', 'p_max', 'P'], {}), '(-p_max, p_max, P)\n', (1288, 1306), True, 'import numpy as np\n'), ((1315, 1344), 'numpy.linspace', 'np.linspace', (['(-q_max)', 'q_max', 'Q'], {}), '(-q_max, q_max, Q)\n', (1326, 1344), True, 'import numpy as np\n'), ((2470, 2506), 'tensorflow.reshape', 'tf.reshape', (['C'], {'shape': 'convMatrixShape'}), '(C, shape=convMatrixShape)\n', (2480, 2506), True, 'import tensorflow as tf\n'), ((3929, 3969), 'tensorflow.eye', 'tf.eye', ([], {'num_rows': 'dim', 'dtype': 'tf.complex64'}), '(num_rows=dim, dtype=tf.complex64)\n', (3935, 3969), True, 'import tensorflow as tf\n'), ((4034, 4091), 'tensorflow.tile', 'tf.tile', (['I'], {'multiples': '(batchSize, pixelsX, pixelsY, 1, 1)'}), '(I, multiples=(batchSize, pixelsX, pixelsY, 1, 1))\n', (4041, 4091), True, 'import tensorflow as tf\n'), ((4198, 4230), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S11', "SB['S11']"], {}), "(S11, SB['S11'])\n", (4214, 4230), True, 'import tensorflow as tf\n'), ((4241, 4273), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S12']", 'S11'], {}), "(SA['S12'], S11)\n", (4257, 4273), True, 'import tensorflow as tf\n'), ((4433, 4465), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S12', "SB['S12']"], {}), "(S12, SB['S12'])\n", (4449, 4465), True, 'import tensorflow as tf\n'), ((4476, 4508), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S12']", 'S12'], {}), "(SA['S12'], S12)\n", (4492, 4508), True, 'import tensorflow as tf\n'), ((4613, 4645), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S21', "SA['S21']"], {}), "(S21, SA['S21'])\n", (4629, 4645), True, 'import tensorflow as tf\n'), ((4656, 4688), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S21']", 'S21'], {}), "(SB['S21'], S21)\n", (4672, 4688), True, 'import tensorflow as tf\n'), ((4793, 4825), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S22', "SA['S22']"], {}), "(S22, SA['S22'])\n", (4809, 4825), True, 'import tensorflow as tf\n'), ((4836, 4868), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S21']", 'S22'], {}), "(SB['S21'], S22)\n", (4852, 4868), True, 'import tensorflow as tf\n'), ((1415, 1431), 'numpy.floor', 'np.floor', (['(Nx / 2)'], {}), '(Nx / 2)\n', (1423, 1431), True, 'import numpy as np\n'), ((1446, 1462), 'numpy.floor', 'np.floor', (['(Ny / 2)'], {}), '(Ny / 2)\n', (1454, 1462), True, 'import numpy as np\n'), ((4296, 4328), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S11', "SA['S21']"], {}), "(S11, SA['S21'])\n", (4312, 4328), True, 'import tensorflow as tf\n'), ((4891, 4923), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S22', "SB['S12']"], {}), "(S22, SB['S12'])\n", (4907, 4923), True, 'import tensorflow as tf\n'), ((1546, 1564), 'tensorflow.signal.fft2d', 'tf.signal.fft2d', (['A'], {}), '(A)\n', (1561, 1564), True, 'import tensorflow as tf\n'), ((4148, 4186), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S11']", "SA['S22']"], {}), "(SB['S11'], SA['S22'])\n", (4164, 4186), True, 'import tensorflow as tf\n'), ((4383, 4421), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S11']", "SA['S22']"], {}), "(SB['S11'], SA['S22'])\n", (4399, 4421), True, 'import tensorflow as tf\n'), ((4563, 4601), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', 
(["SA['S22']", "SB['S11']"], {}), "(SA['S22'], SB['S11'])\n", (4579, 4601), True, 'import tensorflow as tf\n'), ((4743, 4781), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S22']", "SB['S11']"], {}), "(SA['S22'], SB['S11'])\n", (4759, 4781), True, 'import tensorflow as tf\n'), ((2238, 2267), 'tensorflow.concat', 'tf.concat', (['[C, value]'], {'axis': '(5)'}), '([C, value], axis=5)\n', (2247, 2267), True, 'import tensorflow as tf\n')] |
__author__ = "<NAME>"
__status__ = "prototype"
import tarfile
import json
import re
import subprocess
import os
import nltk
import time
year = 2019
month = 9
day = 16
keyword_list = ["netflix", "amazon", "apple", "microsoft", "google", "tesla", "facebook"]
date = "{0}-{1}-{2}".format(year, str(month).zfill(2), str(day).zfill(2))
subprocess.run("rm ./keyword_search_time.log", shell=True)
time_log = open("./keyword_search_time.log", 'a')
time_log.write("{0:8s} {1}\n".format("keyword", "searching time(s)"))
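# For each keyword, (re)open a per-keyword JSON output file of matching English
# tweets for the chosen date (the filtering loop itself is currently commented
# out) and log how long the scan took.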
for keyword in keyword_list:
start_tick = time.time()
# subprocess.run("rm " + "./subject_tw/raw/{0}_{1}.json".format(keyword, date), shell=True)
tw_text_file_keyword = open("./subject_tw/raw/{0}_{1}.json".format(keyword, date), 'a')
# for hour in range(6, 24):
# tw_folder = "./{0}/{1}/".format(str(day).zfill(2), str(hour).zfill(2))
# for filename in os.listdir(tw_folder):
# # load the json file into the memory
# with open(tw_folder + filename, 'r') as f:
# data = [json.loads(line) for line in f]
# # write the selected items into a new json file
# for item in data:
# keys = item.keys()
# if "text" in keys and "lang" in keys and item["lang"] == "en":
# if re.search(re.compile(keyword), item["text"].lower()): # search based on the lower case
# json.dump(item, tw_text_file_keyword)
# tw_text_file_keyword.write('\n')
tw_text_file_keyword.close()
end_tick = time.time()
time_log.write("{0:8s} {1:.3f}\n".format(keyword, end_tick - start_tick))
time_log.close() | [
"subprocess.run",
"time.time"
] | [((333, 391), 'subprocess.run', 'subprocess.run', (['"""rm ./keyword_search_time.log"""'], {'shell': '(True)'}), "('rm ./keyword_search_time.log', shell=True)\n", (347, 391), False, 'import subprocess\n'), ((557, 568), 'time.time', 'time.time', ([], {}), '()\n', (566, 568), False, 'import time\n'), ((1432, 1443), 'time.time', 'time.time', ([], {}), '()\n', (1441, 1443), False, 'import time\n')] |
#!/usr/bin/env python
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import numpy as np
import os
from builtins import range
from PIL import Image
from tensorrtserver.api import *
import tensorrtserver.api.model_config_pb2 as model_config
FLAGS = None
def parse_model(url, protocol, model_name, verbose=False):
"""
Check the configuration of a model to make sure it meets the
requirements for an image classification network (as expected by
this client)
"""
ctx = ServerStatusContext(url, protocol, model_name, verbose)
server_status = ctx.get_server_status()
if model_name not in server_status.model_status:
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
if len(config.input) != 1:
raise Exception("expecting 1 input, got {}".format(len(config.input)))
if len(config.output) != 1:
raise Exception("expecting 1 output, got {}".format(len(config.output)))
input = config.input[0]
output = config.output[0]
return (input.name, output.name, config.max_batch_size)
def postprocess(results, filenames, batch_size):
"""
Post-process results to show classifications.
"""
if len(results) != 1:
raise Exception("expected 1 result, got {}".format(len(results)))
batched_result = list(results.values())[0]
if len(batched_result) != batch_size:
raise Exception("expected {} results, got {}".format(batch_size, len(batched_result)))
if len(filenames) != batch_size:
raise Exception("expected {} filenames, got {}".format(batch_size, len(filenames)))
for (index, result) in enumerate(batched_result):
print("Image '{}':".format(filenames[index]))
for cls in result:
print(" {} ({}) = {}".format(cls[0], cls[2], cls[1]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-c', '--classes', type=int, required=False, default=1,
help='Number of class results to report. Default is 1.')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False, default='HTTP',
help='Protocol (HTTP/gRPC) used to ' +
'communicate with inference service. Default is HTTP.')
parser.add_argument('image_filename', type=str, nargs='?', default=None,
help='Input image / Input folder.')
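    # Illustrative invocation (server URL and image path are assumptions):
    #   python <this_script> -c 3 -u localhost:8000 /path/to/images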
FLAGS = parser.parse_args()
protocol = ProtocolType.from_str(FLAGS.protocol)
input_name, output_name, batch_size = parse_model(
FLAGS.url, protocol, "preprocess_resnet50_ensemble", FLAGS.verbose)
ctx = InferContext(FLAGS.url, protocol, "preprocess_resnet50_ensemble",
-1, FLAGS.verbose)
filenames = []
if os.path.isdir(FLAGS.image_filename):
filenames = [os.path.join(FLAGS.image_filename, f)
for f in os.listdir(FLAGS.image_filename)
if os.path.isfile(os.path.join(FLAGS.image_filename, f))]
else:
filenames = [FLAGS.image_filename,]
filenames.sort()
# Set batch size to the smaller value of image size and max batch size
if len(filenames) <= batch_size:
batch_size = len(filenames)
else:
print("The number of images exceeds maximum batch size," \
"only the first {} images, sorted by name alphabetically," \
" will be processed".format(batch_size))
# Preprocess the images into input data according to model
# requirements
image_data = []
for idx in range(batch_size):
with open(filenames[idx], "rb") as fd:
image_data.append(np.array([fd.read()], dtype=bytes))
# Send requests of batch_size images.
input_filenames = []
input_batch = []
for idx in range(batch_size):
input_filenames.append(filenames[idx])
input_batch.append(image_data[idx])
# Send request
result = ctx.run(
{ input_name : input_batch },
{ output_name : (InferContext.ResultFormat.CLASS, FLAGS.classes) },
batch_size)
postprocess(result, input_filenames, batch_size)
| [
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"builtins.range",
"os.path.isdir"
] | [((3426, 3451), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3449, 3451), False, 'import argparse\n'), ((4664, 4699), 'os.path.isdir', 'os.path.isdir', (['FLAGS.image_filename'], {}), '(FLAGS.image_filename)\n', (4677, 4699), False, 'import os\n'), ((5456, 5473), 'builtins.range', 'range', (['batch_size'], {}), '(batch_size)\n', (5461, 5473), False, 'from builtins import range\n'), ((5692, 5709), 'builtins.range', 'range', (['batch_size'], {}), '(batch_size)\n', (5697, 5709), False, 'from builtins import range\n'), ((4722, 4759), 'os.path.join', 'os.path.join', (['FLAGS.image_filename', 'f'], {}), '(FLAGS.image_filename, f)\n', (4734, 4759), False, 'import os\n'), ((4790, 4822), 'os.listdir', 'os.listdir', (['FLAGS.image_filename'], {}), '(FLAGS.image_filename)\n', (4800, 4822), False, 'import os\n'), ((4862, 4899), 'os.path.join', 'os.path.join', (['FLAGS.image_filename', 'f'], {}), '(FLAGS.image_filename, f)\n', (4874, 4899), False, 'import os\n')] |
# Copyright (c) 2021 <NAME>. All Rights Reserved.
import pymel.core as pm
import piper_config as pcfg
import piper.mayapy.util as myu
import piper.mayapy.convert as convert
import piper.mayapy.attribute as attribute
from .rig import curve # must do relative import in python 2
def get(node_type, ignore=None, search=True):
"""
    Gets the selected nodes of the given type, or all nodes of the given type in the scene if nothing is selected.
Args:
node_type (string): Type of node to get.
ignore (string): If given and piper node is a child of given ignore type, do not return the piper node.
search (boolean): If True, and nothing is selected, will attempt to search the scene for all of the given type.
Returns:
(list) All nodes of the given node type.
"""
piper_nodes = []
selected = pm.selected()
if selected:
# get only the piper nodes from selection
piper_nodes = pm.ls(selected, type=node_type)
# traverse hierarchy for piper nodes
if not piper_nodes:
piper_nodes = set()
for node in selected:
first_type_parent = myu.getFirstTypeParent(node, node_type)
piper_nodes.add(first_type_parent) if first_type_parent else None
# search the whole scene for the piper node
elif search:
piper_nodes = pm.ls(type=node_type)
# don't include any nodes that are a child of the given ignore type
if ignore:
piper_nodes = [node for node in piper_nodes if not myu.getFirstTypeParent(node, ignore)]
return piper_nodes
def multiply(transform, main_term=None, weight=None, inputs=None):
"""
    Creates the multiply node and hooks up all the given inputs to the given transform's scale.
Args:
transform (pm.nodetypes.Transform): Node to hook multiply onto its scale.
main_term (pm.general.Attribute): Attribute to connect onto the multiply main_term.
weight (pm.general.Attribute): Attribute to connect onto the multiply weight.
inputs (list): Attributes to connect to the input plug of the multiply node.
Returns:
(pm.nodetypes.piperMultiply): Multiply node created.
"""
multiply_node = pm.createNode('piperMultiply', n=transform.name(stripNamespace=True) + '_scaleMultiply')
multiply_node.output >> transform.scale
if main_term:
main_term >> multiply_node.mainTerm
if weight:
weight >> multiply_node.weight
if not inputs:
return multiply_node
[attr >> multiply_node.input[i] for i, attr in enumerate(inputs)]
return multiply_node
def divide(dividend=1.0, divisor=1.0, result_input=None):
"""
Creates a node that divides the given dividend by the given divisor.
Args:
dividend (pm.general.Attribute or float): Number that will be divided.
divisor (pm.general.Attribute or float): Number that will perform the division.
result_input (pm.general.Attribute): Attribute to plug in division output into.
Returns:
(pm.nodetypes.piperSafeDivide): Division node created.
"""
divide_node = pm.createNode('piperSafeDivide')
if isinstance(dividend, pm.general.Attribute):
dividend_name = dividend.name().split(':')[-1].replace('.', '_')
dividend >> divide_node.input1
else:
dividend_name = str(dividend)
divide_node.input1.set(dividend)
if isinstance(divisor, pm.general.Attribute):
divisor_name = divisor.name().split(':')[-1].replace('.', '_')
divisor >> divide_node.input2
else:
divisor_name = str(divisor)
divide_node.input2.set(divisor)
if result_input:
divide_node.output >> result_input
divide_node.rename(dividend_name + '_DIV_' + divisor_name)
return divide_node
def inputOutput(node_type, source=None, output=None):
"""
Creates a node that has an input and output attribute based on given node type.
Args:
node_type (string): Type of node to create.
source (pm.general.Attribute): Attribute to plug into node's input.
output (pm.general.Attribute): Attribute to plug node's output into.
Returns:
(pm.nodetypes.DependNode): Node created.
"""
name = source.node().name(stripNamespace=True) + '_' if source else ''
suffix = node_type.split('piper')[-1]
node = pm.createNode(node_type, name=name + suffix)
if source:
source >> node.input
if output:
node.output >> output
return node
def oneMinus(source=None, output=None):
"""
Creates a one minus node that turns a 0 to 1 range into a 1 to 0 or vice versa.
Args:
source (pm.general.Attribute): Attribute to plug into one minus input.
output (pm.general.Attribute): Attribute to plug one minus' output into.
Returns:
(pm.nodetypes.piperOneMinus): One minus node created.
"""
return inputOutput('piperOneMinus', source=source, output=output)
def reciprocal(source=None, output=None):
"""
Creates a node that takes in the given source attribute and output its reciprocal. Reciprocal == 1/X
Args:
source (pm.general.Attribute): Attribute to plug into reciprocal's input.
output (pm.general.Attribute): Attribute to plug reciprocal's output into.
Returns:
(pm.nodetypes.piperReciprocal): Reciprocal node created.
"""
return inputOutput('piperReciprocal', source=source, output=output)
def create(node_type, color=None, name=None, parent=None):
"""
Creates the given node type with the given color and given name/parent.
Args:
node_type (string): Node type to create.
color (string): Name of color to turn outliner text to. Currently supporting:
cyan, pink.
name (string): Name of node.
parent (PyNode or string): Parent of new node.
Returns:
(PyNode): Node created.
"""
name = name if name else node_type
piper_node = pm.createNode(node_type, name=name, parent=parent, skipSelect=True)
rgb = convert.colorToRGB(color)
if rgb:
piper_node.useOutlinerColor.set(True)
piper_node.outlinerColor.set(rgb)
return piper_node
def createShaped(node_type, name=None, control_shape=curve.circle):
"""
    Creates a piper transform of the given node type with the given control shape curve.
Args:
node_type (string): Name for the type of node to create.
name (string): Name to give the transform node.
control_shape (method): Method that generates nurbs curve the transform will use.
Returns:
(PyNode): Transform node created with control shape curves as child(ren).
"""
transform = create(node_type, name=name)
transform._.lock()
ctrl = control_shape()
curves = ctrl.getChildren(type='nurbsCurve')
pm.parent(curves, transform, shape=True, add=True)
pm.delete(ctrl)
return transform
def createFK(name=None, control_shape=curve.circle):
"""
Creates piper FK transform with given control shape curve
Args:
name (string): Name for the piper IK nodes.
control_shape (method): Method that generates nurbs curve that Piper FK transform will use.
Returns:
(pm.nodetypes.piperFK): Piper FK node created.
"""
return createShaped('piperFK', name, control_shape)
def createIK(name=None, control_shape=curve.circle):
"""
Creates piper IK transform with given control shape curve
Args:
name (string): Name for the piper IK nodes.
control_shape (method): Method that generates nurbs curve that Piper IK transform will use.
Returns:
(pm.nodetypes.piperIK): Piper IK node created.
"""
return createShaped('piperIK', name, control_shape)
def createOrientMatrix(position, orientation, name=None):
"""
Creates a piper orient matrix node that keeps given position matrix, but maintains given orientation matrix.
Args:
position (pm.general.Attribute or pm.dt.Matrix): position to plug into orient matrix position attribute.
orientation (pm.general.Attribute or pm.dt.Matrix): orientation to plug into orient matrix orient attribute.
name (string): Name to give piper orient matrix node.
Returns:
(pm.nodetypes.piperOrientMatrix): Piper Orient Matrix node created.
"""
if not name:
name = 'orientMatrix'
node = pm.createNode('piperOrientMatrix', name=name)
if isinstance(position, pm.general.Attribute):
position >> node.positionMatrix
elif isinstance(position, pm.dt.Matrix):
node.positionMatrix.set(position)
if isinstance(orientation, pm.general.Attribute):
orientation >> node.orientMatrix
elif isinstance(orientation, pm.dt.Matrix):
node.orientMatrix.set(orientation)
return node
def createSwingTwist(driver, target, axis='y', swing=0, twist=1):
"""
Creates the swing twist node with given axis, swing, and twist attributes.
Args:
driver (pm.nodetypes.Transform): Node that will drive given target. Must have BIND used as rest matrix.
target (pm.nodetypes.Transform): Node that will be driven with twist/swing through offsetParentMatrix.
axis (string): Axis in which node will output twist.
swing (float): Weight of swing rotation.
twist (float): Weight of twist rotation.
Returns:
(pm.nodetypes.swingTwist): Swing Twist node created.
"""
name = target.name(stripNamespace=True) + '_ST'
swing_twist = pm.createNode('swingTwist', n=name)
axis_index = convert.axisToIndex(axis)
swing_twist.twistAxis.set(axis_index)
swing_twist.swing.set(swing)
swing_twist.twist.set(twist)
driver_bind = convert.toBind(driver, fail_display=pm.error)
driver.matrix >> swing_twist.driverMatrix
driver_bind.matrix >> swing_twist.driverRestMatrix
offset_driver = swing_twist.outMatrix
node_plug = attribute.getSourcePlug(target.offsetParentMatrix)
if node_plug:
mult_matrix = pm.createNode('multMatrix', n=name + '_MM')
swing_twist.outMatrix >> mult_matrix.matrixIn[0]
node_plug >> mult_matrix.matrixIn[1]
offset_driver = mult_matrix.matrixSum
offset_driver >> target.offsetParentMatrix
return swing_twist
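# Illustrative use (node names are assumptions): pass half of the driver's
# twist about Y to a twist joint, with no swing.
# createSwingTwist(upperarm_ctrl, upperarm_twist_jnt, axis='y', swing=0, twist=0.5)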
def createMesh():
"""
Creates a piper mesh group(s) based on whether user has selection, shift held, and scene saved.
Returns:
        (pm.nt.piperMesh or list): Usually the PyNode created. If Shift is held, will return a list of all piperMesh(es) created.
"""
selected = pm.selected()
scene_name = pm.sceneName().namebase
if selected:
# if shift held, create a a piper mesh for each selected object.
if myu.isShiftHeld():
piper_meshes = []
for node in selected:
parent = node.getParent()
name = pcfg.mesh_prefix + node.nodeName()
piper_mesh = create('piperMesh', 'cyan', name=name, parent=parent)
pm.parent(node, piper_mesh)
piper_meshes.append(piper_mesh)
return piper_meshes
else:
# If user selected stuff that is not a mesh, warn the user.
non_mesh_transforms = [node for node in selected if not node.getShapes()]
if non_mesh_transforms:
pm.warning('The following are not meshes! \n' + '\n'.join(non_mesh_transforms))
# Get the parent roots and parent them under the piper mesh node to not mess up any hierarchies.
name = pcfg.mesh_prefix
name += scene_name if scene_name else selected[-1].nodeName()
piper_mesh = create('piperMesh', 'cyan', name=name)
parents = myu.getRootParents(selected)
pm.parent(parents, piper_mesh)
return piper_mesh
name = '' if scene_name.startswith(pcfg.mesh_prefix) else pcfg.mesh_prefix
name += scene_name if scene_name else 'piperMesh'
piper_mesh = create('piperMesh', 'cyan', name=name)
meshes = pm.ls(type='mesh')
parents = myu.getRootParents(meshes)
pm.parent(parents, piper_mesh)
return piper_mesh
def createSkinnedMesh():
"""
Creates a skinned mesh node for each root joint found in the skin clusters
Returns:
(list): PyNodes of nodes created.
"""
selected = pm.selected()
scene_name = pm.sceneName().namebase
if selected:
skin_clusters = set()
skin_clusters.update(set(pm.listConnections(selected, type='skinCluster')))
skin_clusters.update(set(pm.listHistory(selected, type='skinCluster')))
else:
skin_clusters = pm.ls(type='skinCluster')
if not skin_clusters:
pm.warning('No skin clusters found!')
piper_skinned_mesh = create('piperSkinnedMesh', 'pink', name=pcfg.skinned_mesh_prefix + 'piperSkinnedMesh')
return [piper_skinned_mesh]
piper_skinned_meshes = []
skinned_meshes = myu.getSkinnedMeshes(skin_clusters)
for root_joint, geometry in skinned_meshes.items():
name = '' if scene_name.startswith(pcfg.skinned_mesh_prefix) else pcfg.skinned_mesh_prefix
name += scene_name if scene_name else next(iter(geometry)).nodeName()
piper_skinned_mesh = create('piperSkinnedMesh', 'pink', name=name)
piper_skinned_meshes.append(piper_skinned_mesh)
geometry_parents = myu.getRootParents(geometry)
pm.parent(root_joint, geometry_parents, piper_skinned_mesh)
return piper_skinned_meshes
def createRig(name=''):
"""
Creates the node that houses all rig nodes.
Args:
name (string): If given, will use the given name as the name for the rig node.
Returns:
(pm.nodetypes.piperRig): Rig node created.
"""
name = name if name else 'piperRig'
piper_rig = create('piperRig', 'burnt orange', name=name)
piper_rig.addAttr(pcfg.message_root_control, at='message')
piper_rig._.lock()
attribute.nonKeyable(piper_rig.highPolyVisibility)
attribute.lockAndHideCompound(piper_rig)
attribute.addSeparator(piper_rig)
return piper_rig
def createAnimation():
"""
Creates the node that houses a rig. Used to export animation.
Returns:
(pm.nodetypes.piperAnimation): Animation node created.
"""
scene_name = pm.sceneName().namebase
name = scene_name if scene_name else 'piperAnimation'
piper_animation = create('piperAnimation', 'dark green', name=pcfg.animation_prefix + name)
attribute.lockAndHideCompound(piper_animation)
rigs = get('piperRig', ignore='piperAnimation')
pm.parent(rigs[0], piper_animation) if len(rigs) == 1 else pm.warning('{} rigs found!'.format(str(len(rigs))))
return piper_animation
| [
"piper.mayapy.convert.toBind",
"pymel.core.ls",
"piper.mayapy.attribute.lockAndHideCompound",
"piper.mayapy.util.getFirstTypeParent",
"piper.mayapy.attribute.getSourcePlug",
"piper.mayapy.attribute.nonKeyable",
"pymel.core.selected",
"pymel.core.listConnections",
"piper.mayapy.util.getSkinnedMeshes"... | [((834, 847), 'pymel.core.selected', 'pm.selected', ([], {}), '()\n', (845, 847), True, 'import pymel.core as pm\n'), ((3144, 3176), 'pymel.core.createNode', 'pm.createNode', (['"""piperSafeDivide"""'], {}), "('piperSafeDivide')\n", (3157, 3176), True, 'import pymel.core as pm\n'), ((4393, 4437), 'pymel.core.createNode', 'pm.createNode', (['node_type'], {'name': '(name + suffix)'}), '(node_type, name=name + suffix)\n', (4406, 4437), True, 'import pymel.core as pm\n'), ((6015, 6082), 'pymel.core.createNode', 'pm.createNode', (['node_type'], {'name': 'name', 'parent': 'parent', 'skipSelect': '(True)'}), '(node_type, name=name, parent=parent, skipSelect=True)\n', (6028, 6082), True, 'import pymel.core as pm\n'), ((6093, 6118), 'piper.mayapy.convert.colorToRGB', 'convert.colorToRGB', (['color'], {}), '(color)\n', (6111, 6118), True, 'import piper.mayapy.convert as convert\n'), ((6859, 6909), 'pymel.core.parent', 'pm.parent', (['curves', 'transform'], {'shape': '(True)', 'add': '(True)'}), '(curves, transform, shape=True, add=True)\n', (6868, 6909), True, 'import pymel.core as pm\n'), ((6914, 6929), 'pymel.core.delete', 'pm.delete', (['ctrl'], {}), '(ctrl)\n', (6923, 6929), True, 'import pymel.core as pm\n'), ((8439, 8484), 'pymel.core.createNode', 'pm.createNode', (['"""piperOrientMatrix"""'], {'name': 'name'}), "('piperOrientMatrix', name=name)\n", (8452, 8484), True, 'import pymel.core as pm\n'), ((9573, 9608), 'pymel.core.createNode', 'pm.createNode', (['"""swingTwist"""'], {'n': 'name'}), "('swingTwist', n=name)\n", (9586, 9608), True, 'import pymel.core as pm\n'), ((9626, 9651), 'piper.mayapy.convert.axisToIndex', 'convert.axisToIndex', (['axis'], {}), '(axis)\n', (9645, 9651), True, 'import piper.mayapy.convert as convert\n'), ((9778, 9823), 'piper.mayapy.convert.toBind', 'convert.toBind', (['driver'], {'fail_display': 'pm.error'}), '(driver, fail_display=pm.error)\n', (9792, 9823), True, 'import piper.mayapy.convert as convert\n'), ((9985, 10035), 'piper.mayapy.attribute.getSourcePlug', 'attribute.getSourcePlug', (['target.offsetParentMatrix'], {}), '(target.offsetParentMatrix)\n', (10008, 10035), True, 'import piper.mayapy.attribute as attribute\n'), ((10626, 10639), 'pymel.core.selected', 'pm.selected', ([], {}), '()\n', (10637, 10639), True, 'import pymel.core as pm\n'), ((12090, 12108), 'pymel.core.ls', 'pm.ls', ([], {'type': '"""mesh"""'}), "(type='mesh')\n", (12095, 12108), True, 'import pymel.core as pm\n'), ((12123, 12149), 'piper.mayapy.util.getRootParents', 'myu.getRootParents', (['meshes'], {}), '(meshes)\n', (12141, 12149), True, 'import piper.mayapy.util as myu\n'), ((12154, 12184), 'pymel.core.parent', 'pm.parent', (['parents', 'piper_mesh'], {}), '(parents, piper_mesh)\n', (12163, 12184), True, 'import pymel.core as pm\n'), ((12401, 12414), 'pymel.core.selected', 'pm.selected', ([], {}), '()\n', (12412, 12414), True, 'import pymel.core as pm\n'), ((13005, 13040), 'piper.mayapy.util.getSkinnedMeshes', 'myu.getSkinnedMeshes', (['skin_clusters'], {}), '(skin_clusters)\n', (13025, 13040), True, 'import piper.mayapy.util as myu\n'), ((14007, 14057), 'piper.mayapy.attribute.nonKeyable', 'attribute.nonKeyable', (['piper_rig.highPolyVisibility'], {}), '(piper_rig.highPolyVisibility)\n', (14027, 14057), True, 'import piper.mayapy.attribute as attribute\n'), ((14062, 14102), 'piper.mayapy.attribute.lockAndHideCompound', 'attribute.lockAndHideCompound', (['piper_rig'], {}), '(piper_rig)\n', (14091, 14102), True, 'import 
piper.mayapy.attribute as attribute\n'), ((14107, 14140), 'piper.mayapy.attribute.addSeparator', 'attribute.addSeparator', (['piper_rig'], {}), '(piper_rig)\n', (14129, 14140), True, 'import piper.mayapy.attribute as attribute\n'), ((14545, 14591), 'piper.mayapy.attribute.lockAndHideCompound', 'attribute.lockAndHideCompound', (['piper_animation'], {}), '(piper_animation)\n', (14574, 14591), True, 'import piper.mayapy.attribute as attribute\n'), ((938, 969), 'pymel.core.ls', 'pm.ls', (['selected'], {'type': 'node_type'}), '(selected, type=node_type)\n', (943, 969), True, 'import pymel.core as pm\n'), ((10077, 10120), 'pymel.core.createNode', 'pm.createNode', (['"""multMatrix"""'], {'n': "(name + '_MM')"}), "('multMatrix', n=name + '_MM')\n", (10090, 10120), True, 'import pymel.core as pm\n'), ((10657, 10671), 'pymel.core.sceneName', 'pm.sceneName', ([], {}), '()\n', (10669, 10671), True, 'import pymel.core as pm\n'), ((10783, 10800), 'piper.mayapy.util.isShiftHeld', 'myu.isShiftHeld', ([], {}), '()\n', (10798, 10800), True, 'import piper.mayapy.util as myu\n'), ((12432, 12446), 'pymel.core.sceneName', 'pm.sceneName', ([], {}), '()\n', (12444, 12446), True, 'import pymel.core as pm\n'), ((12702, 12727), 'pymel.core.ls', 'pm.ls', ([], {'type': '"""skinCluster"""'}), "(type='skinCluster')\n", (12707, 12727), True, 'import pymel.core as pm\n'), ((12763, 12800), 'pymel.core.warning', 'pm.warning', (['"""No skin clusters found!"""'], {}), "('No skin clusters found!')\n", (12773, 12800), True, 'import pymel.core as pm\n'), ((13432, 13460), 'piper.mayapy.util.getRootParents', 'myu.getRootParents', (['geometry'], {}), '(geometry)\n', (13450, 13460), True, 'import piper.mayapy.util as myu\n'), ((13469, 13528), 'pymel.core.parent', 'pm.parent', (['root_joint', 'geometry_parents', 'piper_skinned_mesh'], {}), '(root_joint, geometry_parents, piper_skinned_mesh)\n', (13478, 13528), True, 'import pymel.core as pm\n'), ((14363, 14377), 'pymel.core.sceneName', 'pm.sceneName', ([], {}), '()\n', (14375, 14377), True, 'import pymel.core as pm\n'), ((14648, 14683), 'pymel.core.parent', 'pm.parent', (['rigs[0]', 'piper_animation'], {}), '(rigs[0], piper_animation)\n', (14657, 14683), True, 'import pymel.core as pm\n'), ((1356, 1377), 'pymel.core.ls', 'pm.ls', ([], {'type': 'node_type'}), '(type=node_type)\n', (1361, 1377), True, 'import pymel.core as pm\n'), ((11784, 11812), 'piper.mayapy.util.getRootParents', 'myu.getRootParents', (['selected'], {}), '(selected)\n', (11802, 11812), True, 'import piper.mayapy.util as myu\n'), ((11825, 11855), 'pymel.core.parent', 'pm.parent', (['parents', 'piper_mesh'], {}), '(parents, piper_mesh)\n', (11834, 11855), True, 'import pymel.core as pm\n'), ((1146, 1185), 'piper.mayapy.util.getFirstTypeParent', 'myu.getFirstTypeParent', (['node', 'node_type'], {}), '(node, node_type)\n', (1168, 1185), True, 'import piper.mayapy.util as myu\n'), ((11065, 11092), 'pymel.core.parent', 'pm.parent', (['node', 'piper_mesh'], {}), '(node, piper_mesh)\n', (11074, 11092), True, 'import pymel.core as pm\n'), ((12537, 12585), 'pymel.core.listConnections', 'pm.listConnections', (['selected'], {'type': '"""skinCluster"""'}), "(selected, type='skinCluster')\n", (12555, 12585), True, 'import pymel.core as pm\n'), ((12621, 12665), 'pymel.core.listHistory', 'pm.listHistory', (['selected'], {'type': '"""skinCluster"""'}), "(selected, type='skinCluster')\n", (12635, 12665), True, 'import pymel.core as pm\n'), ((1525, 1561), 'piper.mayapy.util.getFirstTypeParent', 'myu.getFirstTypeParent', (['node', 
'ignore'], {}), '(node, ignore)\n', (1547, 1561), True, 'import piper.mayapy.util as myu\n')] |
import pandas as pd
import json
import os
import os.path as osp
import numpy as np
"""
python -m spinup.run hyper_search <files> -ae <start from which epoch>
make a file that can order the experiments in terms of their performance
use this to easily find good hyperparameters when doing hyperparameter search
upload this file when it's ready don't use it again lol
"""
DIV_LINE_WIDTH = 50
# Global vars for tracking and labeling data at load time.
exp_idx = 0
units = dict()
def compute_hyper(data, xaxis='Epoch', value="AverageEpRet", condition="Condition1", smooth=1, no_legend=False,
legend_loc='best', color=None, linestyle=None, font_scale=1.5,
label_font_size=24, xlabel=None, ylabel=None, after_epoch=0, no_order=False,
**kwargs):
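    # Ranks each experiment name (the `condition` column, Condition1 by default)
    # by its mean AverageTestEpRet over epochs >= after_epoch, then prints the
    # settings best to worst (or in original order when no_order is set).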
if smooth > 1:
"""
smooth data with moving window average.
that is,
smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
where the "smooth" param is width of that window (2k+1)
"""
y = np.ones(smooth)
for datum in data:
x = np.asarray(datum[value])
z = np.ones(len(x))
smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
datum[value] = smoothed_x
if isinstance(data, list):
data = pd.concat(data, ignore_index=True)
# print("columns", data.columns)
unique_names = data[condition].unique() ## these are the experiment names
n_settings = len(unique_names)
score_list = np.zeros(n_settings)
std_list = np.zeros(n_settings)
print(score_list)
for i in range(n_settings):
un = unique_names[i]
print("\nunique name: ",un)
exp_data = data.loc[data[condition] == un] ## the data related to this experiment
# average_test_epret = exp_data['AverageTestEpRet'].values
# print(average_test_epret.shape)
# final performance data only concern the last few epoches
final_performance_data = exp_data.loc[exp_data['Epoch'] >= after_epoch]
average_test_epret_final = final_performance_data['AverageTestEpRet'].values
mean_score = average_test_epret_final.mean()
std_score = average_test_epret_final.std()
score_list[i] = mean_score
std_list[i] = std_score
epoch_reached = final_performance_data['Epoch'].max()
if np.isnan(mean_score):
print('n/a')
else:
print('total epoch: %d, score: %.2f' % (epoch_reached,mean_score))
"""
here we want to give an ordering of the hyper-settings, so that we can know
which ones are good hyper-parameters
"""
sorted_index =np.flip(np.argsort(score_list))
if no_order:
sorted_index = np.arange(len(sorted_index))
for i in range(n_settings):
setting_index = sorted_index[i]
print('%s\t%.1f\t%.1f' % (unique_names[setting_index], score_list[setting_index], std_list[setting_index]))
def get_datasets(logdir, condition=None):
"""
Recursively look through logdir for output files produced by
spinup.logx.Logger.
Assumes that any file "progress.txt" is a valid hit.
"""
global exp_idx
global units
datasets = []
for root, _, files in os.walk(logdir):
if 'progress.txt' in files:
exp_name = None
try:
config_path = open(os.path.join(root, 'config.json'))
config = json.load(config_path)
if 'exp_name' in config:
exp_name = config['exp_name']
except:
print('No file named config.json')
condition1 = condition or exp_name or 'exp'
condition2 = condition1 + '-' + str(exp_idx)
exp_idx += 1
if condition1 not in units:
units[condition1] = 0
unit = units[condition1]
units[condition1] += 1
try:
exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
exp_data.insert(len(exp_data.columns), 'Unit', unit)
exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
datasets.append(exp_data)
except Exception as e:
print(e)
return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
"""
For every entry in all_logdirs,
1) check if the entry is a real directory and if it is,
pull data from it;
2) if not, check to see if the entry is a prefix for a
real directory, and pull data from that.
"""
logdirs = []
for logdir in all_logdirs:
if osp.isdir(logdir) and logdir[-1] == '/':
logdirs += [logdir]
else:
basedir = osp.dirname(logdir)
fulldir = lambda x: osp.join(basedir, x)
prefix = logdir.split('/')[-1]
listdir = os.listdir(basedir)
logdirs += sorted([fulldir(x) for x in listdir if prefix in x])
"""
Enforce selection rules, which check logdirs for certain substrings.
Makes it easier to look at graphs from particular ablations, if you
launch many jobs at once with similar names.
"""
if select is not None:
logdirs = [log for log in logdirs if all(x in log for x in select)]
if exclude is not None:
logdirs = [log for log in logdirs if all(not (x in log) for x in exclude)]
# Verify logdirs
print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
for logdir in logdirs:
print(logdir)
print('\n' + '=' * DIV_LINE_WIDTH)
# Make sure the legend is compatible with the logdirs
assert not (legend) or (len(legend) == len(logdirs)), \
"Must give a legend title for each set of experiments."
# Load data from logdirs
data = []
if legend:
for log, leg in zip(logdirs, legend):
data += get_datasets(log, leg)
else:
for log in logdirs:
data += get_datasets(log)
return data
def compare_performance(all_logdirs, legend=None, xaxis=None, values=None, count=False,
font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean', no_legend=False,
legend_loc='best', after_epoch=0,
save_name=None, xlimit=-1, color=None, linestyle=None, label_font_size=24,
xlabel=None, ylabel=None,
no_order=False):
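    # Loads every matching logdir, then ranks the hyper-parameter settings for
    # each requested value column via compute_hyper.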
data = get_all_datasets(all_logdirs, legend, select, exclude)
values = values if isinstance(values, list) else [values]
condition = 'Condition2' if count else 'Condition1'
estimator = getattr(np, estimator) # choose what to show on main curve: mean? max? min?
for value in values:
compute_hyper(data, xaxis=xaxis, value=value, condition=condition, smooth=smooth, no_legend=no_legend,
legend_loc=legend_loc,
estimator=estimator, color=color, linestyle=linestyle, font_scale=font_scale,
label_font_size=label_font_size,
xlabel=xlabel, ylabel=ylabel, after_epoch=after_epoch, no_order=no_order)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('logdir', nargs='*')
parser.add_argument('--legend', '-l', nargs='*')
parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts')
parser.add_argument('--value', '-y', default='Performance', nargs='*')
parser.add_argument('--count', action='store_true')
parser.add_argument('--smooth', '-s', type=int, default=1)
parser.add_argument('--select', nargs='*')
parser.add_argument('--exclude', nargs='*')
parser.add_argument('--est', default='mean')
parser.add_argument('--after-epoch', '-ae', type=int, default=0)
parser.add_argument('-no', '--no-order', action='store_true')
args = parser.parse_args()
"""
Args:
logdir (strings): As many log directories (or prefixes to log
directories, which the plotter will autocomplete internally) as
you'd like to plot from.
legend (strings): Optional way to specify legend for the plot. The
plotter legend will automatically use the ``exp_name`` from the
config.json file, unless you tell it otherwise through this flag.
This only works if you provide a name for each directory that
will get plotted. (Note: this may not be the same as the number
of logdir args you provide! Recall that the plotter looks for
autocompletes of the logdir args: there may be more than one
match for a given logdir prefix, and you will need to provide a
legend string for each one of those matches---unless you have
removed some of them as candidates via selection or exclusion
rules (below).)
xaxis (string): Pick what column from data is used for the x-axis.
Defaults to ``TotalEnvInteracts``.
value (strings): Pick what columns from data to graph on the y-axis.
Submitting multiple values will produce multiple graphs. Defaults
to ``Performance``, which is not an actual output of any algorithm.
Instead, ``Performance`` refers to either ``AverageEpRet``, the
correct performance measure for the on-policy algorithms, or
``AverageTestEpRet``, the correct performance measure for the
off-policy algorithms. The plotter will automatically figure out
which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for
each separate logdir.
count: Optional flag. By default, the plotter shows y-values which
are averaged across all results that share an ``exp_name``,
which is typically a set of identical experiments that only vary
in random seed. But if you'd like to see all of those curves
separately, use the ``--count`` flag.
smooth (int): Smooth data by averaging it over a fixed window. This
parameter says how wide the averaging window will be.
select (strings): Optional selection rule: the plotter will only show
curves from logdirs that contain all of these substrings.
exclude (strings): Optional exclusion rule: plotter will only show
curves from logdirs that do not contain these substrings.
after-epoch: if > 0 then when computing an algorithm's "score",
we will use the average of test returns after a certain epoch number
no-order: have this option so it doesn't print setting names in order of performance
"""
compare_performance(args.logdir, args.legend, args.xaxis, args.value, args.count,
smooth=args.smooth, select=args.select, exclude=args.exclude,
estimator=args.est, after_epoch=args.after_epoch, no_order=args.no_order)
if __name__ == "__main__":
main()
| [
"os.listdir",
"numpy.convolve",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.join",
"numpy.argsort",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"numpy.isnan",
"json.load",
"pandas.concat",
"os.walk"
] | [((1541, 1561), 'numpy.zeros', 'np.zeros', (['n_settings'], {}), '(n_settings)\n', (1549, 1561), True, 'import numpy as np\n'), ((1577, 1597), 'numpy.zeros', 'np.zeros', (['n_settings'], {}), '(n_settings)\n', (1585, 1597), True, 'import numpy as np\n'), ((3264, 3279), 'os.walk', 'os.walk', (['logdir'], {}), '(logdir)\n', (3271, 3279), False, 'import os\n'), ((7539, 7564), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7562, 7564), False, 'import argparse\n'), ((1058, 1073), 'numpy.ones', 'np.ones', (['smooth'], {}), '(smooth)\n', (1065, 1073), True, 'import numpy as np\n'), ((1338, 1372), 'pandas.concat', 'pd.concat', (['data'], {'ignore_index': '(True)'}), '(data, ignore_index=True)\n', (1347, 1372), True, 'import pandas as pd\n'), ((2393, 2413), 'numpy.isnan', 'np.isnan', (['mean_score'], {}), '(mean_score)\n', (2401, 2413), True, 'import numpy as np\n'), ((2697, 2719), 'numpy.argsort', 'np.argsort', (['score_list'], {}), '(score_list)\n', (2707, 2719), True, 'import numpy as np\n'), ((1117, 1141), 'numpy.asarray', 'np.asarray', (['datum[value]'], {}), '(datum[value])\n', (1127, 1141), True, 'import numpy as np\n'), ((4972, 4989), 'os.path.isdir', 'osp.isdir', (['logdir'], {}), '(logdir)\n', (4981, 4989), True, 'import os.path as osp\n'), ((5081, 5100), 'os.path.dirname', 'osp.dirname', (['logdir'], {}), '(logdir)\n', (5092, 5100), True, 'import os.path as osp\n'), ((5219, 5238), 'os.listdir', 'os.listdir', (['basedir'], {}), '(basedir)\n', (5229, 5238), False, 'import os\n'), ((1199, 1224), 'numpy.convolve', 'np.convolve', (['x', 'y', '"""same"""'], {}), "(x, y, 'same')\n", (1210, 1224), True, 'import numpy as np\n'), ((1227, 1252), 'numpy.convolve', 'np.convolve', (['z', 'y', '"""same"""'], {}), "(z, y, 'same')\n", (1238, 1252), True, 'import numpy as np\n'), ((3457, 3479), 'json.load', 'json.load', (['config_path'], {}), '(config_path)\n', (3466, 3479), False, 'import json\n'), ((5133, 5153), 'os.path.join', 'osp.join', (['basedir', 'x'], {}), '(basedir, x)\n', (5141, 5153), True, 'import os.path as osp\n'), ((3397, 3430), 'os.path.join', 'os.path.join', (['root', '"""config.json"""'], {}), "(root, 'config.json')\n", (3409, 3430), False, 'import os\n'), ((3989, 4023), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 'progress.txt')\n", (4001, 4023), False, 'import os\n')] |
from piecrust.serving.server import WsgiServer
def get_app(root_dir, cache_key='prod', enable_debug_info=False):
app = WsgiServer(root_dir,
cache_key=cache_key,
enable_debug_info=enable_debug_info)
return app
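# Minimal sketch of serving the returned app with the standard-library WSGI server;
# the website root path below is hypothetical and wsgiref is used only for illustration.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    application = get_app('/path/to/website', cache_key='prod', enable_debug_info=True)
    make_server('127.0.0.1', 8080, application).serve_forever()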
| [
"piecrust.serving.server.WsgiServer"
] | [((125, 203), 'piecrust.serving.server.WsgiServer', 'WsgiServer', (['root_dir'], {'cache_key': 'cache_key', 'enable_debug_info': 'enable_debug_info'}), '(root_dir, cache_key=cache_key, enable_debug_info=enable_debug_info)\n', (135, 203), False, 'from piecrust.serving.server import WsgiServer\n')] |
from Interfaces.CommandLineInterface import CLI
if __name__ == '__main__':
CLI().initiate()
| [
"Interfaces.CommandLineInterface.CLI"
] | [((81, 86), 'Interfaces.CommandLineInterface.CLI', 'CLI', ([], {}), '()\n', (84, 86), False, 'from Interfaces.CommandLineInterface import CLI\n')] |
# -*- coding: utf-8 -*-
u"""Concurrency testing
This test does not always fail when there is a problem (it may pass
even when the bug is present), because it depends on a specific
sequence of events that can't be controlled by the test.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
_REPORT = 'heightWeightReport'
def test_myapp(fc):
"""https://github.com/radiasoft/sirepo/issues/2346"""
from pykern import pkunit
import time
import threading
from pykern.pkdebug import pkdlog
d = fc.sr_sim_data()
d.models.simulation.name = 'srunit_long_run'
def _t2():
pkdlog('start 2')
r2 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
pkdlog(r2)
for _ in range(20):
pkunit.pkok(r2.state != 'error', 'unexpected error state: {}')
if r2.state == 'running':
break
if r2.state == 'canceled':
pkdlog('canceled')
break
time.sleep(.1)
pkdlog('runStatus 2')
r2 = fc.sr_post('runStatus', r2.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', r2)
pkdlog('start 1')
r1 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
pkdlog(r1)
pkunit.pkok(r1.state != 'error', 'unexpected error state: {}')
if r1.state == 'running':
break
time.sleep(.1)
r1 = fc.sr_post('runStatus', r1.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', r1)
t2 = threading.Thread(target=_t2)
t2.start()
time.sleep(.1)
pkdlog('runCancel')
c = fc.sr_post('runCancel', r1.nextRequest)
pkunit.pkeq('canceled', c.state)
pkdlog('start 3')
r1 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
pkunit.pkok(r1.state != 'error', 'unexpected error state: {}')
if r1.state == 'running':
break
time.sleep(.1)
r1 = fc.sr_post('runStatus', r1.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', r1)
c = fc.sr_post('runCancel', r1.nextRequest)
pkunit.pkeq('canceled', c.state)
| [
"pykern.pkunit.pkeq",
"pykern.pkunit.pkok",
"pykern.pkunit.pkfail",
"time.sleep",
"pykern.pkdebug.pkdlog",
"threading.Thread"
] | [((1550, 1567), 'pykern.pkdebug.pkdlog', 'pkdlog', (['"""start 1"""'], {}), "('start 1')\n", (1556, 1567), False, 'from pykern.pkdebug import pkdlog\n'), ((2164, 2192), 'threading.Thread', 'threading.Thread', ([], {'target': '_t2'}), '(target=_t2)\n', (2180, 2192), False, 'import threading\n'), ((2212, 2227), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2222, 2227), False, 'import time\n'), ((2231, 2250), 'pykern.pkdebug.pkdlog', 'pkdlog', (['"""runCancel"""'], {}), "('runCancel')\n", (2237, 2250), False, 'from pykern.pkdebug import pkdlog\n'), ((2303, 2335), 'pykern.pkunit.pkeq', 'pkunit.pkeq', (['"""canceled"""', 'c.state'], {}), "('canceled', c.state)\n", (2314, 2335), False, 'from pykern import pkunit\n'), ((2341, 2358), 'pykern.pkdebug.pkdlog', 'pkdlog', (['"""start 3"""'], {}), "('start 3')\n", (2347, 2358), False, 'from pykern.pkdebug import pkdlog\n'), ((2978, 3010), 'pykern.pkunit.pkeq', 'pkunit.pkeq', (['"""canceled"""', 'c.state'], {}), "('canceled', c.state)\n", (2989, 3010), False, 'from pykern import pkunit\n'), ((739, 756), 'pykern.pkdebug.pkdlog', 'pkdlog', (['"""start 2"""'], {}), "('start 2')\n", (745, 756), False, 'from pykern.pkdebug import pkdlog\n'), ((1071, 1081), 'pykern.pkdebug.pkdlog', 'pkdlog', (['r2'], {}), '(r2)\n', (1077, 1081), False, 'from pykern.pkdebug import pkdlog\n'), ((1866, 1876), 'pykern.pkdebug.pkdlog', 'pkdlog', (['r1'], {}), '(r1)\n', (1872, 1876), False, 'from pykern.pkdebug import pkdlog\n'), ((1885, 1947), 'pykern.pkunit.pkok', 'pkunit.pkok', (["(r1.state != 'error')", '"""unexpected error state: {}"""'], {}), "(r1.state != 'error', 'unexpected error state: {}')\n", (1896, 1947), False, 'from pykern import pkunit\n'), ((2008, 2023), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2018, 2023), False, 'import time\n'), ((2094, 2153), 'pykern.pkunit.pkfail', 'pkunit.pkfail', (['"""runStatus: failed to start running: {}"""', 'r1'], {}), "('runStatus: failed to start running: {}', r1)\n", (2107, 2153), False, 'from pykern import pkunit\n'), ((2657, 2719), 'pykern.pkunit.pkok', 'pkunit.pkok', (["(r1.state != 'error')", '"""unexpected error state: {}"""'], {}), "(r1.state != 'error', 'unexpected error state: {}')\n", (2668, 2719), False, 'from pykern import pkunit\n'), ((2780, 2795), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2790, 2795), False, 'import time\n'), ((2866, 2925), 'pykern.pkunit.pkfail', 'pkunit.pkfail', (['"""runStatus: failed to start running: {}"""', 'r1'], {}), "('runStatus: failed to start running: {}', r1)\n", (2879, 2925), False, 'from pykern import pkunit\n'), ((1122, 1184), 'pykern.pkunit.pkok', 'pkunit.pkok', (["(r2.state != 'error')", '"""unexpected error state: {}"""'], {}), "(r2.state != 'error', 'unexpected error state: {}')\n", (1133, 1184), False, 'from pykern import pkunit\n'), ((1353, 1368), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1363, 1368), False, 'import time\n'), ((1380, 1401), 'pykern.pkdebug.pkdlog', 'pkdlog', (['"""runStatus 2"""'], {}), "('runStatus 2')\n", (1386, 1401), False, 'from pykern.pkdebug import pkdlog\n'), ((1485, 1544), 'pykern.pkunit.pkfail', 'pkunit.pkfail', (['"""runStatus: failed to start running: {}"""', 'r2'], {}), "('runStatus: failed to start running: {}', r2)\n", (1498, 1544), False, 'from pykern import pkunit\n'), ((1300, 1318), 'pykern.pkdebug.pkdlog', 'pkdlog', (['"""canceled"""'], {}), "('canceled')\n", (1306, 1318), False, 'from pykern.pkdebug import pkdlog\n')] |
"""Subscription manager for Graph QL websocket."""
import asyncio
import json
import logging
import socket
import sys
from time import time
import pkg_resources
import websockets
_LOGGER = logging.getLogger(__name__)
STATE_STARTING = "starting"
STATE_RUNNING = "running"
STATE_STOPPED = "stopped"
try:
VERSION = pkg_resources.require("graphql-subscription-manager")[0].version
except Exception: # pylint: disable=broad-except
VERSION = "dev"
class SubscriptionManager:
"""Subscription manager."""
# pylint: disable=too-many-instance-attributes
def __init__(self, init_payload, url):
"""Create resources for websocket communication."""
try:
self.loop = asyncio.get_running_loop()
except RuntimeError:
self.loop = asyncio.get_event_loop()
self.subscriptions = {}
self._url = url
self._state = None
self.websocket = None
self._retry_timer = None
self._client_task = None
self._wait_time_before_retry = 15
self._session_id = 0
self._init_payload = init_payload
self._show_connection_error = True
self._is_running = False
self._user_agent = "Python/{0[0]}.{0[1]} PyGraphqlWebsocketManager/{1}".format(
sys.version_info, VERSION
)
def start(self):
"""Start websocket."""
_LOGGER.debug("Start state %s.", self._state)
if self._state == STATE_RUNNING:
return
self._state = STATE_STARTING
self._cancel_client_task()
self._client_task = self.loop.create_task(self.running())
for subscription in self.subscriptions.copy():
callback, sub_query = self.subscriptions.pop(subscription, (None, None))
_LOGGER.debug("Removed, %s", subscription)
if callback is None:
continue
_LOGGER.debug("Add subscription %s", callback)
self.loop.create_task(self.subscribe(sub_query, callback))
@property
def is_running(self):
"""Return if client is running or not."""
return self._is_running
async def running(self):
"""Start websocket connection."""
# pylint: disable=too-many-branches, too-many-statements
await self._close_websocket()
_LOGGER.debug("Starting")
try:
self.websocket = await asyncio.wait_for(
websockets.connect(
self._url,
subprotocols=["graphql-subscriptions"],
extra_headers={"User-Agent": self._user_agent},
),
timeout=30,
)
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Failed to connect. Reconnecting... ", exc_info=True)
self._state = STATE_STOPPED
self.retry()
return
self._state = STATE_RUNNING
_LOGGER.debug("Running")
await self.websocket.send(
json.dumps({"type": "init", "payload": self._init_payload})
)
try:
k = 0
while self._state == STATE_RUNNING:
try:
msg = await asyncio.wait_for(self.websocket.recv(), timeout=30)
except asyncio.TimeoutError:
k += 1
if k > 10:
if self._show_connection_error:
_LOGGER.error("No data, reconnecting.")
self._show_connection_error = False
else:
_LOGGER.warning("No data, reconnecting.")
self._is_running = False
_LOGGER.debug("Reconnecting")
self._state = STATE_STOPPED
self.retry()
_LOGGER.debug(
"No websocket data in 30 seconds, checking the connection."
)
try:
pong_waiter = await self.websocket.ping()
await asyncio.wait_for(pong_waiter, timeout=10)
except asyncio.TimeoutError:
if self._show_connection_error:
_LOGGER.error(
"No response to ping in 10 seconds, reconnecting."
)
self._show_connection_error = False
else:
_LOGGER.warning(
"No response to ping in 10 seconds, reconnecting."
)
self._is_running = False
_LOGGER.debug("Reconnecting")
self._state = STATE_STOPPED
self.retry()
continue
k = 0
self._is_running = True
await self._process_msg(msg)
self._show_connection_error = True
except (websockets.exceptions.InvalidStatusCode, socket.gaierror):
if self._show_connection_error:
_LOGGER.error("Connection error", exc_info=True)
self._show_connection_error = False
else:
_LOGGER.debug("Connection error", exc_info=True)
except websockets.exceptions.ConnectionClosed:
if self._show_connection_error and self._state != STATE_STOPPED:
_LOGGER.error("Connection error", exc_info=True)
self._show_connection_error = False
else:
_LOGGER.debug("Connection error", exc_info=True)
except Exception: # pylint: disable=broad-except
_LOGGER.error("Unexpected error", exc_info=True)
finally:
await self._close_websocket()
if self._state != STATE_STOPPED:
_LOGGER.debug("Reconnecting")
self._state = STATE_STOPPED
self.retry()
_LOGGER.debug("Closing running task.")
async def stop(self, timeout=10):
"""Close websocket connection."""
_LOGGER.debug("Stopping client.")
start_time = time()
self._cancel_retry_timer()
for subscription_id in range(len(self.subscriptions)):
_LOGGER.debug("Sending unsubscribe: %s", subscription_id)
await self.unsubscribe(subscription_id)
while (
timeout > 0
and self.websocket is not None
and not self.subscriptions
and (time() - start_time) < timeout / 2
):
await asyncio.sleep(0.1)
self._state = STATE_STOPPED
await self._close_websocket()
while (
timeout > 0
and self.websocket is not None
and not self.websocket.closed
and (time() - start_time) < timeout
):
await asyncio.sleep(0.1)
self._cancel_client_task()
_LOGGER.debug("Server connection is stopped")
def retry(self):
"""Retry to connect to websocket."""
_LOGGER.debug("Retry, state: %s", self._state)
if self._state in [STATE_STARTING, STATE_RUNNING]:
_LOGGER.debug("Skip retry since state: %s", self._state)
return
_LOGGER.debug("Cancel retry timer")
self._cancel_retry_timer()
self._state = STATE_STARTING
_LOGGER.debug("Restart")
self._retry_timer = self.loop.call_later(
self._wait_time_before_retry, self.start
)
_LOGGER.debug(
"Reconnecting to server in %i seconds.", self._wait_time_before_retry
)
async def subscribe(self, sub_query, callback, timeout=3):
"""Add a new subscription."""
current_session_id = self._session_id
self._session_id += 1
subscription = {
"query": sub_query,
"type": "subscription_start",
"id": current_session_id,
}
json_subscription = json.dumps(subscription)
self.subscriptions[current_session_id] = (callback, sub_query)
start_time = time()
while time() - start_time < timeout:
if (
self.websocket is None
or not self.websocket.open
or not self._state == STATE_RUNNING
):
await asyncio.sleep(1)
continue
await self.websocket.send(json_subscription)
_LOGGER.debug("New subscription %s", current_session_id)
return current_session_id
async def unsubscribe(self, subscription_id):
"""Unsubscribe."""
if self.websocket is None or not self.websocket.open:
_LOGGER.warning("Websocket is closed.")
return
await self.websocket.send(
json.dumps({"id": subscription_id, "type": "subscription_end"})
)
if self.subscriptions and subscription_id in self.subscriptions:
self.subscriptions.pop(subscription_id)
async def _close_websocket(self):
if self.websocket is None:
return
try:
await self.websocket.close()
finally:
self.websocket = None
async def _process_msg(self, msg):
"""Process received msg."""
result = json.loads(msg)
_LOGGER.debug("Recv, %s", result)
if result.get("type") == "init_fail":
if (
result.get("payload", {}).get("error")
== "Too many concurrent sockets for token"
):
self._wait_time_before_retry = self._wait_time_before_retry * 2
if self._wait_time_before_retry >= 120:
_LOGGER.error(
"Connection is closed, too many concurrent sockets for token"
)
self._wait_time_before_retry = min(self._wait_time_before_retry, 600)
return
_LOGGER.error(result.get("payload", {}).get("error"))
return
subscription_id = result.get("id")
if subscription_id is None:
return
callback, _ = self.subscriptions.get(subscription_id, (None, None))
if callback is None:
_LOGGER.debug("Unknown id %s.", subscription_id)
return
if result.get("type", "") == "complete":
_LOGGER.debug("Unsubscribe %s successfully.", subscription_id)
return
data = result.get("payload")
if data is None:
return
self._wait_time_before_retry = 15
try:
await callback(data)
except TypeError as exp:
if "object NoneType can't be used in 'await' expression" in str(exp):
callback(data)
return
raise exp
def _cancel_retry_timer(self):
if self._retry_timer is None:
return
try:
self._retry_timer.cancel()
finally:
self._retry_timer = None
def _cancel_client_task(self):
if self._client_task is None:
return
try:
self._client_task.cancel()
finally:
self._client_task = None
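# Usage sketch (the URL, token payload, and GraphQL query below are placeholders,
# not a real endpoint; they only illustrate the public methods defined above):
#
#   async def on_data(data):
#       print(data)
#
#   manager = SubscriptionManager({"token": "<access token>"}, "wss://example.com/graphql")
#   manager.start()
#   sub_id = await manager.subscribe("subscription { liveMeasurement { power } }", on_data)
#   ...
#   await manager.unsubscribe(sub_id)
#   await manager.stop()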
| [
"logging.getLogger",
"json.loads",
"pkg_resources.require",
"json.dumps",
"websockets.connect",
"asyncio.wait_for",
"asyncio.sleep",
"asyncio.get_event_loop",
"time.time",
"asyncio.get_running_loop"
] | [((192, 219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (209, 219), False, 'import logging\n'), ((6228, 6234), 'time.time', 'time', ([], {}), '()\n', (6232, 6234), False, 'from time import time\n'), ((8065, 8089), 'json.dumps', 'json.dumps', (['subscription'], {}), '(subscription)\n', (8075, 8089), False, 'import json\n'), ((8183, 8189), 'time.time', 'time', ([], {}), '()\n', (8187, 8189), False, 'from time import time\n'), ((9378, 9393), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (9388, 9393), False, 'import json\n'), ((321, 374), 'pkg_resources.require', 'pkg_resources.require', (['"""graphql-subscription-manager"""'], {}), "('graphql-subscription-manager')\n", (342, 374), False, 'import pkg_resources\n'), ((710, 736), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (734, 736), False, 'import asyncio\n'), ((790, 814), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (812, 814), False, 'import asyncio\n'), ((3002, 3061), 'json.dumps', 'json.dumps', (["{'type': 'init', 'payload': self._init_payload}"], {}), "({'type': 'init', 'payload': self._init_payload})\n", (3012, 3061), False, 'import json\n'), ((6660, 6678), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (6673, 6678), False, 'import asyncio\n'), ((6957, 6975), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (6970, 6975), False, 'import asyncio\n'), ((8204, 8210), 'time.time', 'time', ([], {}), '()\n', (8208, 8210), False, 'from time import time\n'), ((8888, 8951), 'json.dumps', 'json.dumps', (["{'id': subscription_id, 'type': 'subscription_end'}"], {}), "({'id': subscription_id, 'type': 'subscription_end'})\n", (8898, 8951), False, 'import json\n'), ((2423, 2544), 'websockets.connect', 'websockets.connect', (['self._url'], {'subprotocols': "['graphql-subscriptions']", 'extra_headers': "{'User-Agent': self._user_agent}"}), "(self._url, subprotocols=['graphql-subscriptions'],\n extra_headers={'User-Agent': self._user_agent})\n", (2441, 2544), False, 'import websockets\n'), ((6596, 6602), 'time.time', 'time', ([], {}), '()\n', (6600, 6602), False, 'from time import time\n'), ((6897, 6903), 'time.time', 'time', ([], {}), '()\n', (6901, 6903), False, 'from time import time\n'), ((8423, 8439), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (8436, 8439), False, 'import asyncio\n'), ((4103, 4144), 'asyncio.wait_for', 'asyncio.wait_for', (['pong_waiter'], {'timeout': '(10)'}), '(pong_waiter, timeout=10)\n', (4119, 4144), False, 'import asyncio\n')] |
import copy
import json
import numpy as np
import pandas as pd
import basicDeltaOperations as op
import calcIsotopologues as ci
import fragmentAndSimulate as fas
import solveSystem as ss
'''
This is a set of functions to quickly initialize methionine molecules based on input delta values and to simulate their fragmentation. See runAllTests for implementation.
'''
def initializeMethionine(deltas, fragSubset = ['full','133','104','102','88','74High','74Low','61','56'], printHeavy = True):
'''
Initializes methionine, returning a dataframe with basic information about the molecule as well as information about fragmentation.
Inputs:
deltas: A list of 13 M1 delta values, giving the delta values by site for the 13C, 17O, 15N, 33S, and 2H isotopes. The sites are defined in the IDList variable, below.
fragSubset: A list giving the subset of fragments to observe. If you are not observing all fragments, you may input only those you do observe.
printHeavy: The user manually specifies delta 17O, and delta 18O is set via mass scaling (see basicDeltaOperations). If True, this will print out delta 18O, 34S, & 36S.
Outputs:
molecularDataFrame: A dataframe containing basic information about the molecule.
expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom (rather than for each site). See fragmentAndSimulate for details.
fragSubgeometryKeys: A list of strings, e.g. 133_01, 133_02, corresponding to each subgeometry of each fragment. A fragment will have multiple subgeometries if there are multiple fragmentation pathways to form it.
fragmentationDictionary: A dictionary like the allFragments variable, but only including the subset of fragments selected by fragSubset.
'''
##### INITIALIZE SITES #####
IDList = ['Cmethyl','Cgamma','Calphabeta','Ccarboxyl','Ocarboxyl','Ssulfur','Namine','Hmethyl','Hgamma',
'Halphabeta','Hamine','Hhydroxyl','Hprotonated']
elIDs = ['C','C','C','C','O','S','N','H','H','H','H','H','H']
numberAtSite = [1,1,2,1,2,1,1,3,2,3,2,1,1]
l = [elIDs, numberAtSite, deltas]
cols = ['IDS','Number','deltas']
condensedFrags =[]
fragKeys = []
#88 and both 74 are conjecture. 74 High has only one oxygen, so we generally do not use it.
allFragments = {'full':{'01':{'subgeometry':[1,1,1,1,1,1,1,1,1,1,1,1,1],'relCont':1}},
'133':{'01':{'subgeometry':[1,1,1,1,1,1,'x',1,1,1,'x',1,'x'],'relCont':1}},
'104':{'01':{'subgeometry':[1,1,1,'x','x',1,1,1,1,1,1,'x','x'],'relCont':1}},
'102':{'01':{'subgeometry':['x',1,1,1,1,'x',1,'x',1,1,1,1,'x'],'relCont':1}},
'88':{'01':{'subgeometry':[1,1,1,'x','x',1,'x',1,1,'x',1,'x','x'],'relCont':1}},
'74High':{'01':{'subgeometry':[1,'x',1,'x',1,'x',1,1,1,1,'x','x','x'],'relCont':1}},
'74Low':{'01':{'subgeometry':[1,1,'x','x',1,'x',1,'x',1,'x',1,'x','x'],'relCont':1}},
'61':{'01':{'subgeometry':[1,1,'x','x','x',1,'x',1,1,'x','x','x','x'],'relCont':1}},
'56':{'01':{'subgeometry':['x',1,1,'x','x','x',1,'x',1,1,'x',1,'x'],'relCont':1}}}
fragmentationDictionary = {key: value for key, value in allFragments.items() if key in fragSubset}
for fragKey, subFragDict in fragmentationDictionary.items():
for subFragNum, subFragInfo in subFragDict.items():
l.append(subFragInfo['subgeometry'])
cols.append(fragKey + '_' + subFragNum)
condensedFrags.append(subFragInfo['subgeometry'])
fragKeys.append(fragKey + '_' + subFragNum)
molecularDataFrame = pd.DataFrame(l, columns = IDList)
molecularDataFrame = molecularDataFrame.transpose()
molecularDataFrame.columns = cols
expandedFrags = [fas.expandFrag(x, numberAtSite) for x in condensedFrags]
if printHeavy:
SConc = op.deltaToConcentration('S',deltas[5])
del34 = op.ratioToDelta('34S',SConc[2]/SConc[0])
del36 = op.ratioToDelta('36S',SConc[3]/SConc[0])
OConc = op.deltaToConcentration('O',deltas[4])
del18 = op.ratioToDelta('18O',OConc[2]/OConc[0])
print("Delta 34S")
print(del34)
print("Delta 36S")
print(del36)
print("Delta 18O")
print(del18)
return molecularDataFrame, expandedFrags, fragKeys, fragmentationDictionary
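# Example (illustrative, arbitrary delta values; the list must contain one M1 delta
# for each of the 13 sites in IDList order):
#
#   deltas = [-20, -15, -10, -5, 10, 5, 0, -100, -110, -120, -130, -140, -150]
#   molecularDataFrame, expandedFrags, fragKeys, fragmentationDictionary = \
#       initializeMethionine(deltas, printHeavy=False)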
def simulateMeasurement(molecularDataFrame, fragmentationDictionary, expandedFrags, fragKeys, abundanceThreshold = 0, UValueList = [],
massThreshold = 4, clumpD = {}, outputPath = None, disableProgress = False, calcFF = False, fractionationFactors = {}, omitMeasurements = {}, ffstd = 0.05, unresolvedDict = {}, outputFull = False):
'''
Simulates M+N measurements of a methionine molecule with input deltas specified by the input dataframe molecularDataFrame.
Inputs:
molecularDataFrame: A dataframe containing basic information about the molecule.
expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom (rather than for each site). See fragmentAndSimulate for details.
fragSubgeometryKeys: A list of strings, e.g. 133_01, 133_02, corresponding to each subgeometry of each fragment. A fragment will have multiple subgeometries if there are multiple fragmentation pathways to form it.
    fragmentationDictionary: A dictionary like the allFragments variable from initializeMethionine, but only including the subset of fragments selected by fragSubset.
abundanceThreshold: A float; Does not include measurements below this M+N relative abundance, i.e. assuming they will not be measured due to low abundance.
UValueList: A list giving specific substitutions to calculate molecular average U values for ('13C', '15N', etc.)
massThreshold: An integer; will calculate M+N relative abundances for N <= massThreshold
clumpD: Specifies information about clumps to add; otherwise the isotome follows the stochastic assumption. Currently works only for mass 1 substitutions (e.g. 1717, 1317, etc.) See ci.introduceClump for details.
outputPath: A string, e.g. 'output', or None. If it is a string, outputs the simulated spectrum as a json.
disableProgress: Disables tqdm progress bars when True.
calcFF: When True, computes a new set of fractionation factors for this measurement.
fractionationFactors: A dictionary, specifying a fractionation factor to apply to each ion beam. This is used to apply fractionation factors calculated previously to this predicted measurement (e.g. for a sample/standard comparison with the same experimental fractionation)
    omitMeasurements: A dictionary, {}, specifying measurements which will not be observed. For example, omitMeasurements = {'M1':{'61':'D'}} would mean the D ion beam of the 61 fragment of the M+1 experiment is not observed, regardless of its abundance.
ffstd: A float; if new fractionation factors are calculated, they are pulled from a normal distribution centered around 1, with this standard deviation.
unresolvedDict: A dictionary, specifying which unresolved ion beams add to each other.
outputFull: A boolean. Typically False, in which case beams that are not observed are culled from the dictionary. If True, includes this information; this should only be used for debugging, and will likely break the solver routine.
Outputs:
predictedMeasurement: A dictionary giving information from the M+N measurements.
MN: A dictionary where keys are mass selections ("M1", "M2") and values are dictionaries giving information about the isotopologues of each mass selection.
fractionationFactors: The calculated fractionation factors for this measurement (empty unless calcFF == True)
'''
M1Only = False
if massThreshold == 1:
M1Only = True
byAtom = ci.inputToAtomDict(molecularDataFrame, disable = disableProgress, M1Only = M1Only)
#Introduce any clumps of interest with clumps
if clumpD == {}:
bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)
else:
print("Adding clumps")
stochD = copy.deepcopy(byAtom)
for clumpNumber, clumpInfo in clumpD.items():
byAtom = ci.introduceClump(byAtom, clumpInfo['Sites'], clumpInfo['Amount'], molecularDataFrame)
for clumpNumber, clumpInfo in clumpD.items():
ci.checkClumpDelta(clumpInfo['Sites'], molecularDataFrame, byAtom, stochD)
bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)
#Initialize Measurement output
if disableProgress == False:
print("Simulating Measurement")
allMeasurementInfo = {}
allMeasurementInfo = fas.UValueMeasurement(bySub, allMeasurementInfo, massThreshold = massThreshold,
subList = UValueList)
MN = ci.massSelections(byAtom, massThreshold = massThreshold)
MN = fas.trackMNFragments(MN, expandedFrags, fragKeys, molecularDataFrame, unresolvedDict = unresolvedDict)
predictedMeasurement, FF = fas.predictMNFragmentExpt(allMeasurementInfo, MN, expandedFrags, fragKeys, molecularDataFrame,
fragmentationDictionary,
abundanceThreshold = abundanceThreshold, calcFF = calcFF, ffstd = ffstd, fractionationFactors = fractionationFactors, omitMeasurements = omitMeasurements, unresolvedDict = unresolvedDict, outputFull = outputFull)
if outputPath != None:
output = json.dumps(predictedMeasurement)
f = open(outputPath + ".json","w")
f.write(output)
f.close()
return predictedMeasurement, MN, FF
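# Sketch of a simulated measurement using the objects returned by initializeMethionine
# above (the parameter values are illustrative, not recommendations):
#
#   predictedMeasurement, MN, FF = simulateMeasurement(
#       molecularDataFrame, fragmentationDictionary, expandedFrags, fragKeys,
#       massThreshold=2, disableProgress=True)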
def updateAbundanceCorrection(latestDeltas, fragSubset, fragmentationDictionary, expandedFrags,
fragSubgeometryKeys, processStandard, processSample, isotopologuesDict, UValuesSmp, molecularDataFrame,
NUpdates = 30, breakCondition = 1, perturbTheoryOAmt = 0.002,
experimentalOCorrectList = [],
abundanceThreshold = 0,
massThreshold = 1,
omitMeasurements = {},
unresolvedDict = {},
UMNSub = ['13C'],
N = 100,
setSpreadByExtreme = False,
oACorrectBounds = False):
'''
    A function for the iterated abundance correction. This function iterates up to NUpdates times; for each iteration, it:
1) takes the most recent set of deltas, recomputes the predicted measurement of methionine with them, and uses this to update the O value correction.
2) Defines a reasonable standard deviation to sample around this O value, based on the perturbTheoryOAmt parameter (e.g. sigma of 0.002 * O_correct)
3) Recalculates the site specific structure using the new correction factors.
4) Checks if the difference between the old deltas and new deltas is smaller than a break condition; if so, ends the routine.
It outputs the final set of results and thisODict, a data product storing information about the correction procedure.
Inputs:
latestDeltas: The input deltas to use for the first iteration of the procedure.
fragSubset: A list giving the subset of fragments to observe. If you are not observing all fragments, you may input only those you do observe.
    fragmentationDictionary: A dictionary like the allFragments variable from initializeMethionine, but only including the subset of fragments selected by fragSubset.
expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom (rather than for each site). See fragmentAndSimulate for details.
fragSubgeometryKeys: A list of strings, e.g. 133_01, 133_02, corresponding to each subgeometry of each fragment. A fragment will have multiple subgeometries if there are multiple fragmentation pathways to form it.
processStandard: A dictionary containing data from several measurements, in the form: process[fileKey][MNKey][fragKey] = {'Observed Abundance':A list of floats,
'Subs':A list of strings
'Error':A list of floats
'predicted Abundance':A list of floats}
it should have information for each measurement of each observation. See runAllTests for implementation.
processSample: As processStandard, but the 'Predicted Abundance' terms will be an empty list.
    isotopologuesDict: A dictionary where the keys are "M0", "M1", etc. and the values are dataFrames giving the isotopologues with those substitutions.
UValuesSmp: A dictionary specifying the molecular average U values and their errors, i.e. {'13C':'Observed':float,'Error':float}. See readInput.readComputedUValues
molecularDataFrame: A dataFrame containing information about the molecule.
NUpdates: The maximum number of iterations to perform.
    breakCondition: Each iteration, a residual is calculated as the sum of squared differences between the new and old delta values. If that sum is below breakCondition, the routine ends.
perturbTheoryOAmt: Each O correction is given as a mean and a sigma. Then for each iteration of the Monte Carlo, we draw a new factor from this distribution. This parameter determines the relative width, e.g. sigma = mean * perturbTheoryOAmt
    N = 100: The number of iterations for each M+N Monte Carlo. E.g., if NUpdates is 30 and N is 100, we recalculate the methionine spectrum 30 times. Each iteration, we solve for site-specific values using a Monte Carlo routine with N = 100.
    UMNSub: Sets the specific substitutions whose molecular average U values are used to calculate UMN. Otherwise all molecular average U values for that UMN are used. Recommended to use--the procedure only works for substitutions that are totally solved for. For example, if one 13C 13C isotopologue is not solved for precisely in M+N relative abundance space, we should not use 13C13C in the UMN routine. The best candidates tend to be abundant things--36S, 18O, 13C, 34S, and so forth.
abundanceThreshold, massThreshold, omitMeasurements, unresolvedDict: See simulateMeasurement; set these parameters for each simulated dataset.
experimentalOCorrectList: A list, containing information about which peaks to use experimental correction for. See solveSystem.perturbSample.
Outputs:
M1Results: A dataframe giving the final results of the iterated correction process.
thisODict: A dictionary containing information about each correction (all except Histogram) and histograms of the sampled O values from every 10th iteration (as well as the final iteration).
'''
#Initialize dictionary to track output of iterated correction process.
thisODict = {'residual':[],
'delta':[],
'O':[],
'relDelta':[],
'relDeltaErr':[],
'Histogram':[]}
for i in range(NUpdates):
oldDeltas = latestDeltas
#Get new dataframe, simulate new measurement.
M1Df, expandedFrags, fragSubgeometryKeys, fragmentationDictionary = initializeMethionine(latestDeltas, fragSubset,
printHeavy = False)
predictedMeasurementUpdate, MNDictUpdate, FFUpdate = simulateMeasurement(M1Df, fragmentationDictionary,
expandedFrags,
fragSubgeometryKeys,
abundanceThreshold = abundanceThreshold,
massThreshold = massThreshold,
calcFF = False,
outputPath = None,
disableProgress = True,
fractionationFactors = {},
omitMeasurements = omitMeasurements,
unresolvedDict = unresolvedDict)
#Generate new O Corrections
OCorrectionUpdate = ss.OValueCorrectTheoretical(predictedMeasurementUpdate, processSample,
massThreshold = massThreshold)
#For each O correction, generate a normal distribution. The computed value is the mean, and the sigma is set by perturbTheoryOAmt.
#explicitOCorrect may optionally contain a "Bounds" entry, when using extreme values. For example, explicitOCorrect[MNKey][fragKey] = (Lower Bound, Upper Bound).
#This is not implemented in this routine.
explicitOCorrect = {}
for MNKey, MNData in OCorrectionUpdate.items():
if MNKey not in explicitOCorrect:
explicitOCorrect[MNKey] = {}
for fragKey, fragData in MNData.items():
if fragKey not in explicitOCorrect[MNKey]:
explicitOCorrect[MNKey][fragKey] = {}
explicitOCorrect[MNKey][fragKey]['Mu,Sigma'] = (fragData, fragData * perturbTheoryOAmt)
M1Results = ss.M1MonteCarlo(processStandard, processSample, OCorrectionUpdate, isotopologuesDict,
fragmentationDictionary, perturbTheoryOAmt = perturbTheoryOAmt,
experimentalOCorrectList = experimentalOCorrectList,
N = N, GJ = False, debugMatrix = False, disableProgress = True,
storePerturbedSamples = False, storeOCorrect = True,
explicitOCorrect = explicitOCorrect, perturbOverrideList = ['M1'])
processedResults = ss.processM1MCResults(M1Results, UValuesSmp, isotopologuesDict, molecularDataFrame, disableProgress = True,
UMNSub = UMNSub)
ss.updateSiteSpecificDfM1MC(processedResults, molecularDataFrame)
M1Df = molecularDataFrame.copy()
M1Df['deltas'] = M1Df['VPDB etc. Deltas']
thisODict['O'].append(copy.deepcopy(OCorrectionUpdate['M1']))
thisODict['delta'].append(list(M1Df['deltas']))
residual = ((np.array(M1Df['deltas']) - np.array(oldDeltas))**2).sum()
thisODict['residual'].append(residual)
latestDeltas = M1Df['deltas'].values
thisODict['relDelta'].append(M1Df['Relative Deltas'].values)
thisODict['relDeltaErr'].append(M1Df['Relative Deltas Error'].values)
print(residual)
if i % 10 == 0 or residual <= breakCondition:
correctVals = {'61':[],
'133':[],
'full':[]}
for res in M1Results['Extra Info']['O Correct']:
correctVals['full'].append(res['full'])
correctVals['133'].append(res['133'])
correctVals['61'].append(res['61'])
thisODict['Histogram'].append(copy.deepcopy(correctVals))
if residual <= breakCondition:
break
return M1Results, thisODict | [
"numpy.array",
"fragmentAndSimulate.predictMNFragmentExpt",
"copy.deepcopy",
"solveSystem.processM1MCResults",
"calcIsotopologues.inputToAtomDict",
"solveSystem.OValueCorrectTheoretical",
"solveSystem.updateSiteSpecificDfM1MC",
"json.dumps",
"solveSystem.M1MonteCarlo",
"pandas.DataFrame",
"basic... | [((3681, 3712), 'pandas.DataFrame', 'pd.DataFrame', (['l'], {'columns': 'IDList'}), '(l, columns=IDList)\n', (3693, 3712), True, 'import pandas as pd\n'), ((8014, 8092), 'calcIsotopologues.inputToAtomDict', 'ci.inputToAtomDict', (['molecularDataFrame'], {'disable': 'disableProgress', 'M1Only': 'M1Only'}), '(molecularDataFrame, disable=disableProgress, M1Only=M1Only)\n', (8032, 8092), True, 'import calcIsotopologues as ci\n'), ((8923, 9025), 'fragmentAndSimulate.UValueMeasurement', 'fas.UValueMeasurement', (['bySub', 'allMeasurementInfo'], {'massThreshold': 'massThreshold', 'subList': 'UValueList'}), '(bySub, allMeasurementInfo, massThreshold=\n massThreshold, subList=UValueList)\n', (8944, 9025), True, 'import fragmentAndSimulate as fas\n'), ((9081, 9135), 'calcIsotopologues.massSelections', 'ci.massSelections', (['byAtom'], {'massThreshold': 'massThreshold'}), '(byAtom, massThreshold=massThreshold)\n', (9098, 9135), True, 'import calcIsotopologues as ci\n'), ((9147, 9251), 'fragmentAndSimulate.trackMNFragments', 'fas.trackMNFragments', (['MN', 'expandedFrags', 'fragKeys', 'molecularDataFrame'], {'unresolvedDict': 'unresolvedDict'}), '(MN, expandedFrags, fragKeys, molecularDataFrame,\n unresolvedDict=unresolvedDict)\n', (9167, 9251), True, 'import fragmentAndSimulate as fas\n'), ((9290, 9627), 'fragmentAndSimulate.predictMNFragmentExpt', 'fas.predictMNFragmentExpt', (['allMeasurementInfo', 'MN', 'expandedFrags', 'fragKeys', 'molecularDataFrame', 'fragmentationDictionary'], {'abundanceThreshold': 'abundanceThreshold', 'calcFF': 'calcFF', 'ffstd': 'ffstd', 'fractionationFactors': 'fractionationFactors', 'omitMeasurements': 'omitMeasurements', 'unresolvedDict': 'unresolvedDict', 'outputFull': 'outputFull'}), '(allMeasurementInfo, MN, expandedFrags, fragKeys,\n molecularDataFrame, fragmentationDictionary, abundanceThreshold=\n abundanceThreshold, calcFF=calcFF, ffstd=ffstd, fractionationFactors=\n fractionationFactors, omitMeasurements=omitMeasurements, unresolvedDict\n =unresolvedDict, outputFull=outputFull)\n', (9315, 9627), True, 'import fragmentAndSimulate as fas\n'), ((3831, 3862), 'fragmentAndSimulate.expandFrag', 'fas.expandFrag', (['x', 'numberAtSite'], {}), '(x, numberAtSite)\n', (3845, 3862), True, 'import fragmentAndSimulate as fas\n'), ((3924, 3963), 'basicDeltaOperations.deltaToConcentration', 'op.deltaToConcentration', (['"""S"""', 'deltas[5]'], {}), "('S', deltas[5])\n", (3947, 3963), True, 'import basicDeltaOperations as op\n'), ((3979, 4022), 'basicDeltaOperations.ratioToDelta', 'op.ratioToDelta', (['"""34S"""', '(SConc[2] / SConc[0])'], {}), "('34S', SConc[2] / SConc[0])\n", (3994, 4022), True, 'import basicDeltaOperations as op\n'), ((4036, 4079), 'basicDeltaOperations.ratioToDelta', 'op.ratioToDelta', (['"""36S"""', '(SConc[3] / SConc[0])'], {}), "('36S', SConc[3] / SConc[0])\n", (4051, 4079), True, 'import basicDeltaOperations as op\n'), ((4094, 4133), 'basicDeltaOperations.deltaToConcentration', 'op.deltaToConcentration', (['"""O"""', 'deltas[4]'], {}), "('O', deltas[4])\n", (4117, 4133), True, 'import basicDeltaOperations as op\n'), ((4149, 4192), 'basicDeltaOperations.ratioToDelta', 'op.ratioToDelta', (['"""18O"""', '(OConc[2] / OConc[0])'], {}), "('18O', OConc[2] / OConc[0])\n", (4164, 4192), True, 'import basicDeltaOperations as op\n'), ((8189, 8253), 'calcIsotopologues.calcSubDictionary', 'ci.calcSubDictionary', (['byAtom', 'molecularDataFrame'], {'atomInput': '(True)'}), '(byAtom, molecularDataFrame, atomInput=True)\n', (8209, 8253), True, 'import 
calcIsotopologues as ci\n'), ((8314, 8335), 'copy.deepcopy', 'copy.deepcopy', (['byAtom'], {}), '(byAtom)\n', (8327, 8335), False, 'import copy\n'), ((8690, 8754), 'calcIsotopologues.calcSubDictionary', 'ci.calcSubDictionary', (['byAtom', 'molecularDataFrame'], {'atomInput': '(True)'}), '(byAtom, molecularDataFrame, atomInput=True)\n', (8710, 8754), True, 'import calcIsotopologues as ci\n'), ((9771, 9803), 'json.dumps', 'json.dumps', (['predictedMeasurement'], {}), '(predictedMeasurement)\n', (9781, 9803), False, 'import json\n'), ((17088, 17191), 'solveSystem.OValueCorrectTheoretical', 'ss.OValueCorrectTheoretical', (['predictedMeasurementUpdate', 'processSample'], {'massThreshold': 'massThreshold'}), '(predictedMeasurementUpdate, processSample,\n massThreshold=massThreshold)\n', (17115, 17191), True, 'import solveSystem as ss\n'), ((18103, 18492), 'solveSystem.M1MonteCarlo', 'ss.M1MonteCarlo', (['processStandard', 'processSample', 'OCorrectionUpdate', 'isotopologuesDict', 'fragmentationDictionary'], {'perturbTheoryOAmt': 'perturbTheoryOAmt', 'experimentalOCorrectList': 'experimentalOCorrectList', 'N': 'N', 'GJ': '(False)', 'debugMatrix': '(False)', 'disableProgress': '(True)', 'storePerturbedSamples': '(False)', 'storeOCorrect': '(True)', 'explicitOCorrect': 'explicitOCorrect', 'perturbOverrideList': "['M1']"}), "(processStandard, processSample, OCorrectionUpdate,\n isotopologuesDict, fragmentationDictionary, perturbTheoryOAmt=\n perturbTheoryOAmt, experimentalOCorrectList=experimentalOCorrectList, N\n =N, GJ=False, debugMatrix=False, disableProgress=True,\n storePerturbedSamples=False, storeOCorrect=True, explicitOCorrect=\n explicitOCorrect, perturbOverrideList=['M1'])\n", (18118, 18492), True, 'import solveSystem as ss\n'), ((18705, 18829), 'solveSystem.processM1MCResults', 'ss.processM1MCResults', (['M1Results', 'UValuesSmp', 'isotopologuesDict', 'molecularDataFrame'], {'disableProgress': '(True)', 'UMNSub': 'UMNSub'}), '(M1Results, UValuesSmp, isotopologuesDict,\n molecularDataFrame, disableProgress=True, UMNSub=UMNSub)\n', (18726, 18829), True, 'import solveSystem as ss\n'), ((18883, 18948), 'solveSystem.updateSiteSpecificDfM1MC', 'ss.updateSiteSpecificDfM1MC', (['processedResults', 'molecularDataFrame'], {}), '(processedResults, molecularDataFrame)\n', (18910, 18948), True, 'import solveSystem as ss\n'), ((8420, 8510), 'calcIsotopologues.introduceClump', 'ci.introduceClump', (['byAtom', "clumpInfo['Sites']", "clumpInfo['Amount']", 'molecularDataFrame'], {}), "(byAtom, clumpInfo['Sites'], clumpInfo['Amount'],\n molecularDataFrame)\n", (8437, 8510), True, 'import calcIsotopologues as ci\n'), ((8586, 8660), 'calcIsotopologues.checkClumpDelta', 'ci.checkClumpDelta', (["clumpInfo['Sites']", 'molecularDataFrame', 'byAtom', 'stochD'], {}), "(clumpInfo['Sites'], molecularDataFrame, byAtom, stochD)\n", (8604, 8660), True, 'import calcIsotopologues as ci\n'), ((19088, 19126), 'copy.deepcopy', 'copy.deepcopy', (["OCorrectionUpdate['M1']"], {}), "(OCorrectionUpdate['M1'])\n", (19101, 19126), False, 'import copy\n'), ((19986, 20012), 'copy.deepcopy', 'copy.deepcopy', (['correctVals'], {}), '(correctVals)\n', (19999, 20012), False, 'import copy\n'), ((19215, 19239), 'numpy.array', 'np.array', (["M1Df['deltas']"], {}), "(M1Df['deltas'])\n", (19223, 19239), True, 'import numpy as np\n'), ((19242, 19261), 'numpy.array', 'np.array', (['oldDeltas'], {}), '(oldDeltas)\n', (19250, 19261), True, 'import numpy as np\n')] |
import time
from typing import Any, List
import pytest
from yarl import URL
from neuro_sdk import Action, FileStatus, FileStatusType
from neuro_sdk.storage import DiskUsageInfo
from neuro_cli.formatters.storage import (
BaseFilesFormatter,
BSDAttributes,
BSDPainter,
DiskUsageFormatter,
FilesSorter,
GnuIndicators,
GnuPainter,
LongFilesFormatter,
NonePainter,
SimpleFilesFormatter,
VerticalColumnsFilesFormatter,
get_painter,
)
class TestNonePainter:
def test_simple(self, rich_cmp: Any) -> None:
painter = NonePainter()
file = FileStatus(
"File1",
2048,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/user/File1"),
)
rich_cmp(painter.paint(file.name, file.type))
class TestGnuPainter:
def test_color_parsing_simple(self) -> None:
painter = GnuPainter("rs=1;0;1")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter(":rs=1;0;1")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter("rs=1;0;1:")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter("rs=1;0;1:fi=32;42")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == "32;42"
painter = GnuPainter("rs=1;0;1:fi")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == ""
painter = GnuPainter("rs=1;0;1:fi=")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == ""
@pytest.mark.parametrize(
"escaped,result",
[
("\\a", "\a"),
("\\b", "\b"),
("\\e", chr(27)),
("\\f", "\f"),
("\\n", "\n"),
("\\r", "\r"),
("\\t", "\t"),
("\\v", "\v"),
("\\?", chr(127)),
("\\_", " "),
("a\\n", "a\n"),
("a\\tb", "a\tb"),
("a\\t\\rb", "a\t\rb"),
("a\\=b", "a=b"),
],
)
def test_color_parsing_escaped_simple(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("\\7", chr(7)),
("\\8", "8"),
("\\10", chr(8)),
("a\\2", "a" + chr(2)),
("a\\2b", "a" + chr(2) + "b"),
],
)
def test_color_parsing_escaped_octal(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("\\x7", chr(0x7)),
("\\x8", chr(0x8)),
("\\x10", chr(0x10)),
("\\XaA", chr(0xAA)),
("a\\x222", "a" + chr(0x22) + "2"),
("a\\x2z", "a" + chr(0x2) + "z"),
],
)
def test_color_parsing_escaped_hex(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("^a", chr(1)),
("^?", chr(127)),
("^z", chr(26)),
("a^Z", "a" + chr(26)),
("a^Zb", "a" + chr(26) + "b"),
],
)
def test_color_parsing_carret(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize("escaped", [("^1"), ("^"), ("^" + chr(130))])
def test_color_parsing_carret_incorrect(self, escaped: str) -> None:
with pytest.raises(EnvironmentError):
GnuPainter("rs=" + escaped)
with pytest.raises(EnvironmentError):
GnuPainter(escaped + "=1;2")
@pytest.mark.parametrize(
"ls_colors",
[
"di=32;41:fi=0;44:no=0;46",
"di=32;41:no=0;46",
"no=0;46",
"*.text=0;46",
"*.txt=0;46",
],
)
def test_coloring(self, rich_cmp: Any, ls_colors: str) -> None:
file = FileStatus(
"test.txt",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/usertest.txt"),
)
folder = FileStatus(
"tmp",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.WRITE,
uri=URL("storage://default/usertmp"),
)
painter = GnuPainter(ls_colors)
rich_cmp(painter.paint(file.name, file.type), index=0)
rich_cmp(painter.paint(folder.name, folder.type), index=1)
class TestBSDPainter:
def test_color_parsing(self) -> None:
painter = BSDPainter("exfxcxdxbxegedabagacad")
assert painter._colors[BSDAttributes.DIRECTORY] == "ex"
@pytest.mark.parametrize(
"ls_colors", ["exfxcxdxbxegedabagacad", "Eafxcxdxbxegedabagacad"]
)
def test_coloring(self, ls_colors: str, rich_cmp: Any) -> None:
file = FileStatus(
"test.txt",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/usertest.txt"),
)
folder = FileStatus(
"tmp",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.WRITE,
uri=URL("storage://default/usertmp"),
)
painter = BSDPainter(ls_colors)
rich_cmp(painter.paint(file.name, file.type), index=0)
rich_cmp(painter.paint(folder.name, folder.type), index=1)
class TestPainterFactory:
def test_detection(self, monkeypatch: Any) -> None:
monkeypatch.setenv("LS_COLORS", "")
monkeypatch.setenv("LSCOLORS", "")
painter = get_painter(True)
assert isinstance(painter, NonePainter)
monkeypatch.setenv("LSCOLORS", "exfxcxdxbxegedabagacad")
monkeypatch.setenv("LS_COLORS", "di=32;41:fi=0;44:no=0;46")
painter_without_color = get_painter(False)
painter_with_color = get_painter(True)
assert isinstance(painter_without_color, NonePainter)
assert not isinstance(painter_with_color, NonePainter)
monkeypatch.setenv("LSCOLORS", "")
monkeypatch.setenv("LS_COLORS", "di=32;41:fi=0;44:no=0;46")
painter = get_painter(True)
assert isinstance(painter, GnuPainter)
monkeypatch.setenv("LSCOLORS", "exfxcxdxbxegedabagacad")
monkeypatch.setenv("LS_COLORS", "")
painter = get_painter(True)
assert isinstance(painter, BSDPainter)
class TestFilesFormatter:
files = [
FileStatus(
"File1",
2048,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/userFile1"),
),
FileStatus(
"File2",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-10-10 13:10:10", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/userFile2"),
),
FileStatus(
"File3 with space",
1_024_001,
FileStatusType.FILE,
int(time.mktime(time.strptime("2019-02-02 05:02:02", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/userFile 3 with space"),
),
]
folders = [
FileStatus(
"Folder1",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2017-03-03 06:03:03", "%Y-%m-%d %H:%M:%S"))),
Action.MANAGE,
uri=URL("storage://default/userFolder11"),
),
FileStatus(
"1Folder with space",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2017-03-03 06:03:02", "%Y-%m-%d %H:%M:%S"))),
Action.MANAGE,
uri=URL("storage://default/user1Folder with space"),
),
]
files_and_folders = files + folders
@pytest.mark.parametrize(
"formatter",
[
(SimpleFilesFormatter(color=False)),
(VerticalColumnsFilesFormatter(width=100, color=False)),
(LongFilesFormatter(human_readable=False, color=False)),
],
)
def test_formatter_with_files_and_folders(
self, formatter: BaseFilesFormatter, rich_cmp: Any
) -> None:
rich_cmp(formatter(self.files_and_folders))
@pytest.mark.parametrize(
"formatter",
[
(SimpleFilesFormatter(color=False)),
(VerticalColumnsFilesFormatter(width=100, color=False)),
(LongFilesFormatter(human_readable=False, color=False)),
],
)
def test_formatter_with_empty_files(
self, formatter: BaseFilesFormatter, rich_cmp: Any
) -> None:
files: List[FileStatus] = []
rich_cmp(formatter(files))
def test_sorter(self) -> None:
sorter = FilesSorter.NAME
files = sorted(self.files_and_folders, key=sorter.key())
assert files == [
self.folders[1],
self.files[0],
self.files[1],
self.files[2],
self.folders[0],
]
sorter = FilesSorter.SIZE
files = sorted(self.files_and_folders, key=sorter.key())
assert files[2:5] == [self.files[1], self.files[0], self.files[2]]
sorter = FilesSorter.TIME
files = sorted(self.files_and_folders, key=sorter.key())
assert files == [
self.folders[1],
self.folders[0],
self.files[0],
self.files[1],
self.files[2],
]
class TestUsageFormatter:
def test_formatter(self, rich_cmp: Any) -> None:
usage = DiskUsageInfo(
total=100000, used=80000, free=20000, cluster_name="default"
)
formatter = DiskUsageFormatter()
rich_cmp(formatter(usage))
| [
"neuro_cli.formatters.storage.GnuPainter",
"neuro_sdk.storage.DiskUsageInfo",
"time.strptime",
"neuro_cli.formatters.storage.DiskUsageFormatter",
"neuro_cli.formatters.storage.VerticalColumnsFilesFormatter",
"pytest.mark.parametrize",
"neuro_cli.formatters.storage.LongFilesFormatter",
"pytest.raises",... | [((5151, 5281), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ls_colors"""', "['di=32;41:fi=0;44:no=0;46', 'di=32;41:no=0;46', 'no=0;46', '*.text=0;46',\n '*.txt=0;46']"], {}), "('ls_colors', ['di=32;41:fi=0;44:no=0;46',\n 'di=32;41:no=0;46', 'no=0;46', '*.text=0;46', '*.txt=0;46'])\n", (5174, 5281), False, 'import pytest\n'), ((6358, 6452), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ls_colors"""', "['exfxcxdxbxegedabagacad', 'Eafxcxdxbxegedabagacad']"], {}), "('ls_colors', ['exfxcxdxbxegedabagacad',\n 'Eafxcxdxbxegedabagacad'])\n", (6381, 6452), False, 'import pytest\n'), ((572, 585), 'neuro_cli.formatters.storage.NonePainter', 'NonePainter', ([], {}), '()\n', (583, 585), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((1007, 1029), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['"""rs=1;0;1"""'], {}), "('rs=1;0;1')\n", (1017, 1029), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((1120, 1143), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['""":rs=1;0;1"""'], {}), "(':rs=1;0;1')\n", (1130, 1143), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((1234, 1257), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['"""rs=1;0;1:"""'], {}), "('rs=1;0;1:')\n", (1244, 1257), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((1348, 1379), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['"""rs=1;0;1:fi=32;42"""'], {}), "('rs=1;0;1:fi=32;42')\n", (1358, 1379), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((1540, 1565), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['"""rs=1;0;1:fi"""'], {}), "('rs=1;0;1:fi')\n", (1550, 1565), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((1721, 1747), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['"""rs=1;0;1:fi="""'], {}), "('rs=1;0;1:fi=')\n", (1731, 1747), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((2472, 2499), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["('rs=' + escaped)"], {}), "('rs=' + escaped)\n", (2482, 2499), False, 'from neuro_cli.formatters.storage import 
BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((2589, 2617), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=1;2')"], {}), "(escaped + '=1;2')\n", (2599, 2617), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((2692, 2727), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=' + escaped)"], {}), "(escaped + '=' + escaped)\n", (2702, 2727), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((3133, 3160), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["('rs=' + escaped)"], {}), "('rs=' + escaped)\n", (3143, 3160), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((3250, 3278), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=1;2')"], {}), "(escaped + '=1;2')\n", (3260, 3278), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((3353, 3388), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=' + escaped)"], {}), "(escaped + '=' + escaped)\n", (3363, 3388), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((3854, 3881), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["('rs=' + escaped)"], {}), "('rs=' + escaped)\n", (3864, 3881), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((3971, 3999), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=1;2')"], {}), "(escaped + '=1;2')\n", (3981, 3999), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((4074, 4109), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=' + escaped)"], {}), "(escaped + '=' + escaped)\n", (4084, 4109), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((4510, 4537), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["('rs=' + escaped)"], {}), "('rs=' + 
escaped)\n", (4520, 4537), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((4627, 4655), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=1;2')"], {}), "(escaped + '=1;2')\n", (4637, 4655), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((4730, 4765), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=' + escaped)"], {}), "(escaped + '=' + escaped)\n", (4740, 4765), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((6015, 6036), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (['ls_colors'], {}), '(ls_colors)\n', (6025, 6036), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((6251, 6287), 'neuro_cli.formatters.storage.BSDPainter', 'BSDPainter', (['"""exfxcxdxbxegedabagacad"""'], {}), "('exfxcxdxbxegedabagacad')\n", (6261, 6287), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((7106, 7127), 'neuro_cli.formatters.storage.BSDPainter', 'BSDPainter', (['ls_colors'], {}), '(ls_colors)\n', (7116, 7127), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((7447, 7464), 'neuro_cli.formatters.storage.get_painter', 'get_painter', (['(True)'], {}), '(True)\n', (7458, 7464), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((7679, 7697), 'neuro_cli.formatters.storage.get_painter', 'get_painter', (['(False)'], {}), '(False)\n', (7690, 7697), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((7727, 7744), 'neuro_cli.formatters.storage.get_painter', 'get_painter', (['(True)'], {}), '(True)\n', (7738, 7744), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((8000, 8017), 'neuro_cli.formatters.storage.get_painter', 'get_painter', (['(True)'], {}), '(True)\n', (8011, 8017), False, 'from 
neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((8193, 8210), 'neuro_cli.formatters.storage.get_painter', 'get_painter', (['(True)'], {}), '(True)\n', (8204, 8210), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((11530, 11605), 'neuro_sdk.storage.DiskUsageInfo', 'DiskUsageInfo', ([], {'total': '(100000)', 'used': '(80000)', 'free': '(20000)', 'cluster_name': '"""default"""'}), "(total=100000, used=80000, free=20000, cluster_name='default')\n", (11543, 11605), False, 'from neuro_sdk.storage import DiskUsageInfo\n'), ((11648, 11668), 'neuro_cli.formatters.storage.DiskUsageFormatter', 'DiskUsageFormatter', ([], {}), '()\n', (11666, 11668), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((4984, 5015), 'pytest.raises', 'pytest.raises', (['EnvironmentError'], {}), '(EnvironmentError)\n', (4997, 5015), False, 'import pytest\n'), ((5029, 5056), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["('rs=' + escaped)"], {}), "('rs=' + escaped)\n", (5039, 5056), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((5071, 5102), 'pytest.raises', 'pytest.raises', (['EnvironmentError'], {}), '(EnvironmentError)\n', (5084, 5102), False, 'import pytest\n'), ((5116, 5144), 'neuro_cli.formatters.storage.GnuPainter', 'GnuPainter', (["(escaped + '=1;2')"], {}), "(escaped + '=1;2')\n", (5126, 5144), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((9856, 9889), 'neuro_cli.formatters.storage.SimpleFilesFormatter', 'SimpleFilesFormatter', ([], {'color': '(False)'}), '(color=False)\n', (9876, 9889), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((9905, 9958), 'neuro_cli.formatters.storage.VerticalColumnsFilesFormatter', 'VerticalColumnsFilesFormatter', ([], {'width': '(100)', 'color': '(False)'}), '(width=100, color=False)\n', (9934, 9958), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((9974, 10027), 'neuro_cli.formatters.storage.LongFilesFormatter', 'LongFilesFormatter', ([], {'human_readable': '(False)', 'color': '(False)'}), '(human_readable=False, color=False)\n', (9992, 10027), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, 
BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((10295, 10328), 'neuro_cli.formatters.storage.SimpleFilesFormatter', 'SimpleFilesFormatter', ([], {'color': '(False)'}), '(color=False)\n', (10315, 10328), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((10344, 10397), 'neuro_cli.formatters.storage.VerticalColumnsFilesFormatter', 'VerticalColumnsFilesFormatter', ([], {'width': '(100)', 'color': '(False)'}), '(width=100, color=False)\n', (10373, 10397), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((10413, 10466), 'neuro_cli.formatters.storage.LongFilesFormatter', 'LongFilesFormatter', ([], {'human_readable': '(False)', 'color': '(False)'}), '(human_readable=False, color=False)\n', (10431, 10466), False, 'from neuro_cli.formatters.storage import BaseFilesFormatter, BSDAttributes, BSDPainter, DiskUsageFormatter, FilesSorter, GnuIndicators, GnuPainter, LongFilesFormatter, NonePainter, SimpleFilesFormatter, VerticalColumnsFilesFormatter, get_painter\n'), ((815, 850), 'yarl.URL', 'URL', (['"""storage://default/user/File1"""'], {}), "('storage://default/user/File1')\n", (818, 850), False, 'from yarl import URL\n'), ((5672, 5709), 'yarl.URL', 'URL', (['"""storage://default/usertest.txt"""'], {}), "('storage://default/usertest.txt')\n", (5675, 5709), False, 'from yarl import URL\n'), ((5953, 5985), 'yarl.URL', 'URL', (['"""storage://default/usertmp"""'], {}), "('storage://default/usertmp')\n", (5956, 5985), False, 'from yarl import URL\n'), ((6763, 6800), 'yarl.URL', 'URL', (['"""storage://default/usertest.txt"""'], {}), "('storage://default/usertest.txt')\n", (6766, 6800), False, 'from yarl import URL\n'), ((7044, 7076), 'yarl.URL', 'URL', (['"""storage://default/usertmp"""'], {}), "('storage://default/usertmp')\n", (7047, 7076), False, 'from yarl import URL\n'), ((8523, 8557), 'yarl.URL', 'URL', (['"""storage://default/userFile1"""'], {}), "('storage://default/userFile1')\n", (8526, 8557), False, 'from yarl import URL\n'), ((8792, 8826), 'yarl.URL', 'URL', (['"""storage://default/userFile2"""'], {}), "('storage://default/userFile2')\n", (8795, 8826), False, 'from yarl import URL\n'), ((9077, 9123), 'yarl.URL', 'URL', (['"""storage://default/userFile 3 with space"""'], {}), "('storage://default/userFile 3 with space')\n", (9080, 9123), False, 'from yarl import URL\n'), ((9386, 9423), 'yarl.URL', 'URL', (['"""storage://default/userFolder11"""'], {}), "('storage://default/userFolder11')\n", (9389, 9423), False, 'from yarl import URL\n'), ((9675, 9722), 'yarl.URL', 'URL', (['"""storage://default/user1Folder with space"""'], {}), "('storage://default/user1Folder with space')\n", (9678, 9722), False, 'from yarl import URL\n'), ((713, 770), 'time.strptime', 'time.strptime', (['"""2018-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (726, 770), False, 'import time\n'), ((5570, 5627), 'time.strptime', 'time.strptime', (['"""2018-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-01-01 03:00:00', 
'%Y-%m-%d %H:%M:%S')\n", (5583, 5627), False, 'import time\n'), ((5850, 5907), 'time.strptime', 'time.strptime', (['"""2018-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (5863, 5907), False, 'import time\n'), ((6661, 6718), 'time.strptime', 'time.strptime', (['"""2018-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (6674, 6718), False, 'import time\n'), ((6941, 6998), 'time.strptime', 'time.strptime', (['"""2018-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (6954, 6998), False, 'import time\n'), ((8421, 8478), 'time.strptime', 'time.strptime', (['"""2018-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (8434, 8478), False, 'import time\n'), ((8690, 8747), 'time.strptime', 'time.strptime', (['"""2018-10-10 13:10:10"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2018-10-10 13:10:10', '%Y-%m-%d %H:%M:%S')\n", (8703, 8747), False, 'import time\n'), ((8975, 9032), 'time.strptime', 'time.strptime', (['"""2019-02-02 05:02:02"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2019-02-02 05:02:02', '%Y-%m-%d %H:%M:%S')\n", (8988, 9032), False, 'import time\n'), ((9282, 9339), 'time.strptime', 'time.strptime', (['"""2017-03-03 06:03:03"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2017-03-03 06:03:03', '%Y-%m-%d %H:%M:%S')\n", (9295, 9339), False, 'import time\n'), ((9571, 9628), 'time.strptime', 'time.strptime', (['"""2017-03-03 06:03:02"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2017-03-03 06:03:02', '%Y-%m-%d %H:%M:%S')\n", (9584, 9628), False, 'import time\n')] |
"""
Demonstrate the type 1 NUFFT using cuFINUFFT
"""
import numpy as np
import pycuda.autoinit
from pycuda.gpuarray import GPUArray, to_gpu
from cufinufft import cufinufft
# Set up parameters for problem.
N1, N2 = 59, 61 # Size of uniform grid
M = 100 # Number of nonuniform points
n_transf = 2 # Number of input arrays
eps = 1e-6 # Requested tolerance
dtype = np.float32 # Datatype (real)
complex_dtype = np.complex64 # Datatype (complex)
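# Note (added for clarity): up to cuFINUFFT's sign and mode-ordering
# conventions, the type 1 (nonuniform-to-uniform) transform computed here is
#   f[k1, k2] = sum_j c_j * exp(1j * (k1 * kx_j + k2 * ky_j)),
# with integer frequencies k1, k2 centred on zero. The accuracy check at the
# bottom of this script evaluates this sum directly for a single mode.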
# Generate coordinates of non-uniform points.
kx = np.random.uniform(-np.pi, np.pi, size=M)
ky = np.random.uniform(-np.pi, np.pi, size=M)
# Generate source strengths.
c = (np.random.standard_normal((n_transf, M))
+ 1j * np.random.standard_normal((n_transf, M)))
# Cast to desired datatype.
kx = kx.astype(dtype)
ky = ky.astype(dtype)
c = c.astype(complex_dtype)
# Allocate memory for the uniform grid on the GPU.
fk_gpu = GPUArray((n_transf, N1, N2), dtype=complex_dtype)
# Initialize the plan and set the points.
plan = cufinufft(1, (N1, N2), n_transf, eps=eps, dtype=dtype)
plan.set_pts(to_gpu(kx), to_gpu(ky))
# Execute the plan, reading from the strengths array c and storing the
# result in fk_gpu.
plan.execute(to_gpu(c), fk_gpu)
# Retrieve the result from the GPU.
fk = fk_gpu.get()
# Check accuracy of the transform at position (nt1, nt2).
nt1 = int(0.37 * N1)
nt2 = int(0.26 * N2)
for i in range(n_transf):
# Calculate the true value of the type 1 transform at the uniform grid
# point (nt1, nt2), which corresponds to the coordinate nt1 - N1 // 2 and
# nt2 - N2 // 2.
x, y = nt1 - N1 // 2, nt2 - N2 // 2
fk_true = np.sum(c[i] * np.exp(1j * (x * kx + y * ky)))
# Calculate the absolute and relative error.
err = np.abs(fk[i, nt1, nt2] - fk_true)
rel_err = err / np.max(np.abs(fk[i]))
print(f"[{i}] Absolute error on mode [{nt1}, {nt2}] is {err:.3g}")
print(f"[{i}] Relative error on mode [{nt1}, {nt2}] is {rel_err:.3g}")
assert(rel_err < 10 * eps)
| [
"numpy.random.standard_normal",
"numpy.abs",
"pycuda.gpuarray.GPUArray",
"numpy.exp",
"cufinufft.cufinufft",
"numpy.random.uniform",
"pycuda.gpuarray.to_gpu"
] | [((592, 632), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {'size': 'M'}), '(-np.pi, np.pi, size=M)\n', (609, 632), True, 'import numpy as np\n'), ((638, 678), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {'size': 'M'}), '(-np.pi, np.pi, size=M)\n', (655, 678), True, 'import numpy as np\n'), ((971, 1020), 'pycuda.gpuarray.GPUArray', 'GPUArray', (['(n_transf, N1, N2)'], {'dtype': 'complex_dtype'}), '((n_transf, N1, N2), dtype=complex_dtype)\n', (979, 1020), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1071, 1125), 'cufinufft.cufinufft', 'cufinufft', (['(1)', '(N1, N2)', 'n_transf'], {'eps': 'eps', 'dtype': 'dtype'}), '(1, (N1, N2), n_transf, eps=eps, dtype=dtype)\n', (1080, 1125), False, 'from cufinufft import cufinufft\n'), ((714, 754), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n_transf, M)'], {}), '((n_transf, M))\n', (739, 754), True, 'import numpy as np\n'), ((1139, 1149), 'pycuda.gpuarray.to_gpu', 'to_gpu', (['kx'], {}), '(kx)\n', (1145, 1149), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1151, 1161), 'pycuda.gpuarray.to_gpu', 'to_gpu', (['ky'], {}), '(ky)\n', (1157, 1161), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1268, 1277), 'pycuda.gpuarray.to_gpu', 'to_gpu', (['c'], {}), '(c)\n', (1274, 1277), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1804, 1837), 'numpy.abs', 'np.abs', (['(fk[i, nt1, nt2] - fk_true)'], {}), '(fk[i, nt1, nt2] - fk_true)\n', (1810, 1837), True, 'import numpy as np\n'), ((767, 807), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n_transf, M)'], {}), '((n_transf, M))\n', (792, 807), True, 'import numpy as np\n'), ((1712, 1744), 'numpy.exp', 'np.exp', (['(1.0j * (x * kx + y * ky))'], {}), '(1.0j * (x * kx + y * ky))\n', (1718, 1744), True, 'import numpy as np\n'), ((1865, 1878), 'numpy.abs', 'np.abs', (['fk[i]'], {}), '(fk[i])\n', (1871, 1878), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import sys
import cut
from box import Box
from reactor import Reactor
from termcolor import colored
# -- assert helper
def check(what,output,f=None):
try:
assert(what)
except:
print("Assert failed, debug output: ",end="")
print(output)
if f is not None:
f()
sys.exit()
# -- end assert helper
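# Overview (added note): Box appears to parse Advent-of-Code style reboot
# steps such as "on x=1..3,y=1..3,z=1..3", and Reactor accumulates them via
# "+=": an "on" box adds volume and an "off" box carves volume out. R.size()
# presumably returns the number of lit unit cubes and R.realcubes holds the
# current set of non-overlapping boxes; the test cases below rely on exactly
# these behaviours.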
# --- test cases
print (colored("Testcase 1: adding two identical boxes results in only one box","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]")
print (colored("Testcase 2: adding various boxes that overlap but are on the edge with the first, also result in only one box","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
print (colored(" 2a: check that adding sliver on x doesn't cause more boxes","yellow"))
R += Box("on x=1..1,y=1..3,z=1..3")
check(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]",[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored(" 2c: check that adding sliver on z doesn't cause more boxes ","yellow"))
R += Box("on x=1..3,y=1..3,z=1..1")
check(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]",[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored(" 2b: check that adding sliver on y doesn't cause more boxes","yellow"))
R += Box("on x=1..3,y=1..1,z=1..3")
check(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]",[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored("Testcase 3: merge on X edge","red"))
R = Reactor()
R += Box("on x=1..1,y=1..1,z=1..1")
R += Box("on x=2..3,y=1..1,z=1..1")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..1,z=1..1]")
print ("Testcase 4: merge on Y edge")
R += Box("on x=1..3,y=2..3,z=1..1")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..1]")
print ("Testcase 5: merge on Z edge")
R += Box("on x=1..3,y=1..3,z=2..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]")
print ("Testcase 6: merge on (reverse) X edge")
R = Reactor()
R += Box("on x=2..3,y=2..3,z=2..3")
R += Box("on x=1..1,y=2..3,z=2..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=2..3,z=2..3]")
print ("Testcase 7: merge on (reverse) Y edge")
R += Box("on x=1..3,y=1..1,z=2..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=2..3]")
print ("Testcase 8: merge on (reverse) Z edge")
R += Box("on x=1..3,y=1..3,z=1..1")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]")
print (colored("Testcase 9a: Remove a 1x1x1 cube from a 3x3x3 cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=1..1,y=1..1,z=1..1")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 9b: Remove a 1x1x1 cube from a 3x3x3 cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=3..3,y=3..3,z=3..3")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 9c: Remove a 1x1x1 cube from a 3x3x3 cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=2..2,y=2..2,z=2..2")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10x: Remove a slab from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..3,y=1..1,z=1..1")
assert(R.size()==3)
R += Box("off x=3..3,y=1..1,z=1..1")
check(R.size()==2,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10y: Remove a slab from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..1,y=1..3,z=1..1")
assert(R.size()==3)
R += Box("off x=1..1,y=3..3,z=1..1")
check(R.size()==2,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10z: Remove a slab from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..1,y=1..1,z=1..3")
assert(R.size()==3)
R += Box("off x=1..1,y=1..1,z=3..3")
check(R.size()==2,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10xy: Remove a corner from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..1")
assert(R.size()==9)
R += Box("off x=3..3,y=3..3,z=1..1")
check(R.size()==8,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10xy: Remove a corner from the upper end of a cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=3..3,y=3..3,z=3..3")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10x-x: Remove a smaller part of the blob","red"))
R = Reactor()
R+= Box("on x=1..4,y=1..1,z=1..1")
R+= Box("off x=2..3,y=1..1,z=1..1")
check(R.size()==2,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored("Testcase 10y-y: Remove a smaller part of the blob","red"))
R = Reactor()
R+= Box("on x=1..1,y=1..4,z=1..1")
R+= Box("off x=1..1,y=2..3,z=1..1")
check(R.size()==2,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored("Testcase 10z-z: Remove a smaller part of the blob","red"))
R = Reactor()
R+= Box("on x=1..1,y=1..1,z=1..4")
R+= Box("off x=1..1,y=1..1,z=2..3")
check(R.size()==2,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
# -----------------
print (colored("Testcase 10xyz: Remove a part of a blob","red"))
# off x=9..11,y=9..11,z=9..11 from on x=10..10,y=10..12,z=10..12
R = Reactor()
R+=Box("on x=10..10,y=10..12,z=10..12")
R+=Box("off x=11..11,y=9..11,z=9..11")
R.savefig()
sys.exit()
# -----------------
print(colored("Testcase 11, deconstructed","green"))
R = Reactor()
# steps 11a and 11b result in these cubes
a=Box("on x=10..10,y=10..12,z=10..12")
a.id="[1]"
b=Box("on x=11..12,y=10..10,z=10..12")
b.id="[2]"
c=Box("on x=11..12,y=11..12,z=10..10")
c.id="[3]"
d=Box("on x=11..13,y=11..13,z=11..13")
d.id="[4]"
R+=a
#R+=b
#R+=c
#R+=d
# step 11c removes a cube
e=Box("off x=9..11,y=9..11,z=9..11")
e.id="[5]"
R+=e
R.savefig()
sys.exit()
print (colored("Testcase 11 : First example from AOC","red"))
R=Reactor()
print (" 11a: Check that first cube is size 27")
a=Box("on x=10..12,y=10..12,z=10..12")
a.id="Box1"
R+=a
assert(R.size()==27)
b = Box("on x=11..13,y=11..13,z=11..13")
b.id = "Box2"
R+=b
print (" 11b: Check that merging two cubes result in the correct size ("+str(27+19)+")")
check(R.size()==27+19,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
c = Box("off x=9..11,y=9..11,z=9..11")
c.id = "Box3"
R+=c
print (" 11c: Check that removing a cube result in the correct size ("+str(27+19-8)+")")
check(R.size()==27+19-8,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (" 11d: Check that adding a cube result in the correct size ("+str(39)+")")
R+=Box("on x=10..10,y=10..10,z=10..10")
assert(R.size()==39)
sys.exit()
# - finally, read input from stdin and solve the problem
R = Reactor()
def readinaTOR():
RR = Reactor()
for l in sys.stdin:
l = l.strip()
b = Box(l)
RR = RR + b
return RR
| [
"box.Box",
"termcolor.colored",
"reactor.Reactor",
"sys.exit"
] | [((485, 494), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (492, 494), False, 'from reactor import Reactor\n'), ((500, 530), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (503, 530), False, 'from box import Box\n'), ((536, 566), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (539, 566), False, 'from box import Box\n'), ((767, 776), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (774, 776), False, 'from reactor import Reactor\n'), ((782, 812), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (785, 812), False, 'from box import Box\n'), ((914, 944), 'box.Box', 'Box', (['"""on x=1..1,y=1..3,z=1..3"""'], {}), "('on x=1..1,y=1..3,z=1..3')\n", (917, 944), False, 'from box import Box\n'), ((1184, 1214), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..1"""'], {}), "('on x=1..3,y=1..3,z=1..1')\n", (1187, 1214), False, 'from box import Box\n'), ((1453, 1483), 'box.Box', 'Box', (['"""on x=1..3,y=1..1,z=1..3"""'], {}), "('on x=1..3,y=1..1,z=1..3')\n", (1456, 1483), False, 'from box import Box\n'), ((1682, 1691), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (1689, 1691), False, 'from reactor import Reactor\n'), ((1698, 1728), 'box.Box', 'Box', (['"""on x=1..1,y=1..1,z=1..1"""'], {}), "('on x=1..1,y=1..1,z=1..1')\n", (1701, 1728), False, 'from box import Box\n'), ((1734, 1764), 'box.Box', 'Box', (['"""on x=2..3,y=1..1,z=1..1"""'], {}), "('on x=2..3,y=1..1,z=1..1')\n", (1737, 1764), False, 'from box import Box\n'), ((1869, 1899), 'box.Box', 'Box', (['"""on x=1..3,y=2..3,z=1..1"""'], {}), "('on x=1..3,y=2..3,z=1..1')\n", (1872, 1899), False, 'from box import Box\n'), ((2004, 2034), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=2..3"""'], {}), "('on x=1..3,y=1..3,z=2..3')\n", (2007, 2034), False, 'from box import Box\n'), ((2148, 2157), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (2155, 2157), False, 'from reactor import Reactor\n'), ((2164, 2194), 'box.Box', 'Box', (['"""on x=2..3,y=2..3,z=2..3"""'], {}), "('on x=2..3,y=2..3,z=2..3')\n", (2167, 2194), False, 'from box import Box\n'), ((2200, 2230), 'box.Box', 'Box', (['"""on x=1..1,y=2..3,z=2..3"""'], {}), "('on x=1..1,y=2..3,z=2..3')\n", (2203, 2230), False, 'from box import Box\n'), ((2346, 2376), 'box.Box', 'Box', (['"""on x=1..3,y=1..1,z=2..3"""'], {}), "('on x=1..3,y=1..1,z=2..3')\n", (2349, 2376), False, 'from box import Box\n'), ((2491, 2521), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..1"""'], {}), "('on x=1..3,y=1..3,z=1..1')\n", (2494, 2521), False, 'from box import Box\n'), ((2672, 2681), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (2679, 2681), False, 'from reactor import Reactor\n'), ((2687, 2717), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (2690, 2717), False, 'from box import Box\n'), ((2744, 2775), 'box.Box', 'Box', (['"""off x=1..1,y=1..1,z=1..1"""'], {}), "('off x=1..1,y=1..1,z=1..1')\n", (2747, 2775), False, 'from box import Box\n'), ((2932, 2941), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (2939, 2941), False, 'from reactor import Reactor\n'), ((2947, 2977), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (2950, 2977), False, 'from box import Box\n'), ((3004, 3035), 'box.Box', 'Box', (['"""off x=3..3,y=3..3,z=3..3"""'], {}), "('off x=3..3,y=3..3,z=3..3')\n", (3007, 3035), False, 'from box import Box\n'), ((3193, 3202), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (3200, 3202), False, 'from 
reactor import Reactor\n'), ((3208, 3238), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (3211, 3238), False, 'from box import Box\n'), ((3265, 3296), 'box.Box', 'Box', (['"""off x=2..2,y=2..2,z=2..2"""'], {}), "('off x=2..2,y=2..2,z=2..2')\n", (3268, 3296), False, 'from box import Box\n'), ((3451, 3460), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (3458, 3460), False, 'from reactor import Reactor\n'), ((3466, 3496), 'box.Box', 'Box', (['"""on x=1..3,y=1..1,z=1..1"""'], {}), "('on x=1..3,y=1..1,z=1..1')\n", (3469, 3496), False, 'from box import Box\n'), ((3522, 3553), 'box.Box', 'Box', (['"""off x=3..3,y=1..1,z=1..1"""'], {}), "('off x=3..3,y=1..1,z=1..1')\n", (3525, 3553), False, 'from box import Box\n'), ((3708, 3717), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (3715, 3717), False, 'from reactor import Reactor\n'), ((3723, 3753), 'box.Box', 'Box', (['"""on x=1..1,y=1..3,z=1..1"""'], {}), "('on x=1..1,y=1..3,z=1..1')\n", (3726, 3753), False, 'from box import Box\n'), ((3779, 3810), 'box.Box', 'Box', (['"""off x=1..1,y=3..3,z=1..1"""'], {}), "('off x=1..1,y=3..3,z=1..1')\n", (3782, 3810), False, 'from box import Box\n'), ((3965, 3974), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (3972, 3974), False, 'from reactor import Reactor\n'), ((3980, 4010), 'box.Box', 'Box', (['"""on x=1..1,y=1..1,z=1..3"""'], {}), "('on x=1..1,y=1..1,z=1..3')\n", (3983, 4010), False, 'from box import Box\n'), ((4036, 4067), 'box.Box', 'Box', (['"""off x=1..1,y=1..1,z=3..3"""'], {}), "('off x=1..1,y=1..1,z=3..3')\n", (4039, 4067), False, 'from box import Box\n'), ((4224, 4233), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (4231, 4233), False, 'from reactor import Reactor\n'), ((4239, 4269), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..1"""'], {}), "('on x=1..3,y=1..3,z=1..1')\n", (4242, 4269), False, 'from box import Box\n'), ((4295, 4326), 'box.Box', 'Box', (['"""off x=3..3,y=3..3,z=1..1"""'], {}), "('off x=3..3,y=3..3,z=1..1')\n", (4298, 4326), False, 'from box import Box\n'), ((4483, 4492), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (4490, 4492), False, 'from reactor import Reactor\n'), ((4498, 4528), 'box.Box', 'Box', (['"""on x=1..3,y=1..3,z=1..3"""'], {}), "('on x=1..3,y=1..3,z=1..3')\n", (4501, 4528), False, 'from box import Box\n'), ((4555, 4586), 'box.Box', 'Box', (['"""off x=3..3,y=3..3,z=3..3"""'], {}), "('off x=3..3,y=3..3,z=3..3')\n", (4558, 4586), False, 'from box import Box\n'), ((4734, 4743), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (4741, 4743), False, 'from reactor import Reactor\n'), ((4748, 4778), 'box.Box', 'Box', (['"""on x=1..4,y=1..1,z=1..1"""'], {}), "('on x=1..4,y=1..1,z=1..1')\n", (4751, 4778), False, 'from box import Box\n'), ((4783, 4814), 'box.Box', 'Box', (['"""off x=2..3,y=1..1,z=1..1"""'], {}), "('off x=2..3,y=1..1,z=1..1')\n", (4786, 4814), False, 'from box import Box\n'), ((4992, 5001), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (4999, 5001), False, 'from reactor import Reactor\n'), ((5006, 5036), 'box.Box', 'Box', (['"""on x=1..1,y=1..4,z=1..1"""'], {}), "('on x=1..1,y=1..4,z=1..1')\n", (5009, 5036), False, 'from box import Box\n'), ((5041, 5072), 'box.Box', 'Box', (['"""off x=1..1,y=2..3,z=1..1"""'], {}), "('off x=1..1,y=2..3,z=1..1')\n", (5044, 5072), False, 'from box import Box\n'), ((5251, 5260), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (5258, 5260), False, 'from reactor import Reactor\n'), ((5265, 5295), 'box.Box', 'Box', (['"""on x=1..1,y=1..1,z=1..4"""'], {}), "('on 
x=1..1,y=1..1,z=1..4')\n", (5268, 5295), False, 'from box import Box\n'), ((5300, 5331), 'box.Box', 'Box', (['"""off x=1..1,y=1..1,z=2..3"""'], {}), "('off x=1..1,y=1..1,z=2..3')\n", (5303, 5331), False, 'from box import Box\n'), ((5585, 5594), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (5592, 5594), False, 'from reactor import Reactor\n'), ((5598, 5634), 'box.Box', 'Box', (['"""on x=10..10,y=10..12,z=10..12"""'], {}), "('on x=10..10,y=10..12,z=10..12')\n", (5601, 5634), False, 'from box import Box\n'), ((5638, 5673), 'box.Box', 'Box', (['"""off x=11..11,y=9..11,z=9..11"""'], {}), "('off x=11..11,y=9..11,z=9..11')\n", (5641, 5673), False, 'from box import Box\n'), ((5686, 5696), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5694, 5696), False, 'import sys\n'), ((5777, 5786), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (5784, 5786), False, 'from reactor import Reactor\n'), ((5832, 5868), 'box.Box', 'Box', (['"""on x=10..10,y=10..12,z=10..12"""'], {}), "('on x=10..10,y=10..12,z=10..12')\n", (5835, 5868), False, 'from box import Box\n'), ((5882, 5918), 'box.Box', 'Box', (['"""on x=11..12,y=10..10,z=10..12"""'], {}), "('on x=11..12,y=10..10,z=10..12')\n", (5885, 5918), False, 'from box import Box\n'), ((5932, 5968), 'box.Box', 'Box', (['"""on x=11..12,y=11..12,z=10..10"""'], {}), "('on x=11..12,y=11..12,z=10..10')\n", (5935, 5968), False, 'from box import Box\n'), ((5982, 6018), 'box.Box', 'Box', (['"""on x=11..13,y=11..13,z=11..13"""'], {}), "('on x=11..13,y=11..13,z=11..13')\n", (5985, 6018), False, 'from box import Box\n'), ((6082, 6116), 'box.Box', 'Box', (['"""off x=9..11,y=9..11,z=9..11"""'], {}), "('off x=9..11,y=9..11,z=9..11')\n", (6085, 6116), False, 'from box import Box\n'), ((6145, 6155), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6153, 6155), False, 'import sys\n'), ((6223, 6232), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (6230, 6232), False, 'from reactor import Reactor\n'), ((6292, 6328), 'box.Box', 'Box', (['"""on x=10..12,y=10..12,z=10..12"""'], {}), "('on x=10..12,y=10..12,z=10..12')\n", (6295, 6328), False, 'from box import Box\n'), ((6371, 6407), 'box.Box', 'Box', (['"""on x=11..13,y=11..13,z=11..13"""'], {}), "('on x=11..13,y=11..13,z=11..13')\n", (6374, 6407), False, 'from box import Box\n'), ((6630, 6664), 'box.Box', 'Box', (['"""off x=9..11,y=9..11,z=9..11"""'], {}), "('off x=9..11,y=9..11,z=9..11')\n", (6633, 6664), False, 'from box import Box\n'), ((6978, 7014), 'box.Box', 'Box', (['"""on x=10..10,y=10..10,z=10..10"""'], {}), "('on x=10..10,y=10..10,z=10..10')\n", (6981, 7014), False, 'from box import Box\n'), ((7037, 7047), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7045, 7047), False, 'import sys\n'), ((7111, 7120), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (7118, 7120), False, 'from reactor import Reactor\n'), ((400, 485), 'termcolor.colored', 'colored', (['"""Testcase 1: adding two identical boxes results in only one box"""', '"""red"""'], {}), "('Testcase 1: adding two identical boxes results in only one box', 'red'\n )\n", (407, 485), False, 'from termcolor import colored\n'), ((635, 772), 'termcolor.colored', 'colored', (['"""Testcase 2: adding various boxes that overlap but are on the edge with the first, also result in only one box"""', '"""red"""'], {}), "(\n 'Testcase 2: adding various boxes that overlap but are on the edge with the first, also result in only one box'\n , 'red')\n", (642, 772), False, 'from termcolor import colored\n'), ((821, 912), 'termcolor.colored', 'colored', (['""" 2a: check that adding sliver on x doesn\'t cause 
more boxes"""', '"""yellow"""'], {}), '(" 2a: check that adding sliver on x doesn\'t cause more boxes",\n \'yellow\')\n', (828, 912), False, 'from termcolor import colored\n'), ((1090, 1182), 'termcolor.colored', 'colored', (['""" 2c: check that adding sliver on z doesn\'t cause more boxes """', '"""yellow"""'], {}), '(" 2c: check that adding sliver on z doesn\'t cause more boxes ",\n \'yellow\')\n', (1097, 1182), False, 'from termcolor import colored\n'), ((1360, 1451), 'termcolor.colored', 'colored', (['""" 2b: check that adding sliver on y doesn\'t cause more boxes"""', '"""yellow"""'], {}), '(" 2b: check that adding sliver on y doesn\'t cause more boxes",\n \'yellow\')\n', (1367, 1451), False, 'from termcolor import colored\n'), ((1632, 1670), 'termcolor.colored', 'colored', (['"""Testcase 3: merge on X edge"""'], {}), "('Testcase 3: merge on X edge')\n", (1639, 1670), False, 'from termcolor import colored\n'), ((2590, 2658), 'termcolor.colored', 'colored', (['"""Testcase 9a: Remove a 1x1x1 cube from a 3x3x3 cube"""', '"""red"""'], {}), "('Testcase 9a: Remove a 1x1x1 cube from a 3x3x3 cube', 'red')\n", (2597, 2658), False, 'from termcolor import colored\n'), ((2850, 2918), 'termcolor.colored', 'colored', (['"""Testcase 9b: Remove a 1x1x1 cube from a 3x3x3 cube"""', '"""red"""'], {}), "('Testcase 9b: Remove a 1x1x1 cube from a 3x3x3 cube', 'red')\n", (2857, 2918), False, 'from termcolor import colored\n'), ((3111, 3179), 'termcolor.colored', 'colored', (['"""Testcase 9c: Remove a 1x1x1 cube from a 3x3x3 cube"""', '"""red"""'], {}), "('Testcase 9c: Remove a 1x1x1 cube from a 3x3x3 cube', 'red')\n", (3118, 3179), False, 'from termcolor import colored\n'), ((3371, 3445), 'termcolor.colored', 'colored', (['"""Testcase 10x: Remove a slab from the upper end of a slab"""', '"""red"""'], {}), "('Testcase 10x: Remove a slab from the upper end of a slab', 'red')\n", (3378, 3445), False, 'from termcolor import colored\n'), ((3628, 3702), 'termcolor.colored', 'colored', (['"""Testcase 10y: Remove a slab from the upper end of a slab"""', '"""red"""'], {}), "('Testcase 10y: Remove a slab from the upper end of a slab', 'red')\n", (3635, 3702), False, 'from termcolor import colored\n'), ((3885, 3959), 'termcolor.colored', 'colored', (['"""Testcase 10z: Remove a slab from the upper end of a slab"""', '"""red"""'], {}), "('Testcase 10z: Remove a slab from the upper end of a slab', 'red')\n", (3892, 3959), False, 'from termcolor import colored\n'), ((4141, 4218), 'termcolor.colored', 'colored', (['"""Testcase 10xy: Remove a corner from the upper end of a slab"""', '"""red"""'], {}), "('Testcase 10xy: Remove a corner from the upper end of a slab', 'red')\n", (4148, 4218), False, 'from termcolor import colored\n'), ((4400, 4477), 'termcolor.colored', 'colored', (['"""Testcase 10xy: Remove a corner from the upper end of a cube"""', '"""red"""'], {}), "('Testcase 10xy: Remove a corner from the upper end of a cube', 'red')\n", (4407, 4477), False, 'from termcolor import colored\n'), ((4662, 4729), 'termcolor.colored', 'colored', (['"""Testcase 10x-x: Remove a smaller part of the blob"""', '"""red"""'], {}), "('Testcase 10x-x: Remove a smaller part of the blob', 'red')\n", (4669, 4729), False, 'from termcolor import colored\n'), ((4920, 4987), 'termcolor.colored', 'colored', (['"""Testcase 10y-y: Remove a smaller part of the blob"""', '"""red"""'], {}), "('Testcase 10y-y: Remove a smaller part of the blob', 'red')\n", (4927, 4987), False, 'from termcolor import colored\n'), ((5179, 5246), 'termcolor.colored', 
'colored', (['"""Testcase 10z-z: Remove a smaller part of the blob"""', '"""red"""'], {}), "('Testcase 10z-z: Remove a smaller part of the blob', 'red')\n", (5186, 5246), False, 'from termcolor import colored\n'), ((5458, 5515), 'termcolor.colored', 'colored', (['"""Testcase 10xyz: Remove a part of a blob"""', '"""red"""'], {}), "('Testcase 10xyz: Remove a part of a blob', 'red')\n", (5465, 5515), False, 'from termcolor import colored\n'), ((5726, 5772), 'termcolor.colored', 'colored', (['"""Testcase 11, deconstructed"""', '"""green"""'], {}), "('Testcase 11, deconstructed', 'green')\n", (5733, 5772), False, 'from termcolor import colored\n'), ((6165, 6219), 'termcolor.colored', 'colored', (['"""Testcase 11 : First example from AOC"""', '"""red"""'], {}), "('Testcase 11 : First example from AOC', 'red')\n", (6172, 6219), False, 'from termcolor import colored\n'), ((7150, 7159), 'reactor.Reactor', 'Reactor', ([], {}), '()\n', (7157, 7159), False, 'from reactor import Reactor\n'), ((7224, 7230), 'box.Box', 'Box', (['l'], {}), '(l)\n', (7227, 7230), False, 'from box import Box\n'), ((339, 349), 'sys.exit', 'sys.exit', ([], {}), '()\n', (347, 349), False, 'import sys\n')] |
import os
import sys
import pytest
from prefect import Flow
from prefect.tasks.dbt import DbtShellTask
pytestmark = pytest.mark.skipif(
sys.platform == "win32", reason="DbtShellTask currently not supported on Windows"
)
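# Note (added): these tests exercise DbtShellTask's profile handling. With
# overwrite_profiles=True the task is expected to materialise dbt_kwargs into
# a profiles.yml under profiles_dir; with overwrite_profiles=False it should
# leave profile files alone and rely on an existing one (e.g. located via the
# DBT_PROFILES_DIR environment variable), as asserted at the end of each test.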
def test_shell_result_from_stdout(tmpdir):
dbt_dir = tmpdir.mkdir("dbt")
task = DbtShellTask(
command="dbt --version",
profile_name="default",
environment="test",
dbt_kwargs={
"type": "snowflake",
"threads": 1,
"account": "JH72176.us-east-1",
"user": "<EMAIL>",
"role": "analyst",
"database": "staging",
"warehouse": "data_science",
"schema": "analysis",
"private_key_path": "/src/private_key.p8",
"private_key_passphrase": "<PASSWORD>",
},
overwrite_profiles=True,
profiles_dir=str(dbt_dir),
)
out = task.run()
# default config should return a string
assert isinstance(out, str)
# check that the result is not empty
assert len(out) > 0
def test_shell_result_from_stdout_with_full_return(tmpdir):
dbt_dir = tmpdir.mkdir("dbt")
task = DbtShellTask(
return_all=True,
command="dbt --version",
profile_name="default",
environment="test",
dbt_kwargs={
"type": "snowflake",
"threads": 1,
"account": "JH72176.us-east-1",
"user": "<EMAIL>",
"role": "analyst",
"database": "staging",
"warehouse": "data_science",
"schema": "analysis",
"private_key_path": "/src/private_key.p8",
"private_key_passphrase": "<PASSWORD>",
},
overwrite_profiles=True,
profiles_dir=str(dbt_dir),
)
out = task.run()
# when set to `return_all=True`, should return a list
assert isinstance(out, list)
# check that the result is multiple lines
assert len(out) > 1
def test_shell_creates_profiles_yml_file(tmpdir):
dbt_dir = tmpdir.mkdir("dbt")
with Flow(name="test") as f:
task = DbtShellTask(
profile_name="default",
environment="test",
dbt_kwargs={
"type": "snowflake",
"threads": 1,
"account": "JH72176.us-east-1",
"user": "<EMAIL>",
"role": "analyst",
"database": "staging",
"warehouse": "data_science",
"schema": "analysis",
"private_key_path": "/src/private_key.p8",
"private_key_passphrase": "<PASSWORD>",
},
overwrite_profiles=True,
profiles_dir=str(dbt_dir),
)(command="ls")
out = f.run()
profiles_path = dbt_dir.join("profiles.yml")
assert out.is_successful()
assert os.path.exists(profiles_path)
def test_shell_uses_dbt_envar(tmpdir, monkeypatch):
dbt_project_path = tmpdir.mkdir("dbt_project")
monkeypatch.setenv("DBT_PROFILES_DIR", str(dbt_project_path))
real_profiles_path = dbt_project_path.join("profiles.yml")
open(real_profiles_path, "a").close()
with Flow(name="test") as f:
task = DbtShellTask(
profile_name="default",
environment="test",
dbt_kwargs={
"type": "snowflake",
"threads": 1,
"account": "JH72176.us-east-1",
"user": "<EMAIL>",
"role": "analyst",
"database": "staging",
"warehouse": "data_science",
"schema": "analysis",
"private_key_path": "/src/private_key.p8",
"private_key_passphrase": "<PASSWORD>",
},
overwrite_profiles=False,
profiles_dir=str(tmpdir),
)(command="ls")
out = f.run()
missing_profiles_path = tmpdir.join("profiles.yml")
assert out.is_successful()
assert not os.path.exists(missing_profiles_path)
| [
"os.path.exists",
"prefect.Flow",
"pytest.mark.skipif"
] | [((120, 226), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform == 'win32')"], {'reason': '"""DbtShellTask currently not supported on Windows"""'}), "(sys.platform == 'win32', reason=\n 'DbtShellTask currently not supported on Windows')\n", (138, 226), False, 'import pytest\n'), ((2874, 2903), 'os.path.exists', 'os.path.exists', (['profiles_path'], {}), '(profiles_path)\n', (2888, 2903), False, 'import os\n'), ((2081, 2098), 'prefect.Flow', 'Flow', ([], {'name': '"""test"""'}), "(name='test')\n", (2085, 2098), False, 'from prefect import Flow\n'), ((3190, 3207), 'prefect.Flow', 'Flow', ([], {'name': '"""test"""'}), "(name='test')\n", (3194, 3207), False, 'from prefect import Flow\n'), ((3994, 4031), 'os.path.exists', 'os.path.exists', (['missing_profiles_path'], {}), '(missing_profiles_path)\n', (4008, 4031), False, 'import os\n')] |
from django.conf.urls import url
from django.urls import path
from captcha import views
# urlpatterns = [
# url(r'image/(?P<key>\w+)/$', views.captcha_image, name='captcha-image', kwargs={'scale': 1}),
# url(r'image/(?P<key>\w+)@2/$', views.captcha_image, name='captcha-image-2x', kwargs={'scale': 2}),
# url(r'audio/(?P<key>\w+).wav$', views.captcha_audio, name='captcha-audio'),
# url(r'refresh/$', views.captcha_refresh, name='captcha-refresh'),
# ]
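# Note (added): the path() routes below mirror the legacy url() patterns kept
# above for reference. The <slug:key> converter ([-a-zA-Z0-9_]+) is only
# roughly equivalent to the old (?P<key>\w+) regex, which did not allow
# hyphens in the key.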
urlpatterns = [
path('image/<slug:key>/', views.captcha_image, name='captcha-image', kwargs={'scale': 1}),
path('image/<slug:key>@2/', views.captcha_image, name='captcha-image-2x', kwargs={'scale': 2}),
path('audio/<slug:key>.wav', views.captcha_audio, name='captcha-audio'),
path('refresh/', views.captcha_refresh, name='captcha-refresh'),
] | [
"django.urls.path"
] | [((490, 584), 'django.urls.path', 'path', (['"""image/<slug:key>/"""', 'views.captcha_image'], {'name': '"""captcha-image"""', 'kwargs': "{'scale': 1}"}), "('image/<slug:key>/', views.captcha_image, name='captcha-image', kwargs\n ={'scale': 1})\n", (494, 584), False, 'from django.urls import path\n'), ((585, 683), 'django.urls.path', 'path', (['"""image/<slug:key>@2/"""', 'views.captcha_image'], {'name': '"""captcha-image-2x"""', 'kwargs': "{'scale': 2}"}), "('image/<slug:key>@2/', views.captcha_image, name='captcha-image-2x',\n kwargs={'scale': 2})\n", (589, 683), False, 'from django.urls import path\n'), ((685, 756), 'django.urls.path', 'path', (['"""audio/<slug:key>.wav"""', 'views.captcha_audio'], {'name': '"""captcha-audio"""'}), "('audio/<slug:key>.wav', views.captcha_audio, name='captcha-audio')\n", (689, 756), False, 'from django.urls import path\n'), ((762, 825), 'django.urls.path', 'path', (['"""refresh/"""', 'views.captcha_refresh'], {'name': '"""captcha-refresh"""'}), "('refresh/', views.captcha_refresh, name='captcha-refresh')\n", (766, 825), False, 'from django.urls import path\n')] |
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.api.common import attribute_filter
from tacker.common import exceptions as exception
class BaseViewBuilder(object):
@classmethod
def validate_filter(cls, filters=None):
if not filters:
return
return attribute_filter.parse_filter_rule(filters,
target=cls.FLATTEN_ATTRIBUTES)
@classmethod
def validate_attribute_fields(cls, all_fields=None, fields=None,
exclude_fields=None, exclude_default=None):
if all_fields and (fields or exclude_fields or exclude_default):
msg = ("Invalid query parameter combination: 'all_fields' "
"cannot be combined with 'fields' or 'exclude_fields' "
"or 'exclude_default'")
raise exception.ValidationError(msg)
if fields and (all_fields or exclude_fields):
msg = ("Invalid query parameter combination: 'fields' "
"cannot be combined with 'all_fields' or 'exclude_fields' ")
raise exception.ValidationError(msg)
if exclude_fields and (all_fields or fields or exclude_default):
msg = ("Invalid query parameter combination: 'exclude_fields' "
"cannot be combined with 'all_fields' or 'fields' "
"or 'exclude_default'")
raise exception.ValidationError(msg)
if exclude_default and (all_fields or exclude_fields):
msg = ("Invalid query parameter combination: 'exclude_default' "
"cannot be combined with 'all_fields' or 'exclude_fields' ")
raise exception.ValidationError(msg)
def _validate_complex_attributes(query_parameter, fields):
msg = ("Invalid query parameter '%(query_parameter)s'. "
"Value: %(field)s")
for field in fields:
if field in cls.COMPLEX_ATTRIBUTES:
continue
elif '*' in field:
# Field should never contain '*' as it's reserved for
# special purpose for handling key-value pairs.
raise exception.ValidationError(msg %
{"query_parameter": query_parameter,
"field": field})
elif field not in cls.FLATTEN_COMPLEX_ATTRIBUTES:
                    # Special case for fields with key-value pairs. Here the
                    # key acts as an attribute in the structure, so it has to
                    # be treated differently from other fields. Every
                    # key-value pair field is post-fixed with '*' in
                    # FLATTEN_COMPLEX_ATTRIBUTES, and a requested field that
                    # itself contains '*' is treated as an error.
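                    # Illustrative example (not from the original source): if
                    # FLATTEN_COMPLEX_ATTRIBUTES contained 'userDefinedData/*',
                    # a requested field such as 'userDefinedData/key1' would be
                    # accepted below because it starts with the prefix in front
                    # of the '*'.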
special_field = False
for attribute in cls.FLATTEN_COMPLEX_ATTRIBUTES:
if '*' in attribute and field.startswith(
attribute.split('*')[0]):
special_field = True
if not special_field:
raise exception.ValidationError(msg %
{"query_parameter": query_parameter,
"field": field})
if fields:
_validate_complex_attributes("fields", fields.split(','))
elif exclude_fields:
_validate_complex_attributes("exclude_fields",
exclude_fields.split(","))
| [
"tacker.common.exceptions.ValidationError",
"tacker.api.common.attribute_filter.parse_filter_rule"
] | [((880, 954), 'tacker.api.common.attribute_filter.parse_filter_rule', 'attribute_filter.parse_filter_rule', (['filters'], {'target': 'cls.FLATTEN_ATTRIBUTES'}), '(filters, target=cls.FLATTEN_ATTRIBUTES)\n', (914, 954), False, 'from tacker.api.common import attribute_filter\n'), ((1392, 1422), 'tacker.common.exceptions.ValidationError', 'exception.ValidationError', (['msg'], {}), '(msg)\n', (1417, 1422), True, 'from tacker.common import exceptions as exception\n'), ((1644, 1674), 'tacker.common.exceptions.ValidationError', 'exception.ValidationError', (['msg'], {}), '(msg)\n', (1669, 1674), True, 'from tacker.common import exceptions as exception\n'), ((1957, 1987), 'tacker.common.exceptions.ValidationError', 'exception.ValidationError', (['msg'], {}), '(msg)\n', (1982, 1987), True, 'from tacker.common import exceptions as exception\n'), ((2227, 2257), 'tacker.common.exceptions.ValidationError', 'exception.ValidationError', (['msg'], {}), '(msg)\n', (2252, 2257), True, 'from tacker.common import exceptions as exception\n'), ((2751, 2840), 'tacker.common.exceptions.ValidationError', 'exception.ValidationError', (["(msg % {'query_parameter': query_parameter, 'field': field})"], {}), "(msg % {'query_parameter': query_parameter,\n 'field': field})\n", (2776, 2840), True, 'from tacker.common import exceptions as exception\n'), ((3779, 3868), 'tacker.common.exceptions.ValidationError', 'exception.ValidationError', (["(msg % {'query_parameter': query_parameter, 'field': field})"], {}), "(msg % {'query_parameter': query_parameter,\n 'field': field})\n", (3804, 3868), True, 'from tacker.common import exceptions as exception\n')] |
# Generated by Django 2.2.1 on 2019-05-29 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='gender',
field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='Ч', max_length=1),
),
]
| [
"django.db.models.CharField"
] | [((324, 413), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('M', 'Male'), ('F', 'Female')]", 'default': '"""Ч"""', 'max_length': '(1)'}), "(choices=[('M', 'Male'), ('F', 'Female')], default='Ч',\n max_length=1)\n", (340, 413), False, 'from django.db import migrations, models\n')] |
""""
Name: 064 CyberPi object-oriented motion-sensing plane
Hardware: CyberPi
Description:
Uses the CyberPi sprite feature to build a gyroscope-controlled, motion-sensing mini game on the screen: steer the player's plane to dodge the bullets in the air.
Difficulty: ⭐⭐⭐
Supported mode: upload mode
None
"""
# ---------Program divider----------------Program divider----------------Program divider----------
import cyberpi
import random
import math
import time
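# Structure of this example (added note): Enemy wraps a falling "bullet"
# sprite that respawns at a random x position at the top of the 128x128
# screen; Player draws the plane sprite and, in collide_detect(), maps the
# gyroscope roll/pitch readings onto screen coordinates around the centre
# (64, 64), clamped to the display; control() then uses sprite is_touch()
# collision checks to count hits and drive the LED "health bar".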
class Enemy:
def __init__(self):
self.x = random.randint(0, 128)
self.y = -1
self.pix = cyberpi.sprite()
self.pix.draw_pixels(
[0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0xf5a623, 0xf5a623, 0xf5a623, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0xf5a623,
0xf5a623, 0xf5a623, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0xf5a623, 0xf5a623, 0xf5a623, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000])
self.pix.set_align("center")
self.pix.set_brush(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
self.pix.move_to(self.x, self.y)
self.speed = random.randint(2, 6)
def set_speed_up(self):
self.speed += 1
if self.speed >= 30:
self.speed = 30
def set_speed_down(self):
self.speed -= 1
if self.speed <= 2:
self.speed = 2
def speed_re(self):
self.speed = random.randint(3, 8)
def start(self):
self.pix.show()
self.pix.set_brush(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
self.x = random.randint(2, 125)
self.y = -1
self.pix.move_to(self.x, self.y)
def move(self):
if self.pix.get_y() > 128:
self.start()
self.pix.move_y(self.speed)
class Player:
def __init__(self):
cyberpi.led.on('b')
self.point = 0
self.player = cyberpi.sprite()
self.player.draw_pixels(
[0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff,
0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff,
0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000])
self.player.move_to(64, 64)
def reset(self):
self.point = 0
cyberpi.led.on('b')
self.player.move_to(64, 64)
def collide_detect(self):
self.player_x = 64 - -cyberpi.get_roll()
self.player_y = 64 - -cyberpi.get_pitch()
if self.player_x < 0:
self.player_x = 5
if self.player_x > 128:
self.player_x = 123
if self.player_y < 0:
self.player_y = 5
if self.player_y > 128:
self.player_y = 123
self.player.move_to(self.player_x, self.player_y)
def control(self, *para):
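        # check the player against every enemy: each hit hides that enemy and scores a point,
        # LEDs turn off as hits accumulate (the light bar is the health bar), and 20 hits ends the game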
self.collide_detect()
for item in para:
if self.player.is_touch(item.pix):
item.pix.hide()
self.point += 1
print(self.point)
num = math.floor(self.point / 4)
cyberpi.led.off(id=6 - num)
if self.point >= 20:
cyberpi.audio.play('prompt-tone')
return False
first = Player()
a = Enemy()
b = Enemy()
c = Enemy()
d = Enemy()
e = Enemy()
f = Enemy()
g = Enemy()
h = Enemy()
enemy_list = [a, b, c, d, e, f, g, h]
cyberpi.console.println("体感小飞机 上键加速 下键减速 灯光为血条")
cyberpi.console.println("A键重新开始")
cyberpi.console.println("B键开始游戏")
time.sleep(5)
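# wait on the instructions screen until B is pressed, then start the game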
while True:
first.control(*enemy_list)
cyberpi.screen.render()
if cyberpi.controller.is_press("B"):
cyberpi.display.clear()
break
cyberpi.screen.render()
time.sleep(1)
while True:
if first.control(*enemy_list) is False:
cyberpi.led.on('r')
cyberpi.display.label("GameOver", 24, 'center')
cyberpi.audio.play('prompt-tone')
while True:
if cyberpi.controller.is_press('A'):
first.reset()
cyberpi.screen.render()
time.sleep(2)
for i in enemy_list:
i.start()
i.speed_re()
cyberpi.screen.render()
break
for i in enemy_list:
i.move()
if cyberpi.controller.is_press('up'):
for i in enemy_list:
i.set_speed_up()
if cyberpi.controller.is_press('down'):
for i in enemy_list:
i.set_speed_down()
    cyberpi.screen.render()
 | [
"cyberpi.audio.play",
"cyberpi.console.println",
"cyberpi.led.on",
"cyberpi.get_pitch",
"math.floor",
"cyberpi.display.clear",
"time.sleep",
"cyberpi.sprite",
"cyberpi.controller.is_press",
"cyberpi.get_roll",
"cyberpi.display.label",
"cyberpi.screen.render",
"random.randint",
"cyberpi.led... | [((8419, 8467), 'cyberpi.console.println', 'cyberpi.console.println', (['"""体感小飞机 上键加速 下键减速 灯光为血条"""'], {}), "('体感小飞机 上键加速 下键减速 灯光为血条')\n", (8442, 8467), False, 'import cyberpi\n'), ((8468, 8501), 'cyberpi.console.println', 'cyberpi.console.println', (['"""A键重新开始"""'], {}), "('A键重新开始')\n", (8491, 8501), False, 'import cyberpi\n'), ((8502, 8535), 'cyberpi.console.println', 'cyberpi.console.println', (['"""B键开始游戏"""'], {}), "('B键开始游戏')\n", (8525, 8535), False, 'import cyberpi\n'), ((8536, 8549), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (8546, 8549), False, 'import time\n'), ((8708, 8731), 'cyberpi.screen.render', 'cyberpi.screen.render', ([], {}), '()\n', (8729, 8731), False, 'import cyberpi\n'), ((8732, 8745), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8742, 8745), False, 'import time\n'), ((8597, 8620), 'cyberpi.screen.render', 'cyberpi.screen.render', ([], {}), '()\n', (8618, 8620), False, 'import cyberpi\n'), ((8628, 8660), 'cyberpi.controller.is_press', 'cyberpi.controller.is_press', (['"""B"""'], {}), "('B')\n", (8655, 8660), False, 'import cyberpi\n'), ((9308, 9341), 'cyberpi.controller.is_press', 'cyberpi.controller.is_press', (['"""up"""'], {}), "('up')\n", (9335, 9341), False, 'import cyberpi\n'), ((9408, 9443), 'cyberpi.controller.is_press', 'cyberpi.controller.is_press', (['"""down"""'], {}), "('down')\n", (9435, 9443), False, 'import cyberpi\n'), ((9509, 9532), 'cyberpi.screen.render', 'cyberpi.screen.render', ([], {}), '()\n', (9530, 9532), False, 'import cyberpi\n'), ((292, 314), 'random.randint', 'random.randint', (['(0)', '(128)'], {}), '(0, 128)\n', (306, 314), False, 'import random\n'), ((354, 370), 'cyberpi.sprite', 'cyberpi.sprite', ([], {}), '()\n', (368, 370), False, 'import cyberpi\n'), ((3498, 3518), 'random.randint', 'random.randint', (['(2)', '(6)'], {}), '(2, 6)\n', (3512, 3518), False, 'import random\n'), ((3785, 3805), 'random.randint', 'random.randint', (['(3)', '(8)'], {}), '(3, 8)\n', (3799, 3805), False, 'import random\n'), ((3968, 3990), 'random.randint', 'random.randint', (['(2)', '(125)'], {}), '(2, 125)\n', (3982, 3990), False, 'import random\n'), ((4225, 4244), 'cyberpi.led.on', 'cyberpi.led.on', (['"""b"""'], {}), "('b')\n", (4239, 4244), False, 'import cyberpi\n'), ((4290, 4306), 'cyberpi.sprite', 'cyberpi.sprite', ([], {}), '()\n', (4304, 4306), False, 'import cyberpi\n'), ((7328, 7347), 'cyberpi.led.on', 'cyberpi.led.on', (['"""b"""'], {}), "('b')\n", (7342, 7347), False, 'import cyberpi\n'), ((8670, 8693), 'cyberpi.display.clear', 'cyberpi.display.clear', ([], {}), '()\n', (8691, 8693), False, 'import cyberpi\n'), ((8810, 8829), 'cyberpi.led.on', 'cyberpi.led.on', (['"""r"""'], {}), "('r')\n", (8824, 8829), False, 'import cyberpi\n'), ((8838, 8885), 'cyberpi.display.label', 'cyberpi.display.label', (['"""GameOver"""', '(24)', '"""center"""'], {}), "('GameOver', 24, 'center')\n", (8859, 8885), False, 'import cyberpi\n'), ((8894, 8927), 'cyberpi.audio.play', 'cyberpi.audio.play', (['"""prompt-tone"""'], {}), "('prompt-tone')\n", (8912, 8927), False, 'import cyberpi\n'), ((3364, 3386), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3378, 3386), False, 'import random\n'), ((3388, 3410), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3402, 3410), False, 'import random\n'), ((3412, 3434), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3426, 3434), False, 'import random\n'), ((3879, 3901), 'random.randint', 'random.randint', (['(0)', 
'(255)'], {}), '(0, 255)\n', (3893, 3901), False, 'import random\n'), ((3903, 3925), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3917, 3925), False, 'import random\n'), ((3927, 3949), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3941, 3949), False, 'import random\n'), ((8963, 8995), 'cyberpi.controller.is_press', 'cyberpi.controller.is_press', (['"""A"""'], {}), "('A')\n", (8990, 8995), False, 'import cyberpi\n'), ((7445, 7463), 'cyberpi.get_roll', 'cyberpi.get_roll', ([], {}), '()\n', (7461, 7463), False, 'import cyberpi\n'), ((7494, 7513), 'cyberpi.get_pitch', 'cyberpi.get_pitch', ([], {}), '()\n', (7511, 7513), False, 'import cyberpi\n'), ((8074, 8100), 'math.floor', 'math.floor', (['(self.point / 4)'], {}), '(self.point / 4)\n', (8084, 8100), False, 'import math\n'), ((8117, 8144), 'cyberpi.led.off', 'cyberpi.led.off', ([], {'id': '(6 - num)'}), '(id=6 - num)\n', (8132, 8144), False, 'import cyberpi\n'), ((9043, 9066), 'cyberpi.screen.render', 'cyberpi.screen.render', ([], {}), '()\n', (9064, 9066), False, 'import cyberpi\n'), ((9083, 9096), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9093, 9096), False, 'import time\n'), ((9213, 9236), 'cyberpi.screen.render', 'cyberpi.screen.render', ([], {}), '()\n', (9234, 9236), False, 'import cyberpi\n'), ((8202, 8235), 'cyberpi.audio.play', 'cyberpi.audio.play', (['"""prompt-tone"""'], {}), "('prompt-tone')\n", (8220, 8235), False, 'import cyberpi\n')] |
import time
import asyncio
import random
import pyee
import logging
from plugins.input_fsx import fsx_pb2
from hexi.service import event
_logger = logging.getLogger(__name__)
class UDPServer(asyncio.DatagramProtocol):
def __init__(self, manager, token):
super().__init__()
self.manager = manager
self.token = token
self.sn = 0
def datagram_received(self, data, addr):
try:
            # Note: there is no length prefix in UDP packets
msg = fsx_pb2.UdpResponseMessage()
msg.ParseFromString(data)
if msg.token != self.token:
_logger.warn('A message is discarded because of incorrect token')
self.manager.ee.emit('udp_discarded_message')
return
if msg.serialNumber <= self.sn:
_logger.warn('A message is discarded because of received newer message')
self.manager.ee.emit('udp_discarded_message')
return
self.sn = msg.serialNumber
self.manager.ee.emit('udp_received_message', msg)
except Exception as e:
_logger.warn(e)
self.manager.ee.emit('udp_discarded_message')
def connection_lost(self, exc):
self.manager.ee.emit('udp_closed')
class TCPClientManager(object):
def __init__(self, channel, host, port, retry_sec=2):
self.channel = channel
self.host = host
self.port = port
self.retry_sec = retry_sec
self.work_future = None
self.heartbeat_future = None
self.connect_future = None
self.reconnect_future = None
self.reader = None
self.writer = None
self.state = 'idle'
self.ee = channel.ee
async def connect_async(self):
        while self.state in ['connecting', 'reconnecting']:
try:
future = asyncio.open_connection(self.host, self.port)
reader, writer = await asyncio.wait_for(future, timeout=3)
_logger.info('Telemetry connected')
self.reader = reader
self.writer = writer
self.state = 'connected'
self.work_future = asyncio.ensure_future(self.work_async())
self.work_future.add_done_callback(self.on_work_done)
self.heartbeat_future = asyncio.ensure_future(self.heartbeat_async())
self.heartbeat_future.add_done_callback(self.on_heartbeat_done)
self.ee.emit('tcp_connected')
break
except (OSError, asyncio.TimeoutError):
#print('Server not connected, retry in {0} seconds'.format(self.retry_sec))
await asyncio.sleep(self.retry_sec)
def connect(self):
assert(self.state in ['idle', 'disconnected'])
assert(self.connect_future == None)
self.state = 'connecting'
self.connect_future = asyncio.ensure_future(self.connect_async())
self.connect_future.add_done_callback(self.on_connect_done)
return self.connect_future
def on_connect_done(self, future):
self.connect_future = None
async def heartbeat_async(self):
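        # send a ping message every 10 seconds to keep the telemetry connection alive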
while True:
await asyncio.sleep(10)
msg = fsx_pb2.TcpRequestMessage()
msg.msgType = fsx_pb2.TcpRequestMessage.MSG_TYPE_PING
msg.pingBody.timeStamp = int(time.time())
self.write_message(msg)
def on_heartbeat_done(self, future):
self.heartbeat_future = None
async def work_async(self):
try:
while True:
size_buffer = await self.reader.readexactly(4)
size = int.from_bytes(size_buffer, byteorder='little')
body_buffer = await self.reader.readexactly(size)
msg = fsx_pb2.TcpResponseMessage()
msg.ParseFromString(body_buffer)
self.ee.emit('tcp_received_message', msg)
except (asyncio.IncompleteReadError, ConnectionResetError, ConnectionAbortedError):
pass
def on_work_done(self, future):
_logger.info('Telemetry connection lost')
self.work_future = None
if self.heartbeat_future != None:
self.heartbeat_future.cancel()
self.reader = None
self.writer = None
if self.state != 'disconnected':
self.reconnect()
async def reconnect_async(self):
await self.connect_async()
def reconnect(self):
assert(self.state == 'connected')
assert(self.reconnect_future == None)
_logger.info('Telemetry reconnecting')
self.state = 'reconnecting'
self.reconnect_future = asyncio.ensure_future(self.reconnect_async())
self.reconnect_future.add_done_callback(self.on_reconnect_done)
return self.reconnect_future
def on_reconnect_done(self, f):
self.reconnect_future = None
def disconnect(self):
assert(self.state in ['connecting', 'connected', 'reconnecting'])
self.state = 'disconnected'
if self.connect_future != None:
self.connect_future.cancel()
if self.reconnect_future != None:
self.reconnect_future.cancel()
if self.work_future != None:
self.work_future.cancel()
if self.heartbeat_future != None:
self.heartbeat_future.cancel()
if self.writer != None:
self.writer.close()
def write_message(self, msg):
data = msg.SerializeToString()
data = len(data).to_bytes(4, byteorder = 'little') + data
self.writer.write(data)
class UDPServerManager(object):
def __init__(self, channel, token, host, port):
self.channel = channel
self.token = token
self.host = host
self.port = port
self.transport = None
self.protocol = None
self.state = 'idle'
self.ee = channel.ee
def protocol_factory(self):
return UDPServer(self, self.token)
async def create_endpoint_async(self):
assert(self.state in ['idle', 'closed'])
self.state = 'opening'
loop = asyncio.get_event_loop()
transport, protocol = await loop.create_datagram_endpoint(
self.protocol_factory, local_addr=(self.host, self.port))
self.transport = transport
self.protocol = protocol
self.state = 'opened'
_logger.info('Telemetry receiver listening at {0}:{1}'.format(self.host, self.port))
def close(self):
assert(self.state in ['opening', 'opened'])
_logger.info('Telemetry receiver is closing')
self.state = 'closed'
if self.transport != None:
self.transport.close()
        self.transport = None
        self.protocol = None
class DataChannel(object):
def __init__(self, udp_port, tcp_host, tcp_port):
self.ee = pyee.EventEmitter()
self.udp_token = random.randint(0, 0x6FFFFFFF)
self.udp_port = udp_port
self.tcp = TCPClientManager(self, tcp_host, tcp_port)
self.udp = UDPServerManager(self, self.udp_token, '0.0.0.0', udp_port)
self.udp_receive_counter = 0
self.udp_discard_counter = 0
self.ee.on('tcp_connected', self.on_tcp_connected)
self.ee.on('tcp_received_message', self.on_tcp_received_message)
self.ee.on('udp_received_message', self.on_udp_received_message)
self.ee.on('udp_discarded_message', self.on_udp_discarded_message)
async def udp_analytics_async(self):
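        # once per second, emit total and per-second counts of received and discarded UDP packets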
last_receive = 0
last_discard = 0
while True:
await asyncio.sleep(1)
delta_receive = self.udp_receive_counter - last_receive
delta_discard = self.udp_discard_counter - last_discard
last_receive = self.udp_receive_counter
last_discard = self.udp_discard_counter
self.ee.emit('udp_analytics_tick', {
'receive_all': last_receive,
'discard_all': last_discard,
'receive_tick': delta_receive,
'discard_tick': delta_discard})
def on_udp_analytics_done(self, future):
self.udp_analytics_future = None
async def start_async(self):
_logger.info('Starting telemetry channel')
self.udp_analytics_future = asyncio.ensure_future(self.udp_analytics_async())
self.udp_analytics_future.add_done_callback(self.on_udp_analytics_done)
await self.udp.create_endpoint_async()
await self.tcp.connect()
_logger.info('Telemetry channel started')
def stop(self):
_logger.info('Stopping telemetry channel')
if self.udp_analytics_future != None:
self.udp_analytics_future.cancel()
self.tcp.disconnect()
self.udp.close()
def on_tcp_connected(self):
self.udp.protocol.sn = 0
msg = fsx_pb2.TcpRequestMessage()
msg.msgType = fsx_pb2.TcpRequestMessage.MSG_TYPE_SET_CONFIG
msg.setConfigBody.udpPort = self.udp_port
msg.setConfigBody.udpToken = self.udp_token
self.tcp.write_message(msg)
def on_tcp_received_message(self, msg):
if msg.success != True:
_logger.error('Telemetry command failed')
def on_udp_received_message(self, msg):
self.udp_receive_counter = self.udp_receive_counter + 1
def on_udp_discarded_message(self):
self.udp_discard_counter = self.udp_discard_counter + 1
| [
"logging.getLogger",
"pyee.EventEmitter",
"asyncio.sleep",
"plugins.input_fsx.fsx_pb2.TcpRequestMessage",
"plugins.input_fsx.fsx_pb2.TcpResponseMessage",
"asyncio.open_connection",
"asyncio.wait_for",
"plugins.input_fsx.fsx_pb2.UdpResponseMessage",
"asyncio.get_event_loop",
"time.time",
"random.... | [((150, 177), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (167, 177), False, 'import logging\n'), ((5506, 5530), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5528, 5530), False, 'import asyncio\n'), ((6189, 6208), 'pyee.EventEmitter', 'pyee.EventEmitter', ([], {}), '()\n', (6206, 6208), False, 'import pyee\n'), ((6230, 6259), 'random.randint', 'random.randint', (['(0)', '(1879048191)'], {}), '(0, 1879048191)\n', (6244, 6259), False, 'import random\n'), ((7993, 8020), 'plugins.input_fsx.fsx_pb2.TcpRequestMessage', 'fsx_pb2.TcpRequestMessage', ([], {}), '()\n', (8018, 8020), False, 'from plugins.input_fsx import fsx_pb2\n'), ((472, 500), 'plugins.input_fsx.fsx_pb2.UdpResponseMessage', 'fsx_pb2.UdpResponseMessage', ([], {}), '()\n', (498, 500), False, 'from plugins.input_fsx import fsx_pb2\n'), ((2926, 2953), 'plugins.input_fsx.fsx_pb2.TcpRequestMessage', 'fsx_pb2.TcpRequestMessage', ([], {}), '()\n', (2951, 2953), False, 'from plugins.input_fsx import fsx_pb2\n'), ((1701, 1746), 'asyncio.open_connection', 'asyncio.open_connection', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (1724, 1746), False, 'import asyncio\n'), ((2896, 2913), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (2909, 2913), False, 'import asyncio\n'), ((3049, 3060), 'time.time', 'time.time', ([], {}), '()\n', (3058, 3060), False, 'import time\n'), ((3413, 3441), 'plugins.input_fsx.fsx_pb2.TcpResponseMessage', 'fsx_pb2.TcpResponseMessage', ([], {}), '()\n', (3439, 3441), False, 'from plugins.input_fsx import fsx_pb2\n'), ((6862, 6878), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (6875, 6878), False, 'import asyncio\n'), ((1778, 1813), 'asyncio.wait_for', 'asyncio.wait_for', (['future'], {'timeout': '(3)'}), '(future, timeout=3)\n', (1794, 1813), False, 'import asyncio\n'), ((2425, 2454), 'asyncio.sleep', 'asyncio.sleep', (['self.retry_sec'], {}), '(self.retry_sec)\n', (2438, 2454), False, 'import asyncio\n')] |
from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField
from simtk import openmm, unit
from scipy.stats import distributions
import copy
import numpy as np
import os
from smt.sampling_methods import LHS
def vary_parameters_lhc(filename, num_samples, output_directory):
forcefield = ForceField(filename, allow_cosmetic_attributes=True)
lj_params = forcefield.get_parameter_handler('vdW', allow_cosmetic_attributes=True)
smirks_types_to_change = ['[#6X4:1]', '[#1:1]-[#6X4]', '[#8X2H1+0:1]', '[#1:1]-[#8]']
param_range = np.asarray([0.75, 1.25])
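    # two scaling factors per SMIRKS pattern (one for epsilon, one for rmin_half), each drawn from [0.75, 1.25]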
n_dim = len(smirks_types_to_change) * 2
lj_sample_ranges = []
for i in range(n_dim):
lj_sample_ranges.append(param_range)
lj_sample_ranges = np.asarray(lj_sample_ranges)
sampling = LHS(xlimits=lj_sample_ranges)
values = sampling(num_samples)
os.makedirs(output_directory,exist_ok=True)
for i, value in enumerate(values):
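        # each Latin hypercube sample becomes one perturbed copy of the force field, written to its own numbered subdirectory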
reshape_values = value.reshape((int(n_dim/2), 2))
counter = 0
for lj in lj_params:
if lj.smirks in smirks_types_to_change:
lj.epsilon *= reshape_values[counter, 0]
lj.rmin_half *= reshape_values[counter, 1]
counter += 1
os.makedirs(os.path.join(output_directory,str(i+1)))
ff_name = 'force-field.offxml'
forcefield.to_file(os.path.join(output_directory, str(i+1),ff_name))
| [
"smt.sampling_methods.LHS",
"numpy.asarray",
"os.makedirs",
"openff.toolkit.typing.engines.smirnoff.forcefield.ForceField"
] | [((305, 357), 'openff.toolkit.typing.engines.smirnoff.forcefield.ForceField', 'ForceField', (['filename'], {'allow_cosmetic_attributes': '(True)'}), '(filename, allow_cosmetic_attributes=True)\n', (315, 357), False, 'from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField\n'), ((556, 580), 'numpy.asarray', 'np.asarray', (['[0.75, 1.25]'], {}), '([0.75, 1.25])\n', (566, 580), True, 'import numpy as np\n'), ((746, 774), 'numpy.asarray', 'np.asarray', (['lj_sample_ranges'], {}), '(lj_sample_ranges)\n', (756, 774), True, 'import numpy as np\n'), ((790, 819), 'smt.sampling_methods.LHS', 'LHS', ([], {'xlimits': 'lj_sample_ranges'}), '(xlimits=lj_sample_ranges)\n', (793, 819), False, 'from smt.sampling_methods import LHS\n'), ((859, 903), 'os.makedirs', 'os.makedirs', (['output_directory'], {'exist_ok': '(True)'}), '(output_directory, exist_ok=True)\n', (870, 903), False, 'import os\n')] |
import argparse
from collections import defaultdict
import pickle
import re
import lightgbm as lgb
import pandas as pd
import numpy as np
import xgboost as xgb
from ..data_utils import SEG_FP, get_encoded_classes
from ..utils import print_metrics
from ..metric import get_metrics
from .blend import (
score_predictions_by_image_id, submission_from_predictions_by_image_id)
def main():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('detailed_then_features', nargs='+',
help='detailed dataframes and the features in the same order')
arg('--use-xgb', type=int, default=1)
arg('--use-lgb', type=int, default=1)
arg('--num-boost-round', type=int, default=400)
arg('--lr', type=float, default=0.05, help='for lightgbm')
arg('--eta', type=float, default=0.15, help='for xgboost')
arg('--save-model')
arg('--load-model')
arg('--output')
arg('--n-folds', type=int, default=5)
arg('--seg-fp-adjust', type=float)
args = parser.parse_args()
if len(args.detailed_then_features) % 2 != 0:
parser.error('number of detailed and features must be equal')
n = len(args.detailed_then_features) // 2
detailed_paths, feature_paths = (args.detailed_then_features[:n],
args.detailed_then_features[n:])
if args.output:
if not args.load_model:
parser.error('--output needs --load-model')
elif len(feature_paths) == 1:
parser.error('need more than one feature df for train/valid split')
print('\n'.join(
f'{f} | {d}' for f, d in zip(detailed_paths, feature_paths)))
detailed_dfs = [pd.read_csv(path) for path in detailed_paths]
feature_dfs = [pd.read_csv(path) for path in feature_paths]
valid_df = feature_dfs[0]
assert valid_df.columns[0] == 'item'
assert valid_df.columns[-1] == 'y'
feature_cols = [
col for col in valid_df.columns[1:-1] if col not in {
'width', 'height', 'aspect',
'candidate_count', 'candidate_count_on_page',
'candidate_freq_on_page',
}]
    top_cls_re = re.compile(r'^top_\d+_cls')
def build_features(df):
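        # keep the selected feature columns and flag whether each top-k class matches the candidate class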
df = df[feature_cols].copy()
for col in feature_cols:
if top_cls_re.match(col):
df[f'{col}_is_candidate'] = df[col] == df['candidate_cls']
# del df[col]
print(' '.join(df.columns))
return df
classes = get_encoded_classes()
cls_by_idx = {idx: cls for cls, idx in classes.items()}
cls_by_idx[-1] = SEG_FP
y_preds = []
all_metrics = []
for fold_num in range(args.n_folds):
print(f'fold {fold_num}')
detailed = (detailed_dfs[fold_num if len(detailed_dfs) != 1 else 0]
.copy())
valid_df = feature_dfs[fold_num if len(feature_dfs) != 1 else 0].copy()
valid_features = build_features(valid_df)
xgb_valid_data = xgb.DMatrix(valid_features, label=valid_df['y'])
fold_path = lambda path, kind: f'{path}.{kind}.fold{fold_num}'
if args.load_model:
lgb_load_path = (fold_path(args.load_model, 'lgb')
if args.use_lgb else None)
xgb_load_path = (fold_path(args.load_model, 'xgb')
if args.use_xgb else None)
print(f'loading from {lgb_load_path}, {xgb_load_path}')
if lgb_load_path:
lgb_model = lgb.Booster(model_file=lgb_load_path)
if xgb_load_path:
with open(xgb_load_path, 'rb') as f:
xgb_model = pickle.load(f)
else:
train_df = pd.concat([df for i, df in enumerate(feature_dfs)
if i != fold_num])
train_features = build_features(train_df)
if args.use_lgb:
lgb_model = train_lgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
lr=args.lr,
num_boost_round=args.num_boost_round)
if args.use_xgb:
xgb_model = train_xgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
eta=args.eta,
num_boost_round=args.num_boost_round)
if args.save_model:
lgb_save_path = (fold_path(args.save_model, 'lgb')
if args.use_lgb else None)
xgb_save_path = (fold_path(args.save_model, 'xgb')
if args.use_xgb else None)
print(f'saving to {lgb_save_path}, {xgb_save_path}')
if lgb_save_path:
lgb_model.save_model(
lgb_save_path, num_iteration=lgb_model.best_iteration)
if xgb_save_path:
with open(xgb_save_path, 'wb') as f:
pickle.dump(xgb_model, f)
print('prediction')
predictions = []
if args.use_lgb:
predictions.append(lgb_model.predict(
valid_features, num_iteration=lgb_model.best_iteration))
if args.use_xgb:
predictions.append(xgb_model.predict(
xgb_valid_data, ntree_limit=xgb_model.best_ntree_limit))
valid_df['y_pred'] = np.mean(predictions, axis=0)
if args.seg_fp_adjust:
valid_df.loc[valid_df['candidate_cls'] == -1, 'y_pred'] += \
args.seg_fp_adjust
y_preds.append(valid_df['y_pred'].values)
max_by_item = get_max_by_item(valid_df)
print('scoring')
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
print(f'SEG_FP ratio: {(detailed["pred"] == SEG_FP).mean():.5f}')
predictions_by_image_id = get_predictions_by_image_id(detailed)
if not args.output:
metrics = {
'accuracy': (detailed["pred"] == detailed["true"]).mean(),
}
metrics.update(
score_predictions_by_image_id(predictions_by_image_id))
print_metrics(metrics)
all_metrics.append(metrics)
if args.output:
valid_df['y_pred'] = np.mean(y_preds, axis=0)
max_by_item = get_max_by_item(valid_df)
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
predictions_by_image_id = get_predictions_by_image_id(detailed)
submission = submission_from_predictions_by_image_id(
predictions_by_image_id)
submission.to_csv(args.output, index=False)
else:
print('\nAll folds:')
print_metrics(get_metrics(all_metrics))
def train_lgb(train_features, train_y, valid_features, valid_y, *,
lr, num_boost_round):
train_data = lgb.Dataset(train_features, train_y)
valid_data = lgb.Dataset(valid_features, valid_y, reference=train_data)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'learning_rate': lr,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'feature_fraction': 0.9,
'min_data_in_leaf': 20,
'num_leaves': 41,
'scale_pos_weight': 1.2,
'lambda_l2': 1,
}
print(params)
return lgb.train(
params=params,
train_set=train_data,
num_boost_round=num_boost_round,
early_stopping_rounds=20,
valid_sets=[valid_data],
verbose_eval=10,
)
def train_xgb(train_features, train_y, valid_features, valid_y, *,
eta, num_boost_round):
train_data = xgb.DMatrix(train_features, label=train_y)
valid_data = xgb.DMatrix(valid_features, label=valid_y)
params = {
'eta': eta,
'objective': 'binary:logistic',
'gamma': 0.01,
'max_depth': 8,
}
print(params)
eval_list = [(valid_data, 'eval')]
return xgb.train(
params, train_data, num_boost_round, eval_list,
early_stopping_rounds=20,
verbose_eval=10,
)
def get_max_by_item(df):
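    # for each item, keep the candidate row with the highest predicted score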
return (df.iloc[df.groupby('item')['y_pred'].idxmax()]
.reset_index(drop=True))
def get_predictions_by_image_id(detailed):
predictions_by_image_id = defaultdict(list)
for item in detailed.itertuples():
if item.pred != SEG_FP:
predictions_by_image_id[item.image_id].append({
'cls': item.pred,
'center': (item.x + item.w / 2, item.y + item.h / 2),
})
return predictions_by_image_id
if __name__ == '__main__':
main()
| [
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"re.compile",
"xgboost.train",
"pandas.read_csv",
"lightgbm.train",
"lightgbm.Booster",
"pickle.load",
"lightgbm.Dataset",
"collections.defaultdict",
"xgboost.DMatrix"
] | [((406, 431), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (429, 431), False, 'import argparse\n'), ((2124, 2151), 're.compile', 're.compile', (['"""^top_\\\\d+_cls"""'], {}), "('^top_\\\\d+_cls')\n", (2134, 2151), False, 'import re\n'), ((6836, 6872), 'lightgbm.Dataset', 'lgb.Dataset', (['train_features', 'train_y'], {}), '(train_features, train_y)\n', (6847, 6872), True, 'import lightgbm as lgb\n'), ((6890, 6948), 'lightgbm.Dataset', 'lgb.Dataset', (['valid_features', 'valid_y'], {'reference': 'train_data'}), '(valid_features, valid_y, reference=train_data)\n', (6901, 6948), True, 'import lightgbm as lgb\n'), ((7303, 7459), 'lightgbm.train', 'lgb.train', ([], {'params': 'params', 'train_set': 'train_data', 'num_boost_round': 'num_boost_round', 'early_stopping_rounds': '(20)', 'valid_sets': '[valid_data]', 'verbose_eval': '(10)'}), '(params=params, train_set=train_data, num_boost_round=\n num_boost_round, early_stopping_rounds=20, valid_sets=[valid_data],\n verbose_eval=10)\n', (7312, 7459), True, 'import lightgbm as lgb\n'), ((7629, 7671), 'xgboost.DMatrix', 'xgb.DMatrix', (['train_features'], {'label': 'train_y'}), '(train_features, label=train_y)\n', (7640, 7671), True, 'import xgboost as xgb\n'), ((7689, 7731), 'xgboost.DMatrix', 'xgb.DMatrix', (['valid_features'], {'label': 'valid_y'}), '(valid_features, label=valid_y)\n', (7700, 7731), True, 'import xgboost as xgb\n'), ((7928, 8032), 'xgboost.train', 'xgb.train', (['params', 'train_data', 'num_boost_round', 'eval_list'], {'early_stopping_rounds': '(20)', 'verbose_eval': '(10)'}), '(params, train_data, num_boost_round, eval_list,\n early_stopping_rounds=20, verbose_eval=10)\n', (7937, 8032), True, 'import xgboost as xgb\n'), ((8258, 8275), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8269, 8275), False, 'from collections import defaultdict\n'), ((1656, 1673), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1667, 1673), True, 'import pandas as pd\n'), ((1721, 1738), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1732, 1738), True, 'import pandas as pd\n'), ((2946, 2994), 'xgboost.DMatrix', 'xgb.DMatrix', (['valid_features'], {'label': "valid_df['y']"}), "(valid_features, label=valid_df['y'])\n", (2957, 2994), True, 'import xgboost as xgb\n'), ((5325, 5353), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (5332, 5353), True, 'import numpy as np\n'), ((6230, 6254), 'numpy.mean', 'np.mean', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (6237, 6254), True, 'import numpy as np\n'), ((3459, 3496), 'lightgbm.Booster', 'lgb.Booster', ([], {'model_file': 'lgb_load_path'}), '(model_file=lgb_load_path)\n', (3470, 3496), True, 'import lightgbm as lgb\n'), ((3612, 3626), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3623, 3626), False, 'import pickle\n'), ((4920, 4945), 'pickle.dump', 'pickle.dump', (['xgb_model', 'f'], {}), '(xgb_model, f)\n', (4931, 4945), False, 'import pickle\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-06 16:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128, verbose_name='Title')),
('text', models.TextField(verbose_name='Text')),
('visible', models.BooleanField(default=False, verbose_name='Visible')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Created time')),
('edit_time', models.DateTimeField(auto_now=True, verbose_name='Edit time')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-edit_time'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Text')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Created time')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('blog', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Blog')),
('problem', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='problem.Problem')),
],
options={
'ordering': ['-create_time'],
},
),
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((312, 369), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (343, 369), False, 'from django.db import migrations, models\n'), ((535, 628), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (551, 628), False, 'from django.db import migrations, models\n'), ((653, 707), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""Title"""'}), "(max_length=128, verbose_name='Title')\n", (669, 707), False, 'from django.db import migrations, models\n'), ((735, 772), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Text"""'}), "(verbose_name='Text')\n", (751, 772), False, 'from django.db import migrations, models\n'), ((803, 861), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Visible"""'}), "(default=False, verbose_name='Visible')\n", (822, 861), False, 'from django.db import migrations, models\n'), ((896, 964), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Created time"""'}), "(auto_now_add=True, verbose_name='Created time')\n", (916, 964), False, 'from django.db import migrations, models\n'), ((997, 1058), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""Edit time"""'}), "(auto_now=True, verbose_name='Edit time')\n", (1017, 1058), False, 'from django.db import migrations, models\n'), ((1088, 1184), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1105, 1184), False, 'from django.db import migrations, models\n'), ((1393, 1486), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1409, 1486), False, 'from django.db import migrations, models\n'), ((1510, 1547), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Text"""'}), "(verbose_name='Text')\n", (1526, 1547), False, 'from django.db import migrations, models\n'), ((1582, 1650), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Created time"""'}), "(auto_now_add=True, verbose_name='Created time')\n", (1602, 1650), False, 'from django.db import migrations, models\n'), ((1680, 1776), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1697, 1776), False, 'from django.db import migrations, models\n'), ((1799, 1892), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""blog.Blog"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='blog.Blog')\n", (1816, 1892), False, 'from django.db import migrations, models\n'), ((1919, 2018), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': 
'(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""problem.Problem"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='problem.Problem')\n", (1936, 2018), False, 'from django.db import migrations, models\n')] |
import requests
from bs4 import BeautifulSoup
# heard you are supposed to leave two blank lines here, and one space after the "#" in a comment
# search
def search_title(html):
key = input("請輸入關鍵字:") # input key words
starttag = html.find(key) # the index of word user wants
# I got it!
if starttag != -1:
        # after the keyword is found, grab the characters that follow it up to the next tag
for j in range(starttag, len(html)):
if html[j].find("<") != -1:
print("標題止於", j)
                # we now have the keyword plus the rest of the title after it; next, find the part of the title before the keyword
for k in range(starttag, 0, -1):
if html[k].find(">") != -1: # 驀然回首,那人卻在燈火闌珊處
print("標題始於", k)
# 都找到了,印出來吧
for i in range(k, j+1):
print(html[i], end="")
break
break
return html.find(key)
else:
return "NULL"
"""
http://blog.castman.net/%E6%95%99%E5%AD%B8/2018/01/27/python-name-main.html
Keeps this script from running its demo code when it is imported by another module.
__name__ holds the module name: it equals the module's name when imported, and becomes __main__ when the file is run directly.
That lets the if-statement below tell a direct run apart from an import.
"""
if __name__ == '__main__':
res = requests.get('https://www.pcstore.com.tw/')
    res.encoding = 'big5' # the site is served with Big5 encoding
print("\n關鍵字位於", search_title(res.text))
| [
"requests.get"
] | [((1055, 1098), 'requests.get', 'requests.get', (['"""https://www.pcstore.com.tw/"""'], {}), "('https://www.pcstore.com.tw/')\n", (1067, 1098), False, 'import requests\n')] |
# Generated by Django 3.0.8 on 2020-07-19 17:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phone_field.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=100)),
('hood_name', models.CharField(max_length=100)),
('population', models.PositiveIntegerField(null=True)),
('user', models.ForeignKey(default=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prof_pic', models.ImageField(blank=True, upload_to='images/')),
('bio', models.CharField(max_length=250, null=True)),
('email', models.EmailField(max_length=100)),
('date_joined', models.DateTimeField(auto_now_add=True)),
('neighborhood', models.ForeignKey(default=2, null=True, on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Police',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('station_name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=100)),
('tel', phone_field.models.PhoneField(blank=True, help_text='Police Station Phone Number', max_length=31)),
('neighborhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
],
),
migrations.CreateModel(
name='Notice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notice_title', models.CharField(max_length=100, null=True)),
('notice_pic', models.ImageField(blank=True, null=True, upload_to='images/')),
('notice_details', models.CharField(max_length=250, null=True)),
('post_date', models.DateField(auto_now_add=True, null=True)),
('neighborhood', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='HealthCenter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hospital_name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=100)),
('tel', phone_field.models.PhoneField(blank=True, help_text='Hospital Phone Number', max_length=31)),
('neighborhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
],
),
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bsns_name', models.CharField(max_length=250)),
('bsns_email', models.EmailField(max_length=100)),
('neighborhood', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"django.db.models.EmailField",
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.ImageField",
"django.db.migrations.swappable_depend... | [((273, 330), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (304, 330), False, 'from django.db import migrations, models\n'), ((467, 560), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (483, 560), False, 'from django.db import migrations, models\n'), ((588, 620), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (604, 620), False, 'from django.db import migrations, models\n'), ((653, 685), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (669, 685), False, 'from django.db import migrations, models\n'), ((719, 757), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (746, 757), False, 'from django.db import migrations, models\n'), ((785, 894), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(default=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (802, 894), False, 'from django.db import migrations, models\n'), ((1023, 1116), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1039, 1116), False, 'from django.db import migrations, models\n'), ((1144, 1194), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '"""images/"""'}), "(blank=True, upload_to='images/')\n", (1161, 1194), False, 'from django.db import migrations, models\n'), ((1221, 1264), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)'}), '(max_length=250, null=True)\n', (1237, 1264), False, 'from django.db import migrations, models\n'), ((1293, 1326), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1310, 1326), False, 'from django.db import migrations, models\n'), ((1361, 1400), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1381, 1400), False, 'from django.db import migrations, models\n'), ((1436, 1550), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(2)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""Posts.Neighborhood"""'}), "(default=2, null=True, on_delete=django.db.models.deletion\n .CASCADE, to='Posts.Neighborhood')\n", (1453, 1550), False, 'from django.db import migrations, models\n'), ((1573, 1672), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL)\n', (1593, 1672), False, 'from django.db import migrations, models\n'), ((1799, 1892), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, 
serialize=False,\n verbose_name='ID')\n", (1815, 1892), False, 'from django.db import migrations, models\n'), ((1924, 1956), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1940, 1956), False, 'from django.db import migrations, models\n'), ((1985, 2018), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2002, 2018), False, 'from django.db import migrations, models\n'), ((2178, 2270), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""Posts.Neighborhood"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'Posts.Neighborhood')\n", (2195, 2270), False, 'from django.db import migrations, models\n'), ((2397, 2490), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2413, 2490), False, 'from django.db import migrations, models\n'), ((2522, 2565), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (2538, 2565), False, 'from django.db import migrations, models\n'), ((2599, 2660), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""images/"""'}), "(blank=True, null=True, upload_to='images/')\n", (2616, 2660), False, 'from django.db import migrations, models\n'), ((2698, 2741), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)'}), '(max_length=250, null=True)\n', (2714, 2741), False, 'from django.db import migrations, models\n'), ((2774, 2820), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (2790, 2820), False, 'from django.db import migrations, models\n'), ((2856, 2958), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""Posts.Neighborhood"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='Posts.Neighborhood')\n", (2873, 2958), False, 'from django.db import migrations, models\n'), ((2982, 3088), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (2999, 3088), False, 'from django.db import migrations, models\n'), ((3222, 3315), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3238, 3315), False, 'from django.db import migrations, models\n'), ((3348, 3380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3364, 3380), False, 'from django.db import migrations, models\n'), ((3409, 3442), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3426, 3442), False, 'from django.db import migrations, models\n'), ((3596, 3688), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 
'"""Posts.Neighborhood"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'Posts.Neighborhood')\n", (3613, 3688), False, 'from django.db import migrations, models\n'), ((3817, 3910), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3833, 3910), False, 'from django.db import migrations, models\n'), ((3939, 3971), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (3955, 3971), False, 'from django.db import migrations, models\n'), ((4005, 4038), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4022, 4038), False, 'from django.db import migrations, models\n'), ((4074, 4176), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""Posts.Neighborhood"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='Posts.Neighborhood')\n", (4091, 4176), False, 'from django.db import migrations, models\n'), ((4200, 4306), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (4217, 4306), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
"""
pip_services3_container.build.DefaultContainerFactory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default container factory implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services3_commons.refer import Descriptor
from pip_services3_components.build import CompositeFactory
from pip_services3_components.log import DefaultLoggerFactory
from pip_services3_components.count import DefaultCountersFactory
from pip_services3_components.config import DefaultConfigReaderFactory
from pip_services3_components.cache import DefaultCacheFactory
from pip_services3_components.auth import DefaultCredentialStoreFactory
from pip_services3_components.connect import DefaultDiscoveryFactory
from pip_services3_components.info._DefaultInfoFactory import DefaultInfoFactory
class DefaultContainerFactory(CompositeFactory):
"""
Creates default container components (loggers, counters, caches, locks, etc.) by their descriptors.
"""
DefaultContainerFactoryDescriptor = Descriptor(
"pip-services", "factory", "container", "default", "1.0"
)
def __init__(self, *factories):
"""
Create a new instance of the factory and sets nested factories.
:param factories: a list of nested factories
"""
super(DefaultContainerFactory, self).__init__(factories)
self.add(DefaultInfoFactory())
self.add(DefaultLoggerFactory())
self.add(DefaultCountersFactory())
self.add(DefaultConfigReaderFactory())
self.add(DefaultCacheFactory())
self.add(DefaultCredentialStoreFactory())
self.add(DefaultDiscoveryFactory())
| [
"pip_services3_components.log.DefaultLoggerFactory",
"pip_services3_components.connect.DefaultDiscoveryFactory",
"pip_services3_components.count.DefaultCountersFactory",
"pip_services3_components.info._DefaultInfoFactory.DefaultInfoFactory",
"pip_services3_components.cache.DefaultCacheFactory",
"pip_servi... | [((1147, 1215), 'pip_services3_commons.refer.Descriptor', 'Descriptor', (['"""pip-services"""', '"""factory"""', '"""container"""', '"""default"""', '"""1.0"""'], {}), "('pip-services', 'factory', 'container', 'default', '1.0')\n", (1157, 1215), False, 'from pip_services3_commons.refer import Descriptor\n'), ((1499, 1519), 'pip_services3_components.info._DefaultInfoFactory.DefaultInfoFactory', 'DefaultInfoFactory', ([], {}), '()\n', (1517, 1519), False, 'from pip_services3_components.info._DefaultInfoFactory import DefaultInfoFactory\n'), ((1538, 1560), 'pip_services3_components.log.DefaultLoggerFactory', 'DefaultLoggerFactory', ([], {}), '()\n', (1558, 1560), False, 'from pip_services3_components.log import DefaultLoggerFactory\n'), ((1579, 1603), 'pip_services3_components.count.DefaultCountersFactory', 'DefaultCountersFactory', ([], {}), '()\n', (1601, 1603), False, 'from pip_services3_components.count import DefaultCountersFactory\n'), ((1622, 1650), 'pip_services3_components.config.DefaultConfigReaderFactory', 'DefaultConfigReaderFactory', ([], {}), '()\n', (1648, 1650), False, 'from pip_services3_components.config import DefaultConfigReaderFactory\n'), ((1669, 1690), 'pip_services3_components.cache.DefaultCacheFactory', 'DefaultCacheFactory', ([], {}), '()\n', (1688, 1690), False, 'from pip_services3_components.cache import DefaultCacheFactory\n'), ((1709, 1740), 'pip_services3_components.auth.DefaultCredentialStoreFactory', 'DefaultCredentialStoreFactory', ([], {}), '()\n', (1738, 1740), False, 'from pip_services3_components.auth import DefaultCredentialStoreFactory\n'), ((1759, 1784), 'pip_services3_components.connect.DefaultDiscoveryFactory', 'DefaultDiscoveryFactory', ([], {}), '()\n', (1782, 1784), False, 'from pip_services3_components.connect import DefaultDiscoveryFactory\n')] |
"""
Msg based protocol handler
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
from collections import deque
from vmware.vapi.lib.log import get_vapi_logger
from vmware.vapi.protocol.server.api_handler import ApiHandler, AsyncApiHandler
from vmware.vapi.protocol.server.transport.async_protocol_handler import AsyncProtocolHandler
logger = get_vapi_logger(__name__)
def get_async_api_handler(api_handler):
"""
get async api handler
:type api_handler: :class:`vmware.vapi.protocol.server.api_handler.ApiHandler`
:param api_handler: api handler instance
:rtype: :class:`vmware.vapi.protocol.server.async_api_handler_adapter.PooledAsyncApiHandlerAdapter`
:return: Threaded async api handler
"""
if isinstance(api_handler, ApiHandler):
from vmware.vapi.protocol.server.async_api_handler_adapter import PooledAsyncApiHandlerAdapter
from vmware.vapi.lib.workers_pool import get_workers_pool
workers_pool = get_workers_pool('api_handler')
api_handler = PooledAsyncApiHandlerAdapter(api_handler, workers_pool)
return api_handler
class MsgBasedProtocolHandler(AsyncProtocolHandler):
""" Message based protocol handler """
def __init__(self, api_handler):
"""
Message based protocol handler init
:type api_handler: :class:`vmware.vapi.protocol.server.api_handler.ApiHandler`
:param api_handler: api handler instance
"""
AsyncProtocolHandler.__init__(self)
assert(api_handler)
self.api_handler = get_async_api_handler(api_handler)
## Begin AsyncProtocolHandler interface
def get_data_handler(self, connection):
data_handler = self.DataHandler(self, connection)
return data_handler
## End AsyncProtocolHandler interface
class DataHandler(AsyncProtocolHandler.DataHandler):
""" Message based protocol data handler """
def __init__(self, parent, connection):
""" Message based protocol data handler init """
AsyncProtocolHandler.DataHandler.__init__(self)
self.parent = parent
self.connection = connection
self.data = deque()
## Begin AsyncProtocolHandler.DataHandler interface
def data_ready(self, data):
if data:
self.data.append(data)
def data_end(self):
connection = self.connection
def state_change_cb(*args, **kwargs):
""" state change callback """
self.request_state_change(connection, *args, **kwargs)
self.parent.api_handler.async_handle_request(
b''.join(self.data), state_change_cb)
self._cleanup()
def data_abort(self):
self._cleanup()
# Used to throttle the lower layer from sending more data
def can_read(self):
# TODO: Throttle if needed
return True
## End AsyncProtocolHandler.DataHandler interface
def request_state_change(self, connection, state, response=None): # pylint: disable=R0201
"""
request state changed
:type connection: :class:`file`
:param connection: response connection
:type state: :class:`int`
:param state: refer to :class:`vmware.vapi.protocol.server.api_handler.\
AsyncApiHandler.async_handle_request` state_change_cb
:type response: :class:`object`
:param response: refer to :class:`vmware.vapi.protocol.server.api_handler.\
AsyncApiHandler.async_handle_request` state_change_cb
"""
if state in AsyncApiHandler.END_STATES:
# Reached one of the end state
try:
if state == AsyncApiHandler.SUCCESS:
try:
connection.write(response)
except Exception as err:
# Connection closed
logger.error('write: Failed to write %s', err)
elif state == AsyncApiHandler.ERROR:
if response is None:
response = Exception("Error")
raise response # pylint: disable=E0702
elif state == AsyncApiHandler.CANCELLED:
# Cancelled
pass
else:
# Unexpected state
raise NotImplementedError('Unexpected state %d' % state)
finally:
connection.close() # Close the virtual connection
connection = None
else:
# Transition state change
pass
def _cleanup(self):
""" Cleanup """
self.data = None
self.connection = None
self.parent = None
def __del__(self):
self._cleanup()
| [
"collections.deque",
"vmware.vapi.lib.log.get_vapi_logger",
"vmware.vapi.lib.workers_pool.get_workers_pool",
"vmware.vapi.protocol.server.async_api_handler_adapter.PooledAsyncApiHandlerAdapter",
"vmware.vapi.protocol.server.transport.async_protocol_handler.AsyncProtocolHandler.__init__",
"vmware.vapi.prot... | [((458, 483), 'vmware.vapi.lib.log.get_vapi_logger', 'get_vapi_logger', (['__name__'], {}), '(__name__)\n', (473, 483), False, 'from vmware.vapi.lib.log import get_vapi_logger\n'), ((1079, 1110), 'vmware.vapi.lib.workers_pool.get_workers_pool', 'get_workers_pool', (['"""api_handler"""'], {}), "('api_handler')\n", (1095, 1110), False, 'from vmware.vapi.lib.workers_pool import get_workers_pool\n'), ((1133, 1188), 'vmware.vapi.protocol.server.async_api_handler_adapter.PooledAsyncApiHandlerAdapter', 'PooledAsyncApiHandlerAdapter', (['api_handler', 'workers_pool'], {}), '(api_handler, workers_pool)\n', (1161, 1188), False, 'from vmware.vapi.protocol.server.async_api_handler_adapter import PooledAsyncApiHandlerAdapter\n'), ((1560, 1595), 'vmware.vapi.protocol.server.transport.async_protocol_handler.AsyncProtocolHandler.__init__', 'AsyncProtocolHandler.__init__', (['self'], {}), '(self)\n', (1589, 1595), False, 'from vmware.vapi.protocol.server.transport.async_protocol_handler import AsyncProtocolHandler\n'), ((2136, 2183), 'vmware.vapi.protocol.server.transport.async_protocol_handler.AsyncProtocolHandler.DataHandler.__init__', 'AsyncProtocolHandler.DataHandler.__init__', (['self'], {}), '(self)\n', (2177, 2183), False, 'from vmware.vapi.protocol.server.transport.async_protocol_handler import AsyncProtocolHandler\n'), ((2282, 2289), 'collections.deque', 'deque', ([], {}), '()\n', (2287, 2289), False, 'from collections import deque\n')] |
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of excluding certain test records from the output callbacks.
In this case, we exclude tests which were aborted before a DUT ID was set, since
they are unlikely to contain any useful information. Note that "abort" refers to
a KeyboardInterrupt. If any other error occurs before the DUT ID is set, those
records are not excluded, since they may be relevant for debugging.
It may make sense to implement this check if your hardware tests follow the
common pattern of waiting for the DUT ID to be entered via a prompt at the test
start.
"""
import openhtf as htf
from openhtf.output.callbacks import json_factory
from openhtf.core import test_record
from openhtf.plugs import user_input
from openhtf.util import console_output
DEFAULT_DUT_ID = '<UNSET_DUT_ID>'
class CustomOutputToJSON(json_factory.OutputToJSON):
def __call__(self, record):
if (record.outcome == test_record.Outcome.ABORTED
and record.dut_id == DEFAULT_DUT_ID):
console_output.cli_print(
'Test was aborted at test start. Skipping output to JSON.')
else:
console_output.cli_print('Outputting test record to JSON.')
super(CustomOutputToJSON, self).__call__(record)
@htf.plug(user=user_input.UserInput)
def HelloWorldPhase(test, user):
test.logger.info('Hello World!')
user.prompt('The DUT ID is `%s`. Press enter to continue.' %
test.test_record.dut_id)
def main():
test = htf.Test(HelloWorldPhase)
test.configure(default_dut_id=DEFAULT_DUT_ID)
test.add_output_callbacks(
CustomOutputToJSON('./{dut_id}.hello_world.json', indent=2))
test.execute(test_start=user_input.prompt_for_test_start())
if __name__ == '__main__':
main()
| [
"openhtf.plugs.user_input.prompt_for_test_start",
"openhtf.util.console_output.cli_print",
"openhtf.Test",
"openhtf.plug"
] | [((1788, 1823), 'openhtf.plug', 'htf.plug', ([], {'user': 'user_input.UserInput'}), '(user=user_input.UserInput)\n', (1796, 1823), True, 'import openhtf as htf\n'), ((2017, 2042), 'openhtf.Test', 'htf.Test', (['HelloWorldPhase'], {}), '(HelloWorldPhase)\n', (2025, 2042), True, 'import openhtf as htf\n'), ((1558, 1647), 'openhtf.util.console_output.cli_print', 'console_output.cli_print', (['"""Test was aborted at test start. Skipping output to JSON."""'], {}), "(\n 'Test was aborted at test start. Skipping output to JSON.')\n", (1582, 1647), False, 'from openhtf.util import console_output\n'), ((1670, 1729), 'openhtf.util.console_output.cli_print', 'console_output.cli_print', (['"""Outputting test record to JSON."""'], {}), "('Outputting test record to JSON.')\n", (1694, 1729), False, 'from openhtf.util import console_output\n'), ((2213, 2247), 'openhtf.plugs.user_input.prompt_for_test_start', 'user_input.prompt_for_test_start', ([], {}), '()\n', (2245, 2247), False, 'from openhtf.plugs import user_input\n')] |
from rest_framework import viewsets, filters, mixins
from rest_framework.response import Response
from products import models
from products import serializers
class CategoryApiView(viewsets.ReadOnlyModelViewSet):
"""API View for Category"""
serializer_class = serializers.CategorySerializer
queryset = models.Category.objects.all()
class SubCategoryApiView(viewsets.ReadOnlyModelViewSet):
"""API View for SubCategory"""
serializer_class = serializers.SubCategorySerializer
queryset = models.SubCategory.objects.all().order_by('-created_on')
    def retrieve(self, request, pk):
category = models.Category.objects.filter(
name = pk
).first()
queryset = models.SubCategory.objects.filter(category = category.id)
values = [x.to_dict() for x in queryset]
return Response(values)
class ProductApiView(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet):
"""API View for Product"""
serializer_class = serializers.ProductSerializer
queryset = models.Product.objects.all().order_by('-created_on')
class ProductSubCategoryView(viewsets.ReadOnlyModelViewSet):
"""Products for a sub-category"""
serializer_class = serializers.ProductSerializer
queryset = models.Product.objects.all()
def retrieve(self, request, pk):
sub_category = models.SubCategory.objects.filter(
name = pk
).first()
if sub_category is None:
return Response("Error: Sub Category not found", 404)
queryset = models.Product.objects.filter(sub_category = sub_category.id)
values = [x.to_dict() for x in queryset]
return Response(values)
class ProductCategoryView(viewsets.ReadOnlyModelViewSet):
"""Product for a category"""
serializer_class = serializers.ProductSerializer
queryset = models.Product.objects.all()
def retrieve(self, request, pk):
category = models.Category.objects.filter(
name = pk
).first()
if category is None:
return Response("Error: Category Not Found", 404)
q = self.queryset.filter(
category = category.id
)
values = [x.to_dict() for x in q]
return Response(values) | [
"products.models.SubCategory.objects.filter",
"products.models.Product.objects.filter",
"products.models.Category.objects.all",
"rest_framework.response.Response",
"products.models.Category.objects.filter",
"products.models.SubCategory.objects.all",
"products.models.Product.objects.all"
] | [((323, 352), 'products.models.Category.objects.all', 'models.Category.objects.all', ([], {}), '()\n', (350, 352), False, 'from products import models\n'), ((1313, 1341), 'products.models.Product.objects.all', 'models.Product.objects.all', ([], {}), '()\n', (1339, 1341), False, 'from products import models\n'), ((1903, 1931), 'products.models.Product.objects.all', 'models.Product.objects.all', ([], {}), '()\n', (1929, 1931), False, 'from products import models\n'), ((726, 781), 'products.models.SubCategory.objects.filter', 'models.SubCategory.objects.filter', ([], {'category': 'category.id'}), '(category=category.id)\n', (759, 781), False, 'from products import models\n'), ((849, 865), 'rest_framework.response.Response', 'Response', (['values'], {}), '(values)\n', (857, 865), False, 'from rest_framework.response import Response\n'), ((1598, 1657), 'products.models.Product.objects.filter', 'models.Product.objects.filter', ([], {'sub_category': 'sub_category.id'}), '(sub_category=sub_category.id)\n', (1627, 1657), False, 'from products import models\n'), ((1724, 1740), 'rest_framework.response.Response', 'Response', (['values'], {}), '(values)\n', (1732, 1740), False, 'from rest_framework.response import Response\n'), ((2288, 2304), 'rest_framework.response.Response', 'Response', (['values'], {}), '(values)\n', (2296, 2304), False, 'from rest_framework.response import Response\n'), ((520, 552), 'products.models.SubCategory.objects.all', 'models.SubCategory.objects.all', ([], {}), '()\n', (550, 552), False, 'from products import models\n'), ((1090, 1118), 'products.models.Product.objects.all', 'models.Product.objects.all', ([], {}), '()\n', (1116, 1118), False, 'from products import models\n'), ((1531, 1577), 'rest_framework.response.Response', 'Response', (['"""Error: Sub Category not found"""', '(404)'], {}), "('Error: Sub Category not found', 404)\n", (1539, 1577), False, 'from rest_framework.response import Response\n'), ((2109, 2151), 'rest_framework.response.Response', 'Response', (['"""Error: Category Not Found"""', '(404)'], {}), "('Error: Category Not Found', 404)\n", (2117, 2151), False, 'from rest_framework.response import Response\n'), ((635, 674), 'products.models.Category.objects.filter', 'models.Category.objects.filter', ([], {'name': 'pk'}), '(name=pk)\n', (665, 674), False, 'from products import models\n'), ((1403, 1445), 'products.models.SubCategory.objects.filter', 'models.SubCategory.objects.filter', ([], {'name': 'pk'}), '(name=pk)\n', (1436, 1445), False, 'from products import models\n'), ((1989, 2028), 'products.models.Category.objects.filter', 'models.Category.objects.filter', ([], {'name': 'pk'}), '(name=pk)\n', (2019, 2028), False, 'from products import models\n')] |
"""Controller for the health endpoints."""
from fastapi import APIRouter
from pydantic import BaseModel
router = APIRouter()
class HealthCheckResponse(BaseModel):
"""Model for health check responses."""
online: bool
@router.get("", response_model=HealthCheckResponse, description="Health check endpoint")
def health() -> HealthCheckResponse:
"""Get the current status of the service."""
return HealthCheckResponse(online=True)
| [
"fastapi.APIRouter"
] | [((114, 125), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (123, 125), False, 'from fastapi import APIRouter\n')] |
# Generated by Django 3.2.5 on 2021-07-14 15:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0007_alter_childlist_child'),
]
operations = [
migrations.AlterField(
model_name='childprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='user_child', serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='Kullanıcı'),
),
migrations.AlterField(
model_name='parentprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='user_parent', serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='Kullanıcı'),
),
]
| [
"django.db.models.OneToOneField"
] | [((409, 600), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'related_name': '"""user_child"""', 'serialize': '(False)', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Kullanıcı"""'}), "(on_delete=django.db.models.deletion.CASCADE,\n primary_key=True, related_name='user_child', serialize=False, to=\n settings.AUTH_USER_MODEL, verbose_name='Kullanıcı')\n", (429, 600), False, 'from django.db import migrations, models\n'), ((718, 910), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'related_name': '"""user_parent"""', 'serialize': '(False)', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Kullanıcı"""'}), "(on_delete=django.db.models.deletion.CASCADE,\n primary_key=True, related_name='user_parent', serialize=False, to=\n settings.AUTH_USER_MODEL, verbose_name='Kullanıcı')\n", (738, 910), False, 'from django.db import migrations, models\n')] |
##
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import supybot.registry as registry
import supybot.ircutils as ircutils
class SupyConfTestCase(SupyTestCase):
def testJoinToOneChannel(self):
orig = conf.supybot.networks.test.channels()
channels = ircutils.IrcSet()
channels.add("#bar")
conf.supybot.networks.test.channels.setValue(channels)
msgs = conf.supybot.networks.test.channels.joins()
self.assertEqual(msgs[0].args, ("#bar",))
conf.supybot.networks.test.channels.setValue(orig)
def testJoinToManyChannels(self):
orig = conf.supybot.networks.test.channels()
channels = ircutils.IrcSet()
input_list = []
for x in range(1, 30):
name = "#verylongchannelname" + str(x)
channels.add(name)
input_list.append(name)
conf.supybot.networks.test.channels.setValue(channels)
msgs = conf.supybot.networks.test.channels.joins()
# Double check we split the messages
self.assertEqual(len(msgs), 2)
# Ensure all channel names are present
chan_list = (msgs[0].args[0] + ',' + msgs[1].args[0]).split(',')
self.assertCountEqual(input_list, chan_list)
conf.supybot.networks.test.channels.setValue(orig)
| [
"supybot.conf.supybot.networks.test.channels.setValue",
"supybot.conf.supybot.networks.test.channels.joins",
"supybot.conf.supybot.networks.test.channels",
"supybot.ircutils.IrcSet"
] | [((1738, 1775), 'supybot.conf.supybot.networks.test.channels', 'conf.supybot.networks.test.channels', ([], {}), '()\n', (1773, 1775), True, 'import supybot.conf as conf\n'), ((1795, 1812), 'supybot.ircutils.IrcSet', 'ircutils.IrcSet', ([], {}), '()\n', (1810, 1812), True, 'import supybot.ircutils as ircutils\n'), ((1850, 1904), 'supybot.conf.supybot.networks.test.channels.setValue', 'conf.supybot.networks.test.channels.setValue', (['channels'], {}), '(channels)\n', (1894, 1904), True, 'import supybot.conf as conf\n'), ((1920, 1963), 'supybot.conf.supybot.networks.test.channels.joins', 'conf.supybot.networks.test.channels.joins', ([], {}), '()\n', (1961, 1963), True, 'import supybot.conf as conf\n'), ((2022, 2072), 'supybot.conf.supybot.networks.test.channels.setValue', 'conf.supybot.networks.test.channels.setValue', (['orig'], {}), '(orig)\n', (2066, 2072), True, 'import supybot.conf as conf\n'), ((2127, 2164), 'supybot.conf.supybot.networks.test.channels', 'conf.supybot.networks.test.channels', ([], {}), '()\n', (2162, 2164), True, 'import supybot.conf as conf\n'), ((2184, 2201), 'supybot.ircutils.IrcSet', 'ircutils.IrcSet', ([], {}), '()\n', (2199, 2201), True, 'import supybot.ircutils as ircutils\n'), ((2383, 2437), 'supybot.conf.supybot.networks.test.channels.setValue', 'conf.supybot.networks.test.channels.setValue', (['channels'], {}), '(channels)\n', (2427, 2437), True, 'import supybot.conf as conf\n'), ((2453, 2496), 'supybot.conf.supybot.networks.test.channels.joins', 'conf.supybot.networks.test.channels.joins', ([], {}), '()\n', (2494, 2496), True, 'import supybot.conf as conf\n'), ((2762, 2812), 'supybot.conf.supybot.networks.test.channels.setValue', 'conf.supybot.networks.test.channels.setValue', (['orig'], {}), '(orig)\n', (2806, 2812), True, 'import supybot.conf as conf\n')] |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Imported from StreamBank.bas
"""
import logging
from .Input.LandUse.AreaTotal import AreaTotal_f
from .Input.LandUse.Ag.AGSTRM import AGSTRM_f
from .Input.LandUse.Ag.TileDrain import TileDrain_f
from .Output.AvAnimalNSum.AnimalN import AnimalN_f
from .Output.Loading.StreamBankEros import StreamBankEros_f
from .Output.Loading.StreamBankEros_1 import StreamBankEros_1_f
from .Output.Loading.StreamBankN_1 import StreamBankN_1_f
log = logging.getLogger(__name__)
def CalculateStreamBankEros(z, Y):
# CALCULATE THE STREAM BANK SEDIMENT AND N AND P
for i in range(12):
# CALCULATE ER FACTOR FOR STREAMBANK EROSION
z.StreamBankP[Y][i] = \
StreamBankEros_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef,
z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal,
z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope,
z.SedAAdjust, z.StreamLength)[Y][i] * (z.SedPhos / 1000000) * z.BankPFrac
# CALCULATIONS FOR STREAM BANK STABILIZATION AND FENCING
z.PURBBANK = 0
z.FCURBBANK = 0
z.PSTAB = 0
z.PURBBANK = 0
if z.n42b > 0:
z.PSTAB = (z.n46c / z.n42b) * z.StreamBankP[Y][i] * z.n77c
z.PURBBANK = (z.UrbBankStab / z.n42b) * z.StreamBankP[Y][i] * z.n77c
z.PFEN = 0
if z.n42 > 0:
z.PFEN = (z.n45 / z.n42) * z.StreamBankP[Y][i] * AGSTRM_f(z.AgLength, z.StreamLength) * z.n77
z.StreamBankP[Y][i] = z.StreamBankP[Y][i] - (z.PSTAB + z.PFEN + z.PURBBANK)
if z.StreamBankP[Y][i] < 0:
z.StreamBankP[Y][i] = 0
# CALCULATE ANNUAL STREAMBANK N AND P AND SEDIMENT
z.StreamBankPSum[Y] += z.StreamBankP[Y][i]
z.StreamBankErosSum[Y] += \
StreamBankEros_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0,
z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef
, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow,
z.StreamWithdrawal, z.GroundWithdrawal, z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj,
z.SedAFactor_0, z.AvKF, z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.n46c, z.n85d,
z.AgLength, z.n42, z.n45, z.n85, z.UrbBankStab)[Y][i]
# GROUNDWATER N LOADS ARE REDUCED BASED ON SPECIFIC BMPS
z.GWNRF = 0
z.CHNGN1 = 0
z.CHNGN2 = 0
z.CHNGN3 = 0
z.CHNGN4 = 0
z.CHNGN5 = 0
z.CHNGNTOT = 0
z.PCTN1 = 0
z.PCTN2 = 0
z.PCTN3 = 0
z.PCTN4 = 0
z.PCBMPAC = 0
z.HPBMPAC = 0
z.BMPACRES = 0
z.PCTAG = 0
z.RCNMAC = 0
z.HPNMAC = 0
z.CHNGN1 = z.n25 / 100
z.CHNGN2 = z.n26 / 100
z.CHNGN3 = z.n27 / 100
z.CHNGN4 = z.n27b / 100
z.CHNGN5 = z.n28 / 100
z.CHNGNTOT = z.CHNGN1 + z.CHNGN2 + z.CHNGN3 + z.CHNGN4 + z.CHNGN5
if AreaTotal_f(z.Area) > 0 and z.n23 > 0 and z.n42 > 0 and z.n42b > 0:
z.PCTAG = (z.n23 + z.n24) / AreaTotal_f(z.Area)
z.GroundNitr[Y][i] -= z.GroundNitr[Y][i] * ((z.n28b / 100) * z.n23) / z.n23 * z.PCTAG * z.n70
z.GroundNitr[Y][i] -= z.GroundNitr[Y][i] * (z.n43 / z.n42) * (z.n42 / z.n42b) * z.PCTAG * z.n64
z.GroundNitr[Y][i] -= (z.GroundNitr[Y][i] * (
(((z.n29 / 100) * z.n23) + ((z.n37 / 100) * z.n24)) / (z.n23 + z.n24))) * z.PCTAG * z.n68
# Groundwater P loads are reduced based on extent of nutrient management BMP
z.RCNMAC = (z.n28b / 100) * z.n23
z.HPNMAC = (z.n35b / 100) * z.n24
if AreaTotal_f(z.Area) > 0:
z.GroundPhos[Y][i] -= (((z.RCNMAC + z.HPNMAC) / AreaTotal_f(z.Area)) *
z.GroundPhos[Y][i] * z.n78)
z.GroundNitrSum[Y] += z.GroundNitr[Y][i]
z.GroundPhosSum[Y] += z.GroundPhos[Y][i]
z.TileDrainSum[Y] += \
TileDrain_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Landuse,
z.TileDrainDensity)[Y][i]
z.TileDrainNSum[Y] += z.TileDrainN[Y][i]
z.TileDrainPSum[Y] += z.TileDrainP[Y][i]
z.TileDrainSedSum[Y] += z.TileDrainSed[Y][i]
z.AnimalPSum[Y] += z.AnimalP[Y][i]
z.AnimalFCSum[Y] += z.AnimalFC[Y][i]
z.WWOrgsSum[Y] += z.WWOrgs[Y][i]
z.SSOrgsSum[Y] += z.SSOrgs[Y][i]
z.UrbOrgsSum[Y] += z.UrbOrgs[Y][i]
z.TotalOrgsSum[Y] += z.TotalOrgs[Y][i]
z.WildOrgsSum[Y] += z.WildOrgs[Y][i]
z.GRLostBarnPSum[Y] += z.GRLostBarnP[Y][i]
z.GRLostBarnFCSum[Y] += z.GRLostBarnFC[Y][i]
z.NGLostBarnPSum[Y] += z.NGLostBarnP[Y][i]
z.NGLostBarnFCSum[Y] += z.NGLostBarnFC[Y][i]
z.NGLostManPSum[Y] += z.NGLostManP[Y][i]
z.TotNitr[Y][i] += StreamBankN_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse,
z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42)[
Y][i] + \
z.TileDrainN[Y][i] + \
AnimalN_f(z.NYrs, z.NGPctManApp, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt,
z.AnimalDailyN, z.NGAppNRate, z.Prec, z.DaysMonth,
z.NGPctSoilIncRate, z.GRPctManApp, z.GRAppNRate, z.GRPctSoilIncRate, z.NGBarnNRate,
z.AWMSNgPct, z.NgAWMSCoeffN,
z.RunContPct, z.RunConCoeffN, z.PctGrazing, z.GRBarnNRate, z.AWMSGrPct,
z.GrAWMSCoeffN, z.PctStreams, z.GrazingNRate)[Y][i]
z.TotPhos[Y][i] += z.StreamBankP[Y][i] + z.TileDrainP[Y][i] + z.AnimalP[Y][i]
z.TotNitrSum[Y] += StreamBankN_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse,
z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42)[
Y][i] + \
z.TileDrainN[Y][i] + \
AnimalN_f(z.NYrs, z.NGPctManApp, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt,
z.AnimalDailyN, z.NGAppNRate, z.Prec, z.DaysMonth,
z.NGPctSoilIncRate, z.GRPctManApp, z.GRAppNRate, z.GRPctSoilIncRate, z.NGBarnNRate,
z.AWMSNgPct, z.NgAWMSCoeffN,
z.RunContPct, z.RunConCoeffN, z.PctGrazing, z.GRBarnNRate, z.AWMSGrPct,
z.GrAWMSCoeffN, z.PctStreams, z.GrazingNRate)[Y][i]
z.TotPhosSum[Y] += z.StreamBankP[Y][i] + z.TileDrainP[Y][i] + z.AnimalP[Y][i]
| [
"logging.getLogger"
] | [((575, 602), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (592, 602), False, 'import logging\n')] |
from django.urls import include, path
from feedzero.users.views import LogoutView
app_name = "users"
urlpatterns = [
path("logout/", LogoutView.as_view(), name="logout"),
path("integrations/pocket/", include("feedzero.users.integrations.pocket.urls")),
]
| [
"feedzero.users.views.LogoutView.as_view",
"django.urls.include"
] | [((140, 160), 'feedzero.users.views.LogoutView.as_view', 'LogoutView.as_view', ([], {}), '()\n', (158, 160), False, 'from feedzero.users.views import LogoutView\n'), ((211, 261), 'django.urls.include', 'include', (['"""feedzero.users.integrations.pocket.urls"""'], {}), "('feedzero.users.integrations.pocket.urls')\n", (218, 261), False, 'from django.urls import include, path\n')] |
# -*- coding: utf-8 -*-
"""Example 1: Load and plot airfoil coordinates
"""
import os
import matplotlib.pyplot as plt
from mypack.utils.io import read_selig
from mypack.utils.plotting import plot_airfoil
def example_1():
"""Run example 1"""
# script inputs
mod_path = os.path.dirname(os.path.abspath(__file__)) # current module
air_path = os.path.join(mod_path, '..',
'tests', 'test_utils', 'files', 'demo_selig.dat')
    # load coordinates from a selig-style airfoil file
air_df = read_selig(air_path)
# plot the airfoil
plot_airfoil(air_df)
# save the png for the documentation
fig = plt.gcf()
save_name = os.path.basename(__file__).replace('.py', '.png') # file name
save_path = os.path.join(mod_path, save_name)
fig.savefig(save_path)
if __name__ == '__main__': # call function when run as script
example_1()
| [
"matplotlib.pyplot.gcf",
"os.path.join",
"mypack.utils.io.read_selig",
"os.path.basename",
"mypack.utils.plotting.plot_airfoil",
"os.path.abspath"
] | [((360, 438), 'os.path.join', 'os.path.join', (['mod_path', '""".."""', '"""tests"""', '"""test_utils"""', '"""files"""', '"""demo_selig.dat"""'], {}), "(mod_path, '..', 'tests', 'test_utils', 'files', 'demo_selig.dat')\n", (372, 438), False, 'import os\n'), ((538, 558), 'mypack.utils.io.read_selig', 'read_selig', (['air_path'], {}), '(air_path)\n', (548, 558), False, 'from mypack.utils.io import read_selig\n'), ((587, 607), 'mypack.utils.plotting.plot_airfoil', 'plot_airfoil', (['air_df'], {}), '(air_df)\n', (599, 607), False, 'from mypack.utils.plotting import plot_airfoil\n'), ((660, 669), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (667, 669), True, 'import matplotlib.pyplot as plt\n'), ((765, 798), 'os.path.join', 'os.path.join', (['mod_path', 'save_name'], {}), '(mod_path, save_name)\n', (777, 798), False, 'import os\n'), ((300, 325), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (315, 325), False, 'import os\n'), ((686, 712), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (702, 712), False, 'import os\n')] |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from pyramid.view import view_config
from pyweaving.generators.twill import twill
@view_config(route_name='index', renderer='index.html')
def index_view(request):
draft = twill(2, warp_color=(200, 0, 0), weft_color=(90, 90, 90))
return dict(draft_json=draft.to_json())
| [
"pyweaving.generators.twill.twill",
"pyramid.view.view_config"
] | [((195, 249), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""index"""', 'renderer': '"""index.html"""'}), "(route_name='index', renderer='index.html')\n", (206, 249), False, 'from pyramid.view import view_config\n'), ((287, 344), 'pyweaving.generators.twill.twill', 'twill', (['(2)'], {'warp_color': '(200, 0, 0)', 'weft_color': '(90, 90, 90)'}), '(2, warp_color=(200, 0, 0), weft_color=(90, 90, 90))\n', (292, 344), False, 'from pyweaving.generators.twill import twill\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import pathlib
from fbpcp.service.storage_s3 import S3StorageService
from fbpmp.utils.buffered_s3_file_handler import BufferedS3Reader, BufferedS3Writer
S3_PATH_DRIVE = "https:"
def abstract_file_reader_path(path: pathlib.Path) -> pathlib.Path:
if path.parts[0].lower() == S3_PATH_DRIVE:
region = os.environ.get("PL_AWS_REGION")
key_id = os.environ.get("PL_AWS_KEY_ID")
key_data = os.environ.get("PL_AWS_KEY_DATA")
if region:
storage_service = S3StorageService(
region=region, access_key_id=key_id, access_key_data=key_data
)
else:
storage_service = S3StorageService(
access_key_id=key_id, access_key_data=key_data
)
with BufferedS3Reader(path, storage_service) as reader:
return reader.copy_to_local()
else:
return pathlib.Path(path)
def abstract_file_writer_ctx(path: pathlib.Path) -> contextlib.AbstractContextManager:
if path.parts[0].lower() == S3_PATH_DRIVE:
region = os.environ.get("PL_AWS_REGION")
key_id = os.environ.get("PL_AWS_KEY_ID")
key_data = os.environ.get("PL_AWS_KEY_DATA")
if region:
storage_service = S3StorageService(
region=region, access_key_id=key_id, access_key_data=key_data
)
else:
storage_service = S3StorageService(
access_key_id=key_id, access_key_data=key_data
)
return BufferedS3Writer(path, storage_service)
else:
return open(path, "w")
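# Usage sketch (paths below are illustrative, not from the original module):
# local paths are opened directly, while paths whose first component is "https:"
# are buffered through S3 using the PL_AWS_* environment variables.
# with abstract_file_writer_ctx(pathlib.Path("/tmp/report.csv")) as writer:
#     writer.write("header\n")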
| [
"fbpcp.service.storage_s3.S3StorageService",
"pathlib.Path",
"os.environ.get",
"fbpmp.utils.buffered_s3_file_handler.BufferedS3Writer",
"fbpmp.utils.buffered_s3_file_handler.BufferedS3Reader"
] | [((543, 574), 'os.environ.get', 'os.environ.get', (['"""PL_AWS_REGION"""'], {}), "('PL_AWS_REGION')\n", (557, 574), False, 'import os\n'), ((592, 623), 'os.environ.get', 'os.environ.get', (['"""PL_AWS_KEY_ID"""'], {}), "('PL_AWS_KEY_ID')\n", (606, 623), False, 'import os\n'), ((643, 676), 'os.environ.get', 'os.environ.get', (['"""PL_AWS_KEY_DATA"""'], {}), "('PL_AWS_KEY_DATA')\n", (657, 676), False, 'import os\n'), ((1106, 1124), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1118, 1124), False, 'import pathlib\n'), ((1278, 1309), 'os.environ.get', 'os.environ.get', (['"""PL_AWS_REGION"""'], {}), "('PL_AWS_REGION')\n", (1292, 1309), False, 'import os\n'), ((1327, 1358), 'os.environ.get', 'os.environ.get', (['"""PL_AWS_KEY_ID"""'], {}), "('PL_AWS_KEY_ID')\n", (1341, 1358), False, 'import os\n'), ((1378, 1411), 'os.environ.get', 'os.environ.get', (['"""PL_AWS_KEY_DATA"""'], {}), "('PL_AWS_KEY_DATA')\n", (1392, 1411), False, 'import os\n'), ((1725, 1764), 'fbpmp.utils.buffered_s3_file_handler.BufferedS3Writer', 'BufferedS3Writer', (['path', 'storage_service'], {}), '(path, storage_service)\n', (1741, 1764), False, 'from fbpmp.utils.buffered_s3_file_handler import BufferedS3Reader, BufferedS3Writer\n'), ((726, 805), 'fbpcp.service.storage_s3.S3StorageService', 'S3StorageService', ([], {'region': 'region', 'access_key_id': 'key_id', 'access_key_data': 'key_data'}), '(region=region, access_key_id=key_id, access_key_data=key_data)\n', (742, 805), False, 'from fbpcp.service.storage_s3 import S3StorageService\n'), ((880, 944), 'fbpcp.service.storage_s3.S3StorageService', 'S3StorageService', ([], {'access_key_id': 'key_id', 'access_key_data': 'key_data'}), '(access_key_id=key_id, access_key_data=key_data)\n', (896, 944), False, 'from fbpcp.service.storage_s3 import S3StorageService\n'), ((988, 1027), 'fbpmp.utils.buffered_s3_file_handler.BufferedS3Reader', 'BufferedS3Reader', (['path', 'storage_service'], {}), '(path, storage_service)\n', (1004, 1027), False, 'from fbpmp.utils.buffered_s3_file_handler import BufferedS3Reader, BufferedS3Writer\n'), ((1461, 1540), 'fbpcp.service.storage_s3.S3StorageService', 'S3StorageService', ([], {'region': 'region', 'access_key_id': 'key_id', 'access_key_data': 'key_data'}), '(region=region, access_key_id=key_id, access_key_data=key_data)\n', (1477, 1540), False, 'from fbpcp.service.storage_s3 import S3StorageService\n'), ((1615, 1679), 'fbpcp.service.storage_s3.S3StorageService', 'S3StorageService', ([], {'access_key_id': 'key_id', 'access_key_data': 'key_data'}), '(access_key_id=key_id, access_key_data=key_data)\n', (1631, 1679), False, 'from fbpcp.service.storage_s3 import S3StorageService\n')] |
import time
from typing import Callable
from functools import wraps
def timeit(metric_callback: Callable, **labels):
def wrapper(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
metric_callback(time.time() - start, labels=labels)
return result
return func_wrapper
return wrapper
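# Usage sketch: the factory above is applied with a metric callback plus arbitrary
# labels; the callback receives the elapsed seconds and the labels dict. The names
# below are illustrative and not part of the original module.
def _print_metric(elapsed, labels):
    """Example callback that just prints the measurement."""
    print("elapsed=%.3fs labels=%r" % (elapsed, labels))

@timeit(_print_metric, endpoint="search")
def _example_search():
    time.sleep(0.05)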
| [
"time.time",
"functools.wraps"
] | [((151, 162), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (156, 162), False, 'from functools import wraps\n'), ((226, 237), 'time.time', 'time.time', ([], {}), '()\n', (235, 237), False, 'import time\n'), ((309, 320), 'time.time', 'time.time', ([], {}), '()\n', (318, 320), False, 'import time\n')] |
# Start Imports.
import scapy.all as scapy
import time, sys
def Banner():
# One Line Banner
Ban = "\t\t\t\t\t[+] << ARP-SP00F (MITM) >> [+]"
print(Ban)
Banner()
Address = input("\nTarget IP >> ")
MainIP = input("Network IP >> ")
Packets = 0
def MAC_SCAN(ip):
arp = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
ARP_Req_broadcast = arp/broadcast
answerd = scapy.srp(ARP_Req_broadcast,timeout=1,verbose=False)[0]
return answerd[0][1].hwsrc
def SPOOF(target,network):
target_MAC = MAC_SCAN(target) # Get The Victim MAC Address
    packet = scapy.ARP(op=2, pdst=target, hwdst=target_MAC, psrc=network) # Create The Packet To Send TO The Victim/Router.
scapy.send(packet,verbose=False) # Send The Package Using Scapy
def restore(dist_ip,real_ip):
dist_MAC = MAC_SCAN(dist_ip)
real_MAC = MAC_SCAN(real_ip)
    packet = scapy.ARP(op=2, pdst=dist_ip, hwdst=real_MAC, psrc=real_ip, hwsrc=real_MAC)
scapy.send(packet,count=4,verbose=False)
try:
while True:
SPOOF(Address,MainIP) #--\ Send A Request To Address Once
SPOOF(MainIP,Address) #--/ Then Send Another Request To The Router.
Packets += 2 # Add 2 To The Total Number Of Packets After Sent The Requests
print("\r[+] Successful Send {0} Packets".format(str(Packets)), end="") # Print The Number Of Sent Packets
sys.stdout.flush() # Keep The Print AT Same Line
time.sleep(2.4) # Wait For 2.4 Seconds
except KeyboardInterrupt:
print("\nCancel.")
answer = input("Do you want to restore every thing? (Y)es or (N)o: ")
if answer.lower() == "y":
restore(Address,MainIP)
elif answer.lower() == "n":
print("Ok!")
sys.exit()
else: # Auto Exit If There Is Wrong Answer.
sys.exit()
| [
"time.sleep",
"scapy.all.send",
"scapy.all.ARP",
"sys.exit",
"scapy.all.srp",
"sys.stdout.flush",
"scapy.all.Ether"
] | [((288, 306), 'scapy.all.ARP', 'scapy.ARP', ([], {'pdst': 'ip'}), '(pdst=ip)\n', (297, 306), True, 'import scapy.all as scapy\n'), ((321, 357), 'scapy.all.Ether', 'scapy.Ether', ([], {'dst': '"""ff:ff:ff:ff:ff:ff"""'}), "(dst='ff:ff:ff:ff:ff:ff')\n", (332, 357), True, 'import scapy.all as scapy\n'), ((593, 653), 'scapy.all.ARP', 'scapy.ARP', ([], {'op': '(2)', 'pdst': 'target', 'hwdst': 'target_MAC', 'pscr': 'network'}), '(op=2, pdst=target, hwdst=target_MAC, pscr=network)\n', (602, 653), True, 'import scapy.all as scapy\n'), ((703, 736), 'scapy.all.send', 'scapy.send', (['packet'], {'verbose': '(False)'}), '(packet, verbose=False)\n', (713, 736), True, 'import scapy.all as scapy\n'), ((873, 948), 'scapy.all.ARP', 'scapy.ARP', ([], {'op': '(2)', 'pdst': 'dist_ip', 'hwdst': 'real_MAC', 'pscr': 'real_ip', 'hwsrc': 'real_MAC'}), '(op=2, pdst=dist_ip, hwdst=real_MAC, pscr=real_ip, hwsrc=real_MAC)\n', (882, 948), True, 'import scapy.all as scapy\n'), ((947, 989), 'scapy.all.send', 'scapy.send', (['packet'], {'count': '(4)', 'verbose': '(False)'}), '(packet, count=4, verbose=False)\n', (957, 989), True, 'import scapy.all as scapy\n'), ((406, 460), 'scapy.all.srp', 'scapy.srp', (['ARP_Req_broadcast'], {'timeout': '(1)', 'verbose': '(False)'}), '(ARP_Req_broadcast, timeout=1, verbose=False)\n', (415, 460), True, 'import scapy.all as scapy\n'), ((1334, 1352), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1350, 1352), False, 'import time, sys\n'), ((1386, 1401), 'time.sleep', 'time.sleep', (['(2.4)'], {}), '(2.4)\n', (1396, 1401), False, 'import time, sys\n'), ((1649, 1659), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1657, 1659), False, 'import time, sys\n'), ((1709, 1719), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1717, 1719), False, 'import time, sys\n')] |
from urllib.parse import urlencode
from ebrains_drive.files import SeafDir, SeafFile
from ebrains_drive.utils import raise_does_not_exist
class Repo(object):
"""
A seafile library
"""
def __init__(self, client, **kwargs):
self.client = client
allowed_keys = ['encrypted', 'group_name', 'groupid', 'head_commit_id', 'id', 'modifier_contact_email', 'modifier_email', 'modifier_name', 'mtime', 'mtime_relative', 'name', 'owner', 'owner_contact_email', 'owner_name', 'permission', 'root', 'share_from', 'share_from_contact_email', 'share_from_name', 'share_type', 'size', 'size_formatted', 'type', 'version', 'virtual']
# Update __dict__ but only for keys that have been predefined
# (silently ignore others)
self.__dict__.update((key, value) for key, value in kwargs.items() if key in allowed_keys)
# To NOT silently ignore rejected keys
# rejected_keys = set(kwargs.keys()) - set(allowed_keys)
# if rejected_keys:
# raise ValueError("Invalid arguments in constructor:{}".format(rejected_keys))
def __str__(self):
return "(id='{}', name='{}')".format(self.id, self.name)
def __repr__(self):
return "ebrains_drive.repo.Repo(id='{}', name='{}')".format(self.id, self.name)
@classmethod
def from_json(cls, client, repo_json):
return cls(client, **repo_json)
def is_readonly(self):
return 'w' not in self.perm
@raise_does_not_exist('The requested file does not exist')
def get_file(self, path):
"""Get the file object located in `path` in this repo.
Return a :class:`SeafFile` object
"""
assert path.startswith('/')
url = '/api2/repos/%s/file/detail/' % self.id
query = '?' + urlencode(dict(p=path))
file_json = self.client.get(url + query).json()
return SeafFile(self, path, file_json['id'], "file", file_json['size'])
@raise_does_not_exist('The requested dir does not exist')
def get_dir(self, path):
"""Get the dir object located in `path` in this repo.
Return a :class:`SeafDir` object
"""
assert path.startswith('/')
url = '/api2/repos/%s/dir/' % self.id
query = '?' + urlencode(dict(p=path))
resp = self.client.get(url + query)
dir_id = resp.headers['oid']
dir_json = resp.json()
dir = SeafDir(self, path, dir_id, "dir")
dir.load_entries(dir_json)
return dir
def delete(self):
"""Remove this repo. Only the repo owner can do this"""
self.client.delete('/api2/repos/' + self.id)
def list_history(self):
"""List the history of this repo
Returns a list of :class:`RepoRevision` object.
"""
pass
## Operations only the repo owner can do:
def update(self, name=None):
"""Update the name of this repo. Only the repo owner can do
this.
"""
pass
def get_settings(self):
"""Get the settings of this repo. Returns a dict containing the following
keys:
`history_limit`: How many days of repo history to keep.
"""
pass
def restore(self, commit_id):
pass
class RepoRevision(object):
def __init__(self, client, repo, commit_id):
self.client = client
self.repo = repo
self.commit_id = commit_id
def restore(self):
"""Restore the repo to this revision"""
self.repo.revert(self.commit_id)
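# Usage sketch (assumes an already-authenticated ebrains_drive client and an
# existing Repo instance; object names are illustrative):
# root = repo.get_dir('/')              # SeafDir, entries loaded from /api2/repos/<id>/dir/
# readme = repo.get_file('/README.md')  # SeafFile with the id and size reported by the API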
| [
"ebrains_drive.files.SeafFile",
"ebrains_drive.utils.raise_does_not_exist",
"ebrains_drive.files.SeafDir"
] | [((1460, 1517), 'ebrains_drive.utils.raise_does_not_exist', 'raise_does_not_exist', (['"""The requested file does not exist"""'], {}), "('The requested file does not exist')\n", (1480, 1517), False, 'from ebrains_drive.utils import raise_does_not_exist\n'), ((1945, 2001), 'ebrains_drive.utils.raise_does_not_exist', 'raise_does_not_exist', (['"""The requested dir does not exist"""'], {}), "('The requested dir does not exist')\n", (1965, 2001), False, 'from ebrains_drive.utils import raise_does_not_exist\n'), ((1874, 1938), 'ebrains_drive.files.SeafFile', 'SeafFile', (['self', 'path', "file_json['id']", '"""file"""', "file_json['size']"], {}), "(self, path, file_json['id'], 'file', file_json['size'])\n", (1882, 1938), False, 'from ebrains_drive.files import SeafDir, SeafFile\n'), ((2401, 2435), 'ebrains_drive.files.SeafDir', 'SeafDir', (['self', 'path', 'dir_id', '"""dir"""'], {}), "(self, path, dir_id, 'dir')\n", (2408, 2435), False, 'from ebrains_drive.files import SeafDir, SeafFile\n')] |
from django.conf.urls import url
import referrals
urlpatterns = [
url(r'^referrals/$', referrals.ReferralsView.as_view()),
url(r'^referral/(?P<referral_id>[-&\w]+)/$', referrals.ReferralView.as_view()),
]
| [
"referrals.ReferralsView.as_view",
"referrals.ReferralView.as_view"
] | [((92, 125), 'referrals.ReferralsView.as_view', 'referrals.ReferralsView.as_view', ([], {}), '()\n', (123, 125), False, 'import referrals\n'), ((177, 209), 'referrals.ReferralView.as_view', 'referrals.ReferralView.as_view', ([], {}), '()\n', (207, 209), False, 'import referrals\n')] |
from typing import Optional
import pytest
from testoot.base import TestootContext, Comparator, TestootSerializer, \
FileType, TestootTestResult
from testoot.ext.pytest import PytestContext
from testoot.testoot import Testoot
from tests.conftest import AbcDiffResult
@pytest.fixture(scope='module')
def base_testoot(root_base_testoot):
testoot = root_base_testoot.clone(
storage=root_base_testoot.storage.clone(add_path='examples'),
)
testoot.storage.ensure_exists()
yield testoot
@pytest.fixture(scope='function')
def testoot(base_testoot, request):
testoot = Testoot(base_testoot, PytestContext(request))
yield testoot
class TrueComparator(Comparator):
@classmethod
def compare(cls, test_obj: any, canon_obj: any):
assert True
class FalseComparator(Comparator):
@classmethod
def compare(cls, test_obj: any, canon_obj: any):
assert False
class ContextTestoot(TestootContext):
def __init__(self, name, comparator: Optional[Comparator] = None,
serializer: Optional[TestootSerializer] = None,
ask_canonize: bool = False):
self._name = name
self._comparator = (TrueComparator() if comparator is None
else comparator)
self._serializer = serializer
self._ask_canonize = ask_canonize
def get_storage_name(self, file_type_hint: FileType,
suffix: Optional[str] = None):
return self._name
def get_storage_name_from_filename(self, filename: str):
return filename
def get_comparator(self) -> Optional[Comparator]:
return self._comparator
def get_serializer(self) -> Optional[TestootSerializer]:
return self._serializer
def ask_canonize(self) -> bool:
return self._ask_canonize
def create_test_result(self, test_obj: any, canon_obj: any,
exc: Exception) -> TestootTestResult:
return AbcDiffResult()
| [
"pytest.fixture",
"testoot.ext.pytest.PytestContext",
"tests.conftest.AbcDiffResult"
] | [((275, 305), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (289, 305), False, 'import pytest\n'), ((515, 547), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (529, 547), False, 'import pytest\n'), ((620, 642), 'testoot.ext.pytest.PytestContext', 'PytestContext', (['request'], {}), '(request)\n', (633, 642), False, 'from testoot.ext.pytest import PytestContext\n'), ((1978, 1993), 'tests.conftest.AbcDiffResult', 'AbcDiffResult', ([], {}), '()\n', (1991, 1993), False, 'from tests.conftest import AbcDiffResult\n')] |
# -*- coding: utf-8 -*-
import json
from random import shuffle
def serialize_stream(stream):
items = stream.youtube_links.split("\n")
if stream.play_random:
shuffle(items)
return {
"id": stream.pk,
"name": stream.name,
"slug": stream.slug,
"keywords": stream.keywords,
"plex_playlist_id": stream.plex_playlist_id,
"channels": stream.channels,
"update_frequency": stream.update_frequency,
"video_length": stream.video_length,
"is_news": stream.is_news,
"play_random": stream.play_random,
"links": [{
"url": link
} for link in items]
}
| [
"random.shuffle"
] | [((174, 188), 'random.shuffle', 'shuffle', (['items'], {}), '(items)\n', (181, 188), False, 'from random import shuffle\n')] |
# -*- coding: utf-8 -*-
import warnings
import six
def encode_if_unicode(value, encoding='utf-8'): # pragma: no cover
"""
Encode and return a ``value`` using specified ``encoding``.
Encoding is done only if ``value`` is a ``unicode`` instance
(utf-8 encoding is used as default).
.. deprecated:: 5.0.0
Use :func:`laterpay.compat.stringify` instead.
"""
warnings.warn(
'laterpay.compat.encode_if_unicode is deprecated and will be removed '
'in future versions. Use laterpay.compat.stringify instead',
DeprecationWarning
)
if six.PY2 and isinstance(value, six.text_type):
value = value.encode(encoding)
return value
def stringify(value):
"""
Convert ``value`` into a native Python string.
If value is not a byte- or unicode-string the function calls ``str()`` on
it.
If the value then is a unicode string (on Python 2) or byte string (on
Python 3) the function converts it into the respective native string type
(byte string on Python 2; unicode string on Python 3).
In all other cases the value is returned as-is.
"""
if not isinstance(value, (six.string_types, six.binary_type)):
# If any non-string or non-bytes like objects, ``str()`` them.
value = str(value)
if six.PY3 and isinstance(value, six.binary_type):
# Issue #84, decode byte strings before using them on Python 3
value = value.decode()
elif six.PY2 and isinstance(value, six.text_type):
value = value.encode('utf-8')
return value
def byteify(value):
"""
Convert ``value`` into a byte-string.
"""
if isinstance(value, six.text_type):
return value.encode('utf-8')
return value
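# Behaviour sketch under Python 3 semantics (values are illustrative):
#   stringify(b"abc") -> "abc"   # byte strings are decoded to the native str type
#   stringify(42)     -> "42"    # non-string objects go through str() first
#   byteify("abc")    -> b"abc"  # text is UTF-8 encoded
# Under Python 2, stringify(u"abc") instead returns the UTF-8 encoded byte string.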
| [
"warnings.warn"
] | [((396, 568), 'warnings.warn', 'warnings.warn', (['"""laterpay.compat.encode_if_unicode is deprecated and will be removed in future versions. Use laterpay.compat.stringify instead"""', 'DeprecationWarning'], {}), "(\n 'laterpay.compat.encode_if_unicode is deprecated and will be removed in future versions. Use laterpay.compat.stringify instead'\n , DeprecationWarning)\n", (409, 568), False, 'import warnings\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 21:55:56 2015
@author: aidanrocke
"""
import numpy as np
def compressSequenceNFast(posture_seq, newStart, nMax):
"""
# COMPRESSSEQUENCE Recursively finds the most compressive subsequence in
    # posture_seq and replaces it with a new number. This replacement
    # creates a new rule in the grammar. Replacements are made until there are
    # none left that lead to further compression. See the following paper
    # for more details: Nevill-Manning and Witten (2000) On-Line and Off-Line
# Heuristics for Inferring Hierarchies of Repetitions in Sequences.
# Proceedings of the IEEE 88:1745.
#
# Input
# posture_seq - a list of posture sequences to be compressed
# newStart - this is the number that will be used to label the first new
# rule in the grammar. It must be greater than the maximum
    #              value in posture_seq. If 0, then max(posture_seq) + 1 is used.
# nMax - the maximum length n-gram to check for compression
#
# Output
# grammar - a number of rules by 2 cell array. The first column has the
# left hand side of each replacement rule while the second
# column has the right hand side (so the first column lists
# all non-terminals in the grammar).
# compVec - the vector that has been compressed using grammar. posture_seq
# can be recovered by applying the grammar rules in reverse.
# totSavings - the total space saving achieved during the compression,
# taking into account the size of the created grammar rules"""
# check posture_seq
if len(np.shape(posture_seq)) > 1:
raise ValueError('posture_seq must be a row vector.')
# define newStart if left empty
if newStart == 0:
newStart = max(posture_seq) + 1
# check that newStart is large enough
if newStart <= max(posture_seq):
raise ValueError('newStart must be greater than max(posture_seq).')
# initialise grammar
grammar = [[0,[0,0]]]
# initialise compVec and make a suffix array
compVec = posture_seq
totSavings = 0
# compress segments until none are found that lead to compression
sequence = [np.nan]
newInd = newStart
while len(sequence) > 0:
# find the most compressive sequence in posture_seq
[sequence, locations, savings] = compressiveNFast(compVec, nMax)
# update the total savings (i.e. compression)
totSavings = totSavings + savings
# add the rule to grammar
grammar.append([newInd,sequence])
# make the replacements. Note: strrep does not work here. For example
# if sequence is [44 68 44] and compVec has a subsequence that is
# [44 68 44 68 44 68 44 448], strrep will give [68 480 480 480 448]
# which is wrong.
for j in range(len(locations)):
compVec[locations[j]:locations[j] + len(sequence) - 1] = [newInd]+[np.nan]*(len(sequence)-1)
while compVec.count(np.nan) > 0:
compVec.remove(np.nan)
newInd += 1
# check that compressed lengths, savings, and grammar size are
# consistent
if len(sequence) > 0: # on last iteration last grammar entry is empty
if len(compVec) + totSavings + len(grammar) + np.sum(len(grammar[i][1]) for i in range(len(grammar))) != len(posture_seq):
raise ValueError(['Calculated savings not consistent with original and compressed lengths and grammar size.'])
else:
if len(compVec) + totSavings + len(grammar)-1 + np.sum(len(grammar[i][1]) for i in range(len(grammar))) != len(posture_seq):
ValueError(['Calculated savings not consistent with original and compressed lengths and grammar size.'])
# remove the last (empty) entry of the grammar
return grammar[1:-1]
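# Usage sketch (illustrative values; assumes the compressiveNFast helper used
# above is importable, which this snippet does not define):
# grammar = compressSequenceNFast([1, 2, 1, 2, 1, 2, 3], newStart=4, nMax=3)
# Each returned rule pairs a new non-terminal (here >= 4) with the repeated
# subsequence it replaced, e.g. [4, [1, 2]].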
| [
"numpy.shape"
] | [((1731, 1752), 'numpy.shape', 'np.shape', (['posture_seq'], {}), '(posture_seq)\n', (1739, 1752), True, 'import numpy as np\n')] |
from myhdl import Signal, intbv, always, always_comb, block, instances
from hdmi.cores.primitives import dram16xn
@block
def convert_30_to_15(reset, clock, clockx2, data_in, tmds_data2, tmds_data1, tmds_data0):
"""
The block converts the 30-bit data into 15-bit data.
Args:
reset: The reset signal
clock: The pixel clock
clockx2: The clock with twice the frequency of pixel clock
data_in: The input 30-bit data
tmds_data2: 5 bits of the output data (output[15:10])
tmds_data1: 5 bits of the output data (output[10:5])
tmds_data0: 5 bits of the output data (output[5:0])
Returns:
myhdl.instances() : A list of myhdl instances.
"""
# RAM Address
write_addr, _write_addr, read_addr, _read_addr = [Signal(intbv(0)[4:0]) for _ in range(4)]
data_int = Signal(intbv(0)[30:0])
@always(write_addr)
def case_wa():
if write_addr < 15:
_write_addr.next = write_addr + 1
else:
_write_addr.next = 0
@always(clock.posedge, reset.posedge)
def fdc():
if reset:
write_addr.next = 0
else:
write_addr.next = _write_addr
o_data_out = Signal(intbv(0)[30:0]) # Dummy variable
fifo_u = dram16xn(data_in, write_addr, read_addr, Signal(True), clock, o_data_out, data_int)
@always(read_addr)
def case_ra():
if read_addr < 15:
_read_addr.next = read_addr + 1
else:
_read_addr.next = 0
reset_sync, _reset_sync, reset_p = [Signal(bool(0)) for _ in range(3)]
sync = Signal(bool(0))
@always(clockx2.posedge, reset.posedge)
def fdp():
if reset:
reset_sync.next = 1
else:
reset_sync.next = reset
@always(clockx2.posedge)
def fdr():
if reset_p:
sync.next = 0
else:
sync.next = not sync
@always(clockx2.posedge)
def fdre():
if reset_p:
read_addr.next = 0
elif sync:
read_addr.next = _read_addr
db = Signal(intbv(0)[30:0])
@always(clockx2.posedge)
def fde():
if sync:
db.next = data_int
mux = Signal(intbv(0)[15:0])
@always_comb
def mux_logic():
if not sync:
mux.next = db[15:0]
else:
mux.next = db[30:15]
@always(clockx2.posedge)
def fd():
_reset_sync.next = reset_sync
reset_p.next = _reset_sync
tmds_data0.next = mux[5:0]
tmds_data1.next = mux[10:5]
tmds_data2.next = mux[15:10]
return instances()
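# Instantiation sketch (signal names are illustrative, not from the original design):
# reset, clock, clockx2 = [Signal(bool(0)) for _ in range(3)]
# data_in = Signal(intbv(0)[30:0])
# tmds_data2, tmds_data1, tmds_data0 = [Signal(intbv(0)[5:0]) for _ in range(3)]
# dut = convert_30_to_15(reset, clock, clockx2, data_in,
#                        tmds_data2, tmds_data1, tmds_data0)
# The instance can then be simulated or converted alongside the rest of the design.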
| [
"myhdl.always",
"myhdl.instances",
"myhdl.intbv",
"myhdl.Signal"
] | [((881, 899), 'myhdl.always', 'always', (['write_addr'], {}), '(write_addr)\n', (887, 899), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1047, 1083), 'myhdl.always', 'always', (['clock.posedge', 'reset.posedge'], {}), '(clock.posedge, reset.posedge)\n', (1053, 1083), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1370, 1387), 'myhdl.always', 'always', (['read_addr'], {}), '(read_addr)\n', (1376, 1387), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1635, 1673), 'myhdl.always', 'always', (['clockx2.posedge', 'reset.posedge'], {}), '(clockx2.posedge, reset.posedge)\n', (1641, 1673), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1795, 1818), 'myhdl.always', 'always', (['clockx2.posedge'], {}), '(clockx2.posedge)\n', (1801, 1818), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1933, 1956), 'myhdl.always', 'always', (['clockx2.posedge'], {}), '(clockx2.posedge)\n', (1939, 1956), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((2122, 2145), 'myhdl.always', 'always', (['clockx2.posedge'], {}), '(clockx2.posedge)\n', (2128, 2145), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((2388, 2411), 'myhdl.always', 'always', (['clockx2.posedge'], {}), '(clockx2.posedge)\n', (2394, 2411), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((2619, 2630), 'myhdl.instances', 'instances', ([], {}), '()\n', (2628, 2630), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1321, 1333), 'myhdl.Signal', 'Signal', (['(True)'], {}), '(True)\n', (1327, 1333), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((859, 867), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (864, 867), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((1230, 1238), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (1235, 1238), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((2100, 2108), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (2105, 2108), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((2227, 2235), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (2232, 2235), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n'), ((802, 810), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (807, 810), False, 'from myhdl import Signal, intbv, always, always_comb, block, instances\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.GoodsCategoryResult import GoodsCategoryResult
class AlipayPcreditHuabeiGoodsCategoryQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditHuabeiGoodsCategoryQueryResponse, self).__init__()
self._categorys = None
self._success = None
@property
def categorys(self):
return self._categorys
@categorys.setter
def categorys(self, value):
if isinstance(value, list):
self._categorys = list()
for i in value:
if isinstance(i, GoodsCategoryResult):
self._categorys.append(i)
else:
self._categorys.append(GoodsCategoryResult.from_alipay_dict(i))
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
def parse_response_content(self, response_content):
response = super(AlipayPcreditHuabeiGoodsCategoryQueryResponse, self).parse_response_content(response_content)
if 'categorys' in response:
self.categorys = response['categorys']
if 'success' in response:
self.success = response['success']
| [
"alipay.aop.api.domain.GoodsCategoryResult.GoodsCategoryResult.from_alipay_dict"
] | [((826, 865), 'alipay.aop.api.domain.GoodsCategoryResult.GoodsCategoryResult.from_alipay_dict', 'GoodsCategoryResult.from_alipay_dict', (['i'], {}), '(i)\n', (862, 865), False, 'from alipay.aop.api.domain.GoodsCategoryResult import GoodsCategoryResult\n')] |
import re
from typing import Tuple
BINANCE_SYMBOL_SPLITTER = re.compile(r"^(\w+)(BTC|ETH|BNB|XRP|USDT|USDC|TUSD|PAX)$")
class SymbolSplitter:
def __init__(self, market: str, symbol: str):
self._symbol: Tuple[str, str] = self.split(market, symbol)
@property
def base_asset(self):
return self._symbol[0]
@property
def quote_asset(self):
return self._symbol[1]
@staticmethod
def split(market, symbol) -> Tuple[str, str]:
"""
Takes an exchange pair and return
:param market: lowercase market e.g. binance
:param symbol: uppercase exchange pair e.g. ETHUSDT
:return: tuple: (base_asset, quote_asset)
"""
try:
if market == "binance":
m = BINANCE_SYMBOL_SPLITTER.match(symbol)
result: Tuple = (m.group(1), m.group(2))
elif market in ["ddex", "radar_relay", "coinbase_pro"]:
result: Tuple = tuple(symbol.split('-'))
else:
raise ValueError("Market %s not supported" % (market,))
except Exception:
raise ValueError("Error parsing %s symbol. Symbol %s is not a valid %s symbol" % (market, symbol, market))
if len(result) != 2:
raise ValueError("Symbol %s does not match %s's format" % (symbol, market))
return result
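# Usage sketch (pairs below are examples only):
# SymbolSplitter.split("binance", "ETHBTC") -> ("ETH", "BTC")
# SymbolSplitter.split("coinbase_pro", "ETH-USD") -> ("ETH", "USD")
# s = SymbolSplitter("ddex", "DAI-WETH"); s.base_asset == "DAI", s.quote_asset == "WETH"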
| [
"re.compile"
] | [((63, 121), 're.compile', 're.compile', (['"""^(\\\\w+)(BTC|ETH|BNB|XRP|USDT|USDC|TUSD|PAX)$"""'], {}), "('^(\\\\w+)(BTC|ETH|BNB|XRP|USDT|USDC|TUSD|PAX)$')\n", (73, 121), False, 'import re\n')] |
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import urllib3
from socket import timeout
import tldextract
import re
import traceback
import sys
import logging
import socket
import threading
from time import sleep
from collector import Collector
from checker import Checker
http = urllib3.PoolManager()
page = http.request('GET', "http://opennederland.nl", timeout=2)
print(page)
print("-------------------------------------------------------")
print(page.headers)
print("-------------------------------------------------------")
print(page.headers.keys())
print("-------------------------------------------------------")
print(page.headers.items())
print("-------------------------------------------------------")
t = page.headers.items()
print("-------------------------------------------------------")
d = dict((x, y) for x, y in t)
print (d)
print("-------------------------------------------------------")
if "X-Powered-By" in d:
print ("print " + d['X-Powered-By']) | [
"urllib3.PoolManager"
] | [((300, 321), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (319, 321), False, 'import urllib3\n')] |
# Generated by Django 3.0.1 on 2020-01-08 08:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog_entries', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='for_adult',
),
]
| [
"django.db.migrations.RemoveField"
] | [((221, 283), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""article"""', 'name': '"""for_adult"""'}), "(model_name='article', name='for_adult')\n", (243, 283), False, 'from django.db import migrations\n')] |
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
parser.add_argument('sh_path', nargs='?', default='train_10.24.sh')  # nargs='?' lets the default apply when the argument is omitted
args = parser.parse_args()
config_path = args.config_path
sh_path = args.sh_path
with open(f'/home/mist/SegformerDistillation/local_configs/{sh_path}','w') as f:
for c in os.listdir(config_path):
if 'example' not in c:
command = f'bash tools/dist_train.sh {config_path}{c} 8;\n'
f.write(command)
print('done') | [
"os.listdir",
"argparse.ArgumentParser"
] | [((35, 60), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (58, 60), False, 'import argparse\n'), ((328, 351), 'os.listdir', 'os.listdir', (['config_path'], {}), '(config_path)\n', (338, 351), False, 'import os\n')] |
import json
frecuencias = [
{
"tag": "company",
"count": 44
},
{
"tag": "team",
"count": 44
},
{
"tag": "experience",
"count": 43
},
{
"tag": "global",
"count": 42
},
{
"tag": "local",
"count": 41
},
{
"tag": "we",
"count": 40
},
{
"tag": "experiencia",
"count": 39
},
{
"tag": "work",
"count": 39
},
{
"tag": "time",
"count": 38
},
{
"tag": "support",
"count": 37
},
{
"tag": "english",
"count": 36
},
{
"tag": "management",
"count": 36
},
{
"tag": "required",
"count": 36
},
{
"tag": "working",
"count": 35
},
{
"tag": "related",
"count": 34
},
{
"tag": "skills",
"count": 34
},
{
"tag": "years",
"count": 34
},
{
"tag": "you",
"count": 34
},
{
"tag": "business",
"count": 33
},
{
"tag": "including",
"count": 33
},
{
"tag": "medical",
"count": 33
},
{
"tag": "trabajo",
"count": 33
},
{
"tag": "clinical",
"count": 32
},
{
"tag": "degree",
"count": 32
},
{
"tag": "knowledge",
"count": 32
},
{
"tag": "communication",
"count": 31
},
{
"tag": "development",
"count": 31
},
{
"tag": "activities",
"count": 30
},
{
"tag": "description",
"count": 30
},
{
"tag": "health",
"count": 30
},
{
"tag": "strong",
"count": 30
},
{
"tag": "empresa",
"count": 28
},
{
"tag": "process",
"count": 28
},
{
"tag": "qualifications",
"count": 28
},
{
"tag": "requisitos",
"count": 28
},
{
"tag": "written",
"count": 28
},
{
"tag": "área",
"count": 28
},
{
"tag": "control",
"count": 27
},
{
"tag": "equipo",
"count": 27
},
{
"tag": "opportunities",
"count": 27
},
{
"tag": "provide",
"count": 27
},
{
"tag": "requirements",
"count": 27
},
{
"tag": "research",
"count": 27
},
{
"tag": "training",
"count": 27
},
{
"tag": "within",
"count": 27
},
{
"tag": "años",
"count": 26
},
{
"tag": "data",
"count": 26
},
{
"tag": "information",
"count": 26
},
{
"tag": "job description",
"count": 26
},
{
"tag": "office",
"count": 26
},
{
"tag": "ability",
"count": 25
},
{
"tag": "avanzado",
"count": 25
},
{
"tag": "opportunity",
"count": 25
},
{
"tag": "position",
"count": 25
},
{
"tag": "ensure",
"count": 24
},
{
"tag": "responsibilities",
"count": 24
},
{
"tag": "role",
"count": 24
},
{
"tag": "site",
"count": 24
},
{
"tag": "career",
"count": 23
},
{
"tag": "etc",
"count": 23
},
{
"tag": "join",
"count": 23
},
{
"tag": "new",
"count": 23
},
{
"tag": "personal",
"count": 23
},
{
"tag": "projects",
"count": 23
},
{
"tag": "quality",
"count": 23
},
{
"tag": "status",
"count": 23
},
{
"tag": "applicable",
"count": 22
},
{
"tag": "develop",
"count": 22
},
{
"tag": "environment",
"count": 22
},
{
"tag": "internal",
"count": 22
},
{
"tag": "patients",
"count": 22
},
{
"tag": "pharmaceutical",
"count": 22
},
{
"tag": "processes",
"count": 22
},
{
"tag": "productos",
"count": 22
},
{
"tag": "regulatory",
"count": 22
},
{
"tag": "responsible",
"count": 22
},
{
"tag": "employment",
"count": 21
},
{
"tag": "full",
"count": 21
},
{
"tag": "life",
"count": 21
},
{
"tag": "orientation",
"count": 21
},
{
"tag": "performance",
"count": 21
},
{
"tag": "products",
"count": 21
},
{
"tag": "review",
"count": 21
},
{
"tag": "salud",
"count": 21
},
{
"tag": "services",
"count": 21
},
{
"tag": "travel",
"count": 21
},
{
"tag": "across",
"count": 20
},
{
"tag": "based",
"count": 20
},
{
"tag": "compliance",
"count": 20
},
{
"tag": "education",
"count": 20
},
{
"tag": "equal",
"count": 20
},
{
"tag": "marketing",
"count": 20
},
{
"tag": "operations",
"count": 20
},
{
"tag": "organization",
"count": 20
},
{
"tag": "planning",
"count": 20
},
{
"tag": "procedures",
"count": 20
},
{
"tag": "sexual",
"count": 20
},
{
"tag": "us",
"count": 20
},
{
"tag": "apply",
"count": 19
},
{
"tag": "chile",
"count": 19
},
{
"tag": "contact",
"count": 19
},
{
"tag": "healthcare",
"count": 19
},
{
"tag": "include",
"count": 19
},
{
"tag": "industry",
"count": 19
},
{
"tag": "maintain",
"count": 19
},
{
"tag": "manager",
"count": 19
},
{
"tag": "may",
"count": 19
},
{
"tag": "monitoring",
"count": 19
},
{
"tag": "must",
"count": 19
},
{
"tag": "people",
"count": 19
},
{
"tag": "principales",
"count": 19
},
{
"tag": "procesos",
"count": 19
},
{
"tag": "race",
"count": 19
},
{
"tag": "sexual orientation",
"count": 19
},
{
"tag": "si",
"count": 19
},
{
"tag": "solutions",
"count": 19
},
{
"tag": "trabajar",
"count": 19
},
{
"tag": "use",
"count": 19
},
{
"tag": "world",
"count": 19
},
{
"tag": "assigned",
"count": 18
},
{
"tag": "employees",
"count": 18
},
{
"tag": "inglés",
"count": 18
},
{
"tag": "lead",
"count": 18
},
{
"tag": "leadership",
"count": 18
},
{
"tag": "level",
"count": 18
},
{
"tag": "needs",
"count": 18
},
{
"tag": "nivel",
"count": 18
},
{
"tag": "one",
"count": 18
},
{
"tag": "plan",
"count": 18
},
{
"tag": "religion",
"count": 18
},
{
"tag": "sites",
"count": 18
},
{
"tag": "specific",
"count": 18
},
{
"tag": "study",
"count": 18
},
{
"tag": "teams",
"count": 18
},
{
"tag": "therapeutic",
"count": 18
},
{
"tag": "well",
"count": 18
},
{
"tag": "ambiente",
"count": 17
},
{
"tag": "areas",
"count": 17
},
{
"tag": "clinical research",
"count": 17
},
{
"tag": "conocimientos",
"count": 17
},
{
"tag": "day",
"count": 17
},
{
"tag": "excel",
"count": 17
},
{
"tag": "field",
"count": 17
},
{
"tag": "functional",
"count": 17
},
{
"tag": "good",
"count": 17
},
{
"tag": "impact",
"count": 17
},
{
"tag": "lives",
"count": 17
},
{
"tag": "patient",
"count": 17
},
{
"tag": "profesional",
"count": 17
},
{
"tag": "regulations",
"count": 17
},
{
"tag": "responsabilidades",
"count": 17
},
{
"tag": "also",
"count": 16
},
{
"tag": "calidad",
"count": 16
},
{
"tag": "comunicación",
"count": 16
},
{
"tag": "countries",
"count": 16
},
{
"tag": "desarrollo",
"count": 16
},
{
"tag": "disability",
"count": 16
},
{
"tag": "equivalent",
"count": 16
},
{
"tag": "every",
"count": 16
},
{
"tag": "farmacia",
"count": 16
},
{
"tag": "general",
"count": 16
},
{
"tag": "high",
"count": 16
},
{
"tag": "innovative",
"count": 16
},
{
"tag": "language",
"count": 16
},
{
"tag": "leading",
"count": 16
},
{
"tag": "market",
"count": 16
},
{
"tag": "mercado",
"count": 16
},
{
"tag": "part",
"count": 16
},
{
"tag": "product",
"count": 16
},
{
"tag": "project",
"count": 16
},
{
"tag": "relevant",
"count": 16
},
{
"tag": "systems",
"count": 16
},
{
"tag": "accordance",
"count": 15
},
{
"tag": "appropriate",
"count": 15
},
{
"tag": "area",
"count": 15
},
{
"tag": "búsqueda",
"count": 15
},
{
"tag": "care",
"count": 15
},
{
"tag": "cargo",
"count": 15
},
{
"tag": "carrera",
"count": 15
},
{
"tag": "compañía",
"count": 15
},
{
"tag": "computer",
"count": 15
},
{
"tag": "conduct",
"count": 15
},
{
"tag": "conocimiento",
"count": 15
},
{
"tag": "customer",
"count": 15
},
{
"tag": "customers",
"count": 15
},
{
"tag": "documentation",
"count": 15
},
{
"tag": "excellent",
"count": 15
},
{
"tag": "external",
"count": 15
},
{
"tag": "gcp",
"count": 15
},
{
"tag": "gender",
"count": 15
},
{
"tag": "help",
"count": 15
},
{
"tag": "issues",
"count": 15
},
{
"tag": "key",
"count": 15
},
{
"tag": "make",
"count": 15
},
{
"tag": "minimum",
"count": 15
},
{
"tag": "offer",
"count": 15
},
{
"tag": "origin",
"count": 15
},
{
"tag": "protocol",
"count": 15
},
{
"tag": "provides",
"count": 15
},
{
"tag": "realizar",
"count": 15
},
{
"tag": "reports",
"count": 15
},
{
"tag": "sales",
"count": 15
},
{
"tag": "science",
"count": 15
},
{
"tag": "sops",
"count": 15
},
{
"tag": "trial",
"count": 15
},
{
"tag": "trials",
"count": 15
},
{
"tag": "understanding",
"count": 15
},
{
"tag": "vida",
"count": 15
},
{
"tag": "what",
"count": 15
},
{
"tag": "actions",
"count": 14
},
{
"tag": "age",
"count": 14
},
{
"tag": "analysis",
"count": 14
},
{
"tag": "argentina",
"count": 14
},
{
"tag": "años experiencia",
"count": 14
},
{
"tag": "color",
"count": 14
},
{
"tag": "diverse",
"count": 14
},
{
"tag": "effective",
"count": 14
},
{
"tag": "eg",
"count": 14
},
{
"tag": "employer",
"count": 14
},
{
"tag": "equal opportunity",
"count": 14
},
{
"tag": "follow",
"count": 14
},
{
"tag": "habilidades",
"count": 14
},
{
"tag": "importante",
"count": 14
},
{
"tag": "national",
"count": 14
},
{
"tag": "ofrecemos",
"count": 14
},
{
"tag": "preferred",
"count": 14
},
{
"tag": "regional",
"count": 14
},
{
"tag": "tareas",
"count": 14
},
{
"tag": "verbal",
"count": 14
},
{
"tag": "áreas",
"count": 14
},
{
"tag": "able",
"count": 13
},
{
"tag": "actividades",
"count": 13
},
{
"tag": "aires",
"count": 13
},
{
"tag": "análisis",
"count": 13
},
{
"tag": "both",
"count": 13
},
{
"tag": "buenos",
"count": 13
},
{
"tag": "buenos aires",
"count": 13
},
{
"tag": "capacidad",
"count": 13
},
{
"tag": "clientes",
"count": 13
},
{
"tag": "clinical trials",
"count": 13
},
{
"tag": "communication skills",
"count": 13
},
{
"tag": "core",
"count": 13
},
{
"tag": "duties",
"count": 13
},
{
"tag": "essential",
"count": 13
},
{
"tag": "farmacéutica",
"count": 13
},
{
"tag": "flexible",
"count": 13
},
{
"tag": "full time",
"count": 13
},
{
"tag": "gestión",
"count": 13
},
{
"tag": "grow",
"count": 13
},
{
"tag": "growth",
"count": 13
},
{
"tag": "información",
"count": 13
},
{
"tag": "location",
"count": 13
},
{
"tag": "manejo",
"count": 13
},
{
"tag": "medicamentos",
"count": 13
},
{
"tag": "necesidades",
"count": 13
},
{
"tag": "necessary",
"count": 13
},
{
"tag": "participate",
"count": 13
},
{
"tag": "plans",
"count": 13
},
{
"tag": "potential",
"count": 13
},
{
"tag": "proceso",
"count": 13
},
{
"tag": "providing",
"count": 13
},
{
"tag": "relationships",
"count": 13
},
{
"tag": "report",
"count": 13
},
{
"tag": "service",
"count": 13
},
{
"tag": "solving",
"count": 13
},
{
"tag": "specialist",
"count": 13
},
{
"tag": "staff",
"count": 13
},
{
"tag": "supporting",
"count": 13
},
{
"tag": "tasks",
"count": 13
},
{
"tag": "technical",
"count": 13
},
{
"tag": "together",
"count": 13
},
{
"tag": "track",
"count": 13
},
{
"tag": "través",
"count": 13
},
{
"tag": "without",
"count": 13
},
{
"tag": "access",
"count": 12
},
{
"tag": "administración",
"count": 12
},
{
"tag": "advanced",
"count": 12
},
{
"tag": "around",
"count": 12
},
{
"tag": "building",
"count": 12
},
{
"tag": "buscamos",
"count": 12
},
{
"tag": "cross",
"count": 12
},
{
"tag": "documentación",
"count": 12
},
{
"tag": "documents",
"count": 12
},
{
"tag": "ensuring",
"count": 12
},
{
"tag": "excellence",
"count": 12
},
{
"tag": "experienced",
"count": 12
},
{
"tag": "industria",
"count": 12
},
{
"tag": "líder",
"count": 12
},
{
"tag": "manage",
"count": 12
},
{
"tag": "mantener",
"count": 12
},
{
"tag": "marital",
"count": 12
},
{
"tag": "members",
"count": 12
},
{
"tag": "negocio",
"count": 12
},
{
"tag": "participar",
"count": 12
},
{
"tag": "point",
"count": 12
},
{
"tag": "policies",
"count": 12
},
{
"tag": "posición",
"count": 12
},
{
"tag": "profesionales",
"count": 12
},
{
"tag": "reviews",
"count": 12
},
{
"tag": "santiago",
"count": 12
},
{
"tag": "sap",
"count": 12
},
{
"tag": "scientific",
"count": 12
},
{
"tag": "ser",
"count": 12
},
{
"tag": "soporte",
"count": 12
},
{
"tag": "stakeholders",
"count": 12
},
{
"tag": "standard",
"count": 12
},
{
"tag": "standards",
"count": 12
},
{
"tag": "success",
"count": 12
},
{
"tag": "technology",
"count": 12
},
{
"tag": "timely",
"count": 12
},
{
"tag": "veteran",
"count": 12
},
{
"tag": "approval",
"count": 11
},
{
"tag": "best",
"count": 11
},
{
"tag": "build",
"count": 11
},
{
"tag": "cada",
"count": 11
},
{
"tag": "carreras",
"count": 11
},
{
"tag": "cliente",
"count": 11
},
{
"tag": "commercial",
"count": 11
},
{
"tag": "companies",
"count": 11
},
{
"tag": "completion",
"count": 11
},
{
"tag": "completo",
"count": 11
},
{
"tag": "condition",
"count": 11
},
{
"tag": "contacto",
"count": 11
},
{
"tag": "contract",
"count": 11
},
{
"tag": "coordinar",
"count": 11
},
{
"tag": "coordination",
"count": 11
},
{
"tag": "current",
"count": 11
},
{
"tag": "dentro",
"count": 11
},
{
"tag": "develops",
"count": 11
},
{
"tag": "diferentes",
"count": 11
},
{
"tag": "diversity",
"count": 11
},
{
"tag": "effectively",
"count": 11
},
{
"tag": "employee",
"count": 11
},
{
"tag": "equipos",
"count": 11
},
{
"tag": "farmacéutico",
"count": 11
},
{
"tag": "fluent",
"count": 11
},
{
"tag": "gender identity",
"count": 11
},
{
"tag": "guidelines",
"count": 11
},
{
"tag": "identity",
"count": 11
},
{
"tag": "implementation",
"count": 11
},
{
"tag": "important",
"count": 11
},
{
"tag": "improve",
"count": 11
},
{
"tag": "improvement",
"count": 11
},
{
"tag": "individuals",
"count": 11
},
{
"tag": "innovation",
"count": 11
},
{
"tag": "interpersonal",
"count": 11
},
{
"tag": "laboratorio",
"count": 11
},
{
"tag": "marital status",
"count": 11
},
{
"tag": "medicines",
"count": 11
},
{
"tag": "menos",
"count": 11
},
{
"tag": "next",
"count": 11
},
{
"tag": "oportunidades",
"count": 11
},
{
"tag": "oral",
"count": 11
},
{
"tag": "organización",
"count": 11
},
{
"tag": "please",
"count": 11
},
{
"tag": "portfolio",
"count": 11
},
{
"tag": "practices",
"count": 11
},
{
"tag": "problem",
"count": 11
},
{
"tag": "problem solving",
"count": 11
},
{
"tag": "software",
"count": 11
},
{
"tag": "strategy",
"count": 11
},
{
"tag": "team members",
"count": 11
},
{
"tag": "todas",
"count": 11
},
{
"tag": "trabajo equipo",
"count": 11
},
{
"tag": "written communication",
"count": 11
},
{
"tag": "action",
"count": 10
},
{
"tag": "afines",
"count": 10
},
{
"tag": "application",
"count": 10
},
{
"tag": "available",
"count": 10
},
{
"tag": "bachelor",
"count": 10
},
{
"tag": "case",
"count": 10
},
{
"tag": "client",
"count": 10
},
{
"tag": "collaboration",
"count": 10
},
{
"tag": "colleagues",
"count": 10
},
{
"tag": "color religion",
"count": 10
},
{
"tag": "comercial",
"count": 10
},
{
"tag": "company description",
"count": 10
},
{
"tag": "complex",
"count": 10
},
{
"tag": "critical",
"count": 10
},
{
"tag": "cro",
"count": 10
},
{
"tag": "cross functional",
"count": 10
},
{
"tag": "equal employment",
"count": 10
},
{
"tag": "excluyente",
"count": 10
},
{
"tag": "execution",
"count": 10
},
{
"tag": "expertise",
"count": 10
},
{
"tag": "funciones",
"count": 10
},
{
"tag": "goals",
"count": 10
},
{
"tag": "ich",
"count": 10
},
{
"tag": "laboratorios",
"count": 10
},
{
"tag": "liderar",
"count": 10
},
{
"tag": "maintaining",
"count": 10
},
{
"tag": "mantenimiento",
"count": 10
},
{
"tag": "meet",
"count": 10
},
{
"tag": "meetings",
"count": 10
},
{
"tag": "microsoft",
"count": 10
},
{
"tag": "mission",
"count": 10
},
{
"tag": "multiple",
"count": 10
},
{
"tag": "mundo",
"count": 10
},
{
"tag": "médicos",
"count": 10
},
{
"tag": "need",
"count": 10
},
{
"tag": "needed",
"count": 10
},
{
"tag": "objectives",
"count": 10
},
{
"tag": "ongoing",
"count": 10
},
{
"tag": "open",
"count": 10
},
{
"tag": "operating",
"count": 10
},
{
"tag": "operating procedures",
"count": 10
},
{
"tag": "organizational",
"count": 10
},
{
"tag": "orientación",
"count": 10
},
{
"tag": "pacientes",
"count": 10
},
{
"tag": "perfil",
"count": 10
},
{
"tag": "persona",
"count": 10
},
{
"tag": "personas",
"count": 10
},
{
"tag": "policy",
"count": 10
},
{
"tag": "positions",
"count": 10
},
{
"tag": "primary",
"count": 10
},
{
"tag": "prior",
"count": 10
},
{
"tag": "professional",
"count": 10
},
{
"tag": "recruitment",
"count": 10
},
{
"tag": "regarding",
"count": 10
},
{
"tag": "regular",
"count": 10
},
{
"tag": "relaciones",
"count": 10
},
{
"tag": "resolution",
"count": 10
},
{
"tag": "resources",
"count": 10
},
{
"tag": "sciences",
"count": 10
},
{
"tag": "seguimiento",
"count": 10
},
{
"tag": "servicios",
"count": 10
},
{
"tag": "sex",
"count": 10
},
{
"tag": "strategic",
"count": 10
},
{
"tag": "successful",
"count": 10
},
{
"tag": "superior",
"count": 10
},
{
"tag": "tiempo",
"count": 10
},
{
"tag": "tools",
"count": 10
},
{
"tag": "tracking",
"count": 10
},
{
"tag": "university",
"count": 10
},
{
"tag": "visit",
"count": 10
},
{
"tag": "written communication skills",
"count": 10
},
{
"tag": "according",
"count": 9
},
{
"tag": "accuracy",
"count": 9
},
{
"tag": "administration",
"count": 9
},
{
"tag": "agency",
"count": 9
},
{
"tag": "assist",
"count": 9
},
{
"tag": "bachelor degree",
"count": 9
},
{
"tag": "bajo",
"count": 9
},
{
"tag": "basic",
"count": 9
},
{
"tag": "basis",
"count": 9
},
{
"tag": "bring",
"count": 9
},
{
"tag": "candidates",
"count": 9
},
{
"tag": "clinical trial",
"count": 9
},
{
"tag": "collection",
"count": 9
},
{
"tag": "come",
"count": 9
},
{
"tag": "country",
"count": 9
},
{
"tag": "cra",
"count": 9
},
{
"tag": "cultura",
"count": 9
},
{
"tag": "da",
"count": 9
},
{
"tag": "dedicated",
"count": 9
},
{
"tag": "delivery",
"count": 9
},
{
"tag": "demonstrated",
"count": 9
},
{
"tag": "department",
"count": 9
},
{
"tag": "diseases",
"count": 9
},
{
"tag": "disponibilidad",
"count": 9
},
{
"tag": "employment opportunity",
"count": 9
},
{
"tag": "empresas",
"count": 9
},
{
"tag": "ensures",
"count": 9
},
{
"tag": "equal employment opportunity",
"count": 9
},
{
"tag": "estrategias",
"count": 9
},
{
"tag": "every day",
"count": 9
},
{
"tag": "following",
"count": 9
},
{
"tag": "health care",
"count": 9
},
{
"tag": "herramientas",
"count": 9
},
{
"tag": "international",
"count": 9
},
{
"tag": "internos",
"count": 9
},
{
"tag": "investigator",
"count": 9
},
{
"tag": "laboral",
"count": 9
},
{
"tag": "leader",
"count": 9
},
{
"tag": "least",
"count": 9
},
{
"tag": "maintains",
"count": 9
},
{
"tag": "master",
"count": 9
},
{
"tag": "ms",
"count": 9
},
{
"tag": "oncology",
"count": 9
},
{
"tag": "oportunidad",
"count": 9
},
{
"tag": "order",
"count": 9
},
{
"tag": "others",
"count": 9
},
{
"tag": "parte",
"count": 9
},
{
"tag": "partner",
"count": 9
},
{
"tag": "prepare",
"count": 9
},
{
"tag": "presentation",
"count": 9
},
{
"tag": "protected",
"count": 9
},
{
"tag": "race color",
"count": 9
},
{
"tag": "range",
"count": 9
},
{
"tag": "related field",
"count": 9
},
{
"tag": "schedule",
"count": 9
},
{
"tag": "senior",
"count": 9
},
{
"tag": "set",
"count": 9
},
{
"tag": "spanish",
"count": 9
},
{
"tag": "sponsor",
"count": 9
},
{
"tag": "standard operating",
"count": 9
},
{
"tag": "standard operating procedures",
"count": 9
}
]
data_mercado = json.dumps(frecuencias, ensure_ascii=False)
| [
"json.dumps"
] | [((23918, 23961), 'json.dumps', 'json.dumps', (['frecuencias'], {'ensure_ascii': '(False)'}), '(frecuencias, ensure_ascii=False)\n', (23928, 23961), False, 'import json\n')] |
# Copyright (c) 2013, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import fmt_money
from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial
def execute(filters=None):
columns, data = _get_columns(filters), _get_data(filters)
return columns, _append_summary(data)
def _get_columns(filters):
def make_column(label, fieldname, width, fieldtype='Data', options='', hidden=False):
return {
'label': _(label),
'fieldname': fieldname,
'fieldtype': fieldtype,
'width': width,
'options': options
}
return [
make_column('Invoice No', 'invoice_no', 130, 'Link', 'Sales Invoice'),
make_column('Invoice Date', 'invoice_date', 130, 'Date'),
make_column('Item', 'item', 130, 'Link', 'Item'),
make_column('Item Group', 'item_group', 130, 'Link', 'Item Group'),
make_column('Description', 'description', 130),
make_column('Total VAT', 'total_vat', 130, 'Currency'),
make_column('Cost Center', 'cost_center', 130, 'Link', 'Cost Center'),
make_column('Sales Person', 'sales_person_name', 130, 'Data'),
make_column('Customer', 'customer', 130, 'Link', 'Customer'),
make_column('Customer Name', 'customer_name', 130, 'Data'),
make_column('Patient', 'patient', 130, 'Link', 'Patient'),
make_column('Patient Name', 'patient_name', 130, 'Data'),
make_column('Species', 'species', 130, 'Data')
]
def _get_clauses(filters):
clauses = list(filter(lambda x: x, [
'si.docstatus = 1',
'si.posting_date BETWEEN %(from_date)s AND %(to_date)s',
'sii.cost_center = %(cost_center)s' if filters.get('cost_center') else None,
'i.item_group = %(item_group)s' if filters.get('item_group') else None
]))
return 'WHERE {}'.format(' AND '.join(clauses))
def _get_sales_person_fields():
enable_pb = frappe.db.get_single_value('Vetcare Settings', 'enable_pb')
if enable_pb:
fields = [
'si.pb_sales_employee as sales_person',
'si.pb_sales_employee_name as sales_person_name'
]
else:
fields = [
'si.pb_sales_person as sales_person',
'si.pb_sales_person_name as sales_person_name'
]
return ', '.join(fields)
def _get_data(filters):
def make_data(row):
rate = _get_rate(row.get('taxes_and_charges'), cached_taxes_and_charges) / 100.00
row['total_vat'] = row.get('amount') * rate
row['species'] = species.get(row.get('patient'))
return row
data = frappe.db.sql("""
SELECT
si.name as invoice_no,
si.posting_date as invoice_date,
sii.item_code as item,
i.item_group,
sii.description,
sii.amount,
si.taxes_and_charges,
sii.cost_center,
si.customer,
si.customer_name,
si.patient,
si.patient_name,
{sales_person_fields}
FROM `tabSales Invoice Item` sii
INNER JOIN `tabSales Invoice` si ON si.name = sii.parent
INNER JOIN `tabItem` i ON i.name = sii.item_code
{clauses}
""".format(
clauses=_get_clauses(filters),
sales_person_fields=_get_sales_person_fields()
),
filters,
as_dict=1
)
cached_taxes_and_charges = {}
species = _get_species(list(set(map(lambda x: x['patient'], data))))
return list(map(make_data, data))
def _append_summary(data):
def make_data(val):
clients = compose(
count,
unique,
pluck('customer'),
lambda: val
)
animals = compose(
valmap(count),
groupby('species'),
lambda: val
)
return {
'total_val': reduce(lambda total, x: total + x.get('total_vat'), val, 0.00),
'animals': _get_dict_to_csv(animals()),
'clients': clients()
}
sales_persons = compose(
valmap(make_data),
groupby('sales_person_name'),
lambda: data
)()
data.append({'invoice_no': "'-'"}) # for report html (break loop)
for k, v in sales_persons.items():
sales_person = k or 'Not specified'
data.append({'invoice_no': "'Sales Person'", 'item': f"'{sales_person}'"})
data.append({'invoice_no': "'Total Amt'", 'item': f"'{fmt_money(v.get('total_val'))}'"})
data.append({'invoice_no': "'Clients'", 'item': f"'{v.get('clients')}'"})
data.append({'invoice_no': "'Animals'", 'item': f"'{v.get('animals')}'"})
data.append({})
return data
def _get_rate(template, cache=None):
if cache and template in cache:
return cache[template]
if template is None:
return 0.00
taxes_and_charges = frappe.get_all(
'Sales Taxes and Charges',
filters={'parent': template},
fields=['rate']
)
rate = 0.0
if taxes_and_charges:
rate = taxes_and_charges[0].get('rate')
if cache is not None:
cache[template] = rate
return rate
def _get_species(patients):
species = compose(
valmap(lambda x: x['vc_species']),
valmap(first),
groupby('name'),
lambda: frappe.get_all(
'Patient',
filters=[['name', 'in', patients]],
fields=['name', 'vc_species']
)
)
return species()
def _get_dict_to_csv(data, sep=', ', columns=None):
csv = []
for k, v in data.items():
column_name = k or "Others"
if columns and k in columns:
column_name = columns[k]
csv.append(f'{column_name}={v}')
return sep.join(csv)
| [
"frappe.db.get_single_value",
"toolz.curried.pluck",
"toolz.curried.groupby",
"frappe._",
"toolz.curried.valmap",
"frappe.get_all"
] | [((1889, 1948), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Vetcare Settings"""', '"""enable_pb"""'], {}), "('Vetcare Settings', 'enable_pb')\n", (1915, 1948), False, 'import frappe\n'), ((4319, 4411), 'frappe.get_all', 'frappe.get_all', (['"""Sales Taxes and Charges"""'], {'filters': "{'parent': template}", 'fields': "['rate']"}), "('Sales Taxes and Charges', filters={'parent': template},\n fields=['rate'])\n", (4333, 4411), False, 'import frappe\n'), ((4610, 4643), 'toolz.curried.valmap', 'valmap', (["(lambda x: x['vc_species'])"], {}), "(lambda x: x['vc_species'])\n", (4616, 4643), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((4647, 4660), 'toolz.curried.valmap', 'valmap', (['first'], {}), '(first)\n', (4653, 4660), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((4664, 4679), 'toolz.curried.groupby', 'groupby', (['"""name"""'], {}), "('name')\n", (4671, 4679), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((569, 577), 'frappe._', '_', (['label'], {}), '(label)\n', (570, 577), False, 'from frappe import _\n'), ((3288, 3305), 'toolz.curried.pluck', 'pluck', (['"""customer"""'], {}), "('customer')\n", (3293, 3305), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((3350, 3363), 'toolz.curried.valmap', 'valmap', (['count'], {}), '(count)\n', (3356, 3363), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((3368, 3386), 'toolz.curried.groupby', 'groupby', (['"""species"""'], {}), "('species')\n", (3375, 3386), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((3597, 3614), 'toolz.curried.valmap', 'valmap', (['make_data'], {}), '(make_data)\n', (3603, 3614), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((3618, 3646), 'toolz.curried.groupby', 'groupby', (['"""sales_person_name"""'], {}), "('sales_person_name')\n", (3625, 3646), False, 'from toolz.curried import compose, groupby, valmap, first, reduce, unique, pluck, count, partial\n'), ((4691, 4788), 'frappe.get_all', 'frappe.get_all', (['"""Patient"""'], {'filters': "[['name', 'in', patients]]", 'fields': "['name', 'vc_species']"}), "('Patient', filters=[['name', 'in', patients]], fields=[\n 'name', 'vc_species'])\n", (4705, 4788), False, 'import frappe\n')] |
# -*- encoding: utf-8 -*-
from . import FixtureTest
class HideEarlyNursingHomeTest(FixtureTest):
def test_nursing_home_area(self):
import dsl
z, x, y = (15, 5237, 12667)
self.generate_fixtures(
# https://www.openstreetmap.org/way/267296981
dsl.way(267296981, dsl.tile_box(z, x, y), {
'addr:city': u'San Francisco',
'addr:country': u'US',
'addr:housenumber': u'1575',
'addr:postcode': u'94122',
'addr:state': u'CA',
'addr:street': u'7th Avenue',
'amenity': u'nursing_home',
'building': u'yes',
'height': u'5',
'name': (u'Kindred Transitional Care and Rehabilitation - '
u'Lawton'),
'phone': u'+1 (415) 566-1200',
'source': u'openstreetmap.org',
'website': u'http://www.lawtonhealthcare.com',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'id': 267296981,
'kind': u'nursing_home',
'min_zoom': 15,
})
| [
"dsl.tile_box"
] | [((316, 337), 'dsl.tile_box', 'dsl.tile_box', (['z', 'x', 'y'], {}), '(z, x, y)\n', (328, 337), False, 'import dsl\n')] |
"""
MIDI file reader and writer for Mido built on top of rawmidifile.
There is no official API in Mido for encoding and decoding meta
messages so I've had to use some internal functions.
"""
from rawmidifile import read_rawmidifile, write_rawmidifile
import mido
from mido.midifiles.meta import (build_meta_message,
_META_SPEC_BY_TYPE, UnknownMetaMessage)
def decode_msg(msg, delta=0):
if msg[0] == 0xff and len(msg) > 1:
# Meta message.
return build_meta_message(msg[1], msg[2:], delta)
else:
return mido.Message.from_bytes(msg, delta)
def decode_track(track):
return [decode_msg(msg, delta) for (delta, msg) in track]
def encode_msg(msg):
    # UnknownMetaMessage must be handled before the generic meta branch:
    # it is also a meta message (is_meta is True) but has no entry in _META_SPEC_BY_TYPE.
    if isinstance(msg, UnknownMetaMessage):
        msg_bytes = bytes([0xff, msg.type_byte]) + bytes(msg.data)
    elif msg.is_meta:
        spec = _META_SPEC_BY_TYPE[msg.type]
        data = spec.encode(msg)
        msg_bytes = bytes([0xff, spec.type_byte]) + bytes(data)
    else:
        msg_bytes = bytes(msg.bytes())
    return (msg.time, msg_bytes)
def encode_track(track):
    # encode_msg returns a (delta, bytes) tuple, mirroring what decode_track consumes
    return [encode_msg(msg) for msg in track]
def read_midifile(infile):
mid = read_rawmidifile(infile)
mid = mid.copy()
mid['tracks'] = [decode_track(track) for track in mid['tracks']]
return mid
def write_midifile(infile, tracks=(), format=1, resolution=240):
mid = {
'format': format,
'resolution': resolution,
'tracks': [encode_track(track) for track in tracks],
}
write_rawmidifile(infile, **mid)
| [
"mido.Message.from_bytes",
"rawmidifile.write_rawmidifile",
"mido.midifiles.meta.build_meta_message",
"rawmidifile.read_rawmidifile"
] | [((1193, 1217), 'rawmidifile.read_rawmidifile', 'read_rawmidifile', (['infile'], {}), '(infile)\n', (1209, 1217), False, 'from rawmidifile import read_rawmidifile, write_rawmidifile\n'), ((1534, 1566), 'rawmidifile.write_rawmidifile', 'write_rawmidifile', (['infile'], {}), '(infile, **mid)\n', (1551, 1566), False, 'from rawmidifile import read_rawmidifile, write_rawmidifile\n'), ((503, 545), 'mido.midifiles.meta.build_meta_message', 'build_meta_message', (['msg[1]', 'msg[2:]', 'delta'], {}), '(msg[1], msg[2:], delta)\n', (521, 545), False, 'from mido.midifiles.meta import build_meta_message, _META_SPEC_BY_TYPE, UnknownMetaMessage\n'), ((571, 606), 'mido.Message.from_bytes', 'mido.Message.from_bytes', (['msg', 'delta'], {}), '(msg, delta)\n', (594, 606), False, 'import mido\n')] |
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the scalar type info objects"""
import os
import textwrap
import CommonEnvironment
from CommonEnvironment import Interface
from Plugins.SharedLibraryTestsPluginImpl.TypeInfo import TypeInfo
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class _ScalarTypeInfo(TypeInfo):
"""Functionality common to all scalars"""
# ----------------------------------------------------------------------
# |
# | Public Properties
# |
# ----------------------------------------------------------------------
@Interface.abstractproperty
def CType(self):
"""C type"""
raise Exception("Abstract property")
# ----------------------------------------------------------------------
# |
# | Public Methods
# |
# ----------------------------------------------------------------------
def __init__(
self,
*args,
member_type=None,
**kwargs
):
if member_type is None:
return
super(_ScalarTypeInfo, self).__init__(*args, **kwargs)
self.RequiresOptionalType = self.IsOptional and self.TypeName not in ["float", "double"]
# ----------------------------------------------------------------------
@Interface.override
def GetTransformInputArgs(
self,
input_name="input",
):
if self.RequiresOptionalType:
return "Microsoft::Featurizer::Traits<typename Microsoft::Featurizer::Traits<{cpp_type}>::nullable_type>::IsNull({input_name}) ? nullptr : &Microsoft::Featurizer::Traits<typename Microsoft::Featurizer::Traits<{cpp_type}>::nullable_type>::GetNullableValue({input_name})".format(
cpp_type=self.CppType,
input_name=input_name,
)
return input_name
# ----------------------------------------------------------------------
@Interface.override
def GetTransformInputBufferArgs(
self,
input_name='input',
):
if self.RequiresOptionalType:
raise NotImplementedError("Not implemented yet")
return "{name}.data(), {name}.size()".format(
name=input_name,
)
# ----------------------------------------------------------------------
@Interface.override
def GetOutputInfo(
self,
invocation_template,
result_name="result",
):
result_name = "{}_value".format(result_name)
if self.RequiresOptionalType:
vector_type = "nonstd::optional<{}>".format(self.CppType)
local_type = "{} *".format(self.CppType)
statement = "{name} ? std::move(*{name}) : nonstd::optional<{type}>()".format(
type=self.CppType,
name=result_name,
)
else:
vector_type = self.CppType
local_type = self.CppType
if self.TypeName == "bool":
# vector<bool> doesn't support `emplace_back` on older compilers
statement = result_name
else:
statement = "std::move({})".format(result_name)
return self.Result(
vector_type,
[self.Type(local_type, result_name)],
invocation_template.format(statement),
)
# ----------------------------------------------------------------------
@Interface.staticderived
class Int8TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int8")
CppType = Interface.DerivedProperty("std::int8_t")
CType = Interface.DerivedProperty("int8_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int16TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int16")
CppType = Interface.DerivedProperty("std::int16_t")
CType = Interface.DerivedProperty("int16_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int32TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int32")
CppType = Interface.DerivedProperty("std::int32_t")
CType = Interface.DerivedProperty("int32_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int64TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("int64")
CppType = Interface.DerivedProperty("std::int64_t")
CType = Interface.DerivedProperty("int64_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt8TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint8")
CppType = Interface.DerivedProperty("std::uint8_t")
CType = Interface.DerivedProperty("uint8_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt16TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint16")
CppType = Interface.DerivedProperty("std::uint16_t")
CType = Interface.DerivedProperty("uint16_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt32TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint32")
CppType = Interface.DerivedProperty("std::uint32_t")
CType = Interface.DerivedProperty("uint32_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt64TypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("uint64")
CppType = Interface.DerivedProperty("std::uint64_t")
CType = Interface.DerivedProperty("uint64_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class FloatTypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("float")
CppType = Interface.DerivedProperty("std::float_t")
CType = Interface.DerivedProperty("float")
# ----------------------------------------------------------------------
@Interface.staticderived
class DoubleTypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("double")
CppType = Interface.DerivedProperty("std::double_t")
CType = Interface.DerivedProperty("double")
# ----------------------------------------------------------------------
@Interface.staticderived
class BoolTypeInfo(_ScalarTypeInfo):
TypeName = Interface.DerivedProperty("bool")
CppType = Interface.DerivedProperty("bool")
CType = Interface.DerivedProperty("bool")
| [
"CommonEnvironment.Interface.DerivedProperty",
"CommonEnvironment.ThisFullpath",
"os.path.split"
] | [((579, 611), 'CommonEnvironment.ThisFullpath', 'CommonEnvironment.ThisFullpath', ([], {}), '()\n', (609, 611), False, 'import CommonEnvironment\n'), ((659, 690), 'os.path.split', 'os.path.split', (['_script_fullpath'], {}), '(_script_fullpath)\n', (672, 690), False, 'import os\n'), ((4137, 4170), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int8"""'], {}), "('int8')\n", (4162, 4170), False, 'from CommonEnvironment import Interface\n'), ((4218, 4258), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::int8_t"""'], {}), "('std::int8_t')\n", (4243, 4258), False, 'from CommonEnvironment import Interface\n'), ((4306, 4341), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int8_t"""'], {}), "('int8_t')\n", (4331, 4341), False, 'from CommonEnvironment import Interface\n'), ((4532, 4566), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int16"""'], {}), "('int16')\n", (4557, 4566), False, 'from CommonEnvironment import Interface\n'), ((4614, 4655), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::int16_t"""'], {}), "('std::int16_t')\n", (4639, 4655), False, 'from CommonEnvironment import Interface\n'), ((4703, 4739), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int16_t"""'], {}), "('int16_t')\n", (4728, 4739), False, 'from CommonEnvironment import Interface\n'), ((4930, 4964), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int32"""'], {}), "('int32')\n", (4955, 4964), False, 'from CommonEnvironment import Interface\n'), ((5012, 5053), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::int32_t"""'], {}), "('std::int32_t')\n", (5037, 5053), False, 'from CommonEnvironment import Interface\n'), ((5101, 5137), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int32_t"""'], {}), "('int32_t')\n", (5126, 5137), False, 'from CommonEnvironment import Interface\n'), ((5328, 5362), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int64"""'], {}), "('int64')\n", (5353, 5362), False, 'from CommonEnvironment import Interface\n'), ((5410, 5451), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::int64_t"""'], {}), "('std::int64_t')\n", (5435, 5451), False, 'from CommonEnvironment import Interface\n'), ((5499, 5535), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""int64_t"""'], {}), "('int64_t')\n", (5524, 5535), False, 'from CommonEnvironment import Interface\n'), ((5726, 5760), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint8"""'], {}), "('uint8')\n", (5751, 5760), False, 'from CommonEnvironment import Interface\n'), ((5808, 5849), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::uint8_t"""'], {}), "('std::uint8_t')\n", (5833, 5849), False, 'from CommonEnvironment import Interface\n'), ((5897, 5933), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint8_t"""'], {}), "('uint8_t')\n", (5922, 5933), False, 'from CommonEnvironment import Interface\n'), ((6125, 6160), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint16"""'], {}), "('uint16')\n", (6150, 6160), False, 'from CommonEnvironment import Interface\n'), ((6208, 6250), 
'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::uint16_t"""'], {}), "('std::uint16_t')\n", (6233, 6250), False, 'from CommonEnvironment import Interface\n'), ((6298, 6335), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint16_t"""'], {}), "('uint16_t')\n", (6323, 6335), False, 'from CommonEnvironment import Interface\n'), ((6527, 6562), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint32"""'], {}), "('uint32')\n", (6552, 6562), False, 'from CommonEnvironment import Interface\n'), ((6610, 6652), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::uint32_t"""'], {}), "('std::uint32_t')\n", (6635, 6652), False, 'from CommonEnvironment import Interface\n'), ((6700, 6737), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint32_t"""'], {}), "('uint32_t')\n", (6725, 6737), False, 'from CommonEnvironment import Interface\n'), ((6929, 6964), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint64"""'], {}), "('uint64')\n", (6954, 6964), False, 'from CommonEnvironment import Interface\n'), ((7012, 7054), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::uint64_t"""'], {}), "('std::uint64_t')\n", (7037, 7054), False, 'from CommonEnvironment import Interface\n'), ((7102, 7139), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""uint64_t"""'], {}), "('uint64_t')\n", (7127, 7139), False, 'from CommonEnvironment import Interface\n'), ((7330, 7364), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""float"""'], {}), "('float')\n", (7355, 7364), False, 'from CommonEnvironment import Interface\n'), ((7412, 7453), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::float_t"""'], {}), "('std::float_t')\n", (7437, 7453), False, 'from CommonEnvironment import Interface\n'), ((7501, 7535), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""float"""'], {}), "('float')\n", (7526, 7535), False, 'from CommonEnvironment import Interface\n'), ((7727, 7762), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""double"""'], {}), "('double')\n", (7752, 7762), False, 'from CommonEnvironment import Interface\n'), ((7810, 7852), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""std::double_t"""'], {}), "('std::double_t')\n", (7835, 7852), False, 'from CommonEnvironment import Interface\n'), ((7900, 7935), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""double"""'], {}), "('double')\n", (7925, 7935), False, 'from CommonEnvironment import Interface\n'), ((8125, 8158), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""bool"""'], {}), "('bool')\n", (8150, 8158), False, 'from CommonEnvironment import Interface\n'), ((8206, 8239), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""bool"""'], {}), "('bool')\n", (8231, 8239), False, 'from CommonEnvironment import Interface\n'), ((8287, 8320), 'CommonEnvironment.Interface.DerivedProperty', 'Interface.DerivedProperty', (['"""bool"""'], {}), "('bool')\n", (8312, 8320), False, 'from CommonEnvironment import Interface\n')] |
#!/usr/bin/env python3
### block that grows in fiber direction triggered by fiber stretch and remodels to softer material
# TODO: Somehow, this does not converge quadratically at the end (seems irrespective of remodeling,
# but likely to be attributed to the growth in fiber direction) ---> check linearization terms!
# only one hex element in this testcase - cannot be run on multiple cores!
import ambit
import sys, traceback
import numpy as np
from pathlib import Path
import results_check
def main():
basepath = str(Path(__file__).parent.absolute())
IO_PARAMS = {'problem_type' : 'solid',
'mesh_domain' : ''+basepath+'/input/blockhex_domain.xdmf',
'mesh_boundary' : ''+basepath+'/input/blockhex_boundary.xdmf',
'fiber_data' : {'nodal' : [''+basepath+'/input/fib1_blockhex.txt',''+basepath+'/input/fib2_blockhex.txt']},
'write_results_every' : -999,
'output_path' : ''+basepath+'/tmp/',
'results_to_write' : ['displacement','theta','fiberstretch','fiberstretch_e','phi_remod'],
'simname' : 'solid_growthremodeling_fiberstretch'}
SOLVER_PARAMS_SOLID = {'solve_type' : 'direct',
'tol_res' : 1.0e-8,
'tol_inc' : 1.0e-8}
TIME_PARAMS_SOLID = {'maxtime' : 1.0,
'numstep' : 20,
'timint' : 'static'}
FEM_PARAMS = {'order_disp' : 1,
'order_pres' : 1,
'quad_degree' : 3,
'incompressible_2field' : False}
MATERIALS = {'MAT1' : {'neohooke_dev' : {'mu' : 10.},
'ogden_vol' : {'kappa' : 10./(1.-2.*0.49)},
'growth' : {'growth_dir' : 'isotropic', # isotropic, fiber, crossfiber, radial
'growth_trig' : 'fibstretch', # fibstretch, volstress, prescribed
'growth_thres' : 1.15,
'thetamax' : 3.0,
'thetamin' : 1.0,
'tau_gr' : 1.0,
'gamma_gr' : 1.72,
'tau_gr_rev' : 10000.0,
'gamma_gr_rev' : 1.0,
'remodeling_mat' : {'neohooke_dev' : {'mu' : 3.},
'ogden_vol' : {'kappa' : 3./(1.-2.*0.49)}}}}}
# define your load curves here (syntax: tcX refers to curve X, to be used in BC_DICT key 'curve' : [X,0,0], or 'curve' : X)
class time_curves():
def tc1(self, t):
pmax = 10.0
return pmax*t/TIME_PARAMS_SOLID['maxtime']
BC_DICT = { 'dirichlet' : [{'id' : [1], 'dir' : 'x', 'val' : 0.},
{'id' : [2], 'dir' : 'y', 'val' : 0.},
{'id' : [3], 'dir' : 'z', 'val' : 0.}],
'neumann' : [{'type' : 'pk1', 'id' : [4], 'dir' : 'xyz', 'curve' : [1,0,0]}] }
# problem setup
problem = ambit.Ambit(IO_PARAMS, TIME_PARAMS_SOLID, SOLVER_PARAMS_SOLID, FEM_PARAMS, MATERIALS, BC_DICT, time_curves=time_curves())
# solve time-dependent problem
problem.solve_problem()
# --- results check
tol = 1.0e-6
check_node = []
check_node.append(np.array([1.0, 1.0, 1.0]))
u_corr = np.zeros(3*len(check_node))
## correct results
u_corr[0] = 1.0812823521095760E+00 # x
u_corr[1] = -1.4360291810029382E-01 # y
u_corr[2] = -1.4360291810029457E-01 # z
check1 = results_check.results_check_node(problem.mp.u, check_node, u_corr, problem.mp.V_u, problem.mp.comm, tol=tol, nm='u')
success = results_check.success_check([check1], problem.mp.comm)
return success
if __name__ == "__main__":
success = False
try:
success = main()
except:
print(traceback.format_exc())
if success:
sys.exit(0)
else:
sys.exit(1)
| [
"traceback.format_exc",
"pathlib.Path",
"results_check.results_check_node",
"numpy.array",
"sys.exit",
"results_check.success_check"
] | [((4321, 4442), 'results_check.results_check_node', 'results_check.results_check_node', (['problem.mp.u', 'check_node', 'u_corr', 'problem.mp.V_u', 'problem.mp.comm'], {'tol': 'tol', 'nm': '"""u"""'}), "(problem.mp.u, check_node, u_corr, problem.\n mp.V_u, problem.mp.comm, tol=tol, nm='u')\n", (4353, 4442), False, 'import results_check\n'), ((4452, 4506), 'results_check.success_check', 'results_check.success_check', (['[check1]', 'problem.mp.comm'], {}), '([check1], problem.mp.comm)\n', (4479, 4506), False, 'import results_check\n'), ((4079, 4104), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4087, 4104), True, 'import numpy as np\n'), ((4704, 4715), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4712, 4715), False, 'import sys, traceback\n'), ((4734, 4745), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4742, 4745), False, 'import sys, traceback\n'), ((4651, 4673), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4671, 4673), False, 'import sys, traceback\n'), ((536, 550), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (540, 550), False, 'from pathlib import Path\n')] |
import math
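# Prints sin(15 degrees) + (e^x - 5x)/sqrt(x^2 + 1) - ln(3x) for an integer x read from stdin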
x = int(input())
sinn = math.sin(math.radians(15))
son = math.exp(x)-5*x
mon = math.pow((x**2+1),0.5)
lnn = math.log(3*x)
print("%.10f"%(sinn+son/mon-lnn))
| [
"math.pow",
"math.exp",
"math.radians",
"math.log"
] | [((93, 118), 'math.pow', 'math.pow', (['(x ** 2 + 1)', '(0.5)'], {}), '(x ** 2 + 1, 0.5)\n', (101, 118), False, 'import math\n'), ((122, 137), 'math.log', 'math.log', (['(3 * x)'], {}), '(3 * x)\n', (130, 137), False, 'import math\n'), ((47, 63), 'math.radians', 'math.radians', (['(15)'], {}), '(15)\n', (59, 63), False, 'import math\n'), ((71, 82), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (79, 82), False, 'import math\n')] |
import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_dics, apply_dics_csd
from config import dics_settings, fname, args
from megset.config import fname as megset_fname
from megset.config import freq_range
subject = args.subject
print(f'Running analsis for subject {subject}')
mne.set_log_level(False) # Shhh
###############################################################################
# Load the data
###############################################################################
epochs = mne.read_epochs(megset_fname.epochs_long(subject=subject))
fwd = mne.read_forward_solution(megset_fname.fwd(subject=subject))
dip = mne.read_dipole(megset_fname.ecd(subject=subject))
###############################################################################
# Sensor-level analysis for beamformer
###############################################################################
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make csd matrices
freqs = np.arange(*freq_range[subject])
csd = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-0.8, tmax=1.0, decim=5)
csd_baseline = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-0.8, tmax=0, decim=5)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = mne.time_frequency.csd_morlet(epochs, freqs, tmin=0.2, tmax=1.0, decim=5)
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
###############################################################################
# Compute dics solution and plot stc at dipole location
###############################################################################
dists = []
focs = []
ori_errors = []
for setting in dics_settings:
reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, real_filter, use_noise_cov, reduce_rank = setting
try:
if sensor_type == 'grad':
info = epochs_grad.info
elif sensor_type == 'mag':
info = epochs_mag.info
elif sensor_type == 'joint':
info = epochs_joint.info
else:
raise ValueError('Invalid sensor type: %s', sensor_type)
info_eq, fwd_eq, csd_eq = mne.channels.equalize_channels([info, fwd, csd])
filters = make_dics(info_eq, fwd_eq, csd_eq, reg=reg, pick_ori=pick_ori,
inversion=inversion, weight_norm=weight_norm,
noise_csd=csd_baseline if use_noise_cov else None,
normalize_fwd=normalize_fwd,
real_filter=real_filter, reduce_rank=reduce_rank)
# Compute source power
stc_baseline, _ = apply_dics_csd(csd_baseline, filters)
stc_power, _ = apply_dics_csd(csd_ers, filters)
# Normalize with baseline power.
stc_power /= stc_baseline
stc_power.data = np.log(stc_power.data)
peak_vertex, _ = stc_power.get_peak(vert_as_index=True)
# Compute distance between true and estimated source locations
pos = fwd['source_rr'][peak_vertex]
dist = np.linalg.norm(dip.pos - pos)
# Ratio between estimated peak activity and all estimated activity.
focality_score = stc_power.data[peak_vertex, 0] / stc_power.data.sum()
if pick_ori == 'max-power':
estimated_ori = filters['max_power_oris'][0][peak_vertex]
ori_error = np.rad2deg(np.arccos(estimated_ori @ dip.ori[0]))
if ori_error > 90:
ori_error = 180 - ori_error
else:
ori_error = np.nan
except Exception as e:
print(e)
dist = np.nan
focality_score = np.nan
ori_error = np.nan
print(setting, dist, focality_score, ori_error)
dists.append(dist)
focs.append(focality_score)
ori_errors.append(ori_error)
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
df = pd.DataFrame(dics_settings,
columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
'weight_norm', 'normalize_fwd', 'real_filter',
'use_noise_cov', 'reduce_rank'])
df['dist'] = dists
df['focality'] = focs
df['ori_error'] = ori_errors
df.to_csv(fname.dics_megset_results(subject=subject))
print('OK!')
| [
"numpy.arccos",
"mne.set_log_level",
"numpy.arange",
"megset.config.fname.fwd",
"megset.config.fname.ecd",
"mne.beamformer.apply_dics_csd",
"numpy.log",
"mne.channels.equalize_channels",
"numpy.linalg.norm",
"mne.time_frequency.csd_morlet",
"config.fname.dics_megset_results",
"pandas.DataFrame... | [((308, 332), 'mne.set_log_level', 'mne.set_log_level', (['(False)'], {}), '(False)\n', (325, 332), False, 'import mne\n'), ((1091, 1122), 'numpy.arange', 'np.arange', (['*freq_range[subject]'], {}), '(*freq_range[subject])\n', (1100, 1122), True, 'import numpy as np\n'), ((1129, 1203), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(-0.8)', 'tmax': '(1.0)', 'decim': '(5)'}), '(epochs, freqs, tmin=-0.8, tmax=1.0, decim=5)\n', (1158, 1203), False, 'import mne\n'), ((1219, 1291), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(-0.8)', 'tmax': '(0)', 'decim': '(5)'}), '(epochs, freqs, tmin=-0.8, tmax=0, decim=5)\n', (1248, 1291), False, 'import mne\n'), ((1360, 1433), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(0.2)', 'tmax': '(1.0)', 'decim': '(5)'}), '(epochs, freqs, tmin=0.2, tmax=1.0, decim=5)\n', (1389, 1433), False, 'import mne\n'), ((4110, 4281), 'pandas.DataFrame', 'pd.DataFrame', (['dics_settings'], {'columns': "['reg', 'sensor_type', 'pick_ori', 'inversion', 'weight_norm',\n 'normalize_fwd', 'real_filter', 'use_noise_cov', 'reduce_rank']"}), "(dics_settings, columns=['reg', 'sensor_type', 'pick_ori',\n 'inversion', 'weight_norm', 'normalize_fwd', 'real_filter',\n 'use_noise_cov', 'reduce_rank'])\n", (4122, 4281), True, 'import pandas as pd\n'), ((544, 585), 'megset.config.fname.epochs_long', 'megset_fname.epochs_long', ([], {'subject': 'subject'}), '(subject=subject)\n', (568, 585), True, 'from megset.config import fname as megset_fname\n'), ((619, 652), 'megset.config.fname.fwd', 'megset_fname.fwd', ([], {'subject': 'subject'}), '(subject=subject)\n', (635, 652), True, 'from megset.config import fname as megset_fname\n'), ((676, 709), 'megset.config.fname.ecd', 'megset_fname.ecd', ([], {'subject': 'subject'}), '(subject=subject)\n', (692, 709), True, 'from megset.config import fname as megset_fname\n'), ((4427, 4469), 'config.fname.dics_megset_results', 'fname.dics_megset_results', ([], {'subject': 'subject'}), '(subject=subject)\n', (4452, 4469), False, 'from config import dics_settings, fname, args\n'), ((2260, 2308), 'mne.channels.equalize_channels', 'mne.channels.equalize_channels', (['[info, fwd, csd]'], {}), '([info, fwd, csd])\n', (2290, 2308), False, 'import mne\n'), ((2327, 2579), 'mne.beamformer.make_dics', 'make_dics', (['info_eq', 'fwd_eq', 'csd_eq'], {'reg': 'reg', 'pick_ori': 'pick_ori', 'inversion': 'inversion', 'weight_norm': 'weight_norm', 'noise_csd': '(csd_baseline if use_noise_cov else None)', 'normalize_fwd': 'normalize_fwd', 'real_filter': 'real_filter', 'reduce_rank': 'reduce_rank'}), '(info_eq, fwd_eq, csd_eq, reg=reg, pick_ori=pick_ori, inversion=\n inversion, weight_norm=weight_norm, noise_csd=csd_baseline if\n use_noise_cov else None, normalize_fwd=normalize_fwd, real_filter=\n real_filter, reduce_rank=reduce_rank)\n', (2336, 2579), False, 'from mne.beamformer import make_dics, apply_dics_csd\n'), ((2736, 2773), 'mne.beamformer.apply_dics_csd', 'apply_dics_csd', (['csd_baseline', 'filters'], {}), '(csd_baseline, filters)\n', (2750, 2773), False, 'from mne.beamformer import make_dics, apply_dics_csd\n'), ((2797, 2829), 'mne.beamformer.apply_dics_csd', 'apply_dics_csd', (['csd_ers', 'filters'], {}), '(csd_ers, filters)\n', (2811, 2829), False, 'from mne.beamformer import make_dics, apply_dics_csd\n'), ((2931, 2953), 'numpy.log', 'np.log', (['stc_power.data'], {}), 
'(stc_power.data)\n', (2937, 2953), True, 'import numpy as np\n'), ((3150, 3179), 'numpy.linalg.norm', 'np.linalg.norm', (['(dip.pos - pos)'], {}), '(dip.pos - pos)\n', (3164, 3179), True, 'import numpy as np\n'), ((3478, 3515), 'numpy.arccos', 'np.arccos', (['(estimated_ori @ dip.ori[0])'], {}), '(estimated_ori @ dip.ori[0])\n', (3487, 3515), True, 'import numpy as np\n')] |
import csv
from sklearn.cluster import MiniBatchKMeans
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pickle
import numpy as np
import sklearn.metrics as metrics
from yellowbrick.cluster import InterclusterDistance
from scipy.optimize import curve_fit
import umap.umap_ as umap
from colorsys import hls_to_rgb
from pylab import *
from datetime import datetime
import os
import sys
import argparse
import scipy.stats as scist
from docx import Document
from feature_extraction import LemmaStemmerTokenizer
# Allow for larger CSV files
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt/10)
def get_clusters(selected_k, data_file, processed_file, centers, years, save_folder="", save=True):
"""
Parameters
----------
selected_k : selected number of clusters
data_file : pickle with raw data as list of dictionaries
processed_file : pickle with transformed data as array
centers : array. initial centroids from LDA. Can be initialized as 'k-means++'
years : list of strings. years for intracluster analysis
save_folder : string. directory to save result, the default is "".
save : boolean
Returns
-------
output : dictionary. Keys:
"yr_avg_cost": List of lists. Average funding by year for each cluster.
"yr_total_cost": List of lists. Total funding by year for each cluster.
"size": List. Size of each cluster.
"data_by_cluster": List of lists of dictionaries. Points in each cluster: [ [{Cluster1pt1}, {Cluster1pt2},...], [{Cluster2pt1}, {Cluster2pt2},...], ...]
"centroids": 10 x K array of cluster centroids,
"score": List. Silhouette score by cluster
"model": MiniBatchKMeans model
"labels": Cluster labels of data points (ordered)
"""
# Load data as list of dictionaries
data = pickle.load(open(data_file,"rb"))
# Transformed data
X_transformed = pickle.load(open(processed_file,"rb"))
# Perform mini batch k means
km = MiniBatchKMeans(n_clusters=selected_k, init=centers, verbose=0, max_no_improvement=None)
clusters = km.fit_predict(X_transformed)
scores = metrics.silhouette_samples(X_transformed, clusters)
# Output data
cluster_all = []
costs = []
yoy = []
size = []
mechanisms = []
for i in range(6): # initialization
mechanisms.append([])
MECH_NAMES = "R01", "U01", "R44", "U24", "R21", "U54"
for i in range(0,selected_k):
# indices of cluster k
cluster = [idx for idx, element in enumerate(clusters) if element == i]
# get points
cluster_data = [data[ind] for ind in cluster]
cluster_scores = [scores[ind] for ind in cluster]
for i in range(len(cluster_data)):
cluster_data[i]["score"] = cluster_scores[i]
cluster_all.append(cluster_data)
# calculate average cost and std
try:
average_cost = sum([item["award_amount"] for item in cluster_data])/len(cluster_data)
except:
average_cost = 0
costs.append(average_cost)
cost_trend = []
for year in years:
year_data = [data[ind]["award_amount"] for ind in cluster if data[ind]["year"] == year]
if len(year_data) == 0:
cost_trend.append(0)
else:
year_cost = sum(year_data) # /len(year_data)
cost_trend.append(year_cost)
yoy.append(cost_trend)
size.append(len(cluster))
# get number of awards per mechanism
if len(cluster_data) != 0:
for j in range(len(mechanisms)):
mech = len([ind for ind in cluster if data[ind]["mechanism"] == MECH_NAMES[j]])/len(cluster_data)
mechanisms[j].append(mech)
else:
for j in range(len(mechanisms)):
mechanisms[j].append(0)
# Get centroids
# Identify the top terms for each cluster, using the TF-IDF terms with the highest values in the centroid
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
terms = vectorizer.get_feature_names_out()
centroids = []
for i in range(selected_k):
centroid_list = []
for ind in order_centroids[i, :15]:
centroid_list.append(terms[ind])
centroids.append(centroid_list)
# Save centroids
if save:
centroid_file = open("{}/centroids".format(save_folder), "w", encoding='utf8')
for i in range(selected_k):
centroid_file.write("Cluster %d:" % i)
for ind in order_centroids[i, :15]:
centroid_file.write(" %s" % terms[ind])
centroid_file.write("\n")
centroid_file.close()
# get scores
score = metrics.silhouette_score(X_transformed, km.labels_)
output = {
"yr_avg_cost": costs, # Average award size by year by cluster
"yr_total_cost": yoy, # Total award size by year by cluster
"size": size, # Number of awards in each cluster
"data_by_cluster": cluster_all,
"centroids": centroids,
"score": score, # Silhouette score for
"model": km, # K-means model
"labels": clusters, # Ordered list of cluster number labels for each award
"mechanisms": mechanisms # List of lists: [r01, u01, r44, u24, r21, u54]. Each internal list has number of awards per mechanism by cluster
}
return output
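# Hypothetical usage sketch (illustrative values, not part of the original pipeline):
#   years = [str(y) for y in range(1985, 2021)]
#   result = get_clusters(30, "data/data.pkl", "data/processed-data.pkl", 'k-means++', years, save=False)
#   result["size"]           # number of awards per cluster
#   result["centroids"][0]   # top TF-IDF terms of cluster 0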
def umap_visualization(X_transformed, cluster_labels, silhouette_scores, sizes, save_folder=""):
#outlier_scores = sklearn.neighbors.LocalOutlierFactor(contamination=0.1).fit_predict(X_transformed)
#X_transformed = X_transformed[outlier_scores != -1]
#cluster_labels = cluster_labels[outlier_scores != -1]
# product = [silhouette_scores[i]*sizes[i] for i in range(len(sizes))]
top_clusters = sorted(range(len(silhouette_scores)), key=lambda i: silhouette_scores[i], reverse=True)[:9]
n_subset = len(cluster_labels)
selected_cells = np.random.choice(np.arange(X_transformed.shape[0]), size = n_subset, replace = False)
mapper = umap.UMAP(metric='hellinger', random_state=42).fit(X_transformed[selected_cells,:])
embedding = mapper.transform(X_transformed[selected_cells,:])
# Colors
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:olive', 'tab:cyan']
selected_colors = []
for point in selected_cells:
if cluster_labels[point] in top_clusters:
selected_colors.append(colors[top_clusters.index(cluster_labels[point])])
else:
selected_colors.append('tab:gray')
# Plot Clusters on UMAP
plt.figure()
plt.grid(visible=None)
plt.scatter(embedding[:, 0], embedding[:, 1], cmap='Spectral', s=5, c=selected_colors)
plt.gca().set_aspect('equal', 'datalim')
num_clust = len(np.unique(cluster_labels[selected_cells]))
#plt.colorbar(boundaries=np.arange(num_clust+1)-0.5).set_ticks(np.arange(num_clust))
plt.title('UMAP Projection of Awards, TF-IDF', fontsize=14)
plt.xlabel("UMAP 1")
plt.ylabel("UMAP 2")
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
plt.savefig('{}/umap.png'.format(save_folder))
def rainbow_color_stops(n=10, end=1, shade=0.9):
return [ hls_to_rgb(end * i/(n-1)*shade, 0.5*shade, 1*shade) for i in range(n) ]
def get_funding_projections(data):
# 1. Determine dimensions for plot
k = len(data["size"])
factors = []
for i in range(1, k+1):
if k / i == i:
factors.extend([i,i])
elif k % i == 0:
factors.append(i)
dim1, dim2 = factors[int(len(factors)/2)], factors[int(len(factors)/2-1)]
# 2. Create plot
fig, axs = plt.subplots(dim1, dim2, sharex='all', sharey='all')
# 3. Create hidden frame for shared labels
fig.add_subplot(111, frameon=False)
plt.grid(visible=None)
plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
plt.xlabel("Years from 1985")
plt.ylabel("Funding ($100 millions)")
# 4. Get projections
years_int = list(range(0,36))
projection = []
growth = []
bounds = []
for i in range(len(data["yr_total_cost"])):
popt, pcov = curve_fit(lambda t,a,b: a*np.exp(b*t), years_int, data["yr_total_cost"][i], p0=(4000, 0.1))
std = np.sqrt(np.diagonal(pcov))
        x = np.linspace(0, 36, 400)  # t = 36 corresponds to 2021 when counting years from 1985
# upper0 = popt[0]+1.96*std[0]
# lower0 = popt[0]-1.96*std[0]
upper1 = popt[1]+1.96*std[1]
lower1 = popt[1]-1.96*std[1]
ypred = [popt[0]*np.exp(popt[1]*point) for point in x] #-popt[0]
projection.append(ypred[-1])
growth.append(popt[1])
bounds.append([lower1, upper1])
# projection.append(0)
# growth.append(0)
# bounds.append([0,0])
# 5. Return 2021 projections and growth rate
return projection, growth, bounds
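# The fit above assumes exponential growth of yearly cluster funding,
#   funding(t) ~= a * exp(b * t), with t counted in years from 1985,
# so growth[i] is the fitted rate b for cluster i and bounds[i] is its ~95% CI
# (b - 1.96*std, b + 1.96*std). Hedged usage sketch:
#   projection, growth, bounds = get_funding_projections(data)
#   print(projection[0], growth[0], bounds[0])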
def viz_centroids(data):
model = data["model"]
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
plt.figure()
visualizer = InterclusterDistance(model, random_state=0)
visualizer.fit(X_transformed) # Fit the data to the visualizer
visualizer.show() # Finalize and render the figure
def predict_clusters(test_data, selected_k, model):
test_data = pickle.load(open(test_data,"rb"))
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
input_text = [item["text"] for item in test_data]
if len(input_text) == 0:
return [0 for i in range(0,selected_k)], 0
test_transformed = vectorizer.transform(input_text)
years = [str(i) for i in range(1985,2021)]
labels = model.predict(test_transformed)
# Output data
cluster_all = []
costs = []
yoy = []
size = []
for i in range(0,selected_k):
# indices of cluster k
cluster = [idx for idx, element in enumerate(labels) if element == i]
# get points
cluster_data = [test_data[ind] for ind in cluster]
cluster_all.append(cluster_data)
# calculate average cost and std
try:
average_cost = sum([item["award_amount"] for item in cluster_data])/len(cluster_data)
except:
average_cost = 0
costs.append(average_cost)
cost_trend = []
for year in years:
year_data = [test_data[ind]["award_amount"] for ind in cluster if test_data[ind]["year"] == year]
if len(year_data) == 0:
cost_trend.append(0)
else:
year_cost = sum(year_data)
cost_trend.append(year_cost)
yoy.append(cost_trend)
size.append(len(cluster))
return cluster_all, size
def get_best_cluster(selected_k, num_trials, centers, years, save_folder="", save=True):
scores = []
results = {}
print("Optimizing model...")
for i in range(num_trials):
# Generate clusters for a selected k
data = get_clusters(selected_k, "data/data.pkl", "data/processed-data.pkl", 'k-means++', years, save_folder, save=save)
j = 0
for thing in data["data_by_cluster"]:
for item in thing:
                try:
                    results[item["id"]].append(data["centroids"][j])
                except KeyError:
                    results[item["id"]] = [item["id"], item["title"], item["award_amount"], data["centroids"][j]]
j+=1
print("Trial {}: Score = {:.3f}".format(str(i+1), data["score"]))
scores.append(data["score"])
if data["score"] >= max(scores):
chosen = data
return chosen, scores
def get_citations(clusters):
"""
Parameters
----------
clusters : nested lists of dictionaries representing each award in a cluster.
Returns
-------
total_citations : list of total citations by cluster
total_papers : list of total papers by cluster
apts: average APT [0.9, ...]
lower: lower bound of 95% CI of average APT: "APT (lower - upper)" [0.85,...]
upper: upper bound of 95% CI of average APT [0.95,...] - "0.9 (0.85-0.95)"
"""
# Get clusters by project number
clusters_by_project = []
for cluster in clusters:
cluster = [item["project_number"] for item in cluster]
cluster = list(set(cluster)) # Remove duplicates
clusters_by_project.append(cluster)
# Get number of citations, apt, and publication year by paper
output = {}
with open("data/citations.csv", newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for i in range(1,len(raw_data)): # "rcr": float(raw_data[i][6]),
output[raw_data[i][0]] = {
"citations": int(raw_data[i][13]),
"apt": float(raw_data[i][11]),
"year": int(raw_data[i][1])}
# Get project number and year by paper
with open("data/publications.csv", newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for i in range(1,len(raw_data)):
if raw_data[i][1] in output.keys():
output[raw_data[i][1]]["project"] = raw_data[i][0]
# Calculate total number of citations, total number of papers, average RCR, average APT for each cluster
total_citations = []
total_papers = []
apts = []
apts_95 = []
lower = []
upper = []
total_availability = []
# rcrs = []
for cluster in clusters_by_project:
cluster_citations = []
# cluster_rcr = []
cluster_apt = []
num_papers = 0
availability = []
for idd in cluster:
papers = [output[key]["citations"] for key in output if output[key]["project"]==idd] # list of all papers associated with cluster by citation count
# rcr = [output[key]["rcr"] for key in output if output[key]["project"]==idd]
apt = [output[key]["apt"] for key in output if output[key]["project"]==idd]
avail_years = [max(0, 2021-output[key]["year"]) for key in output if output[key]["project"]==idd]
# cluster_rcr.extend(rcr)
cluster_apt.extend(apt)
num_papers += len(papers)
cluster_citations.append(sum(papers))
availability.append(sum(avail_years))
total_citations.append(sum(cluster_citations))
total_papers.append(num_papers)
apts_95.append(sum([1 for i in cluster_apt if i==0.95])/len(cluster_apt))
apts.append(np.mean(cluster_apt))
#create 95% confidence interval for population mean weight
apts_interval = scist.norm.interval(alpha=0.95, loc=np.mean(cluster_apt), scale=scist.sem(cluster_apt))
lower.append(apts_interval[0])
upper.append(apts_interval[1])
# rcrs.append(sum(cluster_apt)/len(cluster_apt))
total_availability.append(int(sum(availability)))
return total_citations, total_papers, apts_95, apts, lower, upper, total_availability
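# Hedged example of consuming the aligned per-cluster lists returned above
# (this mirrors the call made in __main__; variable names are illustrative):
#   citations, papers, apt95, apt, lo, hi, avail = get_citations(data["data_by_cluster"])
#   print("Cluster 0 APT: {:.2f} ({:.2f}-{:.2f})".format(apt[0], lo[0], hi[0]))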
def get_rep_clusters(result):
path, dirs, files = next(os.walk('{}/clusters'.format(result)))
file_count = len(files)
if file_count == 0:
return
document = Document()
for i in range(file_count):
unique_awards = {}
# open file
with open('{}/clusters/cluster-{}.csv'.format(result, str(i)), newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for j in range(1,len(raw_data)):
title = raw_data[j][1]
organization = raw_data[j][6]
mechanism = raw_data[j][7]
year = int(raw_data[j][8])
score = float(raw_data[j][11])
# If this is a new title
if title not in unique_awards:
unique_awards[title] = {
"organization": organization,
"activity": mechanism,
"year": year,
"score": score,
}
# If the title is already there
else:
current_year = unique_awards[title]["year"]
# Use the most recent one
if year > current_year:
unique_awards[title] = {
"organization": organization,
"activity": mechanism,
"year": year,
"score": score,
}
unique_awards_sorted = dict(sorted(unique_awards.items(), key = lambda item: -item[1]["score"]))
unique_awards_list = list(unique_awards_sorted.items())[0:5]
p = document.add_paragraph()
p.add_run('Cluster {}:'.format(str(i))).bold = True
table = document.add_table(rows=6, cols=5)
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Title'
hdr_cells[1].text = 'Awardee'
hdr_cells[2].text = 'Award Activity'
hdr_cells[3].text = 'Year'
hdr_cells[4].text = 'Sample Silhouette Score'
for i in range(len(unique_awards_list)):
table.cell(i+1,0).text = unique_awards_list[i][0] # Title
table.cell(i+1,1).text = unique_awards_list[i][1]['organization'] # Awardee
table.cell(i+1,2).text = unique_awards_list[i][1]['activity'] # Award Activity
table.cell(i+1,3).text = str(unique_awards_list[i][1]['year']) # Year
table.cell(i+1,4).text = "{:.2g}".format(unique_awards_list[i][1]['score']) # Sample Silhouette Score
document.add_page_break()
document.save('{}/supp_info.docx'.format(result))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--k',
type=int,
required=True,
help='number of clusters',
default=30,
)
parser.add_argument(
'--trials',
type=int,
required=True,
help='number of trials',
default=50,
)
FLAGS, unparsed = parser.parse_known_args()
years = [str(i) for i in range(1985,2021)]
selected_k = FLAGS.k
num_trials = FLAGS.trials
centers = 'k-means++'
# Create folder to save results
now = datetime.now()
save_folder = "results/"+now.strftime("%m-%d-%Y--%H%M%S")
os.mkdir(save_folder)
# Get best clustering
data, scores = get_best_cluster(selected_k, num_trials, centers, years, save_folder)
with open("{}/model_clustering.pkl".format(save_folder), 'wb') as handle:
pickle.dump(data, handle)
# Final cluster files
num = 0
os.mkdir(save_folder+"/clusters")
for cluster in data["data_by_cluster"]:
if cluster == []:
continue
keys = cluster[0].keys()
with open('{}/clusters/cluster-{}.csv'.format(save_folder,str(num)), 'w', newline='', encoding='utf8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(cluster)
num+=1
# Silhouette score by cluster
print("")
print("------Silhouette scores------")
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
scores = metrics.silhouette_samples(X_transformed, data["labels"])
tabulated = []
pairs = [(scores[i],data["labels"][i]) for i in range(len(scores))]
for i in range(selected_k):
avg_score = np.mean([j[0] for j in pairs if j[1] == i])
print("Cluster {}: {}".format(str(i), str(avg_score)))
tabulated.append(avg_score)
print("----------------------------")
print("")
# Final centroids
order_centroids = data["model"].cluster_centers_.argsort()[:, ::-1]
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
terms = vectorizer.get_feature_names_out()
centroids = []
centroid_file = open("{}/centroids".format(save_folder), "w", encoding='utf8')
for i in range(selected_k):
centroid_file.write("Cluster %d:" % i)
centroid_list = []
for ind in order_centroids[i, :15]:
centroid_file.write(" %s," % terms[ind])
centroid_list.append(terms[ind])
centroids.append(centroid_list)
centroid_file.write("\n")
centroid_file.close()
# UMAP Visualization
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
umap_visualization(X_transformed, data["labels"], tabulated, data["size"], save_folder)
# Get 2021 projections, projected growth rates, and confidence bounds on growth rates by cluster
projection, growth, bounds = get_funding_projections(data) # 2021 prediction
# Get 2021 clusters
model = data["model"]
clusters_test, size_test = predict_clusters("data/test-data.pkl", selected_k, model)
x = np.arange(selected_k)
if size_test == 0:
cluster_cost_2021 = [0 for i in range(0, selected_k)]
else:
cluster_cost_2021 = [(sum([item["award_amount"] for item in group]) if len(group) > 0 else 0) for group in clusters_test]
# Save 2021 clusters
num = 0
os.mkdir("{}/clusters_test".format(save_folder))
for cluster in clusters_test:
try:
keys = cluster[0].keys()
except:
num+=1
continue
with open('{}/clusters_test/cluster-{}.csv'.format(save_folder,str(num)), 'w', newline='', encoding='utf8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(cluster)
num+=1
# Citations and papers
citations, papers, apt_pct, apt, lower, upper, availability = get_citations(data["data_by_cluster"])
# Total funding
total_cluster_funding = [sum([item["award_amount"] for item in group]) for group in data["data_by_cluster"]]
# Get representative clusters for supp info
get_rep_clusters(save_folder)
# All data - note blank columns for description, category
output = [["Cluster", "Size", "Total", "Citations", "APT % over 95%", "Avg. APT", "95%CI L", "95%CI U", "Papers", "Citations per $1mil funding", "Years of Availability", "Citations per thousand dollars of funding per year", "Projected 2021 Award", "Actual 2021 Award To Date", "Growth Rate", "95%CI L", "95%CI U", "Score", "Description", "Category", "Clinical/Technical", "Centroids", "%R01", "%U01", "%R44", "%U24", "%R21", "%U54"]]
for i in range(selected_k):
output.append([i, data["size"][i], total_cluster_funding[i], citations[i], apt_pct[i], apt[i], lower[i], upper[i], papers[i], citations[i]/total_cluster_funding[i]*1e6, availability[i], citations[i]/total_cluster_funding[i]*1e3/availability[i], projection[i], cluster_cost_2021[i], growth[i], bounds[i][0], bounds[i][1], tabulated[i], " ", " ", " ", centroids[i], data["mechanisms"][0][i], data["mechanisms"][1][i], data["mechanisms"][2][i], data["mechanisms"][3][i], data["mechanisms"][4][i], data["mechanisms"][5][i]])
with open('{}/final_data.csv'.format(save_folder), 'w', newline='', encoding='utf8') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(output)
print("Complete.")
| [
"csv.field_size_limit",
"csv.DictWriter",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"yellowbrick.cluster.InterclusterDistance",
"sklearn.metrics.silhouette_samples",
"scipy.stats.sem",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"numpy.exp",... | [((73, 96), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (87, 96), False, 'import matplotlib\n'), ((2181, 2273), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'selected_k', 'init': 'centers', 'verbose': '(0)', 'max_no_improvement': 'None'}), '(n_clusters=selected_k, init=centers, verbose=0,\n max_no_improvement=None)\n', (2196, 2273), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2328, 2379), 'sklearn.metrics.silhouette_samples', 'metrics.silhouette_samples', (['X_transformed', 'clusters'], {}), '(X_transformed, clusters)\n', (2354, 2379), True, 'import sklearn.metrics as metrics\n'), ((4983, 5034), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['X_transformed', 'km.labels_'], {}), '(X_transformed, km.labels_)\n', (5007, 5034), True, 'import sklearn.metrics as metrics\n'), ((6910, 6922), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6920, 6922), True, 'import matplotlib.pyplot as plt\n'), ((6927, 6949), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': 'None'}), '(visible=None)\n', (6935, 6949), True, 'import matplotlib.pyplot as plt\n'), ((6954, 7045), 'matplotlib.pyplot.scatter', 'plt.scatter', (['embedding[:, 0]', 'embedding[:, 1]'], {'cmap': '"""Spectral"""', 's': '(5)', 'c': 'selected_colors'}), "(embedding[:, 0], embedding[:, 1], cmap='Spectral', s=5, c=\n selected_colors)\n", (6965, 7045), True, 'import matplotlib.pyplot as plt\n'), ((7242, 7301), 'matplotlib.pyplot.title', 'plt.title', (['"""UMAP Projection of Awards, TF-IDF"""'], {'fontsize': '(14)'}), "('UMAP Projection of Awards, TF-IDF', fontsize=14)\n", (7251, 7301), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""UMAP 1"""'], {}), "('UMAP 1')\n", (7316, 7326), True, 'import matplotlib.pyplot as plt\n'), ((7331, 7351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""UMAP 2"""'], {}), "('UMAP 2')\n", (7341, 7351), True, 'import matplotlib.pyplot as plt\n'), ((7367, 7396), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (7394, 7396), True, 'import matplotlib.pyplot as plt\n'), ((8003, 8055), 'matplotlib.pyplot.subplots', 'plt.subplots', (['dim1', 'dim2'], {'sharex': '"""all"""', 'sharey': '"""all"""'}), "(dim1, dim2, sharex='all', sharey='all')\n", (8015, 8055), True, 'import matplotlib.pyplot as plt\n'), ((8148, 8170), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': 'None'}), '(visible=None)\n', (8156, 8170), True, 'import matplotlib.pyplot as plt\n'), ((8175, 8277), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'which': '"""both"""', 'top': '(False)', 'bottom': '(False)', 'left': '(False)', 'right': '(False)'}), "(labelcolor='none', which='both', top=False, bottom=False,\n left=False, right=False)\n", (8190, 8277), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8307), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years from 1985"""'], {}), "('Years from 1985')\n", (8288, 8307), True, 'import matplotlib.pyplot as plt\n'), ((8312, 8349), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Funding ($100 millions)"""'], {}), "('Funding ($100 millions)')\n", (8322, 8349), True, 'import matplotlib.pyplot as plt\n'), ((9347, 9359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9357, 9359), True, 'import matplotlib.pyplot as plt\n'), ((9377, 9420), 'yellowbrick.cluster.InterclusterDistance', 'InterclusterDistance', (['model'], {'random_state': 
'(0)'}), '(model, random_state=0)\n', (9397, 9420), False, 'from yellowbrick.cluster import InterclusterDistance\n'), ((15427, 15437), 'docx.Document', 'Document', ([], {}), '()\n', (15435, 15437), False, 'from docx import Document\n'), ((17959, 17984), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17982, 17984), False, 'import argparse\n'), ((18505, 18519), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18517, 18519), False, 'from datetime import datetime\n'), ((18586, 18607), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (18594, 18607), False, 'import os\n'), ((18879, 18914), 'os.mkdir', 'os.mkdir', (["(save_folder + '/clusters')"], {}), "(save_folder + '/clusters')\n", (18887, 18914), False, 'import os\n'), ((19496, 19553), 'sklearn.metrics.silhouette_samples', 'metrics.silhouette_samples', (['X_transformed', "data['labels']"], {}), "(X_transformed, data['labels'])\n", (19522, 19553), True, 'import sklearn.metrics as metrics\n'), ((21070, 21091), 'numpy.arange', 'np.arange', (['selected_k'], {}), '(selected_k)\n', (21079, 21091), True, 'import numpy as np\n'), ((696, 724), 'csv.field_size_limit', 'csv.field_size_limit', (['maxInt'], {}), '(maxInt)\n', (716, 724), False, 'import csv\n'), ((6243, 6276), 'numpy.arange', 'np.arange', (['X_transformed.shape[0]'], {}), '(X_transformed.shape[0])\n', (6252, 6276), True, 'import numpy as np\n'), ((7106, 7147), 'numpy.unique', 'np.unique', (['cluster_labels[selected_cells]'], {}), '(cluster_labels[selected_cells])\n', (7115, 7147), True, 'import numpy as np\n'), ((7557, 7618), 'colorsys.hls_to_rgb', 'hls_to_rgb', (['(end * i / (n - 1) * shade)', '(0.5 * shade)', '(1 * shade)'], {}), '(end * i / (n - 1) * shade, 0.5 * shade, 1 * shade)\n', (7567, 7618), False, 'from colorsys import hls_to_rgb\n'), ((8679, 8702), 'numpy.linspace', 'np.linspace', (['(0)', '(21)', '(400)'], {}), '(0, 21, 400)\n', (8690, 8702), True, 'import numpy as np\n'), ((18810, 18835), 'pickle.dump', 'pickle.dump', (['data', 'handle'], {}), '(data, handle)\n', (18821, 18835), False, 'import pickle\n'), ((19697, 19740), 'numpy.mean', 'np.mean', (['[j[0] for j in pairs if j[1] == i]'], {}), '([j[0] for j in pairs if j[1] == i])\n', (19704, 19740), True, 'import numpy as np\n'), ((23367, 23386), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (23377, 23386), False, 'import csv\n'), ((6325, 6371), 'umap.umap_.UMAP', 'umap.UMAP', ([], {'metric': '"""hellinger"""', 'random_state': '(42)'}), "(metric='hellinger', random_state=42)\n", (6334, 6371), True, 'import umap.umap_ as umap\n'), ((7045, 7054), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7052, 7054), True, 'import matplotlib.pyplot as plt\n'), ((8648, 8665), 'numpy.diagonal', 'np.diagonal', (['pcov'], {}), '(pcov)\n', (8659, 8665), True, 'import numpy as np\n'), ((12843, 12862), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (12853, 12862), False, 'import csv\n'), ((13262, 13281), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (13272, 13281), False, 'import csv\n'), ((14759, 14779), 'numpy.mean', 'np.mean', (['cluster_apt'], {}), '(cluster_apt)\n', (14766, 14779), True, 'import numpy as np\n'), ((19191, 19224), 'csv.DictWriter', 'csv.DictWriter', (['output_file', 'keys'], {}), '(output_file, keys)\n', (19205, 19224), False, 'import csv\n'), ((21707, 21740), 'csv.DictWriter', 'csv.DictWriter', (['output_file', 'keys'], {}), '(output_file, keys)\n', (21721, 21740), False, 'import csv\n'), ((8879, 8902), 'numpy.exp', 
'np.exp', (['(popt[1] * point)'], {}), '(popt[1] * point)\n', (8885, 8902), True, 'import numpy as np\n'), ((14909, 14929), 'numpy.mean', 'np.mean', (['cluster_apt'], {}), '(cluster_apt)\n', (14916, 14929), True, 'import numpy as np\n'), ((14937, 14959), 'scipy.stats.sem', 'scist.sem', (['cluster_apt'], {}), '(cluster_apt)\n', (14946, 14959), True, 'import scipy.stats as scist\n'), ((15659, 15678), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (15669, 15678), False, 'import csv\n'), ((8557, 8570), 'numpy.exp', 'np.exp', (['(b * t)'], {}), '(b * t)\n', (8563, 8570), True, 'import numpy as np\n')] |
from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy
def test_init_handles_string_input():
line = "<x=15, y=-2, z=-6>"
expected_x = 15
expected_y = -2
expected_z = -6
result = Moon(line)
assert result.x == expected_x
assert result.y == expected_y
assert result.z == expected_z
def test_apply_velocity():
# x=1, y=2, z=3 and a velocity of x=-2, y=0,z=3, then its new position would be x=-1, y=2, z=6
moon = Moon("<x=1, y=2, z=3>")
moon.velocity = (-2, 0, 3)
expected = Moon("<x=-1, y=2, z=6>")
expected.velocity = (-2, 0, 3)
moon.apply_velocity()
assert moon == expected
def test_get_total_energy():
expected = 179
start_input = [
"<x=-1, y=0, z=2>",
"<x=2, y=-10, z=-7>",
"<x=4, y=-8, z=8>",
"<x=3, y=5, z=-1>",
]
moons = [Moon(row) for row in start_input]
result = get_total_energy(moons, 10)
assert result == expected
def test_get_total_energy_2nd_example():
expected = 1940
start_input = [
"<x=-8, y=-10, z=0>",
"<x=5, y=5, z=10>",
"<x=2, y=-7, z=3>",
"<x=9, y=-8, z=-3>",
]
moons = [Moon(row) for row in start_input]
result = get_total_energy(moons, 100)
assert result == expected
def test_get_steps_to_find_same_state():
expected = 4686774924
start_input = [
"<x=-8, y=-10, z=0>",
"<x=5, y=5, z=10>",
"<x=2, y=-7, z=3>",
"<x=9, y=-8, z=-3>",
]
moons = [Moon(row) for row in start_input]
result = get_steps_to_find_same_state(moons)
assert result == expected
| [
"day12.get_total_energy",
"day12.get_steps_to_find_same_state",
"day12.Moon"
] | [((233, 243), 'day12.Moon', 'Moon', (['line'], {}), '(line)\n', (237, 243), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((486, 509), 'day12.Moon', 'Moon', (['"""<x=1, y=2, z=3>"""'], {}), "('<x=1, y=2, z=3>')\n", (490, 509), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((556, 580), 'day12.Moon', 'Moon', (['"""<x=-1, y=2, z=6>"""'], {}), "('<x=-1, y=2, z=6>')\n", (560, 580), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((920, 947), 'day12.get_total_energy', 'get_total_energy', (['moons', '(10)'], {}), '(moons, 10)\n', (936, 947), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((1242, 1270), 'day12.get_total_energy', 'get_total_energy', (['moons', '(100)'], {}), '(moons, 100)\n', (1258, 1270), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((1572, 1607), 'day12.get_steps_to_find_same_state', 'get_steps_to_find_same_state', (['moons'], {}), '(moons)\n', (1600, 1607), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((873, 882), 'day12.Moon', 'Moon', (['row'], {}), '(row)\n', (877, 882), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((1195, 1204), 'day12.Moon', 'Moon', (['row'], {}), '(row)\n', (1199, 1204), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n'), ((1524, 1533), 'day12.Moon', 'Moon', (['row'], {}), '(row)\n', (1528, 1533), False, 'from day12 import Moon, simulate_moons, get_steps_to_find_same_state, get_total_energy\n')] |
import gym
import gym_tensegrity
import numpy as np
import os
from time import sleep
# Discrete action space functions testing
def main(port_num=10042):
def print_observation(obs):
print("Observations {:}".format(obs))
env = gym.make('gym_tensegrity:jumper-v0')
# action = randint(0,15)
action = 14
# print("Action: {:}".format(action))
init_obs ,_,_,_=env.step(action)
# print_observation(init_obs)
# print(env.env.actions_json)
# print("")
# input("-> check point: WAIT for INPUT !!!!")
# for i in range(50):
# input("-> check point: WAIT for INPUT !!!!")
# observation, reward, done, _= env.step(action)
# print_observation(observation)
# print("Done:???:{:}".format(done))
# input("-> check point: WAIT for INPUT !!!!")
# for i in range(1,1001):
# action = env.action_space.sample()
# # action = 2
# input("-> check point: WAIT for INPUT !!!!")
# print("--------------- ({:}) ---------------".format(i))
# print("######\nAction: {:}\n######".format(action))
# observation, reward, done, _= env.step(action)
# print_observation(observation)
# print("Done:???:{:}".format(done))
# input("-> check point: WAIT for INPUT !!!!")
# for i in range(50):
# observation, reward, done, _= env.step(2)
# input("-> check point: WAIT for INPUT !!!!")
flag = 0
# i = 0
while True:
# i += 1
# print(i)
# if(i > 100):
# i = 0
# env.reset()
inp = "d"
# inp = input("~~~~~~input: ")
if(inp == "w"):
flag = 1
elif(inp == "s"):
flag = -1
elif(inp == "d"):
flag = 0
if(flag <= 0):
observation, reward, done, _= env.step(4)
observation, reward, done, _= env.step(5)
if(flag >= 0):
observation, reward, done, _= env.step(12)
observation, reward, done, _= env.step(13)
print(observation)
print("angle:{:}".format(observation[-1]*180/np.pi))
def forked_process_main():
port_num_base = 10042
num_threads = 2
for i in range(num_threads):
pid = os.fork()
print("fork {:}".format(pid))
        if(pid == 0):
            # os.fork() returns 0 in the child process; each child runs its own env/port
            print("Child: {:} -> on port: {:}".format(os.getpid(), port_num_base+i))
            config = {"port_num":port_num_base+i}
            main(config)
def threaded_main():
import threading
port_num_base = 10042
num_threads = 10
threads_list = []
for i in range(num_threads):
config = {"port_num":port_num_base+i}
threads_list.append(threading.Thread(target=main, args=(config,)))
for i in range(num_threads):
threads_list[i].start()
# Continuous action space for lengths function testing
def main_cont_lengths(port_num=10042):
def print_observation(obs):
print("Observations {:}".format(obs))
env = gym.make('gym_tensegrity:jumper-v0')
# action = randint(0,15)
action = [7.95 for i in range(8)]
# action[0] = 5
print("Action: {:}".format(action))
# input("-> check point: WAIT for INPUT !!!!")
init_obs ,_,_,_=env.step(action)
print_observation(init_obs)
# print(env.env.actions_json)
# print("")
# input("-> check point: WAIT for INPUT !!!!")
flag = 0
# i = 0
while True:
observation, reward, done, _= env.step(init_obs[:-1])
print(observation)
print("angle:{:}".format(observation[-1]*180/np.pi))
# Continuous action space for delta lengths function testing
def main_cont_dlengths(config):
def print_observation(obs):
print("Observations {:}".format(obs))
tot_reward = 0
env = gym.make('gym_tensegrity:jumper-v0', config=config)
# action = randint(0,15)
action = np.array([0. for i in range(8)])
# action[0] = 1.7
print("Action: {:}".format(action))
# input("-> check point: WAIT for INPUT !!!!")
init_obs ,tot_reward,done,_=env.step(action)
print_observation(init_obs)
action[0] = 0
# print(env.env.actions_json)
# print("")
input("-> check point: WAIT for INPUT !!!!")
while not done:
action = env.action_space.sample()
observation, reward, done, _= env.step(action)
tot_reward += reward
print("Action: {:}".format(action))
# input("-> check point: WAIT for INPUT !!!!")
print("Reward: {:}, Done: {:}".format(reward,done))
print("Time: {:}".format(env.env.getTime()))
print_observation(observation)
print("angle:{:}".format(env.env.getLegAngle()*180/np.pi))
print("Total Reward: {:}".format(tot_reward))
# sleep(0.01)
input("-> check point: WAIT for INPUT !!!!")
    flag = 0
    while True:
inp = "d"
inp = input("~~~~~~input: ")
#action = env.action_space.sample()
#observation, reward, done, _= env.step(action)
if(inp == "w"):
flag = 1
elif(inp == "s"):
flag = -1
elif(inp == "d"):
flag = 0
if(flag < 0):
action[0] = -0.1
observation, reward, done, _= env.step(action)
# # action[0] = 0
# # observation, reward, done, _= env.step(action)
if(flag > 0):
action[0] = 0.1
observation, reward, done, _= env.step(action)
# # action[0] = 0
# # observation, reward, done, _= env.step(action)
if(flag == 0):
action[0] = 0
observation, reward, done, _= env.step(action)
print(observation)
print("angle:{:}".format(env.env.getLegAngle()*180/np.pi))
def test(config=None):
def print_observation(obs):
        # This prints the components of the default observation vector
print("Observations: ")
for i in range(6):
print("#{:} End point: {:}".format(i+1, [obs[3*i:3*(i+1)]]))
print("---")
for i in range(6):
print("#{:} End point velocity: {:}".format(i+1, [obs[3*(i+6):3*(i+1+6)]]))
print("Leg angle:{:}".format(env.env.getLegAngle()*180/np.pi))
squre_sides_angles = env.env.getSquareSidesAngles()
print("Square side angle1:{:}".format(squre_sides_angles[0]*180/np.pi))
print("Square side angle2:{:}".format(squre_sides_angles[1]*180/np.pi))
print("----------------------------------")
if(config is not None):
env = gym.make('gym_tensegrity:jumper-v0', config=config)
if(config is None):
env = gym.make('gym_tensegrity:jumper-v0')
observation = env.reset()
print_observation(observation)
tot_reward = 0
action = np.array([0. for i in range(8)])
done = False
input("-> check point: WAIT for INPUT !!!!")
while not done:
#inp = input("INPUT")
# action = env.action_space.sample()
print("Action: {:}".format(action))
observation, reward, done, _= env.step(action)
tot_reward += reward
print("Reward: {:}, Done: {:}".format(reward,done))
print("Time: {:}".format(env.env.getTime()))
print_observation(observation)
print("angle:{:}".format(env.env.getLegAngle()*180/np.pi))
print("Total Reward: {:}".format(tot_reward))
# input("-> check point: WAIT for INPUT !!!!")
# sleep(0.01)
input("-> check point: WAIT for INPUT !!!!")
flag = 0
while True:
inp = 'd'
# inp = input("~~~~~~input: ")
#action = env.action_space.sample()
#observation, reward, done, _= env.step(action)
if(inp == "w"):
flag = 1
elif(inp == "s"):
flag = -1
elif(inp == "d"):
flag = 0
if(flag < 0):
action[0] = -0.1
if(flag > 0):
action[0] = 0.1
if(flag == 0):
action[0] = 0
observation, reward, done, _= env.step(action)
print(action)
print_observation(observation)
if __name__ == "__main__":
# test({'starting_coordinates':(0,10,0), "max_num_steps":1000, "starting_angle":(1.0001*np.pi/180,0)})
# test({'starting_coordinates':(0,100,0), "max_num_steps":10000, "starting_angle":(0,0), "starting_leg_angle": (0,0), "randomized_starting": False})
# test({"max_num_steps":10000, "randomized_starting": {"angle":[False], "height":[True, 10,100]}})
# test({'starting_coordinates':[0,10,0], "max_num_steps":10000, "randomized_starting": {"angle":[[True, True], [4,1],[10,3]], "height":[False]}})
test({'starting_coordinates':[0,10,0], "max_num_steps":10000, 'starting_leg_angle':[1,2],
'observation_noise': {"uncorrelated":{"mean":0,"stdev":1}, "correlated":{"mean":0,"stdev":1}}})
| [
"os.fork",
"threading.Thread",
"gym.make"
] | [((242, 278), 'gym.make', 'gym.make', (['"""gym_tensegrity:jumper-v0"""'], {}), "('gym_tensegrity:jumper-v0')\n", (250, 278), False, 'import gym\n'), ((2982, 3018), 'gym.make', 'gym.make', (['"""gym_tensegrity:jumper-v0"""'], {}), "('gym_tensegrity:jumper-v0')\n", (2990, 3018), False, 'import gym\n'), ((3762, 3813), 'gym.make', 'gym.make', (['"""gym_tensegrity:jumper-v0"""'], {'config': 'config'}), "('gym_tensegrity:jumper-v0', config=config)\n", (3770, 3813), False, 'import gym\n'), ((2242, 2251), 'os.fork', 'os.fork', ([], {}), '()\n', (2249, 2251), False, 'import os\n'), ((6482, 6533), 'gym.make', 'gym.make', (['"""gym_tensegrity:jumper-v0"""'], {'config': 'config'}), "('gym_tensegrity:jumper-v0', config=config)\n", (6490, 6533), False, 'import gym\n'), ((6572, 6608), 'gym.make', 'gym.make', (['"""gym_tensegrity:jumper-v0"""'], {}), "('gym_tensegrity:jumper-v0')\n", (6580, 6608), False, 'import gym\n'), ((2682, 2727), 'threading.Thread', 'threading.Thread', ([], {'target': 'main', 'args': '(config,)'}), '(target=main, args=(config,))\n', (2698, 2727), False, 'import threading\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
class LaneIdentifier:
def __init__(self, smooth_factor, filter):
self.left_lane_inds = []
self.right_lane_inds = []
self.lane_gap = []
self.binary_warped = None
self.window_height = None
self.leftx_current = 0
self.rightx_current = 0
self.nonzeroy = None
self.nonzerox = None
self.left_fit = None
self.right_fit = None
self.margin = 100
self.nwindows = 9
self.minpix = 50
self.leftx = []
self.lefty = []
self.rightx = []
self.righty = []
self.smooth_factor = smooth_factor
self.filter = filter
return
def identify_lanes(self, binary):
self.binary_warped = binary
self.window_height = np.int(self.binary_warped.shape[0] // self.nwindows)
nonzero = binary.nonzero()
self.nonzeroy = np.array(nonzero[0])
self.nonzerox = np.array(nonzero[1])
if self.left_fit is None or self.right_fit is None:
self.blind_sliding_window_search()
else:
self.selective_window_search()
ret = self.extract_lane_lines()
if ret is False:
return False, None, None
return True, self.left_fit, self.right_fit
def blind_sliding_window_search(self):
histogram = np.sum(self.binary_warped[self.binary_warped.shape[0] // 2:, :], axis=0)
midpoint = np.int(histogram.shape[0] // 2)
leftx_current = np.argmax(histogram[:midpoint])
rightx_current = np.argmax(histogram[midpoint:]) + midpoint
l_lane_inds = []
r_lane_inds = []
for window in range(self.nwindows):
win_y_low = self.binary_warped.shape[0] - (window + 1) * self.window_height
win_y_high = self.binary_warped.shape[0] - window * self.window_height
win_xleft_low = leftx_current - self.margin
win_xleft_high = leftx_current + self.margin
win_xright_low = rightx_current - self.margin
win_xright_high = rightx_current + self.margin
good_left_inds = ((self.nonzeroy >= win_y_low) &
(self.nonzeroy < win_y_high) &
(self.nonzerox >= win_xleft_low) &
(self.nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((self.nonzeroy >= win_y_low) &
(self.nonzeroy < win_y_high) &
(self.nonzerox >= win_xright_low) &
(self.nonzerox < win_xright_high)).nonzero()[0]
l_lane_inds.append(good_left_inds)
r_lane_inds.append(good_right_inds)
if len(good_left_inds) > self.minpix:
leftx_current = np.int(np.mean(self.nonzerox[good_left_inds]))
if len(good_right_inds) > self.minpix:
rightx_current = np.int(np.mean(self.nonzerox[good_right_inds]))
self.left_lane_inds = np.concatenate(l_lane_inds)
self.right_lane_inds = np.concatenate(r_lane_inds)
return
def selective_window_search(self):
self.left_lane_inds = ((self.nonzerox >
(self.left_fit[0]*(self.nonzeroy**2) + self.left_fit[1]*self.nonzeroy +
self.left_fit[2] - self.margin)) &
(self.nonzerox <
(self.left_fit[0] * (self.nonzeroy ** 2) + self.left_fit[1]*self.nonzeroy +
self.left_fit[2] + self.margin)))
self.right_lane_inds = ((self.nonzerox >
(self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
self.right_fit[2] - self.margin)) &
(self.nonzerox <
(self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
self.right_fit[2] + self.margin)))
return
def extract_lane_lines(self):
# Extract left and right line pixel positions
leftx = self.nonzerox[self.left_lane_inds]
lefty = self.nonzeroy[self.left_lane_inds]
rightx = self.nonzerox[self.right_lane_inds]
righty = self.nonzeroy[self.right_lane_inds]
if leftx.size == 0 or rightx.size == 0:
if self.left_fit is None or self.right_fit is None:
return False
        # Outlier filter: delete points that are far away from the previously
        # recognized lane curve.
if self.left_fit is not None:
leftx_trend = self.left_fit[0]*lefty*lefty + self.left_fit[1]*lefty + self.left_fit[2]
            residual = abs(leftx - leftx_trend)
            indices = (residual > self.filter).nonzero()
leftx = np.delete(leftx, indices)
lefty = np.delete(lefty, indices)
if self.right_fit is not None:
rightx_trend = self.right_fit[0]*righty*righty + self.right_fit[1]*righty + self.right_fit[2]
            residual = abs(rightx - rightx_trend)
            indices = (residual > self.filter).nonzero()
rightx = np.delete(rightx, indices)
righty = np.delete(righty, indices)
        # Include previously identified pixels in the 2nd-order polynomial fit
        # in order to alleviate frame-to-frame oscillation.
self.leftx = np.append(self.leftx, leftx)
self.lefty = np.append(self.lefty, lefty)
self.rightx = np.append(self.rightx, rightx)
self.righty = np.append(self.righty, righty)
self.leftx = self.leftx[-self.smooth_factor:]
self.lefty = self.lefty[-self.smooth_factor:]
self.rightx = self.rightx[-self.smooth_factor:]
self.righty = self.righty[-self.smooth_factor:]
# Fit a second order polynomial to each
self.left_fit = np.polyfit(self.lefty, self.leftx, 2)
self.right_fit = np.polyfit(self.righty, self.rightx, 2)
return True
def visualization(self):
# Generate x and y values for plotting
ploty = np.linspace(0, self.binary_warped.shape[0] - 1, self.binary_warped.shape[0])
left_fitx = self.left_fit[0] * ploty ** 2 + self.left_fit[1] * ploty + self.left_fit[2]
right_fitx = self.right_fit[0] * ploty ** 2 + self.right_fit[1] * ploty + self.right_fit[2]
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((self.binary_warped, self.binary_warped, self.binary_warped)) * 255
fit_img = np.zeros_like(out_img)
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[self.nonzeroy[self.left_lane_inds], self.nonzerox[self.left_lane_inds]] = [255, 0, 0]
out_img[self.nonzeroy[self.right_lane_inds], self.nonzerox[self.right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - self.margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + self.margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - self.margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + self.margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(fit_img, 1, window_img, 0.3, 0)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(out_img)
ax1.set_title('Detected Lane Points', fontsize=30)
ax2.imshow(result)
ax2.set_title('Lane Lines', fontsize=30)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0) | [
"numpy.polyfit",
"numpy.hstack",
"numpy.array",
"numpy.mean",
"numpy.delete",
"matplotlib.pyplot.plot",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"numpy.argmax",
"numpy.int_",
"matplotlib.pyplot.xlim",
"numpy.int",
"numpy.dstack... | [((841, 893), 'numpy.int', 'np.int', (['(self.binary_warped.shape[0] // self.nwindows)'], {}), '(self.binary_warped.shape[0] // self.nwindows)\n', (847, 893), True, 'import numpy as np\n'), ((954, 974), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (962, 974), True, 'import numpy as np\n'), ((999, 1019), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (1007, 1019), True, 'import numpy as np\n'), ((1405, 1477), 'numpy.sum', 'np.sum', (['self.binary_warped[self.binary_warped.shape[0] // 2:, :]'], {'axis': '(0)'}), '(self.binary_warped[self.binary_warped.shape[0] // 2:, :], axis=0)\n', (1411, 1477), True, 'import numpy as np\n'), ((1497, 1528), 'numpy.int', 'np.int', (['(histogram.shape[0] // 2)'], {}), '(histogram.shape[0] // 2)\n', (1503, 1528), True, 'import numpy as np\n'), ((1553, 1584), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (1562, 1584), True, 'import numpy as np\n'), ((3070, 3097), 'numpy.concatenate', 'np.concatenate', (['l_lane_inds'], {}), '(l_lane_inds)\n', (3084, 3097), True, 'import numpy as np\n'), ((3129, 3156), 'numpy.concatenate', 'np.concatenate', (['r_lane_inds'], {}), '(r_lane_inds)\n', (3143, 3156), True, 'import numpy as np\n'), ((5491, 5519), 'numpy.append', 'np.append', (['self.leftx', 'leftx'], {}), '(self.leftx, leftx)\n', (5500, 5519), True, 'import numpy as np\n'), ((5541, 5569), 'numpy.append', 'np.append', (['self.lefty', 'lefty'], {}), '(self.lefty, lefty)\n', (5550, 5569), True, 'import numpy as np\n'), ((5592, 5622), 'numpy.append', 'np.append', (['self.rightx', 'rightx'], {}), '(self.rightx, rightx)\n', (5601, 5622), True, 'import numpy as np\n'), ((5645, 5675), 'numpy.append', 'np.append', (['self.righty', 'righty'], {}), '(self.righty, righty)\n', (5654, 5675), True, 'import numpy as np\n'), ((5970, 6007), 'numpy.polyfit', 'np.polyfit', (['self.lefty', 'self.leftx', '(2)'], {}), '(self.lefty, self.leftx, 2)\n', (5980, 6007), True, 'import numpy as np\n'), ((6033, 6072), 'numpy.polyfit', 'np.polyfit', (['self.righty', 'self.rightx', '(2)'], {}), '(self.righty, self.rightx, 2)\n', (6043, 6072), True, 'import numpy as np\n'), ((6188, 6264), 'numpy.linspace', 'np.linspace', (['(0)', '(self.binary_warped.shape[0] - 1)', 'self.binary_warped.shape[0]'], {}), '(0, self.binary_warped.shape[0] - 1, self.binary_warped.shape[0])\n', (6199, 6264), True, 'import numpy as np\n'), ((6655, 6677), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (6668, 6677), True, 'import numpy as np\n'), ((6699, 6721), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (6712, 6721), True, 'import numpy as np\n'), ((7422, 7471), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (7431, 7471), True, 'import numpy as np\n'), ((7781, 7832), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (7790, 7832), True, 'import numpy as np\n'), ((8048, 8095), 'cv2.addWeighted', 'cv2.addWeighted', (['fit_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(fit_img, 1, window_img, 0.3, 0)\n', (8063, 8095), False, 'import cv2\n'), ((8121, 8157), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 10)'}), '(1, 2, figsize=(20, 10))\n', (8133, 8157), True, 'import matplotlib.pyplot as plt\n'), ((8331, 8373), 'matplotlib.pyplot.plot', 'plt.plot', (['left_fitx', 'ploty'], {'color': 
'"""yellow"""'}), "(left_fitx, ploty, color='yellow')\n", (8339, 8373), True, 'import matplotlib.pyplot as plt\n'), ((8382, 8425), 'matplotlib.pyplot.plot', 'plt.plot', (['right_fitx', 'ploty'], {'color': '"""yellow"""'}), "(right_fitx, ploty, color='yellow')\n", (8390, 8425), True, 'import matplotlib.pyplot as plt\n'), ((8434, 8451), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1280)'], {}), '(0, 1280)\n', (8442, 8451), True, 'import matplotlib.pyplot as plt\n'), ((8460, 8476), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(720)', '(0)'], {}), '(720, 0)\n', (8468, 8476), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1641), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (1619, 1641), True, 'import numpy as np\n'), ((4928, 4953), 'numpy.delete', 'np.delete', (['leftx', 'indices'], {}), '(leftx, indices)\n', (4937, 4953), True, 'import numpy as np\n'), ((4974, 4999), 'numpy.delete', 'np.delete', (['lefty', 'indices'], {}), '(lefty, indices)\n', (4983, 4999), True, 'import numpy as np\n'), ((5268, 5294), 'numpy.delete', 'np.delete', (['rightx', 'indices'], {}), '(rightx, indices)\n', (5277, 5294), True, 'import numpy as np\n'), ((5316, 5342), 'numpy.delete', 'np.delete', (['righty', 'indices'], {}), '(righty, indices)\n', (5325, 5342), True, 'import numpy as np\n'), ((6559, 6630), 'numpy.dstack', 'np.dstack', (['(self.binary_warped, self.binary_warped, self.binary_warped)'], {}), '((self.binary_warped, self.binary_warped, self.binary_warped))\n', (6568, 6630), True, 'import numpy as np\n'), ((7919, 7943), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (7926, 7943), True, 'import numpy as np\n'), ((7991, 8016), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (7998, 8016), True, 'import numpy as np\n'), ((2867, 2905), 'numpy.mean', 'np.mean', (['self.nonzerox[good_left_inds]'], {}), '(self.nonzerox[good_left_inds])\n', (2874, 2905), True, 'import numpy as np\n'), ((2998, 3037), 'numpy.mean', 'np.mean', (['self.nonzerox[good_right_inds]'], {}), '(self.nonzerox[good_right_inds])\n', (3005, 3037), True, 'import numpy as np\n'), ((7170, 7213), 'numpy.vstack', 'np.vstack', (['[left_fitx - self.margin, ploty]'], {}), '([left_fitx - self.margin, ploty])\n', (7179, 7213), True, 'import numpy as np\n'), ((7524, 7568), 'numpy.vstack', 'np.vstack', (['[right_fitx - self.margin, ploty]'], {}), '([right_fitx - self.margin, ploty])\n', (7533, 7568), True, 'import numpy as np\n'), ((7278, 7321), 'numpy.vstack', 'np.vstack', (['[left_fitx + self.margin, ploty]'], {}), '([left_fitx + self.margin, ploty])\n', (7287, 7321), True, 'import numpy as np\n'), ((7634, 7678), 'numpy.vstack', 'np.vstack', (['[right_fitx + self.margin, ploty]'], {}), '([right_fitx + self.margin, ploty])\n', (7643, 7678), True, 'import numpy as np\n')] |
import pytest
from Tree import Tree
class TestClass:
def setup_class(self):
pass
def test_tree(self):
tree = Tree()
tree.add(3)
tree.add(4)
tree.add(0)
tree.add(8)
tree.add(2)
tree.print_tree()
assert True
def test_compare_trees(self):
tree1 = Tree()
tree2 = Tree()
assert tree1.compare_tree(tree2) is True
tree1.add(3)
assert tree1.compare_tree(tree2) is False
tree1.add(4)
tree1.add(0)
tree1.add(8)
tree1.add(2)
tree2.add(3)
tree2.add(4)
tree2.add(0)
tree2.add(8)
tree2.add(2)
assert tree1.compare_tree(tree2) is True
tree2.add(10)
assert tree1.compare_tree(tree2) is False
tree1.add(10)
assert tree1.compare_tree(tree2) is True
tree1.add(-1)
assert tree1.compare_tree(tree2) is False
tree2.add(-1)
assert tree1.compare_tree(tree2) is True
tree1.add(-5)
tree2.add(-3)
assert tree1.compare_tree(tree2) is False
tree2.add(-5)
tree1.add(-3)
tree1.print_tree()
# tree2.rebalance_tree()
#tree1.rebalance_tree()
#tree1.print_tree()
# assert tree1.compare_tree(tree2) is True
| [
"Tree.Tree"
] | [((137, 143), 'Tree.Tree', 'Tree', ([], {}), '()\n', (141, 143), False, 'from Tree import Tree\n'), ((341, 347), 'Tree.Tree', 'Tree', ([], {}), '()\n', (345, 347), False, 'from Tree import Tree\n'), ((364, 370), 'Tree.Tree', 'Tree', ([], {}), '()\n', (368, 370), False, 'from Tree import Tree\n')] |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright: (c) 2018, <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class UTMModuleConfigurationError(Exception):
def __init__(self, msg, **args):
super(UTMModuleConfigurationError, self).__init__(self, msg)
self.msg = msg
self.module_fail_args = args
def do_fail(self, module):
module.fail_json(msg=self.msg, other=self.module_fail_args)
class UTMModule(AnsibleModule):
"""
This is a helper class to construct any UTM Module. This will automatically add the utm host, port, token,
protocol, validate_certs and state field to the module. If you want to implement your own sophos utm module
just initialize this UTMModule class and define the Payload fields that are needed for your module.
See the other modules like utm_aaa_group for example.
"""
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
supports_check_mode=False, required_if=None):
default_specs = dict(
headers=dict(type='dict', required=False, default={}),
utm_host=dict(type='str', required=True),
utm_port=dict(type='int', default=4444),
utm_token=dict(type='str', required=True, no_log=True),
utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
validate_certs=dict(type='bool', required=False, default=True),
state=dict(default='present', choices=['present', 'absent'])
)
super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
mutually_exclusive, required_together, required_one_of,
add_file_common_args, supports_check_mode, required_if)
def _merge_specs(self, default_specs, custom_specs):
result = default_specs.copy()
result.update(custom_specs)
return result
class UTM:
def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
"""
Initialize UTM Class
:param module: The Ansible module
:param endpoint: The corresponding endpoint to the module
:param change_relevant_keys: The keys of the object to check for changes
:param info_only: When implementing an info module, set this to true. Will allow access to the info method only
"""
self.info_only = info_only
self.module = module
self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
"""
The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
"""
self.change_relevant_keys = change_relevant_keys
self.module.params['url_username'] = 'token'
self.module.params['url_password'] = module.params.get('utm_token')
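        # credentials picked up by fetch_url: the UTM REST API authenticates via basic auth with user 'token' and the API token as password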
if all(elem in self.change_relevant_keys for elem in module.params.keys()):
raise UTMModuleConfigurationError(
"The keys " + to_native(
self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
module.params.keys()))
def execute(self):
try:
if not self.info_only:
if self.module.params.get('state') == 'present':
self._add()
elif self.module.params.get('state') == 'absent':
self._remove()
else:
self._info()
except Exception as e:
self.module.fail_json(msg=to_native(e))
def _info(self):
"""
returns the info for an object in utm
"""
info, result = self._lookup_entry(self.module, self.request_url)
if info["status"] >= 400:
self.module.fail_json(result=json.loads(info))
else:
if result is None:
self.module.exit_json(changed=False)
else:
self.module.exit_json(result=result, changed=False)
def _add(self):
"""
adds or updates a host object on utm
"""
combined_headers = self._combine_headers()
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if info["status"] >= 400:
self.module.fail_json(result=json.loads(info))
else:
data_as_json_string = self.module.jsonify(self.module.params)
if result is None:
response, info = fetch_url(self.module, self.request_url, method="POST",
headers=combined_headers,
data=data_as_json_string)
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
else:
if self._is_object_changed(self.change_relevant_keys, self.module, result):
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
headers=combined_headers,
data=data_as_json_string)
if info['status'] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
self.module.exit_json(result=result, changed=is_changed)
def _combine_headers(self):
"""
This will combine a header default with headers that come from the module declaration
:return: A combined headers dict
"""
default_headers = {"Accept": "application/json", "Content-type": "application/json"}
if self.module.params.get('headers') is not None:
result = default_headers.copy()
result.update(self.module.params.get('headers'))
else:
result = default_headers
return result
def _remove(self):
"""
removes an object from utm
"""
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if result is not None:
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
data=self.module.jsonify(self.module.params))
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
else:
is_changed = True
self.module.exit_json(changed=is_changed)
def _lookup_entry(self, module, request_url):
"""
Lookup for existing entry
:param module:
:param request_url:
:return:
"""
response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
result = None
if response is not None:
results = json.loads(response.read())
result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
return info, result
def _clean_result(self, result):
"""
Will clean the result from irrelevant fields
:param result: The result from the query
:return: The modified result
"""
del result['utm_host']
del result['utm_port']
del result['utm_token']
del result['utm_protocol']
del result['validate_certs']
del result['url_username']
del result['url_password']
del result['state']
return result
def _is_object_changed(self, keys, module, result):
"""
Check if my object is changed
:param keys: The keys that will determine if an object is changed
:param module: The module
:param result: The result from the query
:return:
"""
for key in keys:
if module.params.get(key) != result[key]:
return True
return False
| [
"ansible.module_utils.urls.fetch_url",
"json.loads",
"ansible.module_utils._text.to_native"
] | [((9123, 9211), 'ansible.module_utils.urls.fetch_url', 'fetch_url', (['module', 'request_url'], {'method': '"""GET"""', 'headers': "{'Accept': 'application/json'}"}), "(module, request_url, method='GET', headers={'Accept':\n 'application/json'})\n", (9132, 9211), False, 'from ansible.module_utils.urls import fetch_url\n'), ((6598, 6710), 'ansible.module_utils.urls.fetch_url', 'fetch_url', (['self.module', 'self.request_url'], {'method': '"""POST"""', 'headers': 'combined_headers', 'data': 'data_as_json_string'}), "(self.module, self.request_url, method='POST', headers=\n combined_headers, data=data_as_json_string)\n", (6607, 6710), False, 'from ansible.module_utils.urls import fetch_url\n'), ((5908, 5924), 'json.loads', 'json.loads', (['info'], {}), '(info)\n', (5918, 5924), False, 'import json\n'), ((6428, 6444), 'json.loads', 'json.loads', (['info'], {}), '(info)\n', (6438, 6444), False, 'import json\n'), ((7160, 7287), 'ansible.module_utils.urls.fetch_url', 'fetch_url', (['self.module', "(self.request_url + result['_ref'])"], {'method': '"""PUT"""', 'headers': 'combined_headers', 'data': 'data_as_json_string'}), "(self.module, self.request_url + result['_ref'], method='PUT',\n headers=combined_headers, data=data_as_json_string)\n", (7169, 7287), False, 'from ansible.module_utils.urls import fetch_url\n'), ((5654, 5666), 'ansible.module_utils._text.to_native', 'to_native', (['e'], {}), '(e)\n', (5663, 5666), False, 'from ansible.module_utils._text import to_native\n'), ((8793, 8817), 'json.loads', 'json.loads', (["info['body']"], {}), "(info['body'])\n", (8803, 8817), False, 'import json\n'), ((5109, 5145), 'ansible.module_utils._text.to_native', 'to_native', (['self.change_relevant_keys'], {}), '(self.change_relevant_keys)\n', (5118, 5145), False, 'from ansible.module_utils._text import to_native\n'), ((6880, 6904), 'json.loads', 'json.loads', (["info['body']"], {}), "(info['body'])\n", (6890, 6904), False, 'import json\n'), ((7474, 7498), 'json.loads', 'json.loads', (["info['body']"], {}), "(info['body'])\n", (7484, 7498), False, 'import json\n')] |
# Generated by Django 3.1 on 2020-09-01 17:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='client',
name='link',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='client',
name='time',
field=models.DateTimeField(blank=True, max_length=50, null=True),
),
]
| [
"django.db.models.URLField",
"django.db.models.DateTimeField"
] | [((325, 363), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (340, 363), False, 'from django.db import migrations, models\n'), ((483, 541), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (503, 541), False, 'from django.db import migrations, models\n')] |
import sys
import time
import subprocess
import re
_setMessage = None
class Bootstrap:
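  # thin wrapper around the contractor's request API for the Survey/Cartographer and Building/Foundation endpoints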
def __init__( self, identifier, contractor ):
self.identifier = identifier
self.request = contractor.request
self.request( 'call', '/api/v1/Survey/Cartographer(register)', { 'identifier': identifier } )
def lookup( self, info_map ):
return self.request( 'call', '/api/v1/Survey/Cartographer:{0}:(lookup)'.format( self.identifier ), { 'info_map': info_map } )
def setMessage( self, message ):
self.request( 'call', '/api/v1/Survey/Cartographer:{0}:(setMessage)'.format( self.identifier ), { 'message': message } )
def done( self ):
return self.request( 'call', '/api/v1/Survey/Cartographer:{0}:(done)'.format( self.identifier ), {} )
def setIdMap( self, foundation_locator, id_map ):
return self.request( 'call', '/api/v1/Building/Foundation:{0}:(setIdMap)'.format( foundation_locator ), { 'id_map': id_map } )
def setPXEBoot( self, foundation_locator, pxe ):
iface_list, info = self.request( 'list', '/api/v1/Utilities/RealNetworkInterface', { 'foundation': '/api/v1/Building/Foundation:{0}:'.format( foundation_locator ) }, filter='foundation' )
if info[ 'total' ] != info[ 'count' ]:
      raise Exception( 'There are more interfaces than we got' ) # wow, what kind of machine do you have there?
for iface in iface_list:
self.request( 'update', iface, { 'pxe': '/api/v1/BluePrint/PXE:{0}:'.format( pxe ) } )
def ipmicommand( cmd, ignore_failure=False ):
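  # run an ipmitool command; warn and continue when ignore_failure is set, otherwise report the error and exit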
proc = subprocess.run( [ '/bin/ipmitool' ] + cmd.split() )
if proc.returncode != 0:
if ignore_failure:
print( 'WARNING: ipmi cmd "{0}" failed, ignored...'.format( cmd ) )
else:
_setMessage( 'Ipmi Error with: "{0}"'.format( cmd ) )
sys.exit( 1 )
def getLLDP():
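  # poll 'lldpcli show neighbors' (retrying up to ~10 times) and parse the key=value output into per-interface mac/name/slot/port/subport info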
counter = 0
lldp_values = {}
results = {}
while True:
proc = subprocess.run( [ '/sbin/lldpcli', 'show', 'neighbors', '-f', 'keyvalue' ], shell=False, stdout=subprocess.PIPE )
lldp_data = str( proc.stdout, 'utf-8' ).strip()
if len( lldp_data ) > 10:
for line in lldp_data.splitlines():
if '=' not in line:
continue
( key, value ) = line.split( '=' )
lldp_values[key] = value
break
else:
if counter >= 10:
_setMessage( 'lldp timeout waiting for data, skipping...' )
return results
counter += 1
time.sleep( 10 )
for item in lldp_values:
( protocol, interface, name ) = item.split( '.', 2 ) # protocol, interface
if interface not in results:
results[ interface ] = {}
if name == 'chassis.mac':
results[ interface ][ 'mac' ] = lldp_values[ item ]
elif name == 'chassis.name':
results[ interface ][ 'name' ] = lldp_values[ item ]
elif name in ( 'port.local', 'port.ifname' ):
parts = re.sub( '[^0-9/]', '', lldp_values[ item ] ).split( '/' )
if len( parts ) == 1:
results[ interface ][ 'slot' ] = 1
results[ interface ][ 'port' ] = int( parts[0] )
results[ interface ][ 'subport' ] = 0
elif len( parts ) == 2:
results[ interface ][ 'slot' ] = int( parts[0] )
results[ interface ][ 'port' ] = int( parts[1] )
results[ interface ][ 'subport' ] = 0
elif len( parts ) == 3:
results[ interface ][ 'slot' ] = int( parts[0] )
results[ interface ][ 'port' ] = int( parts[1] )
results[ interface ][ 'subport' ] = int( parts[2] )
else:
_setMessage( 'I don\'t know how to handle this lldp local port "{0}"'.format( lldp_values[ item ] ) )
sys.exit( 1 )
return results
def cpuPhysicalCount():
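  # count distinct 'physical id' entries in /proc/cpuinfo, i.e. the number of physical CPU sockets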
wrk = []
cpuinfo = open( '/proc/cpuinfo', 'r' )
for line in cpuinfo.readlines():
if line.startswith( 'physical id' ) and line not in wrk:
wrk.append( line )
return len( wrk )
def cpuLogicalCount():
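  # count 'processor' entries in /proc/cpuinfo, i.e. the number of logical CPUs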
wrk = []
cpuinfo = open( '/proc/cpuinfo', 'r' )
for line in cpuinfo.readlines():
if line.startswith( 'processor' ) and line not in wrk:
wrk.append( line )
return len( wrk )
def getRAMAmmount():
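  # return the total RAM in MiB, taken from the MemTotal line of /proc/meminfo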
meminfo = open( '/proc/meminfo', 'r' )
for line in meminfo.readlines():
if line.startswith( 'MemTotal' ):
return int( line.split( ':' )[1].strip().split( ' ' )[0] ) / 1024
def getIPMIMAC( lan_channel ):
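  # return the BMC MAC address reported by 'ipmitool lan print' for the given channel, or None if not found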
proc = subprocess.run( [ '/bin/ipmitool', 'lan', 'print', str( lan_channel ) ], stdout=subprocess.PIPE )
lines = str( proc.stdout, 'utf-8' ).strip().splitlines()
for line in lines:
if line.startswith( 'MAC Address' ):
return line[ 25: ].strip()
return None
def getIpAddress( interface ):
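  # return the first address from 'ip addr show' for the interface (typically the IPv4 'inet' entry)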
proc = subprocess.run( [ '/sbin/ip', 'addr', 'show', 'dev', interface ], shell=False, stdout=subprocess.PIPE )
lines = str( proc.stdout, 'utf-8' ).strip().splitlines()
return lines[2].split()[1].split( '/' )[0]
| [
"re.sub",
"subprocess.run",
"time.sleep",
"sys.exit"
] | [((4630, 4733), 'subprocess.run', 'subprocess.run', (["['/sbin/ip', 'addr', 'show', 'dev', interface]"], {'shell': '(False)', 'stdout': 'subprocess.PIPE'}), "(['/sbin/ip', 'addr', 'show', 'dev', interface], shell=False,\n stdout=subprocess.PIPE)\n", (4644, 4733), False, 'import subprocess\n'), ((1879, 1992), 'subprocess.run', 'subprocess.run', (["['/sbin/lldpcli', 'show', 'neighbors', '-f', 'keyvalue']"], {'shell': '(False)', 'stdout': 'subprocess.PIPE'}), "(['/sbin/lldpcli', 'show', 'neighbors', '-f', 'keyvalue'],\n shell=False, stdout=subprocess.PIPE)\n", (1893, 1992), False, 'import subprocess\n'), ((1775, 1786), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1783, 1786), False, 'import sys\n'), ((2404, 2418), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2414, 2418), False, 'import time\n'), ((2842, 2882), 're.sub', 're.sub', (['"""[^0-9/]"""', '""""""', 'lldp_values[item]'], {}), "('[^0-9/]', '', lldp_values[item])\n", (2848, 2882), False, 'import re\n'), ((3601, 3612), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3609, 3612), False, 'import sys\n')] |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import os
import copy
import numpy as np
import tensorflow as tf
from . import utils
from os.path import isdir, join
tf.NotDifferentiable("Spans")
tf.NotDifferentiable("Antecedents")
tf.NotDifferentiable("ExtractMentions")
tf.NotDifferentiable("DistanceBins")
seed = 5
tf.set_random_seed(seed)
class CorefModel(object):
"""
End-to-end neural model for coreference resolution.
Class that create model from https://homes.cs.washington.edu/~kentonl/pub/lhlz-emnlp.2017.pdf
"""
def __init__(self, opt):
"""Initialize the class and model according to the given parameters in opt."""
self.opt = copy.deepcopy(opt)
tf.set_random_seed(opt['random_seed'])
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
coref_op_library = tf.load_op_library(join(opt['model_file'], "coref_kernels.so"))
self.spans = coref_op_library.spans
self.distance_bins = coref_op_library.distance_bins
self.extract_mentions = coref_op_library.extract_mentions
self.get_antecedents = coref_op_library.antecedents
dpath = join(self.opt['model_file'], self.opt['language'], 'agent')
self.log_root = join(dpath, 'logs')
self.char_embedding_size = self.opt["char_embedding_size"]
self.char_vocab_path = join(dpath, 'vocab', 'char_vocab.russian.txt')
self.char_dict = utils.load_char_dict(self.char_vocab_path)
if opt['emb_format'] == 'vec':
self.embedding_path = join(dpath, 'embeddings', 'embeddings_lenta_100.vec')
elif opt['emb_format'] == 'bin':
self.embedding_path = join(dpath, 'embeddings', 'ft_0.8.3_nltk_yalen_sg_300.bin')
else:
raise ValueError('Not supported embeddings format {}'.format(opt['emb_format']))
self.embedding_info = (self.opt["embedding_size"], self.opt["emb_lowercase"])
self.embedding_size = self.opt['embedding_size']
self.embedding_dicts = utils.load_embedding_dict(self.embedding_path, self.embedding_size,
self.opt["emb_format"])
self.max_mention_width = self.opt["max_mention_width"]
self.genres = {g: i for i, g in enumerate(self.opt["genres"])}
input_props = list()
input_props.append((tf.float64, [None, None, self.embedding_size])) # Text embeddings.
input_props.append((tf.int32, [None, None, None])) # Character indices.
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=1, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
# train type trigger
if self.opt['train_on_gold']:
self.predictions, self.loss = self.get_predictions_and_loss_on_gold(*self.input_tensors)
else:
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.opt["learning_rate"], self.global_step,
self.opt["decay_frequency"], self.opt["decay_rate"],
staircase=True)
learning_rate = tf.cond(learning_rate < opt['final_rate'],
lambda: tf.Variable(opt['final_rate'], tf.float32),
lambda: learning_rate)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
# gradients = [g if g is None else tf.cast(g, tf.float64) for g in gradients]
# gradients, _ = tf.clip_by_global_norm(gradients, self.opt["max_gradient_norm"])
optimizers = {
"adam": tf.train.AdamOptimizer,
"sgd": tf.train.GradientDescentOptimizer
}
optimizer = optimizers[self.opt["optimizer"]](learning_rate)
self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
self.sess = tf.Session(config=config)
self.init_op = tf.global_variables_initializer()
self.sess.run(self.init_op)
def start_enqueue_thread(self, train_example, is_training, returning=False):
"""
Initialize queue of tensors that feed one at the input of the model.
Args:
train_example: modified dict from agent
is_training: training flag
returning: returning flag
Returns:
if returning is True, return list of variables:
[word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids]
"""
tensorized_example = self.tensorize_example(train_example, is_training=is_training)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
self.sess.run(self.enqueue_op, feed_dict=feed_dict)
if returning:
return tensorized_example
def tensorize_mentions(self, mentions):
"""
        Create two np.arrays of the start and end positions of gold mentions.
Args:
mentions: list of tuple
Returns:
np.array(starts positions), np.array(ends positions)
"""
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_example(self, example, is_training):
"""
Takes a dictionary from the observation and transforms it into a set of tensors
for tensorflow placeholders.
Args:
example: dict from observation
is_training: True or False value, use as a returned parameter or flag
Returns: word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids;
            i.e. numpy tensors for the placeholders (is_training - bool)
If length of the longest sentence in the document is greater than parameter "max_training_sentences",
the returning method calls the 'truncate_example' function.
"""
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in utils.flatten(clusters))
gold_mention_map = {m: i for i, m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id
sentences = example["sentences"]
num_words = sum(len(s) for s in sentences)
speakers = utils.flatten(example["speakers"])
assert num_words == len(speakers)
max_sentence_length = max(len(s) for s in sentences)
max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.opt["filter_widths"]))
word_emb = np.zeros([len(sentences), max_sentence_length, self.embedding_size])
char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
text_len = np.array([len(s) for s in sentences])
for i, sentence in enumerate(sentences):
for j, word in enumerate(sentence):
current_dim = 0
d = self.embedding_dicts
(s, l) = self.embedding_info
current_word = word
if l:
                    current_word = word.lower()
if self.opt['emb_format'] == 'vec':
word_emb[i, j, current_dim:current_dim + s] = utils.normalize(d[current_word])
else:
word_emb[i, j, current_dim:current_dim + s] = utils.normalize(np.array(d[current_word]))
current_dim += s
char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
speaker_dict = {s: i for i, s in enumerate(set(speakers))}
speaker_ids = np.array([speaker_dict[s] for s in speakers]) # numpy
doc_key = example["doc_key"]
genre = self.genres[doc_key[:2]] # int 1
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions) # numpy of unicode str
if is_training and len(sentences) > self.opt["max_training_sentences"]:
return self.truncate_example(word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts,
gold_ends, cluster_ids)
else:
return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def truncate_example(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends,
cluster_ids):
"""
It takes the output of the function "tensorize_example" and cuts off the excess part of the tensor.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns: word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids;
The same set of tensors as in the input, but with a corrected shape.
Additional Information:
"None" in some form-size tensors, for example "word_emb", means that this axis measurement can vary
from document to document.
"""
max_training_sentences = self.opt["max_training_sentences"]
num_sentences = word_emb.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences)
word_offset = text_len[:sentence_offset].sum()
        # it is not clear what exactly is happening here
        # why do they cut the first part of the tensor instead of the second ???
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
word_emb = word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
speaker_ids = speaker_ids[word_offset: word_offset + num_words]
assert len(gold_ends) == len(gold_starts)
Gold_starts = np.zeros((len(gold_starts)))
Gold_ends = np.zeros((len(gold_ends)))
for i in range(len(gold_ends)):
Gold_ends[i] = int(gold_ends[i])
Gold_starts[i] = int(gold_starts[i])
gold_starts = Gold_starts
gold_ends = Gold_ends
        # this part looks dubious
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def get_mention_emb(self, text_emb, text_outputs, mention_starts, mention_ends):
"""
Forms a tensor that contains of embeddings of specific mentions.
Args:
text_emb: boolean mask, [num_sentences, max_sentence_length, emb]
text_outputs: tf.float64, [num_sentences, max_sentence_length, emb]
mention_starts: tf.int32, [Amount of mentions]
mention_ends: tf.int32, [Amount of mentions]
Returns: tf.float64, [num_mentions, emb]
Mentions embeddings tensor.
"""
mention_emb_list = []
mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
mention_emb_list.append(mention_start_emb)
mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_emb_list.append(mention_end_emb)
mention_width = 1 + mention_ends - mention_starts # [num_mentions]
if self.opt["use_features"]:
mention_width_index = mention_width - 1 # [num_mentions]
mention_width_emb = tf.gather(tf.get_variable("mention_width_embeddings", [self.opt["max_mention_width"],
self.opt["feature_size"]],
dtype=tf.float64),
mention_width_index) # [num_mentions, emb]
mention_width_emb = tf.nn.dropout(mention_width_emb, self.dropout)
mention_emb_list.append(mention_width_emb)
if self.opt["model_heads"]:
mention_indices = tf.expand_dims(tf.range(self.opt["max_mention_width"]), 0) + tf.expand_dims(
mention_starts, 1) # [num_mentions, max_mention_width]
mention_indices = tf.minimum(utils.shape(text_outputs, 0) - 1,
mention_indices) # [num_mentions, max_mention_width]
mention_text_emb = tf.gather(text_emb, mention_indices) # [num_mentions, max_mention_width, emb]
self.head_scores = utils.projection(text_outputs, 1) # [num_words, 1]
mention_head_scores = tf.gather(self.head_scores, mention_indices) # [num_mentions, max_mention_width, 1]
mention_mask = tf.expand_dims(
tf.sequence_mask(mention_width, self.opt["max_mention_width"], dtype=tf.float64),
2) # [num_mentions, max_mention_width, 1]
mention_attention = tf.nn.softmax(mention_head_scores + tf.log(mention_mask),
dim=1) # [num_mentions, max_mention_width, 1]
mention_head_emb = tf.reduce_sum(mention_attention * mention_text_emb, 1) # [num_mentions, emb]
mention_emb_list.append(mention_head_emb)
mention_emb = tf.concat(mention_emb_list, 1) # [num_mentions, emb]
return mention_emb
def get_mention_scores(self, mention_emb):
"""
        Passes the mention embeddings through a fully connected network and returns its output.
        It computes the mention scores.
Args:
            mention_emb: tf.float64, [num_mentions, emb], a tensor that contains the embeddings of specific mentions
Returns: [num_mentions, 1]
Output of the fully-connected network, that compute the mentions scores.
"""
with tf.variable_scope("mention_scores"):
return utils.ffnn(mention_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1,
self.dropout) # [num_mentions, 1]
def softmax_loss(self, antecedent_scores, antecedent_labels):
"""
Computes the value of the loss function using antecedent_scores and antecedent_labels.
Practically standard softmax function.
Args:
            antecedent_scores: tf.float64, [num_mentions, max_ant + 1], output of the fully-connected network that
                computes antecedent scores.
antecedent_labels: True labels for antecedent.
Returns: [num_mentions]
The value of loss function.
"""
gold_scores = antecedent_scores + tf.log(tf.cast(antecedent_labels, tf.float64)) # [num_mentions, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [num_mentions]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [num_mentions]
return log_norm - marginalized_gold_scores # [num_mentions]
def get_antecedent_scores(self, mention_emb, mention_scores, antecedents, antecedents_len, mention_speaker_ids,
genre_emb):
"""
        Forms a new tensor using special features, mention embeddings, mention scores, etc.
        and passes it through a fully-connected network that computes antecedent scores.
Args:
            mention_emb: [num_mentions, emb], a tensor that contains the embeddings of specific mentions
mention_scores: [num_mentions, 1], Output of the fully-connected network, that compute the mentions scores.
antecedents: [] get from C++ function
antecedents_len: [] get from C++ function
mention_speaker_ids: [num_mentions, speaker_emb_size], tf.float64, Speaker IDs.
genre_emb: [genre_emb_size], tf.float64, Genre
Returns: tf.float64, [num_mentions, max_ant + 1], antecedent scores.
"""
num_mentions = utils.shape(mention_emb, 0)
max_antecedents = utils.shape(antecedents, 1)
feature_emb_list = []
if self.opt["use_metadata"]:
antecedent_speaker_ids = tf.gather(mention_speaker_ids, antecedents) # [num_mentions, max_ant]
same_speaker = tf.equal(tf.expand_dims(mention_speaker_ids, 1),
antecedent_speaker_ids) # [num_mentions, max_ant]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.opt["feature_size"]],
dtype=tf.float64),
tf.to_int32(same_speaker)) # [num_mentions, max_ant, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0),
[num_mentions, max_antecedents, 1]) # [num_mentions, max_ant, emb]
feature_emb_list.append(tiled_genre_emb)
if self.opt["use_features"]:
target_indices = tf.range(num_mentions) # [num_mentions]
mention_distance = tf.expand_dims(target_indices, 1) - antecedents # [num_mentions, max_ant]
mention_distance_bins = self.distance_bins(mention_distance) # [num_mentions, max_ant]
mention_distance_bins.set_shape([None, None])
mention_distance_emb = tf.gather(tf.get_variable("mention_distance_emb", [10, self.opt["feature_size"]],
dtype=tf.float64),
mention_distance_bins) # [num_mentions, max_ant]
feature_emb_list.append(mention_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [num_mentions, max_ant, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [num_mentions, max_ant, emb]
antecedent_emb = tf.gather(mention_emb, antecedents) # [num_mentions, max_ant, emb]
target_emb_tiled = tf.tile(tf.expand_dims(mention_emb, 1),
[1, max_antecedents, 1]) # [num_mentions, max_ant, emb]
similarity_emb = antecedent_emb * target_emb_tiled # [num_mentions, max_ant, emb]
pair_emb = tf.concat([target_emb_tiled, antecedent_emb, similarity_emb, feature_emb], 2)
# [num_mentions, max_ant, emb]
with tf.variable_scope("iteration"):
with tf.variable_scope("antecedent_scoring"):
antecedent_scores = utils.ffnn(pair_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1,
self.dropout) # [num_mentions, max_ant, 1]
antecedent_scores = tf.squeeze(antecedent_scores, 2) # [num_mentions, max_ant]
antecedent_mask = tf.log(
tf.sequence_mask(antecedents_len, max_antecedents, dtype=tf.float64)) # [num_mentions, max_ant]
antecedent_scores += antecedent_mask # [num_mentions, max_ant]
antecedent_scores += tf.expand_dims(mention_scores, 1) + tf.gather(mention_scores,
antecedents) # [num_mentions, max_ant]
antecedent_scores = tf.concat([tf.zeros([utils.shape(mention_scores, 0), 1], dtype=tf.float64),
antecedent_scores],
1) # [num_mentions, max_ant + 1]
return antecedent_scores # [num_mentions, max_ant + 1]
def flatten_emb_by_sentence(self, emb, text_len_mask):
"""
        Flatten the emb tensor over sentences and apply the boolean text-length mask.
        Args:
            emb: Some embeddings tensor with rank 2 or 3
            text_len_mask: A mask tensor representing the first N positions of each row.
        Returns: emb tensor after mask application.
"""
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, utils.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, text_len_mask)
def encode_sentences(self, text_emb, text_len, text_len_mask):
"""
Passes the input tensor through bi_LSTM.
Args:
text_emb: [num_sentences, max_sentence_length, emb], text code in tensor
text_len: tf.int32, [Amount of sentences]
text_len_mask: boolean mask for text_emb
Returns: [num_sentences, max_sentence_length, emb], output of bi-LSTM after boolean mask application
"""
num_sentences = tf.shape(text_emb)[0]
max_sentence_length = tf.shape(text_emb)[1]
# Transpose before and after for efficiency.
inputs = tf.transpose(text_emb, [1, 0, 2]) # [max_sentence_length, num_sentences, emb]
with tf.variable_scope("fw_cell"):
cell_fw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout)
preprocessed_inputs_fw = cell_fw.preprocess_input(inputs)
with tf.variable_scope("bw_cell"):
cell_bw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout)
preprocessed_inputs_bw = cell_bw.preprocess_input(inputs)
preprocessed_inputs_bw = tf.reverse_sequence(preprocessed_inputs_bw,
seq_lengths=text_len,
seq_dim=0,
batch_dim=1)
state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
with tf.variable_scope("lstm"):
with tf.variable_scope("fw_lstm"):
fw_outputs, fw_states = tf.nn.dynamic_rnn(cell=cell_fw,
inputs=preprocessed_inputs_fw,
sequence_length=text_len,
initial_state=state_fw,
time_major=True)
with tf.variable_scope("bw_lstm"):
bw_outputs, bw_states = tf.nn.dynamic_rnn(cell=cell_bw,
inputs=preprocessed_inputs_bw,
sequence_length=text_len,
initial_state=state_bw,
time_major=True)
bw_outputs = tf.reverse_sequence(bw_outputs,
seq_lengths=text_len,
seq_dim=0,
batch_dim=1)
text_outputs = tf.concat([fw_outputs, bw_outputs], 2)
text_outputs = tf.transpose(text_outputs, [1, 0, 2]) # [num_sentences, max_sentence_length, emb]
return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
"""
Forms a list of predicted antecedent labels
Args:
antecedents: [] get from C++ function
            antecedent_scores: [num_mentions, max_ant + 1] output of the fully-connected network
                that computes antecedent_scores
Returns: a list of predicted antecedent labels
"""
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
def get_predictions_and_loss(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts,
gold_ends, cluster_ids):
"""
        Connects all elements of the network into one complete graph that computes mention spans independently,
        and passes through it the tensors that came to the input of the placeholders.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
List of predictions and scores, and Loss function value
"""
self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"])
self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"])
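        # both dropout values are keep probabilities for tf.nn.dropout: 1 - rate while training, 1.0 at inference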
num_sentences = tf.shape(word_emb)[0]
max_sentence_length = tf.shape(word_emb)[1]
text_emb_list = [word_emb]
if self.opt["char_embedding_size"] > 0:
char_emb = tf.gather(
tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]]),
char_index, tf.float64) # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2),
utils.shape(char_emb, 3)])
# [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[
"filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb,
[num_sentences,
max_sentence_length,
utils.shape(flattened_aggregated_char_emb, 1)])
# [num_sentences, max_sentence_length, emb]
text_emb_list.append(aggregated_char_emb)
text_emb = tf.concat(text_emb_list, 2)
text_emb = tf.nn.dropout(text_emb, self.lexical_dropout)
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)
text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length])
text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask)
text_outputs = tf.nn.dropout(text_outputs, self.dropout)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]],
dtype=tf.float64),
genre) # [emb]
sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
[1, max_sentence_length]) # [num_sentences, max_sentence_length]
flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask) # [num_words]
candidate_starts, candidate_ends = self.spans(
sentence_indices=flattened_sentence_indices,
max_width=self.max_mention_width)
candidate_starts.set_shape([None])
candidate_ends.set_shape([None])
candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, candidate_starts,
candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1]
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(text_outputs)[0]) * self.opt["mention_ratio"]))
predicted_mention_indices = self.extract_mentions(candidate_mention_scores, candidate_starts,
candidate_ends, k) # ([k], [k])
predicted_mention_indices.set_shape([None])
mention_starts = tf.gather(candidate_starts, predicted_mention_indices) # [num_mentions]
mention_ends = tf.gather(candidate_ends, predicted_mention_indices) # [num_mentions]
mention_emb = tf.gather(candidate_mention_emb, predicted_mention_indices) # [num_mentions, emb]
mention_scores = tf.gather(candidate_mention_scores, predicted_mention_indices) # [num_mentions]
# mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
# mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions]
max_antecedents = self.opt["max_antecedents"]
antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends,
gold_starts, gold_ends, cluster_ids,
max_antecedents)
# ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions]
antecedents.set_shape([None, None])
antecedent_labels.set_shape([None, None])
antecedents_len.set_shape([None])
antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len,
mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1]
loss = self.softmax_loss(antecedent_scores, antecedent_labels) # [num_mentions]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
def get_predicted_clusters(self, mention_starts, mention_ends, predicted_antecedents):
"""
        Creates a list of clusters, as in the dict from observation, and a dict mapping mentions to the clusters
        to which they belong. They are necessary for inference mode and for marking new conll documents
        without the last column.
Args:
mention_starts: tf.float64, [Amount of mentions]
mention_ends: tf.float64, [Amount of mentions]
predicted_antecedents: [len antecedent scores]
Returns:
predicted_clusters = [[(),(),()],[(),()]] list like, with mention id
mention_to_predicted = {mentions id: [(),(),()], ...}
"""
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(mention_starts[predicted_index]), int(mention_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(mention_starts[i]), int(mention_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[i] for m, i in mention_to_predicted.items()}
return predicted_clusters, mention_to_predicted
def init_from_saved(self, saver):
"""
Load model from saved checkpoint.
Args:
saver: tf.saver
Returns: Nothing
"""
# checkpoint_path = join(self.log_root, self.opt['name'])
checkpoint_path = self.opt['model_file']
if os.path.isfile(join(checkpoint_path, "model.max.ckpt.meta")):
saver.restore(self.sess, join(checkpoint_path, "model.max.ckpt"))
else:
print('{0} not found'.format(checkpoint_path))
print('Init from scratch')
def shutdown(self):
"""Reset the model"""
tf.reset_default_graph()
def save(self, saver):
"""Save model checkpoint"""
# log_dir = self.log_root
# if isdir(log_dir):
# if isdir(join(log_dir, self.opt['name'])):
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt['name'])
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt["log_root"])
# if isdir(join(log_dir, self.opt['name'])):
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt['name'])
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# save in root folder
print('saving path ' + join(self.opt['model_file'], 'model.max.ckpt'))
saver.save(self.sess, join(self.opt['model_file'], 'model.max.ckpt'))
def train(self, batch):
"""
Run train operation on one batch/document
Args:
batch: list of tensors for placeholders, output of "tensorize_example" function
Returns: Loss functions value and tf.global_step
"""
self.start_enqueue_thread(batch, True)
self.tf_loss, tf_global_step, _ = self.sess.run([self.loss, self.global_step, self.train_op])
return self.tf_loss, tf_global_step
def predict(self, batch, out_file):
"""
Make prediction of new coreference clusters and write it conll document.
Args:
batch: list of tensors for placeholders, output of "tensorize_example" function
out_file: original conll document
Returns: str with new conll document, with new coreference clusters
"""
self.start_enqueue_thread(batch, False)
if self.opt['train_on_gold']:
_, mention_starts, mention_ends, antecedents, antecedent_scores = self.sess.run(self.predictions)
else:
_, _, _, mention_starts, mention_ends, antecedents, antecedent_scores = self.sess.run(self.predictions)
predicted_antecedents = self.get_predicted_antecedents(antecedents, antecedent_scores)
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(mention_starts, mention_ends,
predicted_antecedents)
        new_clusters = dict()
        new_clusters[batch['doc_key']] = predicted_clusters
        outconll = utils.output_conll(out_file, new_clusters)
return outconll
def get_predictions_and_loss_on_gold(self, word_emb, char_index, text_len, speaker_ids, genre, is_training,
gold_starts, gold_ends, cluster_ids):
"""
        Connects all elements of the network into one complete graph that uses gold mentions,
        and passes through it the tensors that came to the input of the placeholders.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
List of predictions and scores, and Loss function value
"""
self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"])
self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"])
# assert gold_ends.shape == gold_starts.shape,\
# ('Amount of starts and ends of gold mentions are not equal: '
# 'Length of gold starts: {1}; Length of gold ends: {0}'.format(gold_ends.shape, gold_starts.shape))
num_sentences = tf.shape(word_emb)[0]
max_sentence_length = tf.shape(word_emb)[1]
text_emb_list = [word_emb]
if self.opt["char_embedding_size"] > 0:
char_emb = tf.gather(
tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]],
dtype=tf.float64),
char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2),
utils.shape(char_emb,
3)])
# [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[
"filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb,
[num_sentences,
max_sentence_length,
utils.shape(flattened_aggregated_char_emb, 1)])
# [num_sentences, max_sentence_length, emb]
text_emb_list.append(aggregated_char_emb)
text_emb = tf.concat(text_emb_list, 2)
text_emb = tf.nn.dropout(text_emb, self.lexical_dropout)
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)
text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length])
text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask)
text_outputs = tf.nn.dropout(text_outputs, self.dropout)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]],
dtype=tf.float64),
genre) # [emb]
# sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
# [1, max_sentence_length]) # [num_sentences, max_sentence_length]
# flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask) # [num_words]
candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, gold_starts,
gold_ends) # [num_candidates, emb]
# candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1]
# candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions]
gold_len = tf.shape(gold_ends)
candidate_mention_scores = tf.ones(gold_len, dtype=tf.float64)
mention_starts = gold_starts
mention_ends = gold_ends
mention_emb = candidate_mention_emb
mention_scores = candidate_mention_scores
# mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
# mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions]
max_antecedents = self.opt["max_antecedents"]
antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends,
gold_starts, gold_ends, cluster_ids,
max_antecedents)
# ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions]
antecedents.set_shape([None, None])
antecedent_labels.set_shape([None, None])
antecedents_len.set_shape([None])
antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len,
mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1]
loss = self.softmax_loss(tf.cast(antecedent_scores, tf.float64), antecedent_labels) # [num_mentions]
loss = tf.reduce_sum(loss) # []
return [candidate_mention_scores, mention_starts, mention_ends, antecedents, antecedent_scores], loss
| [
"tensorflow.tile",
"tensorflow.NotDifferentiable",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.boolean_mask",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.gradients",
"numpy.array",
"tensorflow.nn.dropout",
"tensorflow.reverse_sequence",
"copy.deepcopy",
"tenso... | [((741, 770), 'tensorflow.NotDifferentiable', 'tf.NotDifferentiable', (['"""Spans"""'], {}), "('Spans')\n", (761, 770), True, 'import tensorflow as tf\n'), ((771, 806), 'tensorflow.NotDifferentiable', 'tf.NotDifferentiable', (['"""Antecedents"""'], {}), "('Antecedents')\n", (791, 806), True, 'import tensorflow as tf\n'), ((807, 846), 'tensorflow.NotDifferentiable', 'tf.NotDifferentiable', (['"""ExtractMentions"""'], {}), "('ExtractMentions')\n", (827, 846), True, 'import tensorflow as tf\n'), ((847, 883), 'tensorflow.NotDifferentiable', 'tf.NotDifferentiable', (['"""DistanceBins"""'], {}), "('DistanceBins')\n", (867, 883), True, 'import tensorflow as tf\n'), ((894, 918), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (912, 918), True, 'import tensorflow as tf\n'), ((1253, 1271), 'copy.deepcopy', 'copy.deepcopy', (['opt'], {}), '(opt)\n', (1266, 1271), False, 'import copy\n'), ((1281, 1319), 'tensorflow.set_random_seed', 'tf.set_random_seed', (["opt['random_seed']"], {}), "(opt['random_seed'])\n", (1299, 1319), True, 'import tensorflow as tf\n'), ((1337, 1353), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1351, 1353), True, 'import tensorflow as tf\n'), ((1774, 1833), 'os.path.join', 'join', (["self.opt['model_file']", "self.opt['language']", '"""agent"""'], {}), "(self.opt['model_file'], self.opt['language'], 'agent')\n", (1778, 1833), False, 'from os.path import isdir, join\n'), ((1858, 1877), 'os.path.join', 'join', (['dpath', '"""logs"""'], {}), "(dpath, 'logs')\n", (1862, 1877), False, 'from os.path import isdir, join\n'), ((1976, 2022), 'os.path.join', 'join', (['dpath', '"""vocab"""', '"""char_vocab.russian.txt"""'], {}), "(dpath, 'vocab', 'char_vocab.russian.txt')\n", (1980, 2022), False, 'from os.path import isdir, join\n'), ((3709, 3770), 'tensorflow.PaddingFIFOQueue', 'tf.PaddingFIFOQueue', ([], {'capacity': '(1)', 'dtypes': 'dtypes', 'shapes': 'shapes'}), '(capacity=1, dtypes=dtypes, shapes=shapes)\n', (3728, 3770), True, 'import tensorflow as tf\n'), ((4186, 4237), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (4197, 4237), True, 'import tensorflow as tf\n'), ((4271, 4301), 'tensorflow.assign', 'tf.assign', (['self.global_step', '(0)'], {}), '(self.global_step, 0)\n', (4280, 4301), True, 'import tensorflow as tf\n'), ((4326, 4470), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (["self.opt['learning_rate']", 'self.global_step', "self.opt['decay_frequency']", "self.opt['decay_rate']"], {'staircase': '(True)'}), "(self.opt['learning_rate'], self.global_step,\n self.opt['decay_frequency'], self.opt['decay_rate'], staircase=True)\n", (4352, 4470), True, 'import tensorflow as tf\n'), ((4820, 4844), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4842, 4844), True, 'import tensorflow as tf\n'), ((4866, 4907), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'trainable_params'], {}), '(self.loss, trainable_params)\n', (4878, 4907), True, 'import tensorflow as tf\n'), ((5426, 5451), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (5436, 5451), True, 'import tensorflow as tf\n'), ((5476, 5509), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5507, 5509), True, 'import tensorflow as tf\n'), ((9332, 9377), 'numpy.array', 'np.array', (['[speaker_dict[s] for s in speakers]'], {}), 
'([speaker_dict[s] for s in speakers])\n', (9340, 9377), True, 'import numpy as np\n'), ((11559, 11616), 'random.randint', 'random.randint', (['(0)', '(num_sentences - max_training_sentences)'], {}), '(0, num_sentences - max_training_sentences)\n', (11573, 11616), False, 'import random\n'), ((12627, 12706), 'numpy.logical_and', 'np.logical_and', (['(gold_ends >= word_offset)', '(gold_starts < word_offset + num_words)'], {}), '(gold_ends >= word_offset, gold_starts < word_offset + num_words)\n', (12641, 12706), True, 'import numpy as np\n'), ((13606, 13645), 'tensorflow.gather', 'tf.gather', (['text_outputs', 'mention_starts'], {}), '(text_outputs, mention_starts)\n', (13615, 13645), True, 'import tensorflow as tf\n'), ((13747, 13784), 'tensorflow.gather', 'tf.gather', (['text_outputs', 'mention_ends'], {}), '(text_outputs, mention_ends)\n', (13756, 13784), True, 'import tensorflow as tf\n'), ((15837, 15867), 'tensorflow.concat', 'tf.concat', (['mention_emb_list', '(1)'], {}), '(mention_emb_list, 1)\n', (15846, 15867), True, 'import tensorflow as tf\n'), ((17273, 17310), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['gold_scores', '[1]'], {}), '(gold_scores, [1])\n', (17292, 17310), True, 'import tensorflow as tf\n'), ((17348, 17391), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['antecedent_scores', '[1]'], {}), '(antecedent_scores, [1])\n', (17367, 17391), True, 'import tensorflow as tf\n'), ((20191, 20221), 'tensorflow.concat', 'tf.concat', (['feature_emb_list', '(2)'], {}), '(feature_emb_list, 2)\n', (20200, 20221), True, 'import tensorflow as tf\n'), ((20276, 20316), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['feature_emb', 'self.dropout'], {}), '(feature_emb, self.dropout)\n', (20289, 20316), True, 'import tensorflow as tf\n'), ((20375, 20410), 'tensorflow.gather', 'tf.gather', (['mention_emb', 'antecedents'], {}), '(mention_emb, antecedents)\n', (20384, 20410), True, 'import tensorflow as tf\n'), ((20713, 20790), 'tensorflow.concat', 'tf.concat', (['[target_emb_tiled, antecedent_emb, similarity_emb, feature_emb]', '(2)'], {}), '([target_emb_tiled, antecedent_emb, similarity_emb, feature_emb], 2)\n', (20722, 20790), True, 'import tensorflow as tf\n'), ((21160, 21192), 'tensorflow.squeeze', 'tf.squeeze', (['antecedent_scores', '(2)'], {}), '(antecedent_scores, 2)\n', (21170, 21192), True, 'import tensorflow as tf\n'), ((22754, 22799), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['flattened_emb', 'text_len_mask'], {}), '(flattened_emb, text_len_mask)\n', (22769, 22799), True, 'import tensorflow as tf\n'), ((23427, 23460), 'tensorflow.transpose', 'tf.transpose', (['text_emb', '[1, 0, 2]'], {}), '(text_emb, [1, 0, 2])\n', (23439, 23460), True, 'import tensorflow as tf\n'), ((25593, 25670), 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['bw_outputs'], {'seq_lengths': 'text_len', 'seq_dim': '(0)', 'batch_dim': '(1)'}), '(bw_outputs, seq_lengths=text_len, seq_dim=0, batch_dim=1)\n', (25612, 25670), True, 'import tensorflow as tf\n'), ((25818, 25856), 'tensorflow.concat', 'tf.concat', (['[fw_outputs, bw_outputs]', '(2)'], {}), '([fw_outputs, bw_outputs], 2)\n', (25827, 25856), True, 'import tensorflow as tf\n'), ((25880, 25917), 'tensorflow.transpose', 'tf.transpose', (['text_outputs', '[1, 0, 2]'], {}), '(text_outputs, [1, 0, 2])\n', (25892, 25917), True, 'import tensorflow as tf\n'), ((29619, 29646), 'tensorflow.concat', 'tf.concat', (['text_emb_list', '(2)'], {}), '(text_emb_list, 2)\n', (29628, 29646), True, 'import tensorflow as tf\n'), ((29666, 29711), 
'tensorflow.nn.dropout', 'tf.nn.dropout', (['text_emb', 'self.lexical_dropout'], {}), '(text_emb, self.lexical_dropout)\n', (29679, 29711), True, 'import tensorflow as tf\n'), ((29737, 29791), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['text_len'], {'maxlen': 'max_sentence_length'}), '(text_len, maxlen=max_sentence_length)\n', (29753, 29791), True, 'import tensorflow as tf\n'), ((29816, 29880), 'tensorflow.reshape', 'tf.reshape', (['text_len_mask', '[num_sentences * max_sentence_length]'], {}), '(text_len_mask, [num_sentences * max_sentence_length])\n', (29826, 29880), True, 'import tensorflow as tf\n'), ((29985, 30026), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['text_outputs', 'self.dropout'], {}), '(text_outputs, self.dropout)\n', (29998, 30026), True, 'import tensorflow as tf\n'), ((31225, 31264), 'tensorflow.squeeze', 'tf.squeeze', (['candidate_mention_scores', '(1)'], {}), '(candidate_mention_scores, 1)\n', (31235, 31264), True, 'import tensorflow as tf\n'), ((31657, 31711), 'tensorflow.gather', 'tf.gather', (['candidate_starts', 'predicted_mention_indices'], {}), '(candidate_starts, predicted_mention_indices)\n', (31666, 31711), True, 'import tensorflow as tf\n'), ((31753, 31805), 'tensorflow.gather', 'tf.gather', (['candidate_ends', 'predicted_mention_indices'], {}), '(candidate_ends, predicted_mention_indices)\n', (31762, 31805), True, 'import tensorflow as tf\n'), ((31846, 31905), 'tensorflow.gather', 'tf.gather', (['candidate_mention_emb', 'predicted_mention_indices'], {}), '(candidate_mention_emb, predicted_mention_indices)\n', (31855, 31905), True, 'import tensorflow as tf\n'), ((31954, 32016), 'tensorflow.gather', 'tf.gather', (['candidate_mention_scores', 'predicted_mention_indices'], {}), '(candidate_mention_scores, predicted_mention_indices)\n', (31963, 32016), True, 'import tensorflow as tf\n'), ((32248, 32286), 'tensorflow.gather', 'tf.gather', (['speaker_ids', 'mention_starts'], {}), '(speaker_ids, mention_starts)\n', (32257, 32286), True, 'import tensorflow as tf\n'), ((33236, 33255), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (33249, 33255), True, 'import tensorflow as tf\n'), ((35882, 35906), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (35904, 35906), True, 'import tensorflow as tf\n'), ((42119, 42146), 'tensorflow.concat', 'tf.concat', (['text_emb_list', '(2)'], {}), '(text_emb_list, 2)\n', (42128, 42146), True, 'import tensorflow as tf\n'), ((42166, 42211), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['text_emb', 'self.lexical_dropout'], {}), '(text_emb, self.lexical_dropout)\n', (42179, 42211), True, 'import tensorflow as tf\n'), ((42237, 42291), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['text_len'], {'maxlen': 'max_sentence_length'}), '(text_len, maxlen=max_sentence_length)\n', (42253, 42291), True, 'import tensorflow as tf\n'), ((42316, 42380), 'tensorflow.reshape', 'tf.reshape', (['text_len_mask', '[num_sentences * max_sentence_length]'], {}), '(text_len_mask, [num_sentences * max_sentence_length])\n', (42326, 42380), True, 'import tensorflow as tf\n'), ((42485, 42526), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['text_outputs', 'self.dropout'], {}), '(text_outputs, self.dropout)\n', (42498, 42526), True, 'import tensorflow as tf\n'), ((43562, 43581), 'tensorflow.shape', 'tf.shape', (['gold_ends'], {}), '(gold_ends)\n', (43570, 43581), True, 'import tensorflow as tf\n'), ((43617, 43652), 'tensorflow.ones', 'tf.ones', (['gold_len'], {'dtype': 'tf.float64'}), '(gold_len, 
dtype=tf.float64)\n', (43624, 43652), True, 'import tensorflow as tf\n'), ((44032, 44070), 'tensorflow.gather', 'tf.gather', (['speaker_ids', 'mention_starts'], {}), '(speaker_ids, mention_starts)\n', (44041, 44070), True, 'import tensorflow as tf\n'), ((45041, 45060), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (45054, 45060), True, 'import tensorflow as tf\n'), ((1474, 1517), 'os.path.join', 'join', (["opt['model_file']", '"""coref_kernels.so"""'], {}), "(opt['model_file'], 'coref_kernels.so')\n", (1478, 1517), False, 'from os.path import isdir, join\n'), ((2165, 2218), 'os.path.join', 'join', (['dpath', '"""embeddings"""', '"""embeddings_lenta_100.vec"""'], {}), "(dpath, 'embeddings', 'embeddings_lenta_100.vec')\n", (2169, 2218), False, 'from os.path import isdir, join\n'), ((3588, 3616), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape'], {}), '(dtype, shape)\n', (3602, 3616), True, 'import tensorflow as tf\n'), ((6778, 6794), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (6786, 6794), True, 'import numpy as np\n'), ((6796, 6810), 'numpy.array', 'np.array', (['ends'], {}), '(ends)\n', (6804, 6810), True, 'import numpy as np\n'), ((14468, 14514), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['mention_width_emb', 'self.dropout'], {}), '(mention_width_emb, self.dropout)\n', (14481, 14514), True, 'import tensorflow as tf\n'), ((14987, 15023), 'tensorflow.gather', 'tf.gather', (['text_emb', 'mention_indices'], {}), '(text_emb, mention_indices)\n', (14996, 15023), True, 'import tensorflow as tf\n'), ((15183, 15227), 'tensorflow.gather', 'tf.gather', (['self.head_scores', 'mention_indices'], {}), '(self.head_scores, mention_indices)\n', (15192, 15227), True, 'import tensorflow as tf\n'), ((15682, 15736), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mention_attention * mention_text_emb)', '(1)'], {}), '(mention_attention * mention_text_emb, 1)\n', (15695, 15736), True, 'import tensorflow as tf\n'), ((16389, 16424), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mention_scores"""'], {}), "('mention_scores')\n", (16406, 16424), True, 'import tensorflow as tf\n'), ((18627, 18670), 'tensorflow.gather', 'tf.gather', (['mention_speaker_ids', 'antecedents'], {}), '(mention_speaker_ids, antecedents)\n', (18636, 18670), True, 'import tensorflow as tf\n'), ((19513, 19535), 'tensorflow.range', 'tf.range', (['num_mentions'], {}), '(num_mentions)\n', (19521, 19535), True, 'import tensorflow as tf\n'), ((20478, 20508), 'tensorflow.expand_dims', 'tf.expand_dims', (['mention_emb', '(1)'], {}), '(mention_emb, 1)\n', (20492, 20508), True, 'import tensorflow as tf\n'), ((20844, 20874), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""iteration"""'], {}), "('iteration')\n", (20861, 20874), True, 'import tensorflow as tf\n'), ((21267, 21335), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['antecedents_len', 'max_antecedents'], {'dtype': 'tf.float64'}), '(antecedents_len, max_antecedents, dtype=tf.float64)\n', (21283, 21335), True, 'import tensorflow as tf\n'), ((21466, 21499), 'tensorflow.expand_dims', 'tf.expand_dims', (['mention_scores', '(1)'], {}), '(mention_scores, 1)\n', (21480, 21499), True, 'import tensorflow as tf\n'), ((21502, 21540), 'tensorflow.gather', 'tf.gather', (['mention_scores', 'antecedents'], {}), '(mention_scores, antecedents)\n', (21511, 21540), True, 'import tensorflow as tf\n'), ((22309, 22322), 'tensorflow.shape', 'tf.shape', (['emb'], {}), '(emb)\n', (22317, 22322), True, 'import tensorflow as tf\n'), ((22356, 22369), 
'tensorflow.shape', 'tf.shape', (['emb'], {}), '(emb)\n', (22364, 22369), True, 'import tensorflow as tf\n'), ((22468, 22522), 'tensorflow.reshape', 'tf.reshape', (['emb', '[num_sentences * max_sentence_length]'], {}), '(emb, [num_sentences * max_sentence_length])\n', (22478, 22522), True, 'import tensorflow as tf\n'), ((23282, 23300), 'tensorflow.shape', 'tf.shape', (['text_emb'], {}), '(text_emb)\n', (23290, 23300), True, 'import tensorflow as tf\n'), ((23334, 23352), 'tensorflow.shape', 'tf.shape', (['text_emb'], {}), '(text_emb)\n', (23342, 23352), True, 'import tensorflow as tf\n'), ((23520, 23548), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fw_cell"""'], {}), "('fw_cell')\n", (23537, 23548), True, 'import tensorflow as tf\n'), ((23728, 23756), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bw_cell"""'], {}), "('bw_cell')\n", (23745, 23756), True, 'import tensorflow as tf\n'), ((23960, 24053), 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['preprocessed_inputs_bw'], {'seq_lengths': 'text_len', 'seq_dim': '(0)', 'batch_dim': '(1)'}), '(preprocessed_inputs_bw, seq_lengths=text_len, seq_dim=0,\n batch_dim=1)\n', (23979, 24053), True, 'import tensorflow as tf\n'), ((24270, 24322), 'tensorflow.tile', 'tf.tile', (['cell_fw.initial_state.c', '[num_sentences, 1]'], {}), '(cell_fw.initial_state.c, [num_sentences, 1])\n', (24277, 24322), True, 'import tensorflow as tf\n'), ((24373, 24425), 'tensorflow.tile', 'tf.tile', (['cell_fw.initial_state.h', '[num_sentences, 1]'], {}), '(cell_fw.initial_state.h, [num_sentences, 1])\n', (24380, 24425), True, 'import tensorflow as tf\n'), ((24476, 24528), 'tensorflow.tile', 'tf.tile', (['cell_bw.initial_state.c', '[num_sentences, 1]'], {}), '(cell_bw.initial_state.c, [num_sentences, 1])\n', (24483, 24528), True, 'import tensorflow as tf\n'), ((24579, 24631), 'tensorflow.tile', 'tf.tile', (['cell_bw.initial_state.h', '[num_sentences, 1]'], {}), '(cell_bw.initial_state.h, [num_sentences, 1])\n', (24586, 24631), True, 'import tensorflow as tf\n'), ((24646, 24671), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm"""'], {}), "('lstm')\n", (24663, 24671), True, 'import tensorflow as tf\n'), ((28323, 28341), 'tensorflow.shape', 'tf.shape', (['word_emb'], {}), '(word_emb)\n', (28331, 28341), True, 'import tensorflow as tf\n'), ((28375, 28393), 'tensorflow.shape', 'tf.shape', (['word_emb'], {}), '(word_emb)\n', (28383, 28393), True, 'import tensorflow as tf\n'), ((35582, 35626), 'os.path.join', 'join', (['checkpoint_path', '"""model.max.ckpt.meta"""'], {}), "(checkpoint_path, 'model.max.ckpt.meta')\n", (35586, 35626), False, 'from os.path import isdir, join\n'), ((37211, 37257), 'os.path.join', 'join', (["self.opt['model_file']", '"""model.max.ckpt"""'], {}), "(self.opt['model_file'], 'model.max.ckpt')\n", (37215, 37257), False, 'from os.path import isdir, join\n'), ((40718, 40736), 'tensorflow.shape', 'tf.shape', (['word_emb'], {}), '(word_emb)\n', (40726, 40736), True, 'import tensorflow as tf\n'), ((40770, 40788), 'tensorflow.shape', 'tf.shape', (['word_emb'], {}), '(word_emb)\n', (40778, 40788), True, 'import tensorflow as tf\n'), ((44949, 44987), 'tensorflow.cast', 'tf.cast', (['antecedent_scores', 'tf.float64'], {}), '(antecedent_scores, tf.float64)\n', (44956, 44987), True, 'import tensorflow as tf\n'), ((2294, 2353), 'os.path.join', 'join', (['dpath', '"""embeddings"""', '"""ft_0.8.3_nltk_yalen_sg_300.bin"""'], {}), "(dpath, 'embeddings', 'ft_0.8.3_nltk_yalen_sg_300.bin')\n", (2298, 2353), False, 'from os.path 
import isdir, join\n'), ((4685, 4727), 'tensorflow.Variable', 'tf.Variable', (["opt['final_rate']", 'tf.float32'], {}), "(opt['final_rate'], tf.float32)\n", (4696, 4727), True, 'import tensorflow as tf\n'), ((14083, 14207), 'tensorflow.get_variable', 'tf.get_variable', (['"""mention_width_embeddings"""', "[self.opt['max_mention_width'], self.opt['feature_size']]"], {'dtype': 'tf.float64'}), "('mention_width_embeddings', [self.opt['max_mention_width'],\n self.opt['feature_size']], dtype=tf.float64)\n", (14098, 14207), True, 'import tensorflow as tf\n'), ((14698, 14731), 'tensorflow.expand_dims', 'tf.expand_dims', (['mention_starts', '(1)'], {}), '(mention_starts, 1)\n', (14712, 14731), True, 'import tensorflow as tf\n'), ((15327, 15412), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['mention_width', "self.opt['max_mention_width']"], {'dtype': 'tf.float64'}), "(mention_width, self.opt['max_mention_width'], dtype=tf.float64\n )\n", (15343, 15412), True, 'import tensorflow as tf\n'), ((17167, 17205), 'tensorflow.cast', 'tf.cast', (['antecedent_labels', 'tf.float64'], {}), '(antecedent_labels, tf.float64)\n', (17174, 17205), True, 'import tensorflow as tf\n'), ((18734, 18772), 'tensorflow.expand_dims', 'tf.expand_dims', (['mention_speaker_ids', '(1)'], {}), '(mention_speaker_ids, 1)\n', (18748, 18772), True, 'import tensorflow as tf\n'), ((18902, 18991), 'tensorflow.get_variable', 'tf.get_variable', (['"""same_speaker_emb"""', "[2, self.opt['feature_size']]"], {'dtype': 'tf.float64'}), "('same_speaker_emb', [2, self.opt['feature_size']], dtype=tf\n .float64)\n", (18917, 18991), True, 'import tensorflow as tf\n'), ((19086, 19111), 'tensorflow.to_int32', 'tf.to_int32', (['same_speaker'], {}), '(same_speaker)\n', (19097, 19111), True, 'import tensorflow as tf\n'), ((19585, 19618), 'tensorflow.expand_dims', 'tf.expand_dims', (['target_indices', '(1)'], {}), '(target_indices, 1)\n', (19599, 19618), True, 'import tensorflow as tf\n'), ((19863, 19956), 'tensorflow.get_variable', 'tf.get_variable', (['"""mention_distance_emb"""', "[10, self.opt['feature_size']]"], {'dtype': 'tf.float64'}), "('mention_distance_emb', [10, self.opt['feature_size']],\n dtype=tf.float64)\n", (19878, 19956), True, 'import tensorflow as tf\n'), ((20893, 20932), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""antecedent_scoring"""'], {}), "('antecedent_scoring')\n", (20910, 20932), True, 'import tensorflow as tf\n'), ((24690, 24718), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fw_lstm"""'], {}), "('fw_lstm')\n", (24707, 24718), True, 'import tensorflow as tf\n'), ((24760, 24893), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'cell_fw', 'inputs': 'preprocessed_inputs_fw', 'sequence_length': 'text_len', 'initial_state': 'state_fw', 'time_major': '(True)'}), '(cell=cell_fw, inputs=preprocessed_inputs_fw,\n sequence_length=text_len, initial_state=state_fw, time_major=True)\n', (24777, 24893), True, 'import tensorflow as tf\n'), ((25139, 25167), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bw_lstm"""'], {}), "('bw_lstm')\n", (25156, 25167), True, 'import tensorflow as tf\n'), ((25209, 25342), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'cell_bw', 'inputs': 'preprocessed_inputs_bw', 'sequence_length': 'text_len', 'initial_state': 'state_bw', 'time_major': '(True)'}), '(cell=cell_bw, inputs=preprocessed_inputs_bw,\n sequence_length=text_len, initial_state=state_bw, time_major=True)\n', (25226, 25342), True, 'import tensorflow as tf\n'), ((26516, 26552), 
'numpy.argmax', 'np.argmax', (['antecedent_scores'], {'axis': '(1)'}), '(antecedent_scores, axis=1)\n', (26525, 26552), True, 'import numpy as np\n'), ((28132, 28164), 'tensorflow.cast', 'tf.cast', (['is_training', 'tf.float64'], {}), '(is_training, tf.float64)\n', (28139, 28164), True, 'import tensorflow as tf\n'), ((28229, 28261), 'tensorflow.cast', 'tf.cast', (['is_training', 'tf.float64'], {}), '(is_training, tf.float64)\n', (28236, 28261), True, 'import tensorflow as tf\n'), ((30302, 30325), 'tensorflow.range', 'tf.range', (['num_sentences'], {}), '(num_sentences)\n', (30310, 30325), True, 'import tensorflow as tf\n'), ((35666, 35705), 'os.path.join', 'join', (['checkpoint_path', '"""model.max.ckpt"""'], {}), "(checkpoint_path, 'model.max.ckpt')\n", (35670, 35705), False, 'from os.path import isdir, join\n'), ((37133, 37179), 'os.path.join', 'join', (["self.opt['model_file']", '"""model.max.ckpt"""'], {}), "(self.opt['model_file'], 'model.max.ckpt')\n", (37137, 37179), False, 'from os.path import isdir, join\n'), ((40280, 40312), 'tensorflow.cast', 'tf.cast', (['is_training', 'tf.float64'], {}), '(is_training, tf.float64)\n', (40287, 40312), True, 'import tensorflow as tf\n'), ((40377, 40409), 'tensorflow.cast', 'tf.cast', (['is_training', 'tf.float64'], {}), '(is_training, tf.float64)\n', (40384, 40409), True, 'import tensorflow as tf\n'), ((14652, 14691), 'tensorflow.range', 'tf.range', (["self.opt['max_mention_width']"], {}), "(self.opt['max_mention_width'])\n", (14660, 14691), True, 'import tensorflow as tf\n'), ((15536, 15556), 'tensorflow.log', 'tf.log', (['mention_mask'], {}), '(mention_mask)\n', (15542, 15556), True, 'import tensorflow as tf\n'), ((19253, 19281), 'tensorflow.expand_dims', 'tf.expand_dims', (['genre_emb', '(0)'], {}), '(genre_emb, 0)\n', (19267, 19281), True, 'import tensorflow as tf\n'), ((9100, 9125), 'numpy.array', 'np.array', (['d[current_word]'], {}), '(d[current_word])\n', (9108, 9125), True, 'import numpy as np\n'), ((31329, 31351), 'tensorflow.shape', 'tf.shape', (['text_outputs'], {}), '(text_outputs)\n', (31337, 31351), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
#_*_coding:utf-8_*_
import sys, os, re
pPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(pPath)
import checkFasta
def TA(fastas, **kw):
if checkFasta.checkFasta(fastas) == False:
		print('Error: for "TA" encoding, the input fasta sequences should be of equal length. \n\n')
return 0
encodings = []
header = ['#']
for p in range(1, len(fastas[0][1])+1):
header.append('TA.F' + str(p) + '.phi')
header.append('TA.F' + str(p) + '.psi')
encodings.append(header)
disDir = kw['path']
if disDir == None:
		print('Error: please specify the directory of the predicted protein TA files with "--path"')
return 0
for i in fastas:
name, sequence = i[0], i[1]
code = [name]
if os.path.exists(disDir + '/' + name + '.spXout') == False:
print('Error: the predicted TA information file (.spXout) for protein ' + name + ' does not exist.')
return 0
with open(disDir + '/' + name + '.spXout') as f:
records = f.readlines()[1:]
proteinSeq = ''
asaValue = []
for line in records:
				if line.strip() == '':
					continue
				array = line.strip().split()
				proteinSeq = proteinSeq + array[1]
				asaValue.append(array[3:5])
pos = proteinSeq.find(sequence)
if pos == -1:
print('Warning: could not find the peptide in proteins.\n\n')
else:
for p in range(pos, pos+len(sequence)):
code.append(asaValue[p][0])
code.append(asaValue[p][1])
encodings.append(code)
return encodings
| [
"os.path.realpath",
"os.path.exists",
"sys.path.append",
"checkFasta.checkFasta"
] | [((115, 137), 'sys.path.append', 'sys.path.append', (['pPath'], {}), '(pPath)\n', (130, 137), False, 'import sys, os, re\n'), ((84, 110), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (100, 110), False, 'import sys, os, re\n'), ((183, 212), 'checkFasta.checkFasta', 'checkFasta.checkFasta', (['fastas'], {}), '(fastas)\n', (204, 212), False, 'import checkFasta\n'), ((725, 772), 'os.path.exists', 'os.path.exists', (["(disDir + '/' + name + '.spXout')"], {}), "(disDir + '/' + name + '.spXout')\n", (739, 772), False, 'import sys, os, re\n')] |
# Generated by Django 2.0 on 2018-02-24 11:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sky', '0007_auto_20180224_1120'),
]
operations = [
migrations.RemoveField(
model_name='news',
name='label',
),
]
| [
"django.db.migrations.RemoveField"
] | [((221, 276), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""news"""', 'name': '"""label"""'}), "(model_name='news', name='label')\n", (243, 276), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 17:12, 09/07/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import platform
from matplotlib import pyplot as plt
from numpy import arange
from pathlib import Path
import re
LIST_LINESTYLES = [
'-', # solid line style
'--', # dashed line style
'-.', # dash-dot line style
    ':',  # dotted line style
's', # square marker
'*', # star marker
'p', # pentagon marker
'+', # plus marker
'x', # x marker
'd', # thin diamond marker
]
LIST_COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
def __clean_filename__(filename):
chars_to_remove = ["`", "~", "!", "@", "#", "$", "%", "^", "&", "*", ":", ",", "<", ">", ";", "+", "|"]
regular_expression = '[' + re.escape(''.join(chars_to_remove)) + ']'
temp = filename.encode("ascii", "ignore")
fname = temp.decode() # Removed all non-ascii characters
fname = re.sub(regular_expression, '', fname) # Removed all special characters
    fname = fname.replace("_", "-")  # Replace underscores with hyphens
return fname
def __check_filepath__(filename):
    filename = filename.replace("\\", "/")  # For better handling of the parent folder
if "/" in filename:
list_names = filename.split("/")[:-1] # Remove last element because it is filename
filepath = "/".join(list_names)
        print(f"Creating output directory: {filepath}")
Path(filepath).mkdir(parents=True, exist_ok=True)
return filename
def _draw_line_(data=None, title=None, linestyle='-', color='b', x_label="#Iteration", y_label="Function Value",
filename=None, exts=(".png", ".pdf"), verbose=True):
x = arange(0, len(data))
y = data
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.plot(x, y, linestyle=linestyle, color=color,)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def _draw_multi_line_(data=None, title=None, list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Function Value", filename=None, exts=(".png", ".pdf"), verbose=True):
x = arange(0, len(data[0]))
for idx, y in enumerate(data):
plt.plot(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def _draw_multi_line_in_same_figure_(data=None, title=None, list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Objective", filename=None, exts=(".png", ".pdf"), verbose=True):
n_lines = len(data)
len_lines = len(data[0])
x = arange(0, len_lines)
if n_lines == 1:
fig, ax = plt.subplots()
if list_legends is None:
ax.plot(x, data[0])
else:
ax.plot(x, data[0], label=list_legends[0])
ax.set_title(title)
elif n_lines > 1:
fig, ax_list = plt.subplots(n_lines, sharex=True)
fig.suptitle(title)
for idx, ax in enumerate(ax_list):
if list_legends is None:
ax.plot(x, data[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
else:
ax.plot(x, data[idx], label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
ax.set_ylabel(f"Objective {idx + 1}")
if idx == (n_lines - 1):
ax.set_xlabel(x_label)
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def export_convergence_chart(data=None, title="Convergence Chart", linestyle='-', color='b', x_label="#Iteration",
y_label="Function Value", filename="convergence_chart", exts=(".png", ".pdf"), verbose=True):
_draw_line_(data, title=title, linestyle=linestyle, color=color, x_label=x_label, y_label=y_label,
filename=filename, exts=exts, verbose=verbose)
def export_explore_exploit_chart(data=None, title="Exploration vs Exploitation Percentages", list_legends=("Exploration %", "Exploitation %"),
list_styles=('-', '-'), list_colors=('blue', 'orange'), x_label="#Iteration", y_label="Percentage",
filename="explore_exploit_chart", exts=(".png", ".pdf"), verbose=True):
_draw_multi_line_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_diversity_chart(data=None, title='Diversity Measurement Chart', list_legends=None,
list_styles=None, list_colors=None, x_label="#Iteration", y_label="Diversity Measurement",
filename="diversity_chart", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
_draw_multi_line_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_objectives_chart(data=None, title="Objectives chart", list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Function Value", filename="Objective-chart", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
_draw_multi_line_in_same_figure_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_trajectory_chart(data=None, n_dimensions=1, title="Trajectory of some first agents after generations", list_legends=None,
list_styles=None, list_colors=None, x_label="#Iteration", y_label="X1",
filename="1d_trajectory", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
if n_dimensions == 1:
x = arange(0, len(data[0]))
for idx, y in enumerate(data):
plt.plot(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
elif n_dimensions == 2:
for idx, point in enumerate(data):
plt.plot(point[0], point[1], label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"platform.system",
"re.sub",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplot... | [((1732, 1769), 're.sub', 're.sub', (['regular_expression', '""""""', 'fname'], {}), "(regular_expression, '', fname)\n", (1738, 1769), False, 'import re\n'), ((2541, 2557), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2550, 2557), True, 'from matplotlib import pyplot as plt\n'), ((2562, 2581), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2572, 2581), True, 'from matplotlib import pyplot as plt\n'), ((2586, 2605), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2596, 2605), True, 'from matplotlib import pyplot as plt\n'), ((2610, 2658), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linestyle': 'linestyle', 'color': 'color'}), '(x, y, linestyle=linestyle, color=color)\n', (2618, 2658), True, 'from matplotlib import pyplot as plt\n'), ((2664, 2676), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2674, 2676), True, 'from matplotlib import pyplot as plt\n'), ((2981, 2992), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2990, 2992), True, 'from matplotlib import pyplot as plt\n'), ((3400, 3416), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3409, 3416), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3440), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3431, 3440), True, 'from matplotlib import pyplot as plt\n'), ((3445, 3464), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3455, 3464), True, 'from matplotlib import pyplot as plt\n'), ((3469, 3481), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3479, 3481), True, 'from matplotlib import pyplot as plt\n'), ((3786, 3797), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3795, 3797), True, 'from matplotlib import pyplot as plt\n'), ((4109, 4129), 'numpy.arange', 'arange', (['(0)', 'len_lines'], {}), '(0, len_lines)\n', (4115, 4129), False, 'from numpy import arange\n'), ((5180, 5191), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5189, 5191), True, 'from matplotlib import pyplot as plt\n'), ((8456, 8472), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8465, 8472), True, 'from matplotlib import pyplot as plt\n'), ((8477, 8496), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (8487, 8496), True, 'from matplotlib import pyplot as plt\n'), ((8501, 8520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (8511, 8520), True, 'from matplotlib import pyplot as plt\n'), ((8525, 8537), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8535, 8537), True, 'from matplotlib import pyplot as plt\n'), ((8842, 8853), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8851, 8853), True, 'from matplotlib import pyplot as plt\n'), ((2966, 2976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2974, 2976), True, 'from matplotlib import pyplot as plt\n'), ((3293, 3398), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'list_legends[idx]', 'markerfacecolor': 'list_colors[idx]', 'linestyle': 'list_styles[idx]'}), '(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx],\n linestyle=list_styles[idx])\n', (3301, 3398), True, 'from matplotlib import pyplot as plt\n'), ((3771, 3781), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3779, 3781), True, 'from matplotlib import pyplot as plt\n'), ((4170, 4184), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4182, 
4184), True, 'from matplotlib import pyplot as plt\n'), ((5165, 5175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5173, 5175), True, 'from matplotlib import pyplot as plt\n'), ((8827, 8837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8835, 8837), True, 'from matplotlib import pyplot as plt\n'), ((2856, 2908), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (2867, 2908), True, 'from matplotlib import pyplot as plt\n'), ((2916, 2933), 'platform.system', 'platform.system', ([], {}), '()\n', (2931, 2933), False, 'import platform\n'), ((3661, 3713), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (3672, 3713), True, 'from matplotlib import pyplot as plt\n'), ((3721, 3738), 'platform.system', 'platform.system', ([], {}), '()\n', (3736, 3738), False, 'import platform\n'), ((4392, 4426), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_lines'], {'sharex': '(True)'}), '(n_lines, sharex=True)\n', (4404, 4426), True, 'from matplotlib import pyplot as plt\n'), ((5055, 5107), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (5066, 5107), True, 'from matplotlib import pyplot as plt\n'), ((5115, 5132), 'platform.system', 'platform.system', ([], {}), '()\n', (5130, 5132), False, 'import platform\n'), ((8150, 8255), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'list_legends[idx]', 'markerfacecolor': 'list_colors[idx]', 'linestyle': 'list_styles[idx]'}), '(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx],\n linestyle=list_styles[idx])\n', (8158, 8255), True, 'from matplotlib import pyplot as plt\n'), ((8717, 8769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (8728, 8769), True, 'from matplotlib import pyplot as plt\n'), ((8777, 8794), 'platform.system', 'platform.system', ([], {}), '()\n', (8792, 8794), False, 'import platform\n'), ((2236, 2250), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (2240, 2250), False, 'from pathlib import Path\n'), ((8335, 8455), 'matplotlib.pyplot.plot', 'plt.plot', (['point[0]', 'point[1]'], {'label': 'list_legends[idx]', 'markerfacecolor': 'list_colors[idx]', 'linestyle': 'list_styles[idx]'}), '(point[0], point[1], label=list_legends[idx], markerfacecolor=\n list_colors[idx], linestyle=list_styles[idx])\n', (8343, 8455), True, 'from matplotlib import pyplot as plt\n')] |
import json
from interactions.models import (
SelfAnswerGroup,
)
def update_dict_with_score(valid_dict: list) -> list:
""" Updates the dict (from single and multiple_result_view) with
the scores of each user present in the list, by calculating their
``answer_choice`` and multiplying them with corresponding
question factors. """
for dictionary in valid_dict:
answer_group = SelfAnswerGroup.objects.get(
pk=dictionary['answer_group_pk']
)
scores = answer_group.scores
dictionary.update({'score': scores})
return valid_dict
def update_percentage_deviation(valid_dict: list) -> list:
for dictionary in valid_dict:
if dictionary['master']:
focus = dictionary['score'] # type: dict
for dictionary in valid_dict:
score = dictionary['score']
deviation_dict = {}
for subclass in score:
deviation = abs(score[subclass]-focus[subclass])
deviation_dict.update({subclass: deviation})
dictionary.update({'deviation': deviation_dict})
return valid_dict
| [
"interactions.models.SelfAnswerGroup.objects.get"
] | [((411, 472), 'interactions.models.SelfAnswerGroup.objects.get', 'SelfAnswerGroup.objects.get', ([], {'pk': "dictionary['answer_group_pk']"}), "(pk=dictionary['answer_group_pk'])\n", (438, 472), False, 'from interactions.models import SelfAnswerGroup\n')] |
#
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2017, <NAME> <<EMAIL>>
# License: http://snmpsim.sf.net/license.html
#
# Managed value variation module: simulate a live Agent using
# a series of snapshots.
#
import os, time, bisect
from pyasn1.compat.octets import str2octs
from pysnmp.proto import rfc1902
from snmpsim.record.snmprec import SnmprecRecord
from snmpsim.record.search.file import searchRecordByOid, getRecord
from snmpsim.record.search.database import RecordIndex
from snmpsim import confdir
from snmpsim.mltsplit import split
from snmpsim import log
from snmpsim import error
def init(**context):
if context['options']:
for k, v in [split(x, ':') for x in split(context['options'], ',')]:
if k == 'addon':
if k in moduleContext:
moduleContext[k].append(v)
else:
moduleContext[k] = [v]
else:
moduleContext[k] = v
if context['mode'] == 'variating':
moduleContext['booted'] = time.time()
elif context['mode'] == 'recording':
if 'dir' not in moduleContext:
raise error.SnmpsimError('SNMP snapshots directory not specified')
if not os.path.exists(moduleContext['dir']):
log.msg('multiplex: creating %s...' % moduleContext['dir'])
os.makedirs(moduleContext['dir'])
if 'iterations' in moduleContext:
moduleContext['iterations'] = max(0, int(moduleContext['iterations']) - 1)
if 'period' in moduleContext:
moduleContext['period'] = float(moduleContext['period'])
else:
moduleContext['period'] = 10.0
moduleContext['ready'] = True
def variate(oid, tag, value, **context):
if 'settings' not in recordContext:
recordContext['settings'] = dict([split(x, '=') for x in split(value, ',')])
if 'dir' not in recordContext['settings']:
log.msg('multiplex: snapshot directory not specified')
return context['origOid'], tag, context['errorStatus']
recordContext['settings']['dir'] = recordContext['settings']['dir'].replace(
'/', os.path.sep
)
if recordContext['settings']['dir'][0] != os.path.sep:
for x in confdir.data:
d = os.path.join(x, recordContext['settings']['dir'])
if os.path.exists(d):
break
else:
log.msg('multiplex: directory %s not found' % recordContext['settings']['dir'])
return context['origOid'], tag, context['errorStatus']
else:
d = recordContext['settings']['dir']
recordContext['dirmap'] = dict(
[(int(os.path.basename(x).split(os.path.extsep)[0]), os.path.join(d, x)) for x in os.listdir(d) if
x[-7:] == 'snmprec']
)
recordContext['keys'] = list(
recordContext['dirmap'].keys()
)
recordContext['bounds'] = (
min(recordContext['keys']), max(recordContext['keys'])
)
if 'period' in recordContext['settings']:
recordContext['settings']['period'] = float(recordContext['settings']['period'])
else:
recordContext['settings']['period'] = 60.0
if 'wrap' in recordContext['settings']:
recordContext['settings']['wrap'] = bool(recordContext['settings']['wrap'])
else:
recordContext['settings']['wrap'] = False
if 'control' in recordContext['settings']:
recordContext['settings']['control'] = rfc1902.ObjectName(
recordContext['settings']['control']
)
log.msg('multiplex: using control OID %s for subtree %s, time-based multiplexing disabled' % (
recordContext['settings']['control'], oid))
recordContext['ready'] = True
if 'ready' not in recordContext:
return context['origOid'], tag, context['errorStatus']
if oid not in moduleContext:
moduleContext[oid] = {}
if context['setFlag']:
if 'control' in recordContext['settings'] and \
recordContext['settings']['control'] == context['origOid']:
fileno = int(context['origValue'])
if fileno >= len(recordContext['keys']):
log.msg('multiplex: .snmprec file number %s over limit of %s' % (fileno, len(recordContext['keys'])))
return context['origOid'], tag, context['errorStatus']
moduleContext[oid]['fileno'] = fileno
log.msg('multiplex: switched to file #%s (%s)' % (
recordContext['keys'][fileno], recordContext['dirmap'][recordContext['keys'][fileno]]))
return context['origOid'], tag, context['origValue']
else:
return context['origOid'], tag, context['errorStatus']
if 'control' in recordContext['settings']:
if 'fileno' not in moduleContext[oid]:
moduleContext[oid]['fileno'] = 0
if not context['nextFlag'] and \
recordContext['settings']['control'] == context['origOid']:
return context['origOid'], tag, rfc1902.Integer32(moduleContext[oid]['fileno'])
else:
timeslot = (time.time() - moduleContext['booted']) % (
recordContext['settings']['period'] * len(recordContext['dirmap']))
fileslot = int(timeslot / recordContext['settings']['period']) + recordContext['bounds'][0]
fileno = bisect.bisect(recordContext['keys'], fileslot) - 1
if 'fileno' not in moduleContext[oid] or \
moduleContext[oid]['fileno'] < fileno or \
recordContext['settings']['wrap']:
moduleContext[oid]['fileno'] = fileno
datafile = recordContext['dirmap'][
recordContext['keys'][moduleContext[oid]['fileno']]
]
if 'datafile' not in moduleContext[oid] or \
moduleContext[oid]['datafile'] != datafile:
if 'datafileobj' in moduleContext[oid]:
moduleContext[oid]['datafileobj'].close()
moduleContext[oid]['datafileobj'] = RecordIndex(
datafile, SnmprecRecord()
).create()
moduleContext[oid]['datafile'] = datafile
log.msg('multiplex: switching to data file %s for %s' % (datafile, context['origOid']))
text, db = moduleContext[oid]['datafileobj'].getHandles()
textOid = str(rfc1902.OctetString('.'.join(['%s' % x for x in context['origOid']])))
try:
line = moduleContext[oid]['datafileobj'].lookup(textOid)
except KeyError:
offset = searchRecordByOid(context['origOid'], text, SnmprecRecord())
exactMatch = False
else:
offset, subtreeFlag, prevOffset = line.split(str2octs(','))
exactMatch = True
text.seek(int(offset))
line, _, _ = getRecord(text) # matched line
if context['nextFlag']:
if exactMatch:
line, _, _ = getRecord(text)
else:
if not exactMatch:
return context['origOid'], tag, context['errorStatus']
if not line:
return context['origOid'], tag, context['errorStatus']
try:
oid, value = SnmprecRecord().evaluate(line)
except error.SnmpsimError:
oid, value = context['origOid'], context['errorStatus']
return oid, tag, value
def record(oid, tag, value, **context):
if 'ready' not in moduleContext:
raise error.SnmpsimError('module not initialized')
if 'started' not in moduleContext:
moduleContext['started'] = time.time()
if context['stopFlag']:
if 'file' in moduleContext:
moduleContext['file'].close()
del moduleContext['file']
else:
moduleContext['filenum'] = 0
if 'iterations' in moduleContext and moduleContext['iterations']:
log.msg('multiplex: %s iterations remaining' % moduleContext['iterations'])
moduleContext['started'] = time.time()
moduleContext['iterations'] -= 1
moduleContext['filenum'] += 1
wait = max(0, moduleContext['period'] - (time.time() - moduleContext['started']))
raise error.MoreDataNotification(period=wait)
else:
raise error.NoDataNotification()
if 'file' not in moduleContext:
if 'filenum' not in moduleContext:
moduleContext['filenum'] = 0
snmprecfile = os.path.join(moduleContext['dir'],
'%.5d%ssnmprec' % (moduleContext['filenum'],
os.path.extsep))
moduleContext['file'] = open(snmprecfile, 'wb')
log.msg('multiplex: writing into %s file...' % snmprecfile)
moduleContext['file'].write(
SnmprecRecord().format(context['origOid'], context['origValue'])
)
if not context['total']:
settings = {
'dir': moduleContext['dir'].replace(os.path.sep, '/')
}
if 'period' in moduleContext:
settings['period'] = '%.2f' % float(moduleContext['period'])
if 'addon' in moduleContext:
settings.update(
dict([split(x, '=') for x in moduleContext['addon']])
)
value = ','.join(['%s=%s' % (k, v) for k, v in settings.items()])
return str(context['startOID']), ':multiplex', value
else:
raise error.NoDataNotification()
def shutdown(**context):
pass
| [
"os.path.exists",
"snmpsim.record.search.file.getRecord",
"snmpsim.log.msg",
"pysnmp.proto.rfc1902.ObjectName",
"snmpsim.mltsplit.split",
"os.makedirs",
"snmpsim.error.MoreDataNotification",
"snmpsim.error.NoDataNotification",
"os.listdir",
"os.path.join",
"pysnmp.proto.rfc1902.Integer32",
"sn... | [((6845, 6860), 'snmpsim.record.search.file.getRecord', 'getRecord', (['text'], {}), '(text)\n', (6854, 6860), False, 'from snmpsim.record.search.file import searchRecordByOid, getRecord\n'), ((1045, 1056), 'time.time', 'time.time', ([], {}), '()\n', (1054, 1056), False, 'import os, time, bisect\n'), ((6253, 6345), 'snmpsim.log.msg', 'log.msg', (["('multiplex: switching to data file %s for %s' % (datafile, context['origOid'])\n )"], {}), "('multiplex: switching to data file %s for %s' % (datafile, context[\n 'origOid']))\n", (6260, 6345), False, 'from snmpsim import log\n'), ((7433, 7477), 'snmpsim.error.SnmpsimError', 'error.SnmpsimError', (['"""module not initialized"""'], {}), "('module not initialized')\n", (7451, 7477), False, 'from snmpsim import error\n'), ((7552, 7563), 'time.time', 'time.time', ([], {}), '()\n', (7561, 7563), False, 'import os, time, bisect\n'), ((8417, 8518), 'os.path.join', 'os.path.join', (["moduleContext['dir']", "('%.5d%ssnmprec' % (moduleContext['filenum'], os.path.extsep))"], {}), "(moduleContext['dir'], '%.5d%ssnmprec' % (moduleContext[\n 'filenum'], os.path.extsep))\n", (8429, 8518), False, 'import os, time, bisect\n'), ((8667, 8726), 'snmpsim.log.msg', 'log.msg', (["('multiplex: writing into %s file...' % snmprecfile)"], {}), "('multiplex: writing into %s file...' % snmprecfile)\n", (8674, 8726), False, 'from snmpsim import log\n'), ((9387, 9413), 'snmpsim.error.NoDataNotification', 'error.NoDataNotification', ([], {}), '()\n', (9411, 9413), False, 'from snmpsim import error\n'), ((681, 694), 'snmpsim.mltsplit.split', 'split', (['x', '""":"""'], {}), "(x, ':')\n", (686, 694), False, 'from snmpsim.mltsplit import split\n'), ((1946, 2000), 'snmpsim.log.msg', 'log.msg', (['"""multiplex: snapshot directory not specified"""'], {}), "('multiplex: snapshot directory not specified')\n", (1953, 2000), False, 'from snmpsim import log\n'), ((3590, 3646), 'pysnmp.proto.rfc1902.ObjectName', 'rfc1902.ObjectName', (["recordContext['settings']['control']"], {}), "(recordContext['settings']['control'])\n", (3608, 3646), False, 'from pysnmp.proto import rfc1902\n'), ((3689, 3836), 'snmpsim.log.msg', 'log.msg', (["('multiplex: using control OID %s for subtree %s, time-based multiplexing disabled'\n % (recordContext['settings']['control'], oid))"], {}), "(\n 'multiplex: using control OID %s for subtree %s, time-based multiplexing disabled'\n % (recordContext['settings']['control'], oid))\n", (3696, 3836), False, 'from snmpsim import log\n'), ((4565, 4707), 'snmpsim.log.msg', 'log.msg', (["('multiplex: switched to file #%s (%s)' % (recordContext['keys'][fileno],\n recordContext['dirmap'][recordContext['keys'][fileno]]))"], {}), "('multiplex: switched to file #%s (%s)' % (recordContext['keys'][\n fileno], recordContext['dirmap'][recordContext['keys'][fileno]]))\n", (4572, 4707), False, 'from snmpsim import log\n'), ((5486, 5532), 'bisect.bisect', 'bisect.bisect', (["recordContext['keys']", 'fileslot'], {}), "(recordContext['keys'], fileslot)\n", (5499, 5532), False, 'import os, time, bisect\n'), ((6758, 6771), 'pyasn1.compat.octets.str2octs', 'str2octs', (['""","""'], {}), "(',')\n", (6766, 6771), False, 'from pyasn1.compat.octets import str2octs\n'), ((6954, 6969), 'snmpsim.record.search.file.getRecord', 'getRecord', (['text'], {}), '(text)\n', (6963, 6969), False, 'from snmpsim.record.search.file import searchRecordByOid, getRecord\n'), ((7849, 7924), 'snmpsim.log.msg', 'log.msg', (["('multiplex: %s iterations remaining' % moduleContext['iterations'])"], {}), 
"('multiplex: %s iterations remaining' % moduleContext['iterations'])\n", (7856, 7924), False, 'from snmpsim import log\n'), ((7964, 7975), 'time.time', 'time.time', ([], {}), '()\n', (7973, 7975), False, 'import os, time, bisect\n'), ((8175, 8214), 'snmpsim.error.MoreDataNotification', 'error.MoreDataNotification', ([], {'period': 'wait'}), '(period=wait)\n', (8201, 8214), False, 'from snmpsim import error\n'), ((8247, 8273), 'snmpsim.error.NoDataNotification', 'error.NoDataNotification', ([], {}), '()\n', (8271, 8273), False, 'from snmpsim import error\n'), ((704, 734), 'snmpsim.mltsplit.split', 'split', (["context['options']", '""","""'], {}), "(context['options'], ',')\n", (709, 734), False, 'from snmpsim.mltsplit import split\n'), ((1155, 1215), 'snmpsim.error.SnmpsimError', 'error.SnmpsimError', (['"""SNMP snapshots directory not specified"""'], {}), "('SNMP snapshots directory not specified')\n", (1173, 1215), False, 'from snmpsim import error\n'), ((1231, 1267), 'os.path.exists', 'os.path.exists', (["moduleContext['dir']"], {}), "(moduleContext['dir'])\n", (1245, 1267), False, 'import os, time, bisect\n'), ((1281, 1340), 'snmpsim.log.msg', 'log.msg', (["('multiplex: creating %s...' % moduleContext['dir'])"], {}), "('multiplex: creating %s...' % moduleContext['dir'])\n", (1288, 1340), False, 'from snmpsim import log\n'), ((1353, 1386), 'os.makedirs', 'os.makedirs', (["moduleContext['dir']"], {}), "(moduleContext['dir'])\n", (1364, 1386), False, 'import os, time, bisect\n'), ((1840, 1853), 'snmpsim.mltsplit.split', 'split', (['x', '"""="""'], {}), "(x, '=')\n", (1845, 1853), False, 'from snmpsim.mltsplit import split\n'), ((2311, 2360), 'os.path.join', 'os.path.join', (['x', "recordContext['settings']['dir']"], {}), "(x, recordContext['settings']['dir'])\n", (2323, 2360), False, 'import os, time, bisect\n'), ((2380, 2397), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (2394, 2397), False, 'import os, time, bisect\n'), ((2459, 2538), 'snmpsim.log.msg', 'log.msg', (["('multiplex: directory %s not found' % recordContext['settings']['dir'])"], {}), "('multiplex: directory %s not found' % recordContext['settings']['dir'])\n", (2466, 2538), False, 'from snmpsim import log\n'), ((5171, 5218), 'pysnmp.proto.rfc1902.Integer32', 'rfc1902.Integer32', (["moduleContext[oid]['fileno']"], {}), "(moduleContext[oid]['fileno'])\n", (5188, 5218), False, 'from pysnmp.proto import rfc1902\n'), ((5249, 5260), 'time.time', 'time.time', ([], {}), '()\n', (5258, 5260), False, 'import os, time, bisect\n'), ((6651, 6666), 'snmpsim.record.snmprec.SnmprecRecord', 'SnmprecRecord', ([], {}), '()\n', (6664, 6666), False, 'from snmpsim.record.snmprec import SnmprecRecord\n'), ((7186, 7201), 'snmpsim.record.snmprec.SnmprecRecord', 'SnmprecRecord', ([], {}), '()\n', (7199, 7201), False, 'from snmpsim.record.snmprec import SnmprecRecord\n'), ((8769, 8784), 'snmpsim.record.snmprec.SnmprecRecord', 'SnmprecRecord', ([], {}), '()\n', (8782, 8784), False, 'from snmpsim.record.snmprec import SnmprecRecord\n'), ((1863, 1880), 'snmpsim.mltsplit.split', 'split', (['value', '""","""'], {}), "(value, ',')\n", (1868, 1880), False, 'from snmpsim.mltsplit import split\n'), ((2778, 2796), 'os.path.join', 'os.path.join', (['d', 'x'], {}), '(d, x)\n', (2790, 2796), False, 'import os, time, bisect\n'), ((2807, 2820), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (2817, 2820), False, 'import os, time, bisect\n'), ((6159, 6174), 'snmpsim.record.snmprec.SnmprecRecord', 'SnmprecRecord', ([], {}), '()\n', (6172, 6174), False, 
'from snmpsim.record.snmprec import SnmprecRecord\n'), ((8116, 8127), 'time.time', 'time.time', ([], {}), '()\n', (8125, 8127), False, 'import os, time, bisect\n'), ((9166, 9179), 'snmpsim.mltsplit.split', 'split', (['x', '"""="""'], {}), "(x, '=')\n", (9171, 9179), False, 'from snmpsim.mltsplit import split\n'), ((2731, 2750), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (2747, 2750), False, 'import os, time, bisect\n')] |
"""empty message
Revision ID: <KEY>
Revises: 3b042c12d85e
Create Date: 2018-12-12 22:47:40.070151
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles_parents',
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['roles.id'], ),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], )
)
op.create_table('users_roles',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.drop_table('sysconfig')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('sysconfig',
sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
sa.Column('web_name', mysql.VARCHAR(length=80), nullable=False),
sa.Column('web_describe', mysql.VARCHAR(length=500), nullable=True),
sa.Column('user_register', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('active_site', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('close_register_user_message', mysql.VARCHAR(length=500), nullable=True),
sa.Column('close_website_message', mysql.VARCHAR(length=500), nullable=True),
sa.Column('withdraw_money', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
op.drop_table('users_roles')
op.drop_table('roles_parents')
op.drop_table('roles')
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"sqlalchemy.dialects.mysql.INTEGER",
"sqlalchemy.dialects.mysql.VARCHAR",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String",
"sqlalchemy.dialects.mysql.TINYINT"
] | [((1118, 1144), 'alembic.op.drop_table', 'op.drop_table', (['"""sysconfig"""'], {}), "('sysconfig')\n", (1131, 1144), False, 'from alembic import op\n'), ((2107, 2135), 'alembic.op.drop_table', 'op.drop_table', (['"""users_roles"""'], {}), "('users_roles')\n", (2120, 2135), False, 'from alembic import op\n'), ((2140, 2170), 'alembic.op.drop_table', 'op.drop_table', (['"""roles_parents"""'], {}), "('roles_parents')\n", (2153, 2170), False, 'from alembic import op\n'), ((2175, 2197), 'alembic.op.drop_table', 'op.drop_table', (['"""roles"""'], {}), "('roles')\n", (2188, 2197), False, 'from alembic import op\n'), ((540, 569), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (563, 569), True, 'import sqlalchemy as sa\n'), ((729, 781), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['parent_id']", "['roles.id']"], {}), "(['parent_id'], ['roles.id'])\n", (752, 781), True, 'import sqlalchemy as sa\n'), ((789, 839), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['role_id']", "['roles.id']"], {}), "(['role_id'], ['roles.id'])\n", (812, 839), True, 'import sqlalchemy as sa\n'), ((997, 1047), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['role_id']", "['roles.id']"], {}), "(['role_id'], ['roles.id'])\n", (1020, 1047), True, 'import sqlalchemy as sa\n'), ((1055, 1105), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['users.id']"], {}), "(['user_id'], ['users.id'])\n", (1078, 1105), True, 'import sqlalchemy as sa\n'), ((2003, 2032), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (2026, 2032), True, 'import sqlalchemy as sa\n'), ((445, 457), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (455, 457), True, 'import sqlalchemy as sa\n'), ((498, 518), 'sqlalchemy.String', 'sa.String', ([], {'length': '(80)'}), '(length=80)\n', (507, 518), True, 'import sqlalchemy as sa\n'), ((638, 650), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (648, 650), True, 'import sqlalchemy as sa\n'), ((695, 707), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (705, 707), True, 'import sqlalchemy as sa\n'), ((908, 920), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (918, 920), True, 'import sqlalchemy as sa\n'), ((963, 975), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (973, 975), True, 'import sqlalchemy as sa\n'), ((1318, 1349), 'sqlalchemy.dialects.mysql.INTEGER', 'mysql.INTEGER', ([], {'display_width': '(11)'}), '(display_width=11)\n', (1331, 1349), False, 'from sqlalchemy.dialects import mysql\n'), ((1414, 1438), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(80)'}), '(length=80)\n', (1427, 1438), False, 'from sqlalchemy.dialects import mysql\n'), ((1487, 1512), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(500)'}), '(length=500)\n', (1500, 1512), False, 'from sqlalchemy.dialects import mysql\n'), ((1561, 1591), 'sqlalchemy.dialects.mysql.TINYINT', 'mysql.TINYINT', ([], {'display_width': '(1)'}), '(display_width=1)\n', (1574, 1591), False, 'from sqlalchemy.dialects import mysql\n'), ((1659, 1689), 'sqlalchemy.dialects.mysql.TINYINT', 'mysql.TINYINT', ([], {'display_width': '(1)'}), '(display_width=1)\n', (1672, 1689), False, 'from sqlalchemy.dialects import mysql\n'), ((1773, 1798), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(500)'}), '(length=500)\n', (1786, 1798), False, 'from sqlalchemy.dialects import mysql\n'), ((1855, 
1880), 'sqlalchemy.dialects.mysql.VARCHAR', 'mysql.VARCHAR', ([], {'length': '(500)'}), '(length=500)\n', (1868, 1880), False, 'from sqlalchemy.dialects import mysql\n'), ((1930, 1960), 'sqlalchemy.dialects.mysql.TINYINT', 'mysql.TINYINT', ([], {'display_width': '(1)'}), '(display_width=1)\n', (1943, 1960), False, 'from sqlalchemy.dialects import mysql\n')] |
from tkinter import Tk, Button, Label
from threading import Thread
from queue import Queue
import configparser
import sys
import os
def init_config(path):
config.optionxform = str
config.read(path)
def maingui():
for name in tt['modules'].keys():
tt[name]['label'].config(text=str(tt[name].get('cnt', 0)),
bg=('lime' if (tt[name].get('is_working', False)) else 'white'))
root.after(1000, maingui)
def rstart(name):
if not tt[name].get('is_enable', True):
tt[name].update({'is_enable': True})
elif not tt[name].get('is_enable', False):
tt[name].update({'is_enable': True})
thread = Thread(target=eval(name), args=(tt,))
thread.daemon = True
thread.start()
def rstop(name):
if tt[name].get('is_enable', False):
tt[name].update({'is_enable': False})
if __name__ == '__main__':
root = Tk()
root.geometry('+200+200')
root.overrideredirect(0)
# uncomment for minimize
# root.iconify()
tt = {}
modules = []
if len(sys.argv) < 2:
path = './settings.ini'
else:
print(sys.argv[1])
path = './%s' % (sys.argv[1])
if not os.path.exists(path):
print('Settings file %s not found' % (path))
sys.exit()
config = configparser.ConfigParser()
init_config(path)
for section in config.sections():
tt.update({section.lower():dict(config[section])})
if section == 'MODULES':
for key in config[section]:
modules.append([key, config[section][key]])
exec('from modules.%s import %s' % (key, key))
for [name, autostart] in modules:
module = tt.get(name, {})
q = Queue()
module.update({'queue': q})
module.update({'cnt': 0})
tt.update({name: module})
for i, [name, autostart] in enumerate(modules):
module = tt.get(name, {})
Label(text=name).grid(row=i, column=0)
Button(text="Start", command=lambda x=name: rstart(x)).grid(row=i, column=1)
Button(text="Stop", command=lambda x=name: rstop(x)).grid(row=i, column=2)
label = Label(root, bg='white', text='0')
label.grid(row=i, column=3)
module.update({'label': label})
tt.update({name: module})
if autostart:
rstart(name)
root.after(100, maingui)
root.mainloop()
| [
"os.path.exists",
"configparser.ConfigParser",
"tkinter.Tk",
"tkinter.Label",
"sys.exit",
"queue.Queue"
] | [((912, 916), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (914, 916), False, 'from tkinter import Tk, Button, Label\n'), ((1306, 1333), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1331, 1333), False, 'import configparser\n'), ((1199, 1219), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1213, 1219), False, 'import os\n'), ((1282, 1292), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1290, 1292), False, 'import sys\n'), ((1733, 1740), 'queue.Queue', 'Queue', ([], {}), '()\n', (1738, 1740), False, 'from queue import Queue\n'), ((2162, 2195), 'tkinter.Label', 'Label', (['root'], {'bg': '"""white"""', 'text': '"""0"""'}), "(root, bg='white', text='0')\n", (2167, 2195), False, 'from tkinter import Tk, Button, Label\n'), ((1939, 1955), 'tkinter.Label', 'Label', ([], {'text': 'name'}), '(text=name)\n', (1944, 1955), False, 'from tkinter import Tk, Button, Label\n')] |
'''OpenGL extension ARB.map_buffer_range
This module customises the behaviour of the
OpenGL.raw.GL.ARB.map_buffer_range to provide a more
Python-friendly API
Overview (from the spec)
ARB_map_buffer_range expands the buffer object API to allow greater
performance when a client application only needs to write to a sub-range
of a buffer object. To that end, this extension introduces two new buffer
object features: non-serialized buffer modification and explicit sub-range
flushing for mapped buffer objects.
OpenGL requires that commands occur in a FIFO manner meaning that any
changes to buffer objects either block until the data has been processed by
the OpenGL pipeline or else create extra copies to avoid such a block. By
providing a method to asynchronously modify buffer object data, an
application is then able to manage the synchronization points themselves
and modify ranges of data contained by a buffer object even though OpenGL
might still be using other parts of it.
This extension also provides a method for explicitly flushing ranges of a
mapped buffer object so OpenGL does not have to assume that the entire
range may have been modified. Further, it allows the application to more
precisely specify its intent with respect to reading, writing, and whether
the previous contents of a mapped range of interest need be preserved
prior to modification.
Affects ARB_vertex_buffer_object, ARB_pixel_buffer_object and OpenGL 1.5
Buffer Objects.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/map_buffer_range.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.map_buffer_range import *
from OpenGL.raw.GL.ARB.map_buffer_range import _EXTENSION_NAME
def glInitMapBufferRangeARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | [
"OpenGL.extensions.hasGLExtension"
] | [((2070, 2112), 'OpenGL.extensions.hasGLExtension', 'extensions.hasGLExtension', (['_EXTENSION_NAME'], {}), '(_EXTENSION_NAME)\n', (2095, 2112), False, 'from OpenGL import extensions\n')] |
from __future__ import division
import torch
import torch.nn as nn
from mmdet import ops
from ..registry import ROI_EXTRACTORS
import pdb
@ROI_EXTRACTORS.register_module
class ARboxMultiRoIExtractor(nn.Module):
    """Extract RoI features from multiple feature map levels.
    If there are multiple input feature levels, each RoI is pooled from every
    level and the per-level features are merged by element-wise max.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (int): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
w_enlarge=1.2,
h_enlarge=1.4,
ratio_max=5.0):
super(ARboxMultiRoIExtractor, self).__init__()
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.finest_scale = finest_scale
self.w_enlarge = w_enlarge
self.h_enlarge = h_enlarge
self.ratio_max = ratio_max
@property
def num_inputs(self):
"""int: Input feature map levels."""
return len(self.featmap_strides)
def init_weights(self):
pass
def build_roi_layers(self, layer_cfg, featmap_strides):
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def get_poolwh(self, rois, base_size):
ratios = rois[:, 3] / rois[:, 4]
assert ratios.min() >= 1.0
ratios = ratios.ceil()
ratio = ratios.max()
ratio = min(ratio, self.ratio_max)
pool_h = int(base_size)
pool_w = int(ratio * base_size)
return pool_w, pool_h
def forward(self, feats, rois):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
base_size = out_size
out_w, out_h = self.get_poolwh(rois, base_size)
num_levels = len(feats)
roi_feats=[]
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois, out_w, out_h)
roi_feats.append(roi_feats_t)
# max pool
feature_size = roi_feats[0].size()
roi_feats = [var.view(var.size(0),-1) for var in roi_feats]
for i in range(1, num_levels):
roi_feats[0] = torch.max(roi_feats[0], roi_feats[i])
roi_feats = roi_feats[0]
roi_feats = roi_feats.view(feature_size)
        return roi_feats
| [
"torch.max"
] | [((2792, 2829), 'torch.max', 'torch.max', (['roi_feats[0]', 'roi_feats[i]'], {}), '(roi_feats[0], roi_feats[i])\n', (2801, 2829), False, 'import torch\n')] |