"""Transforms the XML module definitions parsed from the PDF into a verilog representation"""
from lxml import etree
from datetime import datetime
def format_port(name, width, type, **kwargs):
wstr = '' if int(width) == 1 else '[%s:0]\t' % width
return '\t%s\t%s%s;\n' % (type, wstr, name)
def format_attrib(name, type, default, **kwargs):
if type == 'STRING':
default = '"%s"' % default # need to ensure strings are quoted
return '\tparameter %s = %s;\n' % (name, default)
def process(infile, outfile):
tree = etree.parse(infile)
root = tree.getroot()
with open(outfile, "w") as output:
output.write(
'// Automatically generated from %s on %s\n\n' %
(infile, datetime.now().isoformat())
)
        for module in root:  # iterate children directly; getchildren() is deprecated in lxml
ports = module.xpath('port')
attrs = module.xpath('attribute')
output.write(
'module %s (%s);\n' % (
module.attrib['name'],
', '.join([port.attrib['name'] for port in ports])
)
)
for port in ports:
output.write(format_port(**dict(port.attrib)))
if len(attrs):
output.write('\n')
for attr in attrs:
output.write(format_attrib(**dict(attr.attrib)))
output.write('endmodule\n\n')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', nargs='?', default='cells_xtra.xml')
parser.add_argument('--output', '-o', nargs='?', default='cells_xtra.v')
args = parser.parse_args()
process(args.input, args.output)
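# Illustration (the XML shape is an assumption inferred from the xpath
# queries above; the root tag name is hypothetical): an input like
#   <modules>
#     <module name="RAMB18E1">
#       <port name="CLK" width="1" type="input"/>
#       <attribute name="INIT" type="STRING" default="0"/>
#     </module>
#   </modules>
# would emit roughly:
#   module RAMB18E1 (CLK);
#       input   CLK;
#       parameter INIT = "0";
#   endmodule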
|
[
"lxml.etree.parse",
"datetime.datetime.now",
"argparse.ArgumentParser"
] |
[((546, 565), 'lxml.etree.parse', 'etree.parse', (['infile'], {}), '(infile)\n', (557, 565), False, 'from lxml import etree\n'), ((1470, 1495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1493, 1495), False, 'import argparse\n'), ((735, 749), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (747, 749), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 10:40:39 2016
Converting reflectance spectrum to a CIE coordinate
@author: Bonan
"""
import numpy as np
from scipy import interpolate
import os
# Adobe RGB (1998) D65 as reference white
# http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_RGB.html
_RGB_to_XYZ = np.array([
[0.5767309, 0.1855540, 0.1881852],
[0.2973769, 0.6273491, 0.0752741],
[0.0270343, 0.0706872, 0.9911085], ])
_XYZ_to_RGB = np.array([
[2.0413690, -0.5649464, -0.3446944],
[-0.9692660, 1.8760108, 0.0415560],
[0.0134474, -0.1183897, 1.0154096], ])
# Load the CIE colour-matching and illuminant tables
_dirname = os.path.dirname(__file__)
fn1 = os.path.join(_dirname, 'CIE_1931_XYZ.txt')
fn2 = os.path.join(_dirname, 'CIE_A.txt')
fn3 = os.path.join(_dirname, 'CIE_D65.txt')
CIE_XYZ_table = np.loadtxt(fn1).T # Transpose column into rows
CIE_A = np.loadtxt(fn2).T
CIE_D65 = np.loadtxt(fn3).T
def splineInterp(xNew, xRaw, yRaw):
"""
Compute the spline interpolation(cubic) of the data
"""
tck = interpolate.splrep(xRaw, yRaw)
return interpolate.splev(xNew, tck, der=0, ext=1)
def specToXYZ(spec, SI='D65'):
"""
    Calculate the XYZ coordinates of the input spectrum.
    The reference charts are interpolated to every input wavelength.
    By default the input spectrum is first enveloped by an SPD function
    to simulate illumination.
spec: input spectrum, 2*N ndarray, 1st row must be the wavelength
return: (X,Y,Z)
"""
wl = spec[0] # the input must have the 1st element as the wavelength
XYZ = CIE_XYZ_table
    # elif chain: with separate ifs, the else branch would overwrite the
    # D65 interpolation computed above
    if SI == 'D65':
        interpSI = splineInterp(wl, CIE_D65[0], CIE_D65[1])
    elif SI == 'A':
        interpSI = splineInterp(wl, CIE_A[0], CIE_A[1])
    else:
        interpSI = np.ones(len(wl))
interpX = splineInterp(wl, XYZ[0], XYZ[1])
interpY = splineInterp(wl, XYZ[0], XYZ[2])
interpZ = splineInterp(wl, XYZ[0], XYZ[3])
interpXYZ = np.array([interpX, interpY, interpZ])
X, Y, Z = np.sum(spec[1] * interpSI * interpXYZ, axis=1)
return X, Y, Z
def specToxyz(spec, SI='D65'):
"""
Transfer spectrum into normalised x,y,z coordinates
Return: (x, y, z)
"""
X, Y, Z = specToXYZ(spec, SI)
x = X / (X + Y + Z)
y = Y / (X + Y + Z)
z = 1 - x - y
return x, y, z
def specToRGB(spec, SI='D65', scale_factor=1):
"""
    Convert the spectrum (reflectivity) into an RGB value
Return: (R,G,B)
"""
XYZArray = specToxyz(spec, SI)
RGBArray = np.dot(_XYZ_to_RGB, XYZArray).clip(0, 1)
RGBArray *= scale_factor
return tuple(RGBArray.clip(0, 1))
if __name__ == '__main__':
# Testing of the module
import matplotlib.pyplot as pl
wlRange = np.linspace(400, 800, 100)
example = np.sin((wlRange - 400) * np.pi / 400)
spec = np.array([wlRange, example])
c = specToRGB(spec)
pl.plot(spec[0], spec[1] / spec[1].max(),
label='Example distribution', color=c)
print(c)
# Use the D65 as the light source
spec = CIE_D65
c = specToRGB(spec, SI='D65')
print('Test using D65 illumination. Should give R=G=B')
print(c)
pl.plot(spec[0], spec[1] / spec[1].max(),
label='D65 distribution', color=np.array(c))
pl.title('Coloured Spectrum')
pl.legend()
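    # Show the figures; without an explicit show() the script produces no
    # window on non-interactive matplotlib backends.
    pl.show()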
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"os.path.join",
"os.path.dirname",
"matplotlib.pyplot.legend",
"numpy.sin",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"scipy.interpolate.splev",
"numpy.dot",
"scipy.interpolate.splrep"
] |
[((313, 432), 'numpy.array', 'np.array', (['[[0.5767309, 0.185554, 0.1881852], [0.2973769, 0.6273491, 0.0752741], [\n 0.0270343, 0.0706872, 0.9911085]]'], {}), '([[0.5767309, 0.185554, 0.1881852], [0.2973769, 0.6273491, \n 0.0752741], [0.0270343, 0.0706872, 0.9911085]])\n', (321, 432), True, 'import numpy as np\n'), ((458, 579), 'numpy.array', 'np.array', (['[[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, 0.041556], [\n 0.0134474, -0.1183897, 1.0154096]]'], {}), '([[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, \n 0.041556], [0.0134474, -0.1183897, 1.0154096]])\n', (466, 579), True, 'import numpy as np\n'), ((615, 640), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (630, 640), False, 'import os\n'), ((647, 689), 'os.path.join', 'os.path.join', (['_dirname', '"""CIE_1931_XYZ.txt"""'], {}), "(_dirname, 'CIE_1931_XYZ.txt')\n", (659, 689), False, 'import os\n'), ((696, 731), 'os.path.join', 'os.path.join', (['_dirname', '"""CIE_A.txt"""'], {}), "(_dirname, 'CIE_A.txt')\n", (708, 731), False, 'import os\n'), ((738, 775), 'os.path.join', 'os.path.join', (['_dirname', '"""CIE_D65.txt"""'], {}), "(_dirname, 'CIE_D65.txt')\n", (750, 775), False, 'import os\n'), ((792, 807), 'numpy.loadtxt', 'np.loadtxt', (['fn1'], {}), '(fn1)\n', (802, 807), True, 'import numpy as np\n'), ((848, 863), 'numpy.loadtxt', 'np.loadtxt', (['fn2'], {}), '(fn2)\n', (858, 863), True, 'import numpy as np\n'), ((876, 891), 'numpy.loadtxt', 'np.loadtxt', (['fn3'], {}), '(fn3)\n', (886, 891), True, 'import numpy as np\n'), ((1014, 1044), 'scipy.interpolate.splrep', 'interpolate.splrep', (['xRaw', 'yRaw'], {}), '(xRaw, yRaw)\n', (1032, 1044), False, 'from scipy import interpolate\n'), ((1056, 1098), 'scipy.interpolate.splev', 'interpolate.splev', (['xNew', 'tck'], {'der': '(0)', 'ext': '(1)'}), '(xNew, tck, der=0, ext=1)\n', (1073, 1098), False, 'from scipy import interpolate\n'), ((1926, 1963), 'numpy.array', 'np.array', (['[interpX, interpY, interpZ]'], {}), '([interpX, interpY, interpZ])\n', (1934, 1963), True, 'import numpy as np\n'), ((1978, 2024), 'numpy.sum', 'np.sum', (['(spec[1] * interpSI * interpXYZ)'], {'axis': '(1)'}), '(spec[1] * interpSI * interpXYZ, axis=1)\n', (1984, 2024), True, 'import numpy as np\n'), ((2699, 2725), 'numpy.linspace', 'np.linspace', (['(400)', '(800)', '(100)'], {}), '(400, 800, 100)\n', (2710, 2725), True, 'import numpy as np\n'), ((2740, 2777), 'numpy.sin', 'np.sin', (['((wlRange - 400) * np.pi / 400)'], {}), '((wlRange - 400) * np.pi / 400)\n', (2746, 2777), True, 'import numpy as np\n'), ((2789, 2817), 'numpy.array', 'np.array', (['[wlRange, example]'], {}), '([wlRange, example])\n', (2797, 2817), True, 'import numpy as np\n'), ((3223, 3252), 'matplotlib.pyplot.title', 'pl.title', (['"""Coloured Spectrum"""'], {}), "('Coloured Spectrum')\n", (3231, 3252), True, 'import matplotlib.pyplot as pl\n'), ((3257, 3268), 'matplotlib.pyplot.legend', 'pl.legend', ([], {}), '()\n', (3266, 3268), True, 'import matplotlib.pyplot as pl\n'), ((2485, 2514), 'numpy.dot', 'np.dot', (['_XYZ_to_RGB', 'XYZArray'], {}), '(_XYZ_to_RGB, XYZArray)\n', (2491, 2514), True, 'import numpy as np\n'), ((3206, 3217), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3214, 3217), True, 'import numpy as np\n')]
|
from pathlib import Path
from typing import Dict
from environs import Env
from furl import furl
from .utils import FilterSettings
env = Env()
DEBUG = False
BASE_DIR = Path(__file__).parent.parent
LOCALES_DIR = BASE_DIR / "locales"
I18N_DOMAIN = "messages"
BOT_TOKEN = env("BOT_TOKEN")
ADMINS = env.list("ADMINS", subcast=int)
BASE_URL = env("BASE_URL")
API_BASE_URL = furl(BASE_URL).add(path=env("API_PATH")).url
API_TOKEN = env("API_TOKEN")
PAYMENTS_PROVIDER_TOKEN = env("PAYMENTS_PROVIDER_TOKEN")
FSM_STORAGE = {"host": env("STORAGE_HOST"), "port": env.int("STORAGE_PORT")}
TIMEZONE = env("TIMEZONE", "UTC")
DATETIME_FORMAT = "%d/%m/%Y %H:%M:%S %Z%z"
SHORT_DATETIME_FORMAT = "%d/%m/%Y %H:%M"
FILTERS_STORAGE_KEY = "filters"
CACHED_PAGE_STORAGE_KEY = "cached_page"
PRODUCT_PAGE_SIZE = 10
_ = lambda s: s # noqa
PRODUCT_FILTERS: Dict[str, FilterSettings] = {
"gender": FilterSettings(
_("Gender"), ("title",), api_endpoint="/categories/", query_name="category"
),
"category": FilterSettings(
_("Category"),
("title",),
api_endpoint="/categories/",
depends_on="gender",
choices_keyboard_width=3,
),
"season": FilterSettings(_("Season"), ("name",), choices_keyboard_width=2),
"brand": FilterSettings(_("Brand"), ("name",), api_endpoint="/brands/"),
"color": FilterSettings(_("Color"), ("name",), api_endpoint="/colors/"),
"outer_material": FilterSettings(
_("Outer material"), ("name",), api_endpoint="/outer_materials/"
),
}
SHIPPING_OPTIONS = [
("nova_poshta", _("Nova Poshta (Сustomer pays shipping)"), [(_("Nova Poshta"), 0)]),
("pickup", _("Local pickup (Mykolaiv)"), [(_("Local pickup"), 0)]),
]
SUCCESSFUL_PAYMENT_STICKER_ID = env("SUCCESSFUL_PAYMENT_STICKER_ID")
CONTACT_PHONES = env.list("CONTACT_PHONES")
CONTACT_EMAILS = env.list("CONTACT_EMAILS")
# fmt: off
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s %(levelname)s %(name)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "default",
},
},
"loggers": {
"bot": {
"handlers": ["console"],
"level": "INFO",
},
},
}
# fmt: on
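# The LOGGING dict above follows the stdlib dictConfig schema; a typical way
# to apply it at application startup (not shown in this settings module) is:
#     import logging.config
#     logging.config.dictConfig(LOGGING)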
|
[
"pathlib.Path",
"furl.furl",
"environs.Env"
] |
[((139, 144), 'environs.Env', 'Env', ([], {}), '()\n', (142, 144), False, 'from environs import Env\n'), ((172, 186), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'from pathlib import Path\n'), ((379, 393), 'furl.furl', 'furl', (['BASE_URL'], {}), '(BASE_URL)\n', (383, 393), False, 'from furl import furl\n')]
|
'''
In this exercise you must match lines that contain 'b' or 'c' followed by a
vowel, twice in a row.
Examples: "baba", "caca", or "cabo"
For practice, use '[]' (a character class).
'''
import re
import sys
REGEX = r''
lines = sys.stdin.readlines()
for line in lines:
if re.search(REGEX, line):
print(line.replace('\n', ''))
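# One possible answer (an illustration, not the official solution): two
# consecutive [bc]-plus-vowel pairs, using the character classes the
# exercise asks for:
#     REGEX = r'[bc][aeiou][bc][aeiou]'
# e.g. re.search(r'[bc][aeiou][bc][aeiou]', 'cabo') matches ('ca' + 'bo').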
|
[
"re.search",
"sys.stdin.readlines"
] |
[((221, 242), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (240, 242), False, 'import sys\n'), ((270, 292), 're.search', 're.search', (['REGEX', 'line'], {}), '(REGEX, line)\n', (279, 292), False, 'import re\n')]
|
import h5py
import tables
import numpy as np
import sys
args=int(sys.argv[1])
# Read hdf5 file
h5file = tables.open_file(f"./data/atraining-{args}.h5", "r")
WaveformTable = h5file.root.Waveform
GroundTruthTable = h5file.root.GroundTruth
sinevet,sinchan,sintime=[],[],[]
# use the ground truth to find events that contain only a single photon
i=1
while i <100000:
if GroundTruthTable[i]['ChannelID']!=GroundTruthTable[i-1]['ChannelID'] and GroundTruthTable[i]['ChannelID']!=GroundTruthTable[i+1]['ChannelID']:
sinevet.append(GroundTruthTable[i]['EventID'])
sintime.append(GroundTruthTable[i]['PETime'])
sinchan.append(GroundTruthTable[i]['ChannelID'])
i+=1
# accumulate the waveforms of the single-photon events
sumwave=np.zeros(1029,dtype=np.int32)
sinlen=len(sinevet)
for x in range(sinlen):
if x%100==0:
print(f"{x*100/sinlen}%")
posi=0
while True:
if WaveformTable[posi]["EventID"]==sinevet[x] and WaveformTable[posi]["ChannelID"]==sinchan[x]:
break
posi+=1
sumwave+=np.append(WaveformTable[posi]['Waveform'][sintime[x]:],WaveformTable[posi]['Waveform'][:sintime[x]])-972
# compute the mean waveform
averwave=sumwave/sinlen
averzero=np.average(averwave[100:])
spe=averwave-averzero
with h5py.File(f"medium/average{args+1}.h5", "w") as opt1:
opt1.create_dataset("averzero", data=np.array([averzero]))
with h5py.File(f'medium/singlewave{args+1}.h5',"w") as opt2:
opt2.create_dataset("spe",data=spe,compression="gzip", shuffle=True)
# results written to file
h5file.close()
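# A possible speed-up (a sketch, not part of the original pipeline): the
# linear scan in the loop above costs O(rows) per event; a one-off
# (EventID, ChannelID) -> row-index map would make each lookup O(1):
#     index = {(row['EventID'], row['ChannelID']): n
#              for n, row in enumerate(WaveformTable)}
#     posi = index[(sinevet[x], sinchan[x])]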
|
[
"h5py.File",
"numpy.average",
"numpy.zeros",
"numpy.append",
"numpy.array",
"tables.open_file"
] |
[((105, 157), 'tables.open_file', 'tables.open_file', (['f"""./data/atraining-{args}.h5"""', '"""r"""'], {}), "(f'./data/atraining-{args}.h5', 'r')\n", (121, 157), False, 'import tables\n'), ((665, 695), 'numpy.zeros', 'np.zeros', (['(1029)'], {'dtype': 'np.int32'}), '(1029, dtype=np.int32)\n', (673, 695), True, 'import numpy as np\n'), ((1114, 1140), 'numpy.average', 'np.average', (['averwave[100:]'], {}), '(averwave[100:])\n', (1124, 1140), True, 'import numpy as np\n'), ((1169, 1215), 'h5py.File', 'h5py.File', (['f"""medium/average{args + 1}.h5"""', '"""w"""'], {}), "(f'medium/average{args + 1}.h5', 'w')\n", (1178, 1215), False, 'import h5py\n'), ((1291, 1340), 'h5py.File', 'h5py.File', (['f"""medium/singlewave{args + 1}.h5"""', '"""w"""'], {}), "(f'medium/singlewave{args + 1}.h5', 'w')\n", (1300, 1340), False, 'import h5py\n'), ((968, 1074), 'numpy.append', 'np.append', (["WaveformTable[posi]['Waveform'][sintime[x]:]", "WaveformTable[posi]['Waveform'][:sintime[x]]"], {}), "(WaveformTable[posi]['Waveform'][sintime[x]:], WaveformTable[posi]\n ['Waveform'][:sintime[x]])\n", (977, 1074), True, 'import numpy as np\n'), ((1264, 1284), 'numpy.array', 'np.array', (['[averzero]'], {}), '([averzero])\n', (1272, 1284), True, 'import numpy as np\n')]
|
from brownie import LinearVesting, Contract
from scripts.helper_functions import get_account
custom_token_address = "0x61c2984d0D60e8C498bdEE6dbE4A4E83E53ecfE8"
amount = 1000000 * 10 ** 18
def deploy():
account = get_account()
publish_source = True
vesting = LinearVesting.deploy(
custom_token_address,
{"from": account},
publish_source=publish_source,
)
print(f"Contract {vesting.address} deployed succesfully!")
print(
f"View the contract at https://rinkeby.etherscan.io/address/{vesting.address}"
)
def load_tokens():
account = get_account()
vesting = LinearVesting[-1]
custom_token = Contract(custom_token_address)
custom_token.transfer(vesting.address, amount, {"from": account})
def add_new_recipient():
account = get_account()
vesting = LinearVesting[-1]
vesting.addNewRecipient(account, amount, {"from": account})
def withdraw():
account = get_account()
vesting = LinearVesting[-1]
vesting.withdrawToken(account, {"from": account})
def print_values():
account = get_account()
vesting = LinearVesting[-1]
print("Amount locked: ", vesting.getLocked(account))
print("Amount withdrawable: ", vesting.getWithdrawable(account))
print("LOLLOL:", vesting.getVested(account))
def main():
deploy()
load_tokens()
add_new_recipient()
print_values()
# withdraw()
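# Typical invocation (assuming this file lives under scripts/; the file name
# vesting.py is hypothetical):
#     brownie run scripts/vesting.py main --network rinkeby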
|
[
"brownie.Contract",
"brownie.LinearVesting.deploy",
"scripts.helper_functions.get_account"
] |
[((220, 233), 'scripts.helper_functions.get_account', 'get_account', ([], {}), '()\n', (231, 233), False, 'from scripts.helper_functions import get_account\n'), ((274, 370), 'brownie.LinearVesting.deploy', 'LinearVesting.deploy', (['custom_token_address', "{'from': account}"], {'publish_source': 'publish_source'}), "(custom_token_address, {'from': account},\n publish_source=publish_source)\n", (294, 370), False, 'from brownie import LinearVesting, Contract\n'), ((600, 613), 'scripts.helper_functions.get_account', 'get_account', ([], {}), '()\n', (611, 613), False, 'from scripts.helper_functions import get_account\n'), ((665, 695), 'brownie.Contract', 'Contract', (['custom_token_address'], {}), '(custom_token_address)\n', (673, 695), False, 'from brownie import LinearVesting, Contract\n'), ((807, 820), 'scripts.helper_functions.get_account', 'get_account', ([], {}), '()\n', (818, 820), False, 'from scripts.helper_functions import get_account\n'), ((949, 962), 'scripts.helper_functions.get_account', 'get_account', ([], {}), '()\n', (960, 962), False, 'from scripts.helper_functions import get_account\n'), ((1085, 1098), 'scripts.helper_functions.get_account', 'get_account', ([], {}), '()\n', (1096, 1098), False, 'from scripts.helper_functions import get_account\n')]
|
from classifier.dataset_readers.dataset_reader import ClassificationTsvReader
from classifier.dataset_readers.dataset_reader_pt import ClassificationPtTsvReader
from allennlp.common.util import ensure_list
def test_rey_reader_1(project_root_dir_path, test_fixtures_dir_path, test_log):
data_file_path = test_fixtures_dir_path / 'data' / 'train_500.tsv'
reader = ClassificationTsvReader()
instances = ensure_list(reader.read(str(data_file_path)))
print(instances)
assert len(instances) == 10
# instances[0].fields["text"].tokens
assert instances[0].fields["label"].label == '27'
def test_rey_reader_2(project_root_dir_path, test_fixtures_dir_path, test_log):
data_file_path = test_fixtures_dir_path / 'data' / 'train_500.tsv'
reader = ClassificationPtTsvReader()
instances = ensure_list(reader.read(str(data_file_path)))
print(instances)
assert len(instances) == 10
print(instances[0].fields["text"].tokens)
assert instances[0].fields["label"].label == '27'
|
[
"classifier.dataset_readers.dataset_reader_pt.ClassificationPtTsvReader",
"classifier.dataset_readers.dataset_reader.ClassificationTsvReader"
] |
[((372, 397), 'classifier.dataset_readers.dataset_reader.ClassificationTsvReader', 'ClassificationTsvReader', ([], {}), '()\n', (395, 397), False, 'from classifier.dataset_readers.dataset_reader import ClassificationTsvReader\n'), ((776, 803), 'classifier.dataset_readers.dataset_reader_pt.ClassificationPtTsvReader', 'ClassificationPtTsvReader', ([], {}), '()\n', (801, 803), False, 'from classifier.dataset_readers.dataset_reader_pt import ClassificationPtTsvReader\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A custom script example that utilizes the .JSON contents of the tryjob."""
from __future__ import print_function
import json
import sys
from update_tryjob_status import TryjobStatus
def main():
"""Determines the exit code based off of the contents of the .JSON file."""
# Index 1 in 'sys.argv' is the path to the .JSON file which contains
# the contents of the tryjob.
#
# Format of the tryjob contents:
# {
# "status" : [TRYJOB_STATUS],
# "buildbucket_id" : [BUILDBUCKET_ID],
# "extra_cls" : [A_LIST_OF_EXTRA_CLS_PASSED_TO_TRYJOB],
# "url" : [GERRIT_URL],
# "builder" : [TRYJOB_BUILDER_LIST],
# "rev" : [REVISION],
# "link" : [LINK_TO_TRYJOB],
# "options" : [A_LIST_OF_OPTIONS_PASSED_TO_TRYJOB]
# }
abs_path_json_file = sys.argv[1]
with open(abs_path_json_file) as f:
tryjob_contents = json.load(f)
CUTOFF_PENDING_REVISION = 369416
SKIP_REVISION_CUTOFF_START = 369420
SKIP_REVISION_CUTOFF_END = 369428
if tryjob_contents['status'] == TryjobStatus.PENDING.value:
if tryjob_contents['rev'] <= CUTOFF_PENDING_REVISION:
# Exit code 0 means to set the tryjob 'status' as 'good'.
sys.exit(0)
# Exit code 124 means to set the tryjob 'status' as 'bad'.
sys.exit(124)
if tryjob_contents['status'] == TryjobStatus.BAD.value:
# Need to take a closer look at the contents of the tryjob to then decide
# what that tryjob's 'status' value should be.
#
# Since the exit code is not in the mapping, an exception will occur which
# will save the file in the directory of this custom script example.
sys.exit(1)
if tryjob_contents['status'] == TryjobStatus.SKIP.value:
    # Validate that the 'skip' value is really set between the cutoffs.
if SKIP_REVISION_CUTOFF_START < tryjob_contents['rev'] < \
SKIP_REVISION_CUTOFF_END:
# Exit code 125 means to set the tryjob 'status' as 'skip'.
sys.exit(125)
if tryjob_contents['rev'] >= SKIP_REVISION_CUTOFF_END:
sys.exit(124)
if __name__ == '__main__':
main()
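# Worked example (field values illustrative, other required keys elided): a
# tryjob JSON like {"status": "pending", "rev": 369400, ...} exits 0 here
# (369400 <= CUTOFF_PENDING_REVISION, so the tryjob is marked 'good'), while
# {"status": "skip", "rev": 369424, ...} exits 125 (inside the skip cutoff
# window).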
|
[
"json.load",
"sys.exit"
] |
[((1084, 1096), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1093, 1096), False, 'import json\n'), ((1479, 1492), 'sys.exit', 'sys.exit', (['(124)'], {}), '(124)\n', (1487, 1492), False, 'import sys\n'), ((1843, 1854), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1851, 1854), False, 'import sys\n'), ((1399, 1410), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1407, 1410), False, 'import sys\n'), ((2155, 2168), 'sys.exit', 'sys.exit', (['(125)'], {}), '(125)\n', (2163, 2168), False, 'import sys\n'), ((2235, 2248), 'sys.exit', 'sys.exit', (['(124)'], {}), '(124)\n', (2243, 2248), False, 'import sys\n')]
|
from flask import render_template, url_for
# import render_template (to link templates to routes) and url_for (to build
# URLs to functions and HTML pages)
from ..app import app
# import the app variable that instantiates the application
# | ROUTES FOR COMMON ERRORS |
@app.errorhandler(401)
def unauthorized_error(error):
    """
    Route that, on a 401 error (unauthorized access), renders the 401.html page
:return: template 401.html
:rtype: template
"""
return render_template('error/401.html'), 401
@app.errorhandler(404)
def not_found_error(error):
"""
    Route that, on a 404 error (page not found), renders the 404.html page
:return: template 404.html
:rtype: template
"""
return render_template('error/404.html'), 404
@app.errorhandler(500)
def internal_error(error):
"""
    Route that, on a 500 error (internal server error), renders the 500.html page
:return: template 500.html
:rtype: template
"""
return render_template('error/500.html'), 500
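# To exercise these handlers manually (an illustrative note, not part of the
# original module), any view can raise the matching HTTP error with
# flask.abort, e.g.:
#     from flask import abort
#     abort(404)  # the 404 handler above then renders error/404.html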
|
[
"flask.render_template"
] |
[((545, 578), 'flask.render_template', 'render_template', (['"""error/401.html"""'], {}), "('error/401.html')\n", (560, 578), False, 'from flask import render_template, url_for\n'), ((810, 843), 'flask.render_template', 'render_template', (['"""error/404.html"""'], {}), "('error/404.html')\n", (825, 843), False, 'from flask import render_template, url_for\n'), ((1083, 1116), 'flask.render_template', 'render_template', (['"""error/500.html"""'], {}), "('error/500.html')\n", (1098, 1116), False, 'from flask import render_template, url_for\n')]
|
from pathlib import Path
from typing import Union
__all__ = ("ScreenshotPath",)
class ScreenshotPath:
def __init__(self, dir_: Path) -> None:
self.dir = dir_
self.rerun: Union[int, None] = None
self.timestamp: Union[int, None] = None
self.scenario_path: Union[Path, None] = None
self.scenario_subject: Union[str, None] = None
self.step_name: Union[str, None] = None
self.tab_index: Union[int, None] = None
def resolve(self) -> Path:
dir_path = self.dir
if self.scenario_path is not None:
cwd = Path().resolve()
rel_path = self.scenario_path.relative_to(cwd)
dir_path = self.dir.joinpath(rel_path.with_suffix(""))
file_path = "screenshot"
if self.scenario_subject is not None:
file_path = self.scenario_subject
if self.rerun is not None:
file_path = f"[{self.rerun}]{file_path}"
if self.timestamp is not None:
file_path += f"__{self.timestamp}"
if self.step_name is not None:
file_path += f"__{self.step_name}"
if self.tab_index is not None:
file_path = f"tab{self.tab_index}__{file_path}"
return dir_path / (file_path + ".png")
def __repr__(self) -> str:
path = self.resolve()
return f"{self.__class__.__name__}<{path}>"
|
[
"pathlib.Path"
] |
[((590, 596), 'pathlib.Path', 'Path', ([], {}), '()\n', (594, 596), False, 'from pathlib import Path\n')]
|
import tensorflow as tf
import dataIO
import numpy as np
from datetime import datetime
from model import model
from parameters import *
# preprocess input data
def prepareDataTraining(seg_data, somae_data_raw):
somae_data = seg_data.copy()
somae_data[somae_data_raw==0]=0
seg_data = seg_data[:,:network_size,:network_size]
somae_data = somae_data[:,:network_size,:network_size]
    # allocate an array whose last axis holds the depth*2+1 neighbouring slices
seg_deep = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2],depth*2+1), dtype=np.uint8)
# populate deep segmentation tensor
seg_deep[:,:,:,depth]=seg_data
for d in range(1,depth+1):
seg_deep[:-d,:,:,depth+d]=seg_data[d:,:,:]
seg_deep[d:,:,:,depth-d]=seg_data[:-d,:,:]
# cut training and validation dataset
valid_seg = seg_deep[:val_data_size,:,:,:]
valid_mask = somae_data[:val_data_size,:,:]
train_seg = seg_deep[val_data_size:,:,:,:]
train_mask = somae_data[val_data_size:,:,:]
# shuffle both training and validation data
valid_ids = np.random.permutation(valid_seg.shape[0])
train_ids = np.random.permutation(train_seg.shape[0])
valid_seg[:,:,:] = valid_seg[valid_ids,:,:,:]
valid_mask[:,:,:] = valid_mask[valid_ids,:,:]
train_seg[:,:,:] = train_seg[train_ids,:,:,:]
train_mask[:,:,:] = train_mask[train_ids,:,:]
return train_seg, train_mask, valid_seg, valid_mask
# preprocess input data
def prepareDataPrediction(seg_data):
seg_data = seg_data[:,:network_size,:network_size]
    # allocate an array whose last axis holds the depth*2+1 neighbouring slices
seg_deep = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2],depth*2+1), dtype=np.uint8)
# populate deep segmentation tensor
seg_deep[:,:,:,depth]=seg_data
for d in range(1,depth+1):
seg_deep[:-d,:,:,depth+d]=seg_data[d:,:,:]
seg_deep[d:,:,:,depth-d]=seg_data[:-d,:,:]
    # for prediction, keep the whole deep tensor (no train/validation split)
valid_seg = seg_deep[:,:,:,:]
return valid_seg
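# Illustration (assuming depth == 1): for a stack of slices [s0, s1, s2, ...],
# seg_deep[z] carries the channels [s_{z-1}, s_z, s_{z+1}], zero-padded at the
# volume borders, so the 2D network sees a thin 3D context around each slice.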
# define the weighted loss function
class WeightedBinaryCrossEntropy(tf.losses.Loss):
"""
Args:
pos_weight: Scalar to affect the positive labels of the loss function.
weight: Scalar to affect the entirety of the loss function.
from_logits: Whether to compute loss form logits or the probability.
reduction: Type of tf.losses.Reduction to apply to loss.
name: Name of the loss function.
"""
def __init__(self, pos_weight, weight, from_logits=False,
reduction=tf.losses.Reduction.AUTO,
name='weighted_binary_crossentropy'):
super(WeightedBinaryCrossEntropy, self).__init__(reduction=reduction,
name=name)
self.pos_weight = pos_weight
self.weight = weight
self.from_logits = from_logits
def call(self, y_true, y_pred):
if not self.from_logits:
# Manually calculate the weighted cross entropy.
# Formula is qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
# where z are labels, x is logits, and q is the weight.
# Since the values passed are from sigmoid (assuming in this case)
# sigmoid(x) will be replaced by y_pred
# qz * -log(sigmoid(x)) 1e-6 is added as an epsilon to stop passing a zero into the log
x_1 = y_true * self.pos_weight * -tf.math.log(y_pred + 1e-6)
# (1 - z) * -log(1 - sigmoid(x)). Epsilon is added to prevent passing a zero into the log
x_2 = (1 - y_true) * -tf.math.log(1 - y_pred + 1e-6)
return tf.add(x_1, x_2) * self.weight
# Use built in function
return tf.nn.weighted_cross_entropy_with_logits(y_true, y_pred, self.pos_weight) * self.weight
# model weights
class model_weights:
def __init__(self, shapes):
self.values = []
self.checkpoint_path = './ckpt_'+ datetime.now().strftime("%Y%m%d-%H%M%S")+'/'
initializer = tf.initializers.RandomNormal()
def get_weight( shape , name ):
return tf.Variable( initializer( shape ) , name=name , trainable=True , dtype=tf.float32 )
for i in range( len( shapes ) ):
self.values.append( get_weight( shapes[ i ] , 'weight{}'.format( i ) ) )
self.ckpt = tf.train.Checkpoint(**{f'values{i}': v for i, v in enumerate(self.values)})
def saveWeights(self):
self.ckpt.save(self.checkpoint_path)
def restoreWeights(self, ckpt_restore):
print("restoring weights from: " + str(ckpt_restore))
status = self.ckpt.restore(ckpt_restore)
status.assert_consumed() # Optional check
def initializeModel(restore, ckpt_restore):
# filters for the UNET layers:
# filters = [depth*2+1,64,128,256,512,1024,1] #original UNET
filters = [depth*2+1, 16,32, 64, 128,256,1] # modified, lighter UNET
# shapes of the weight tensors
shapes = [
[ 3, 3, filters[0], filters[1]], #L11 -> L12
[ 3, 3, filters[1], filters[1]], #L12 -> L13
[ 3, 3, filters[1], filters[2]], #L21 -> L22
[ 3, 3, filters[2], filters[2]], #L22 -> L23
[ 3, 3, filters[2], filters[3]], #L31 -> L32
[ 3, 3, filters[3], filters[3]], #L32 -> L33
[ 3, 3, filters[3], filters[4]], #L41 -> L42
[ 3, 3, filters[4], filters[4]], #L42 -> L43
[ 3, 3, filters[4], filters[5]], #L51 -> L52
[ 3, 3, filters[5], filters[5]], #L52 -> L53
[ 2, 2, filters[4], filters[5]], #L53 -> L44
[ 3, 3, 2*filters[4], filters[4]], #L44 -> L45
[ 3, 3, filters[4], filters[4]], #L45 -> L46
[ 2, 2, filters[3], filters[4]], #L46 -> L34
[ 3, 3, 2*filters[3], filters[3]], #L34 -> L35
[ 3, 3, filters[3], filters[3]], #L35 -> L36
[ 2, 2, filters[2], filters[3]], #L36 -> L24
[ 3, 3, 2*filters[2], filters[2]], #L24 -> L25
[ 3, 3, filters[2], filters[2]], #L25 -> L26
[ 2, 2, filters[1], filters[2]], #L25 -> L14
[ 3, 3, 2*filters[1], filters[1]], #L14 -> L15
[ 3, 3, filters[1], filters[1]], #L15 -> L16
[ 1, 1, filters[1], filters[6]], #L16 -> L17
]
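    # Reading the table above: each entry is a conv-kernel shape
    # [kh, kw, in_channels, out_channels]; e.g. [3, 3, filters[0], filters[1]]
    # maps the (depth*2+1)-channel input to 16 feature maps, and the [2, 2, ...]
    # entries are presumably the 2x2 up-sampling kernels of the decoder path
    # (the model module itself is not shown here).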
weights = model_weights(shapes)
if restore:
weights.restoreWeights(ckpt_restore)
# initialize loss
w_loss = WeightedBinaryCrossEntropy(12, 1)
# initialize optimizer
optimizer = tf.optimizers.Adam(learning_rate)
# initialize accuracy objects
train_acc = tf.metrics.BinaryAccuracy()
valid_acc = tf.metrics.BinaryAccuracy()
train_loss = tf.metrics.Mean()
valid_loss = tf.metrics.Mean()
TP = tf.keras.metrics.TruePositives()
FP = tf.keras.metrics.FalsePositives()
TN = tf.keras.metrics.TrueNegatives()
FN = tf.keras.metrics.FalseNegatives()
return weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN
# define train step
def train_step(model, weights, inputs, gt, optimizer, w_loss, train_loss, train_acc):
with tf.GradientTape() as tape:
pred = model(inputs, weights)
current_loss = w_loss( gt, pred)
grads = tape.gradient(current_loss, weights.values )
optimizer.apply_gradients(zip(grads , weights.values ) )
train_loss.update_state(current_loss)
train_acc.update_state(gt, pred)
return optimizer
# define prediction step
def predict_step(model, weights, inputs, gt, w_loss, valid_loss, valid_acc, TP, FP, TN, FN):  # TODO: remove passing of model here
pred = model(inputs, weights)
current_loss = w_loss( gt, pred)
valid_loss.update_state(current_loss)
valid_acc.update_state(gt, pred)
TP.update_state(gt,pred)
FP.update_state(gt,pred)
TN.update_state(gt,pred)
FN.update_state(gt,pred)
return pred
def trainOnEpochs(train_seg, train_mask, valid_seg, valid_mask, weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN):
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
valid_log_dir = 'logs/gradient_tape/' + current_time + '/valid'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
valid_summary_writer = tf.summary.create_file_writer(valid_log_dir)
valid_loss_best = 1000000000
for epoch in range(epochs):
print("TP: ")
print(TP.result().numpy())
print("FN: ")
print(FN.result().numpy())
print("FP: ")
print(FP.result().numpy())
print("TN: ")
print(TN.result().numpy())
TPR = TP.result().numpy()/(TP.result().numpy()+FN.result().numpy())
FPR = FP.result().numpy()/(FP.result().numpy()+TN.result().numpy())
print("TPR: ")
print(TPR)
print("FPR: ")
print(FPR)
with train_summary_writer.as_default():
tf.summary.scalar('loss', train_loss.result(), step=epoch)
tf.summary.scalar('accuracy', train_acc.result(), step=epoch)
with valid_summary_writer.as_default():
tf.summary.scalar('loss', valid_loss.result(), step=epoch)
tf.summary.scalar('accuracy', valid_acc.result(), step=epoch)
tf.summary.scalar('TPR', TPR, step=epoch)
tf.summary.scalar('FPR', FPR, step=epoch)
train_acc.reset_states()
valid_acc.reset_states()
train_loss.reset_states()
valid_loss.reset_states()
print("---------------------")
print("Epoch: " + str(epoch))
for k in np.arange(0,train_seg.shape[0],batch_size):
image = train_seg[k:k+batch_size,:,:,:].copy()
mask = train_mask[k:k+batch_size,:,:,None].copy()
# choose random ID
ids_present = np.unique(mask)
if ids_present[0]==0: ids_present=ids_present[1:]
id_rand = np.random.choice(ids_present)
# binarize
image[image!=id_rand]=0
image[image==id_rand]=1
mask[mask!=id_rand]=0
mask[mask==id_rand]=1
image = tf.convert_to_tensor(image, dtype=tf.float32 )
mask_gt = tf.convert_to_tensor(mask, dtype=tf.float32 )
optimizer = train_step(model, weights, image, mask_gt, optimizer, w_loss, train_loss, train_acc)
for j in np.arange(0,valid_seg.shape[0],batch_size):
image = valid_seg[j:j+batch_size,:,:,:].copy()
mask = valid_mask[j:j+batch_size,:,:,None].copy()
# choose random ID
ids_present = np.unique(mask)
if ids_present[0]==0: ids_present=ids_present[1:]
id_rand = np.random.choice(ids_present)
# binarize
image[image!=id_rand]=0
image[image==id_rand]=1
mask[mask!=id_rand]=0
mask[mask==id_rand]=1
image = tf.convert_to_tensor( image , dtype=tf.float32 )
mask_gt = tf.convert_to_tensor( mask , dtype=tf.float32 )
mask_pred = predict_step(model, weights, image, mask_gt, w_loss, valid_loss, valid_acc, TP, FP, TN, FN).numpy()
if epoch%10==0:
with valid_summary_writer.as_default():
tf.summary.image("valid-epoch"+str(epoch)+"j-"+str(j), tf.concat([tf.expand_dims(image[:,:,:,depth],3), mask_gt, mask_pred],axis=1), step=epoch, max_outputs=5)
print("Train loss: " + str(train_loss.result().numpy()))
print("Train accu: " + str(train_acc.result().numpy()))
print("Valid loss: " + str(valid_loss.result().numpy()))
print("Valid accu: " + str(valid_acc.result().numpy()))
weights.saveWeights()
print("Weights saved ------------------")
def Train(restore, ckpt_restore):
# Mouse
seg_filepath = train_seg_in_filepath
somae_filepath = train_somae_in_filepath
seg_data = dataIO.ReadH5File(seg_filepath, [1])
somae_data = dataIO.ReadH5File(somae_filepath, [1])
train_seg, train_mask, valid_seg, valid_mask = prepareDataTraining(seg_data, somae_data)
weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN = initializeModel(restore=restore, ckpt_restore=ckpt_restore)
trainOnEpochs(train_seg, train_mask, valid_seg, valid_mask, weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN)
def Predict(ckpt_restore):
# Zebrafinch
seg_filepath = predict_seg_in_filepath
seg_data = dataIO.ReadH5File(seg_filepath, [1])
seg_data = seg_data[:,:network_size,:network_size]
somae_mask_out = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2]), dtype=np.float64)
weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN = initializeModel(restore=True, ckpt_restore=ckpt_restore)
seg_data_prep = prepareDataPrediction(seg_data)
unique_ids = np.unique(seg_data)
for ID in unique_ids:
print("Processind ID " + str(ID))
seg_data_filtered = seg_data_prep.copy()
seg_data_filtered[seg_data_filtered!=ID]=0
# mask the data to be binary
seg_data_filtered[seg_data_filtered>0]=1
for j in np.arange(0,seg_data_filtered.shape[0],batch_size):
image = seg_data_filtered[j:j+batch_size,:,:,:]
image = tf.convert_to_tensor( image , dtype=tf.float32 )
if np.max(image[:,:,:,depth])!=0:
mask_pred = tf.squeeze(model(image, weights)).numpy()
mask_pred[mask_pred<=0.5]=0
mask_pred[mask_pred>0.5]=1
mask_pred = image[:,:,:,depth]*mask_pred
somae_mask_out[j:j+batch_size,:,:] = somae_mask_out[j:j+batch_size,:,:]+mask_pred[:,:,:]
del seg_data_filtered
somae_mask_out = somae_mask_out.astype(np.uint64)
dataIO.WriteH5File(somae_mask_out, somae_prediction_out_filepath, "main")
|
[
"tensorflow.keras.metrics.FalseNegatives",
"numpy.arange",
"numpy.unique",
"tensorflow.math.log",
"tensorflow.keras.metrics.TrueNegatives",
"tensorflow.keras.metrics.FalsePositives",
"model.model",
"numpy.max",
"numpy.random.choice",
"datetime.datetime.now",
"tensorflow.initializers.RandomNormal",
"tensorflow.summary.scalar",
"tensorflow.nn.weighted_cross_entropy_with_logits",
"tensorflow.metrics.Mean",
"tensorflow.add",
"dataIO.ReadH5File",
"tensorflow.optimizers.Adam",
"numpy.random.permutation",
"tensorflow.keras.metrics.TruePositives",
"tensorflow.expand_dims",
"dataIO.WriteH5File",
"tensorflow.convert_to_tensor",
"numpy.zeros",
"tensorflow.metrics.BinaryAccuracy",
"tensorflow.summary.create_file_writer",
"tensorflow.GradientTape"
] |
[((486, 589), 'numpy.zeros', 'np.zeros', (['(seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * 2 + 1)'], {'dtype': 'np.uint8'}), '((seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * \n 2 + 1), dtype=np.uint8)\n', (494, 589), True, 'import numpy as np\n'), ((1085, 1126), 'numpy.random.permutation', 'np.random.permutation', (['valid_seg.shape[0]'], {}), '(valid_seg.shape[0])\n', (1106, 1126), True, 'import numpy as np\n'), ((1143, 1184), 'numpy.random.permutation', 'np.random.permutation', (['train_seg.shape[0]'], {}), '(train_seg.shape[0])\n', (1164, 1184), True, 'import numpy as np\n'), ((1650, 1753), 'numpy.zeros', 'np.zeros', (['(seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * 2 + 1)'], {'dtype': 'np.uint8'}), '((seg_data.shape[0], seg_data.shape[1], seg_data.shape[2], depth * \n 2 + 1), dtype=np.uint8)\n', (1658, 1753), True, 'import numpy as np\n'), ((6791, 6824), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (6809, 6824), True, 'import tensorflow as tf\n'), ((6876, 6903), 'tensorflow.metrics.BinaryAccuracy', 'tf.metrics.BinaryAccuracy', ([], {}), '()\n', (6901, 6903), True, 'import tensorflow as tf\n'), ((6920, 6947), 'tensorflow.metrics.BinaryAccuracy', 'tf.metrics.BinaryAccuracy', ([], {}), '()\n', (6945, 6947), True, 'import tensorflow as tf\n'), ((6965, 6982), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (6980, 6982), True, 'import tensorflow as tf\n'), ((7000, 7017), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (7015, 7017), True, 'import tensorflow as tf\n'), ((7028, 7060), 'tensorflow.keras.metrics.TruePositives', 'tf.keras.metrics.TruePositives', ([], {}), '()\n', (7058, 7060), True, 'import tensorflow as tf\n'), ((7070, 7103), 'tensorflow.keras.metrics.FalsePositives', 'tf.keras.metrics.FalsePositives', ([], {}), '()\n', (7101, 7103), True, 'import tensorflow as tf\n'), ((7113, 7145), 'tensorflow.keras.metrics.TrueNegatives', 'tf.keras.metrics.TrueNegatives', ([], {}), '()\n', (7143, 7145), True, 'import tensorflow as tf\n'), ((7155, 7188), 'tensorflow.keras.metrics.FalseNegatives', 'tf.keras.metrics.FalseNegatives', ([], {}), '()\n', (7186, 7188), True, 'import tensorflow as tf\n'), ((7897, 7919), 'model.model', 'model', (['inputs', 'weights'], {}), '(inputs, weights)\n', (7902, 7919), False, 'from model import model\n'), ((8550, 8594), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['train_log_dir'], {}), '(train_log_dir)\n', (8579, 8594), True, 'import tensorflow as tf\n'), ((8622, 8666), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['valid_log_dir'], {}), '(valid_log_dir)\n', (8651, 8666), True, 'import tensorflow as tf\n'), ((12246, 12282), 'dataIO.ReadH5File', 'dataIO.ReadH5File', (['seg_filepath', '[1]'], {}), '(seg_filepath, [1])\n', (12263, 12282), False, 'import dataIO\n'), ((12300, 12338), 'dataIO.ReadH5File', 'dataIO.ReadH5File', (['somae_filepath', '[1]'], {}), '(somae_filepath, [1])\n', (12317, 12338), False, 'import dataIO\n'), ((12848, 12884), 'dataIO.ReadH5File', 'dataIO.ReadH5File', (['seg_filepath', '[1]'], {}), '(seg_filepath, [1])\n', (12865, 12884), False, 'import dataIO\n'), ((12963, 13053), 'numpy.zeros', 'np.zeros', (['(seg_data.shape[0], seg_data.shape[1], seg_data.shape[2])'], {'dtype': 'np.float64'}), '((seg_data.shape[0], seg_data.shape[1], seg_data.shape[2]), dtype=\n np.float64)\n', (12971, 13053), True, 'import numpy as np\n'), ((13272, 13291), 'numpy.unique', 'np.unique', (['seg_data'], {}), '(seg_data)\n', (13281, 13291), True, 'import numpy as np\n'), ((14211, 14284), 'dataIO.WriteH5File', 'dataIO.WriteH5File', (['somae_mask_out', 'somae_prediction_out_filepath', ''"""main"""''], {}), "(somae_mask_out, somae_prediction_out_filepath, 'main')\n", (14229, 14284), False, 'import dataIO\n'), ((4048, 4078), 'tensorflow.initializers.RandomNormal', 'tf.initializers.RandomNormal', ([], {}), '()\n', (4076, 4078), True, 'import tensorflow as tf\n'), ((7407, 7424), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7422, 7424), True, 'import tensorflow as tf\n'), ((7449, 7471), 'model.model', 'model', (['inputs', 'weights'], {}), '(inputs, weights)\n', (7454, 7471), False, 'from model import model\n'), ((9928, 9972), 'numpy.arange', 'np.arange', (['(0)', 'train_seg.shape[0]', 'batch_size'], {}), '(0, train_seg.shape[0], batch_size)\n', (9937, 9972), True, 'import numpy as np\n'), ((10709, 10753), 'numpy.arange', 'np.arange', (['(0)', 'valid_seg.shape[0]', 'batch_size'], {}), '(0, valid_seg.shape[0], batch_size)\n', (10718, 10753), True, 'import numpy as np\n'), ((13568, 13620), 'numpy.arange', 'np.arange', (['(0)', 'seg_data_filtered.shape[0]', 'batch_size'], {}), '(0, seg_data_filtered.shape[0], batch_size)\n', (13577, 13620), True, 'import numpy as np\n'), ((3755, 3828), 'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', (['y_true', 'y_pred', 'self.pos_weight'], {}), '(y_true, y_pred, self.pos_weight)\n', (3795, 3828), True, 'import tensorflow as tf\n'), ((8346, 8360), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8358, 8360), False, 'from datetime import datetime\n'), ((9603, 9644), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""TPR"""', 'TPR'], {'step': 'epoch'}), "('TPR', TPR, step=epoch)\n", (9620, 9644), True, 'import tensorflow as tf\n'), ((9657, 9698), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""FPR"""', 'FPR'], {'step': 'epoch'}), "('FPR', FPR, step=epoch)\n", (9674, 9698), True, 'import tensorflow as tf\n'), ((10152, 10167), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (10161, 10167), True, 'import numpy as np\n'), ((10252, 10281), 'numpy.random.choice', 'np.random.choice', (['ids_present'], {}), '(ids_present)\n', (10268, 10281), True, 'import numpy as np\n'), ((10467, 10512), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (10487, 10512), True, 'import tensorflow as tf\n'), ((10536, 10580), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (10556, 10580), True, 'import tensorflow as tf\n'), ((10933, 10948), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (10942, 10948), True, 'import numpy as np\n'), ((11033, 11062), 'numpy.random.choice', 'np.random.choice', (['ids_present'], {}), '(ids_present)\n', (11049, 11062), True, 'import numpy as np\n'), ((11248, 11293), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (11268, 11293), True, 'import tensorflow as tf\n'), ((11319, 11363), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (11339, 11363), True, 'import tensorflow as tf\n'), ((13702, 13747), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (13722, 13747), True, 'import tensorflow as tf\n'), ((3676, 3692), 'tensorflow.add', 'tf.add', (['x_1', 'x_2'], {}), '(x_1, x_2)\n', (3682, 3692), True, 'import tensorflow as tf\n'), ((13767, 13796), 'numpy.max', 'np.max', (['image[:, :, :, depth]'], {}), '(image[:, :, :, depth])\n', (13773, 13796), True, 'import numpy as np\n'), ((3461, 3488), 'tensorflow.math.log', 'tf.math.log', (['(y_pred + 1e-06)'], {}), '(y_pred + 1e-06)\n', (3472, 3488), True, 'import tensorflow as tf\n'), ((3625, 3656), 'tensorflow.math.log', 'tf.math.log', (['(1 - y_pred + 1e-06)'], {}), '(1 - y_pred + 1e-06)\n', (3636, 3656), True, 'import tensorflow as tf\n'), ((3981, 3995), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3993, 3995), False, 'from datetime import datetime\n'), ((13838, 13859), 'model.model', 'model', (['image', 'weights'], {}), '(image, weights)\n', (13843, 13859), False, 'from model import model\n'), ((11662, 11702), 'tensorflow.expand_dims', 'tf.expand_dims', (['image[:, :, :, depth]', '(3)'], {}), '(image[:, :, :, depth], 3)\n', (11676, 11702), True, 'import tensorflow as tf\n')]
|
import os
import platform
import pytest
from mist.action_run import execute_from_text
CHECK_FILE = "scopes.mist"
@pytest.mark.asyncio
async def test_check_if_bool_functions(examples_path):
with open(os.path.join(examples_path, CHECK_FILE), "r") as f:
content = f.read()
output = await execute_from_text(content)
assert """Test before - global: Global, outerLocal: Local
InnerTest before - global: test, outerLocal: Local, local: local
InnerTest after - global: innerTest, outerLocal: innerLocal, local: local
Test after - global: innerTest, outerLocal: innerLocal
Test: global: innerTest
""" == output
|
[
"mist.action_run.execute_from_text",
"os.path.join"
] |
[((305, 331), 'mist.action_run.execute_from_text', 'execute_from_text', (['content'], {}), '(content)\n', (322, 331), False, 'from mist.action_run import execute_from_text\n'), ((206, 245), 'os.path.join', 'os.path.join', (['examples_path', 'CHECK_FILE'], {}), '(examples_path, CHECK_FILE)\n', (218, 245), False, 'import os\n')]
|
import pytest
from unittest.mock import patch
import headlock.c_data_model as cdm
from headlock.address_space.virtual import VirtualAddressSpace
@pytest.fixture
def carray_type(cint_type, addrspace):
return cdm.CArrayType(cint_type, 10, addrspace)
class TestCArrayType:
def test_init_returnsArrayCProxy(self, unbound_cint_type):
carray_type = cdm.CArrayType(unbound_cint_type, 10)
assert carray_type.__addrspace__ is None
assert carray_type.base_type is unbound_cint_type
assert carray_type.element_count == 10
def test_init_onBaseTypeWithDifferentAddrSpaceSet_raisesInvalidAddressSpace(self, cint_type):
other_addrspace = VirtualAddressSpace()
with pytest.raises(cdm.InvalidAddressSpaceError):
_ = cdm.CArrayType(cint_type, 10, other_addrspace)
def test_bind_bindsAlsoBaseElement(self, addrspace):
ctype = cdm.CProxyType(1)
carray_type = cdm.CArrayType(ctype, 10)
bound_carray_type = carray_type.bind(addrspace)
assert bound_carray_type.base_type.__addrspace__ is addrspace
def test_shallowIterSubTypes_returnsBaseType(self, carray_type):
assert list(carray_type.shallow_iter_subtypes()) \
== [carray_type.base_type]
def test_eq_onSamePointer_returnsTrue(self, cint_type):
assert cdm.CPointerType(cint_type, 32, 'little') \
== cdm.CPointerType(cint_type, 32, 'little')
@pytest.mark.parametrize('diff_carr_type', [
"othertype",
cdm.CArrayType(cdm.CIntType('x', 32, True, cdm.ENDIANESS), 10)
.with_attr('attr'),
cdm.CArrayType(cdm.CIntType('x', 32, True, cdm.ENDIANESS), 1000),
cdm.CArrayType(cdm.CIntType('y', 16, False, cdm.ENDIANESS), 10)])
    def test_ne_onDifferingArrayType_returnsTrue(self, diff_carr_type):
basetype = cdm.CIntType('x', 32, True, cdm.ENDIANESS)
assert cdm.CArrayType(basetype, 10) != diff_carr_type
def test_len_returnsSizeOfObject(self, carray_type):
assert len(carray_type) == carray_type.element_count
def test_sizeof_returnsSizeInBytes(self, carray_type):
assert carray_type.sizeof \
== carray_type.element_count * carray_type.base_type.sizeof
@patch.object(cdm.CIntType, 'null_val')
def test_nullValue_ok(self, null_val, carray_type):
assert carray_type.null_val == [null_val] * carray_type.element_count
def test_cDefinition_onRefDef_returnsWithRefDef(self, cint_type):
assert cint_type.array(12).c_definition('x') == 'cint x[12]'
def test_cDefinition_onNoRefDef_returnsWithoutRefDef(self, cint_type):
assert cint_type.array(12).c_definition() == 'cint [12]'
def test_cDefinition_onArrayOfArrays_ok(self, cint_type):
assert cint_type.array(11).array(22).c_definition() == 'cint [22][11]'
def test_cDefinition_onArrayOfPtr_ok(self, cint_type):
assert cint_type.ptr.array(10).c_definition('x') == 'cint *x[10]'
def test_cDefinition_onPtrToArray_ok(self, cint_type):
assert cint_type.array(10).ptr.c_definition('x') == 'cint (*x)[10]'
def test_repr_returnsBaseNamePlusArray(self, unbound_cint_type):
cptr_type = cdm.CArrayType(unbound_cint_type, 123).with_attr('attr')
assert repr(cptr_type) == 'ts.cint_attr_array123'
def test_convertToCRepr_onPyIterable_initializesElementsWithIterablePlusNullVals(self):
carray_type = cdm.CArrayType(cdm.CIntType('i', 32, False, 'big'), 5)
c_repr = carray_type.convert_to_c_repr([0x11, 0x22, 0x33445566])
assert c_repr == b'\x00\x00\x00\x11\x00\x00\x00\x22\x33\x44\x55\x66' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
def test_convertToCRepr_onUtf8WithBigCodepoint_returnsArrayOfCorrectSize(self):
carray_type = cdm.CArrayType(cdm.CIntType('i', 32, False, 'big'), 4)
c_repr = carray_type.convert_to_c_repr('A\u1122')
assert c_repr == b'\x00\x00\x00\x41\x00\x00\x11\x22' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
def test_convertFromCRepr_returnsArrayOfCorrectSize(self):
carray_type = cdm.CArrayType(cdm.CIntType('i', 32, False, 'big'), 5)
py_repr = carray_type.convert_from_c_repr(
b'\x00\x00\x00\x11\x00\x00\x00\x22\x33\x44\x55\x66')
assert py_repr == [0x11, 0x22, 0x33445566, 0, 0]
def test_init_onConstArray_ok(self, cint_type):
carray_type = cint_type.with_attr('const').array(1)
_ = carray_type()
@pytest.mark.parametrize('size', [1, 4])
def test_getAlignment_returnsAlignmentOfBase(self, size, unbound_cint_type):
with patch.object(cdm.CIntType, 'alignment', size):
carray_type = cdm.CArrayType(unbound_cint_type, 4)
assert carray_type.alignment == size
class TestCArray:
def create_int_carray_obj(self, bits, init_val):
cint_type = cdm.CIntType('i'+str(bits), bits, False, cdm.ENDIANESS)
content = b''.join(map(cint_type.convert_to_c_repr, init_val))
addrspace = VirtualAddressSpace(content)
carray_type = cdm.CArrayType(cint_type.bind(addrspace), len(init_val),
addrspace)
return cdm.CArray(carray_type, 0)
def test_str_returnsStringWithZeros(self):
test_vector = [ord('x'), ord('Y'), 0]
carray_obj = self.create_int_carray_obj(16, test_vector)
assert str(carray_obj) == 'xY\0'
def test_getCStr_onZeroTerminatedStr_returnsBytes(self):
test_vector = [ord('X'), ord('y'), 0]
carray_obj = self.create_int_carray_obj(16, test_vector)
assert carray_obj.c_str == b'Xy'
def test_setCStr_onPyStr_changesArrayToZeroTerminatedString(self):
carray_obj = self.create_int_carray_obj(16, [111]*6)
carray_obj.c_str = 'Xy\0z'
assert carray_obj.val == [ord('X'), ord('y'), 0, ord('z'), 0, 0]
def test_setCStr_onTooLongPyStr_raisesValueError(self):
array = self.create_int_carray_obj(16, [111] * 3)
with pytest.raises(ValueError):
array.c_str = 'Xyz'
def test_getUnicodeStr_onZeroTerminatedStr_returnsPyString(self):
test_vector = [0x1234, 0x56, 0]
carray_obj = self.create_int_carray_obj(16, test_vector)
assert carray_obj.unicode_str == '\u1234\x56'
def test_setUnicodeStr_onPyStr_changesArrayToZeroTerminatedString(self):
carray_obj = self.create_int_carray_obj(16, [111] * 6)
carray_obj.unicode_str = '\u1234\x56\0\x78'
assert carray_obj.val == [0x1234, 0x56, 0, 0x78, 0, 0]
def test_getItem_returnsObjectAtNdx(self):
carray_obj = self.create_int_carray_obj(16, [1, 2, 3, 4])
assert carray_obj[2].__address__ \
== carray_obj.__address__ + 2*carray_obj.base_type.sizeof
def test_getItem_onNegativeIndex_returnsElementFromEnd(self):
carray_obj = self.create_int_carray_obj(16, [0]*5)
assert carray_obj[-2].__address__ == carray_obj[3].__address__
def test_getItem_onSlice_returnsSubArray(self):
carray_obj = self.create_int_carray_obj(16, [1, 2, 3, 4])
sliced_carray_obj = carray_obj[1:3]
assert isinstance(sliced_carray_obj, cdm.CArray)
assert sliced_carray_obj.base_type == carray_obj.base_type
assert sliced_carray_obj.__address__ == carray_obj[1].__address__
assert sliced_carray_obj.element_count == 2
def test_getItem_onSliceWithSteps_raiseValueError(self):
carray_obj = self.create_int_carray_obj(16, [1, 2, 3, 4])
with pytest.raises(ValueError):
_ = carray_obj[0:4:2]
def test_getItem_onSliceWithNegativeBoundaries_returnsPartOfArrayFromEnd(self):
carray_obj = self.create_int_carray_obj(16, [0x11, 0x22, 0x33, 0x44])
assert carray_obj[-3:-1] == [0x22, 0x33]
def test_getItem_onSliceWithOpenEnd_returnsPartOfArrayUntilEnd(self):
carray_obj = self.create_int_carray_obj(16, [0x11, 0x22, 0x33, 0x44])
assert carray_obj[1:] == [0x22, 0x33, 0x44]
def test_getItem_onSliceWithOpenStart_returnsPartOfArrayFromStart(self):
carray_obj = self.create_int_carray_obj(16, [0x11, 0x22, 0x33, 0x44])
assert carray_obj[:3] == [0x11, 0x22, 0x33]
def test_add_returnsPointer(self):
carray_obj = self.create_int_carray_obj(8, [0x11] * 32)
added_cproxy = carray_obj + 3
assert isinstance(added_cproxy, cdm.CPointer)
assert added_cproxy.val == carray_obj[3].__address__
def test_repr_returnsClassNameAndContent(self, cint_type, addrspace):
carray_type = cdm.CArrayType(cint_type, 3, addrspace)
carray_obj = carray_type([1, 2, 3])
assert repr(carray_obj) == 'ts.cint_array3([1, 2, 3])'
def test_iter_returnsIterOfElements(self):
data = [0x11, 0x22, 0x33, 0x44]
carray_obj = self.create_int_carray_obj(8, data)
assert list(iter(carray_obj)) == data
|
[
"unittest.mock.patch.object",
"headlock.c_data_model.CIntType",
"headlock.c_data_model.CPointerType",
"headlock.c_data_model.CArray",
"headlock.c_data_model.CArrayType",
"pytest.raises",
"headlock.c_data_model.CProxyType",
"pytest.mark.parametrize",
"headlock.address_space.virtual.VirtualAddressSpace"
] |
[((215, 255), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['cint_type', '(10)', 'addrspace'], {}), '(cint_type, 10, addrspace)\n', (229, 255), True, 'import headlock.c_data_model as cdm\n'), ((2267, 2305), 'unittest.mock.patch.object', 'patch.object', (['cdm.CIntType', ''"""null_val"""''], {}), "(cdm.CIntType, 'null_val')\n", (2279, 2305), False, 'from unittest.mock import patch\n'), ((4524, 4563), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ([''"""size"""'', '[1, 4]'], {}), "('size', [1, 4])\n", (4547, 4563), False, 'import pytest\n'), ((366, 403), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['unbound_cint_type', '(10)'], {}), '(unbound_cint_type, 10)\n', (380, 403), True, 'import headlock.c_data_model as cdm\n'), ((683, 704), 'headlock.address_space.virtual.VirtualAddressSpace', 'VirtualAddressSpace', ([], {}), '()\n', (702, 704), False, 'from headlock.address_space.virtual import VirtualAddressSpace\n'), ((900, 917), 'headlock.c_data_model.CProxyType', 'cdm.CProxyType', (['(1)'], {}), '(1)\n', (914, 917), True, 'import headlock.c_data_model as cdm\n'), ((940, 965), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['ctype', '(10)'], {}), '(ctype, 10)\n', (954, 965), True, 'import headlock.c_data_model as cdm\n'), ((1866, 1908), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""x"""'', '(32)', '(True)', 'cdm.ENDIANESS'], {}), "('x', 32, True, cdm.ENDIANESS)\n", (1878, 1908), True, 'import headlock.c_data_model as cdm\n'), ((5058, 5086), 'headlock.address_space.virtual.VirtualAddressSpace', 'VirtualAddressSpace', (['content'], {}), '(content)\n', (5077, 5086), False, 'from headlock.address_space.virtual import VirtualAddressSpace\n'), ((5229, 5255), 'headlock.c_data_model.CArray', 'cdm.CArray', (['carray_type', '(0)'], {}), '(carray_type, 0)\n', (5239, 5255), True, 'import headlock.c_data_model as cdm\n'), ((8609, 8648), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['cint_type', '(3)', 'addrspace'], {}), '(cint_type, 3, addrspace)\n', (8623, 8648), True, 'import headlock.c_data_model as cdm\n'), ((718, 761), 'pytest.raises', 'pytest.raises', (['cdm.InvalidAddressSpaceError'], {}), '(cdm.InvalidAddressSpaceError)\n', (731, 761), False, 'import pytest\n'), ((779, 825), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['cint_type', '(10)', 'other_addrspace'], {}), '(cint_type, 10, other_addrspace)\n', (793, 825), True, 'import headlock.c_data_model as cdm\n'), ((1339, 1380), 'headlock.c_data_model.CPointerType', 'cdm.CPointerType', (['cint_type', '(32)', ''"""little"""''], {}), "(cint_type, 32, 'little')\n", (1355, 1380), True, 'import headlock.c_data_model as cdm\n'), ((1401, 1442), 'headlock.c_data_model.CPointerType', 'cdm.CPointerType', (['cint_type', '(32)', ''"""little"""''], {}), "(cint_type, 32, 'little')\n", (1417, 1442), True, 'import headlock.c_data_model as cdm\n'), ((1924, 1952), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['basetype', '(10)'], {}), '(basetype, 10)\n', (1938, 1952), True, 'import headlock.c_data_model as cdm\n'), ((3468, 3503), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""i"""'', '(32)', '(False)', ''"""big"""''], {}), "('i', 32, False, 'big')\n", (3480, 3503), True, 'import headlock.c_data_model as cdm\n'), ((3843, 3878), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""i"""'', '(32)', '(False)', ''"""big"""''], {}), "('i', 32, False, 'big')\n", (3855, 3878), True, 'import headlock.c_data_model as cdm\n'), ((4166, 4201), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""i"""'', '(32)', '(False)', ''"""big"""''], {}), "('i', 32, False, 'big')\n", (4178, 4201), True, 'import headlock.c_data_model as cdm\n'), ((4658, 4703), 'unittest.mock.patch.object', 'patch.object', (['cdm.CIntType', ''"""alignment"""'', 'size'], {}), "(cdm.CIntType, 'alignment', size)\n", (4670, 4703), False, 'from unittest.mock import patch\n'), ((4731, 4767), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['unbound_cint_type', '(4)'], {}), '(unbound_cint_type, 4)\n', (4745, 4767), True, 'import headlock.c_data_model as cdm\n'), ((6043, 6068), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6056, 6068), False, 'import pytest\n'), ((7569, 7594), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7582, 7594), False, 'import pytest\n'), ((1657, 1699), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""x"""'', '(32)', '(True)', 'cdm.ENDIANESS'], {}), "('x', 32, True, cdm.ENDIANESS)\n", (1669, 1699), True, 'import headlock.c_data_model as cdm\n'), ((1731, 1774), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""y"""'', '(16)', '(False)', 'cdm.ENDIANESS'], {}), "('y', 16, False, cdm.ENDIANESS)\n", (1743, 1774), True, 'import headlock.c_data_model as cdm\n'), ((3223, 3261), 'headlock.c_data_model.CArrayType', 'cdm.CArrayType', (['unbound_cint_type', '(123)'], {}), '(unbound_cint_type, 123)\n', (3237, 3261), True, 'import headlock.c_data_model as cdm\n'), ((1537, 1579), 'headlock.c_data_model.CIntType', 'cdm.CIntType', ([''"""x"""'', '(32)', '(True)', 'cdm.ENDIANESS'], {}), "('x', 32, True, cdm.ENDIANESS)\n", (1549, 1579), True, 'import headlock.c_data_model as cdm\n')]
|
'''
Handle a transactional file via github's labgaif/td2dot.py.
Similar to the integration tests inside maindecomposition, but
trying them "from an outside file".
Yesterday I got a strange error in the union/find structure,
but I cannot reproduce it anymore :(
It read:
if x.parent == x:
AttributeError: 'str' object has no attribute 'parent'
'''
from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in
from td2dot import read_graph_in
# ~ from td2dot import dump_graph # might become necessary to inspect the graph that was read in
datasetfile = 'titanic_'
graph, items = read_graph_in(datasetfile + '.td')
# make items available as a global variable, necessary for Ely's code to work;
# there, '-' and '=' in names are replaced, since dot disallows them.
# Currently this means:
# TotalAttributesValues = [ item.replace('-', '_').replace('=', '_') for item in items ]
hack_items_in(items)
# option 1 for original labeled Gaifman graph
# ~ my_graph = labGgraph(graph, items)
# option 2 for standard Gaifman graph
my_graph = stdGgraph(graph, items)
# make my_graph available as global variable, necessary for Ely's code to work
hack_graph_in(my_graph)
# decompose it
decompose(my_graph, '2', datasetfile + '_std_decomp')
|
[
"maindecomposition.stdGgraph",
"td2dot.read_graph_in",
"maindecomposition.hack_graph_in",
"maindecomposition.decompose",
"maindecomposition.hack_items_in"
] |
[((588, 622), 'td2dot.read_graph_in', 'read_graph_in', (["(datasetfile + '.td')"], {}), "(datasetfile + '.td')\n", (601, 622), False, 'from td2dot import read_graph_in\n'), ((873, 893), 'maindecomposition.hack_items_in', 'hack_items_in', (['items'], {}), '(items)\n', (886, 893), False, 'from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in\n'), ((1030, 1053), 'maindecomposition.stdGgraph', 'stdGgraph', (['graph', 'items'], {}), '(graph, items)\n', (1039, 1053), False, 'from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in\n'), ((1134, 1157), 'maindecomposition.hack_graph_in', 'hack_graph_in', (['my_graph'], {}), '(my_graph)\n', (1147, 1157), False, 'from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in\n'), ((1173, 1226), 'maindecomposition.decompose', 'decompose', (['my_graph', '"""2"""', "(datasetfile + '_std_decomp')"], {}), "(my_graph, '2', datasetfile + '_std_decomp')\n", (1182, 1226), False, 'from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in\n')]
|
import numpy as np
# Select dataset
dataset = ['A', 'B', 'C']
dataset_id = 0
print(dataset[dataset_id])
# Select model
models = ['fNIRS-T', 'fNIRS-PreT']
models_id = 0
print(models[models_id])
test_acc = []
for tr in range(1, 26):
    path = 'save/' + dataset[dataset_id] + '/KFold/' + models[models_id] + '/' + str(tr)
    # read the best accuracy recorded for this fold; the file is closed promptly
    with open(path + '/test_max_acc.txt', "r") as f:
        string = f.read()
    acc = float(string.split('best_acc=')[1])
    test_acc.append(acc)
test_acc = np.array(test_acc)
print('mean = %.2f' % np.mean(test_acc))
print('std = %.2f' % np.std(test_acc))
|
[
"numpy.std",
"numpy.mean",
"numpy.array"
] |
[((511, 529), 'numpy.array', 'np.array', (['test_acc'], {}), '(test_acc)\n', (519, 529), True, 'import numpy as np\n'), ((552, 569), 'numpy.mean', 'np.mean', (['test_acc'], {}), '(test_acc)\n', (559, 569), True, 'import numpy as np\n'), ((592, 608), 'numpy.std', 'np.std', (['test_acc'], {}), '(test_acc)\n', (598, 608), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from asyncio.log import logger
from typing import List
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from colossalai.logging import get_dist_logger
from torch import Tensor
from colossalai.engine.ophooks import register_ophooks_recursively, BaseOpHook
from typing import Optional, Type
from colossalai.engine.gradient_handler import BaseGradientHandler
class Engine:
"""Basic engine class for training and evaluation. It runs a specific process method
:meth:`step` which is based on the given :attr:`schedule` over each batch of a dataset.
    It controls an iteration in training.
Args:
model (``torch.nn.Module``): The neural network model.
optimizer (``torch.optim.Optimizer``): Optimizer for updating the parameters.
criterion (``torch.nn.modules.loss._Loss``, optional): Loss function for calculating loss.
gradient_handlers (List[``BaseGradientHandler``], optional): A list of gradient handler used in backward.
clip_grad_norm (float, optional): The norm of gradient clipping.
ophook_list (list): List of ophook.
verbose (bool): whether to display log info.
Examples:
>>> # define model, criterion, optimizer, lr_scheduler, train_dataloader for your training
>>> model = ...
>>> criterion = ...
>>> optimizer = ...
>>> train_dataloader = ...
>>> engine, _, _, _ = colossalai.initialize(model, optimizer, criterion)
>>> engine.train()
        >>> for inputs, labels in train_dataloader:
>>> # set gradients to zero
>>> engine.zero_grad()
>>> # run forward pass
>>> outputs = engine(inputs)
>>> # compute loss value and run backward pass
>>> loss = engine.criterion(outputs, labels)
>>> engine.backward(loss)
>>> # update parameters
>>> engine.step()
    The example of using Engine in training can be found in
`Training with engine and trainer <https://www.colossalai.org/docs/basics/engine_trainer>`_. and
`Run resnet cifar10 with engine <https://github.com/hpcaitech/ColossalAI-Examples/blob/main/image/resnet/run_resnet_cifar10_with_engine.py>`_.
"""
def __init__(self,
model: Module,
optimizer: Optimizer,
criterion: Optional[_Loss] = None,
gradient_handlers: Optional[List[BaseGradientHandler]] = None,
clip_grad_norm: float = 0.0,
ophook_list: Optional[List[BaseOpHook]] = None,
verbose: bool = True):
self._model = model
self._optimizer = optimizer
self._criterion = criterion
self._clip_grad_norm = clip_grad_norm
self._verbose = verbose
self._logger = get_dist_logger()
# state
self.training = True # default
# build gradient handler
if gradient_handlers:
self._gradient_handlers = gradient_handlers
else:
self._gradient_handlers = []
if ophook_list is None:
self._ophook_list = []
else:
self._ophook_list = ophook_list
register_ophooks_recursively(self._model, self._ophook_list)
@property
def ophooks(self):
"""show current activated ophooks"""
return self._ophook_list
@property
def model(self):
"""Model attached to the engine"""
return self._model
@property
def optimizer(self):
"""Optimizer attached to the engine"""
return self._optimizer
@property
def criterion(self):
"""Criterion attached to the engine"""
return self._criterion
def add_hook(self, ophook: Type[BaseOpHook]) -> None:
"""add necessary hook"""
# whether this hook exist
for h in self._ophook_list:
if type(h) == type(ophook):
logger = get_dist_logger()
                logger.warning(f"duplicate hooks, at least two instances of {type(ophook)}")
self._ophook_list.append(ophook)
register_ophooks_recursively(self._model, self._ophook_list)
def remove_hook(self, ophook: Type[BaseOpHook]) -> None:
"""remove hook"""
logger = get_dist_logger()
        logger.warning("removing hooks is currently not supported")
def zero_grad(self):
"""Set the gradient of parameters to zero
"""
self.optimizer.zero_grad()
def step(self):
"""Execute parameter update
"""
self._all_reduce_gradients()
self.optimizer.clip_grad_norm(self.model, self._clip_grad_norm)
return self.optimizer.step()
def backward(self, loss: Tensor):
"""Start backward propagation given the loss value computed by a loss function.
Args:
loss (:class:`torch.Tensor`): Loss value computed by a loss function.
"""
ret = self.optimizer.backward(loss)
for ophook in self._ophook_list:
ophook.post_iter()
return ret
def backward_by_grad(self, tensor, grad):
"""Start backward propagation given the gradient of the output tensor.
Args:
tensor (:class:`torch.Tensor`): Output tensor.
grad (:class:`torch.Tensor`): Gradient passed back to the output.
"""
ret = self.optimizer.backward_by_grad(tensor, grad)
for ophook in self._ophook_list:
ophook.post_iter()
return ret
def __call__(self, *args, **kwargs):
"""Run the forward step for the model.
Returns:
Tuple[:class:`torch.Tensor`] or :class:`torch.Tensor`: Output of the model.
"""
return self.model(*args, **kwargs)
def _all_reduce_gradients(self):
"""Handles all-reduce operations of gradients across different parallel groups.
"""
for handler in self._gradient_handlers:
handler.handle_gradient()
def train(self):
"""Sets the model to training mode.
"""
self.training = True
self._model.train()
def eval(self):
"""Sets the model to evaluation mode.
"""
self.training = False
self._model.eval()
|
[
"asyncio.log.logger.warning",
"colossalai.logging.get_dist_logger",
"colossalai.engine.ophooks.register_ophooks_recursively"
] |
[((2942, 2959), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (2957, 2959), False, 'from colossalai.logging import get_dist_logger\n'), ((3328, 3388), 'colossalai.engine.ophooks.register_ophooks_recursively', 'register_ophooks_recursively', (['self._model', 'self._ophook_list'], {}), '(self._model, self._ophook_list)\n', (3356, 3388), False, 'from colossalai.engine.ophooks import register_ophooks_recursively, BaseOpHook\n'), ((4233, 4293), 'colossalai.engine.ophooks.register_ophooks_recursively', 'register_ophooks_recursively', (['self._model', 'self._ophook_list'], {}), '(self._model, self._ophook_list)\n', (4261, 4293), False, 'from colossalai.engine.ophooks import register_ophooks_recursively, BaseOpHook\n'), ((4399, 4416), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (4414, 4416), False, 'from colossalai.logging import get_dist_logger\n'), ((4425, 4485), 'asyncio.log.logger.warning', 'logger.warning', (['f"""removing hooks is currently not supported"""'], {}), "(f'removing hooks is currently not supported')\n", (4439, 4485), False, 'from asyncio.log import logger\n'), ((4074, 4091), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (4089, 4091), False, 'from colossalai.logging import get_dist_logger\n')]
|
import torch
import os
def download_process_data(path="colab_demo"):
os.makedirs(path, exist_ok=True)
print("Downloading data")
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom1.pth', os.path.join(path, 'lsun_bedroom1.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom2.pth', os.path.join(path, 'lsun_bedroom2.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_bedroom3.pth', os.path.join(path, 'lsun_bedroom3.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_edit.pth', os.path.join(path, 'lsun_edit.pth'))
torch.hub.download_url_to_file('https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/lsun_church.pth', os.path.join(path, 'lsun_church.pth'))
print("Data downloaded")
|
[
"os.path.join",
"os.makedirs"
] |
[((75, 107), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (86, 107), False, 'import os\n'), ((269, 308), 'os.path.join', 'os.path.join', (['path', '"""lsun_bedroom1.pth"""'], {}), "(path, 'lsun_bedroom1.pth')\n", (281, 308), False, 'import os\n'), ((441, 480), 'os.path.join', 'os.path.join', (['path', '"""lsun_bedroom2.pth"""'], {}), "(path, 'lsun_bedroom2.pth')\n", (453, 480), False, 'import os\n'), ((613, 652), 'os.path.join', 'os.path.join', (['path', '"""lsun_bedroom3.pth"""'], {}), "(path, 'lsun_bedroom3.pth')\n", (625, 652), False, 'import os\n'), ((781, 816), 'os.path.join', 'os.path.join', (['path', '"""lsun_edit.pth"""'], {}), "(path, 'lsun_edit.pth')\n", (793, 816), False, 'import os\n'), ((947, 984), 'os.path.join', 'os.path.join', (['path', '"""lsun_church.pth"""'], {}), "(path, 'lsun_church.pth')\n", (959, 984), False, 'import os\n')]
|
import nltk
import random
import re
from flask import Flask, request
from flask_restful import Resource, Api, abort
from gensim.models import KeyedVectors
from flask_cors import CORS
from functools import lru_cache
app = Flask(__name__)
api = Api(app)
CORS(app)
class RandomWordPair(Resource):
def get(self, degrees):
nouns = get_nouns()
first_word = nouns[random.randint(0, len(nouns) - 1)]
second_word = first_word
for i in range(degrees):
similar_words = get_similar_words(second_word)
second_word = similar_words[random.randint(0, len(similar_words) - 1)]
return {'words': [first_word, second_word]}
class SimilarWords(Resource):
def get(self, word):
return {'words': get_similar_words(word)}
api.add_resource(RandomWordPair, '/word-pair/<int:degrees>')
api.add_resource(SimilarWords, '/similar-words/<string:word>')
def get_similar_words(word):
model = load_vectors()
    # Check if the word exists in the vocabulary; return 404 if not
    if word not in model:
        abort(404, message="'{}' is not in the vocabulary".format(word))
    similar = model.most_similar([word], topn=50)
result = [item[0] for item in similar]
tagged = nltk.pos_tag(result)
nouns = [tag[0] for tag in tagged if (tag[1] == "NN" or tag[1] == "NNS")]
common = ["things", "something", "everything", "nothing", "anything", "thing", "anyone", "anybody", "nobody", "somebody", "everybody", "everyone", "knows"]
less_common = [noun for noun in nouns if noun not in common]
    expr = re.compile(r'[0-9]+|@|\.')
less_common = [word for word in less_common if not expr.search(word)]
return less_common[0:25]
@lru_cache(maxsize=None)
def load_vectors():
return KeyedVectors.load(r"data/embeddings/vectors.300.kv")
@lru_cache(maxsize=None)
def get_nouns():
with open('data/nouns.txt', 'r') as file:
return [noun.replace('\n', '') for noun in file.readlines()]
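# Both loaders above are wrapped in lru_cache, so the word vectors and the
# noun list are read from disk only once, on first use, and served from
# memory afterwards.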
if __name__ == '__main__':
app.run(debug=True)
|
[
"flask_restful.Api",
"flask_cors.CORS",
"flask.Flask",
"gensim.models.KeyedVectors.load",
"nltk.pos_tag",
"functools.lru_cache",
"re.compile"
] |
[((215, 230), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (220, 230), False, 'from flask import Flask, request\n'), ((237, 245), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (240, 245), False, 'from flask_restful import Resource, Api\n'), ((246, 255), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (250, 255), False, 'from flask_cors import CORS\n'), ((1598, 1621), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (1607, 1621), False, 'from functools import lru_cache\n'), ((1709, 1732), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (1718, 1732), False, 'from functools import lru_cache\n'), ((1131, 1151), 'nltk.pos_tag', 'nltk.pos_tag', (['result'], {}), '(result)\n', (1143, 1151), False, 'import nltk\n'), ((1466, 1492), 're.compile', 're.compile', (['"""[0-9]+|@|\\\\."""'], {}), "('[0-9]+|@|\\\\.')\n", (1476, 1492), False, 'import re\n'), ((1653, 1704), 'gensim.models.KeyedVectors.load', 'KeyedVectors.load', (['"""data/embeddings/vectors.300.kv"""'], {}), "('data/embeddings/vectors.300.kv')\n", (1670, 1704), False, 'from gensim.models import KeyedVectors\n')]
|
from datetime import datetime
from collections import namedtuple
import re
from aiogopro.types import CommandType, StatusType
RESERVED_WORDS = ['type', 'class']
T1 = ' ' * 4
T2 = T1 * 2
T3 = T1 * 3
T4 = T1 * 4
SUBMODE_PREFIX = {
'resolution': 'res_',
'aspect_ratio': 'aspect_',
'fps': 'Fps_',
'looping': 'Time_',
'protune_white_balance': 'color_',
'protune_iso': 'iso_',
'protune_iso_min': 'iso_',
'protune_exposure_time': 'time_',
'capture_delay': 'delay_',
'burst_rate': 'rate_',
'timelapse_rate': 'rate_',
'nightlapse_rate': 'rate_',
'short_clip_length': 'length_',
'protune_ev': 'ev_',
'timewarp_speed': 'speed_',
'exposure_time': 'time_',
'record_resolution': 'r_',
'record_fps': 'fps_',
'window_size': 'size_',
'bit_rate': 'rate_',
'lcd_sleep': 'sleep_',
'auto_power_down': 'off_',
'gop_size': 'size_',
'idr_interval': 'interval_',
'stream_bit_rate': 'rate_',
'stream_window_size': 'size_',
'stream_gop_size': 'size_',
'stream_idr_interval': 'interval_',
'lcd_brightness_v2': 'percent_'
}
rx_replace1 = re.compile(r'([\.])')
rx_replace2 = re.compile(r'([_]{2,})')
rx_remove = re.compile(r'([%])')
def filterbyvalue(seq, value):
for el in seq:
if el.attribute == value:
yield el
def dictToObject(d, name):
if not isinstance(d, dict):
return d
for k, v in d.copy().items():
if isinstance(v, dict):
d[k] = dictToObject(v, f"{capitalize(k)}Type")
elif isinstance(v, list):
d[k] = list(map(lambda x: dictToObject(x, f"{capitalize(k)}List"), v))
return namedtuple(name, d.keys())(*d.values())
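# Illustrative behaviour of the recursion above (derived from the code):
#   dictToObject({'a': {'b': 1}}, 'Root').a.b == 1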
def pythonify(text):
s = str(text).lower().replace(' ', '_')
s = s.replace('-', '_neg_').replace('+', '_plus_')
s = s.replace(':', 'to') # a ratio is always TO
s = s.replace('/', '_in_')
s = rx_remove.sub('', s)
s = rx_replace1.sub('_', s)
s = rx_replace2.sub('_', s)
return s
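# Illustrative results of the rules above (derived by tracing the code):
#   pythonify('16:9')  ->  '16to9'
#   pythonify('1/30')  ->  '1_in_30'
#   pythonify('-2.0')  ->  '_neg_2_0'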
def capitalize(text):
s = str(text).replace(' ', '_').split('_')
regex = re.compile(r'([\/:])')
for index in range(len(s)):
s[index] = regex.sub('_', s[index]).capitalize()
s[index] = s[index].replace('.', '').replace('-', 'Neg').replace('%', '')
return ''.join(s)
def prefix_reserved(value, prefix):
if value in RESERVED_WORDS:
return f'{prefix}_{value}'
return value
class SchemaType(object):
def __init__(self, schema_version, version):
self.schema_version = schema_version
self.version = version
self.commands = {}
self.modes = {}
self.status = {}
def addCommand(self, cmd):
if 'wifi' not in cmd['network_types']:
return
key = cmd['key']
self.commands[key] = CommandType(
key,
cmd['url'],
cmd['widget_type'],
cmd['display_name']
)
def addMode(self, mode):
key = mode['path_segment']
self.modes[key] = dictToObject(mode, 'ModeType')
def addStatus(self, groupname, field):
group = self.status.get(groupname, {})
group[field['name']] = StatusType(**field)
self.status[groupname] = group
@staticmethod
def parse(data):
parser = SchemaType(data['schema_version'], data['version'])
for cmd in data['commands']:
parser.addCommand(cmd)
if data['modes']:
for mode in data['modes']:
parser.addMode(mode)
for group in data['status']['groups']:
for field in group['fields']:
parser.addStatus(group['group'], field)
return parser
def schema_pythonify(schema, filename):
header = [
f'# Autogenerated by {__name__}.schema_pythonify at {datetime.now()}\n'
]
types = []
# Status options
types.append('StatusType')
status = ['\n\n']
status.append('class Status(object):')
extra = ''
for groupname in schema.status.keys():
status.append(f'{extra} class {capitalize(groupname)}(object):')
for key, field in schema.status[groupname].items():
status.append(f' {prefix_reserved(field.name, groupname)} = StatusType("{field.name}", {field.id})')
extra = '\n'
# Commands
types.append('CommandType')
command = ['\n\n']
extra = False
command.append('class Command(object):')
for key, cmd in schema.commands.items():
if extra:
command.append('')
extra = True
command.append(' {0} = CommandType(\n "{0}",\n "{1}",\n "{2}",\n "{3}")'.format(
cmd.name,
cmd.url,
cmd.widget,
cmd.display_name
))
# Modes
# types.append('ModeType')
mode = ['\n\n']
othermode = []
extra = False
mode.append('class Mode(Enum):')
    submode = ['\n\nclass SubMode(object):']
for k, v in schema.modes.items():
mode.append(f"{T1}{pythonify(k)} = '{v.value}'")
if v.settings:
# default settings
defaults = [x for x in v.settings if x.path_segment == 'default_sub_mode']
if len(defaults) > 0:
submode.append(f"\n{T1}class {capitalize(k)}(Enum):")
for sm in defaults:
prefix = ''
if sm.path_segment in SUBMODE_PREFIX:
prefix = SUBMODE_PREFIX[sm.path_segment]
for o in sm.options:
submode.append(f"{T2}{pythonify(prefix + o.display_name)} = '{o.value}'")
# non default settings
others = [x for x in v.settings if x.path_segment != 'default_sub_mode']
if len(others) > 0:
othermode.append(f'\n\nclass {capitalize(k)}(object):')
for sm in others:
othermode.append(f"\n{T1}{capitalize(sm.path_segment).upper()} = '{sm.id}'")
othermode.append(f"\n{T1}class {capitalize(sm.path_segment)}(Enum):")
prefix = ''
if sm.path_segment in SUBMODE_PREFIX:
prefix = SUBMODE_PREFIX[sm.path_segment]
for o in sm.options:
othermode.append(f"{T2}{pythonify(prefix + o.display_name)} = '{o.value}'")
mode = mode + submode + othermode
# finalize
lines = []
header.append("from enum import Enum")
header.append("from aiogopro.types import {0}".format(", ".join(types)))
lines.append("\n".join(header))
lines.append("\n".join(status))
lines.append("\n".join(command))
lines.append("\n".join(mode))
lines.append("\n")
with open(filename, 'w') as f:
f.writelines(lines)
def command_compare(firsts, seconds, firstname='first', secondname='second'):
for key, cmdA in firsts.items():
if key in seconds:
cmdB = seconds[key]
if cmdA.url != cmdB.url:
                print("Same command {0} different url {1} != {2} ({3}/{4})".format(cmdA.name, cmdA.url, cmdB.url, firstname, secondname))
else:
print('Only in {2}, Command {0}: {1}'.format(cmdA.name.ljust(47), cmdA.display_name, firstname))
def mode_compare(firsts, seconds, firstname='first', secondname='second'):
for key, modeA in firsts.items():
if key in seconds:
modeB = seconds[key]
if (modeA.value != modeB.value):
                print("Same mode {0} different value {1} != {2} ({3}/{4})".format(modeA.name, modeA.value, modeB.value, firstname, secondname))
else:
print('Only in {2}, Mode {0}: {1}'.format(modeA.name.ljust(20), modeA.display_name, firstname))
def status_compare(groupname, firsts, seconds, firstname='first', secondname='second'):
for key, statusA in firsts.items():
if key in seconds:
statusB = seconds[key]
if (statusA.id != statusB.id):
print("Same status {0}.{1} different id {2} != {3} ({4}/{5})".format(
groupname, key, statusA.id, statusB.id, firstname, secondname))
else:
print('Only in {3}, Status {0}.{1}: {2}'.format(groupname, statusA.name.ljust(47), statusA.id, firstname))
def schema_compare(first, second, firstname='first', secondname='second'):
command_compare(first.commands, second.commands, firstname, secondname)
command_compare(second.commands, first.commands, secondname, firstname)
mode_compare(first.modes, second.modes, firstname, secondname)
mode_compare(second.modes, first.modes, secondname, firstname)
    for groupname in first.status.keys():
        second_group = second.status.get(groupname, {})
        status_compare(groupname, first.status[groupname], second_group, firstname, secondname)
        status_compare(groupname, second_group, first.status[groupname], secondname, firstname)
|
[
"aiogopro.types.CommandType",
"datetime.datetime.now",
"aiogopro.types.StatusType",
"re.compile"
] |
[((1177, 1198), 're.compile', 're.compile', (['"""([\\\\.])"""'], {}), "('([\\\\.])')\n", (1187, 1198), False, 'import re\n'), ((1214, 1237), 're.compile', 're.compile', (['"""([_]{2,})"""'], {}), "('([_]{2,})')\n", (1224, 1237), False, 'import re\n'), ((1252, 1271), 're.compile', 're.compile', (['"""([%])"""'], {}), "('([%])')\n", (1262, 1271), False, 'import re\n'), ((2179, 2201), 're.compile', 're.compile', (['"""([\\\\/:])"""'], {}), "('([\\\\/:])')\n", (2189, 2201), False, 'import re\n'), ((2925, 2994), 'aiogopro.types.CommandType', 'CommandType', (['key', "cmd['url']", "cmd['widget_type']", "cmd['display_name']"], {}), "(key, cmd['url'], cmd['widget_type'], cmd['display_name'])\n", (2936, 2994), False, 'from aiogopro.types import CommandType, StatusType\n'), ((3310, 3329), 'aiogopro.types.StatusType', 'StatusType', ([], {}), '(**field)\n', (3320, 3329), False, 'from aiogopro.types import CommandType, StatusType\n'), ((3964, 3978), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3976, 3978), False, 'from datetime import datetime\n')]
|
from flask import flash
from getDomainAge.models.enums import NotificationCategory
class NotificationService:
"""
    Service class for showing all kinds of notifications on the web page
"""
def notify_success(self, message: str) -> None:
"""
method to show success message
:param message: the message to be flashed
"""
if message:
flash(message, NotificationCategory.SUCCESS.value)
def notify_warning(self, message: str) -> None:
"""
method to show warning message
:param message: the message to be flashed
"""
if message:
flash(message, NotificationCategory.WARNING.value)
def notify_error(self, message: str) -> None:
"""
        method to show danger or error message
:param message: the message to be flashed
"""
if message:
flash(message, NotificationCategory.DANGER.value)
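# Illustrative usage inside a Flask request handler (a sketch; the route and
# the service wiring are assumptions, not part of this module):
#
#   NotificationService().notify_success('Domain age report generated')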
|
[
"flask.flash"
] |
[((396, 446), 'flask.flash', 'flash', (['message', 'NotificationCategory.SUCCESS.value'], {}), '(message, NotificationCategory.SUCCESS.value)\n', (401, 446), False, 'from flask import flash\n'), ((645, 695), 'flask.flash', 'flash', (['message', 'NotificationCategory.WARNING.value'], {}), '(message, NotificationCategory.WARNING.value)\n', (650, 695), False, 'from flask import flash\n'), ((901, 950), 'flask.flash', 'flash', (['message', 'NotificationCategory.DANGER.value'], {}), '(message, NotificationCategory.DANGER.value)\n', (906, 950), False, 'from flask import flash\n')]
|
train_imgs_path="path_to_train_images"
test_imgs_path="path_to_val/test images"
dnt_names=[]
import os
with open("dont_include_to_train.txt","r") as dnt:
    for name in dnt:
        # str.strip(".json") strips characters, not a suffix, so cut ".json" explicitly
        name = name.strip("\n")
        if name.endswith(".json"):
            name = name[:-len(".json")]
        dnt_names.append(name)
print(dnt_names)
with open("baseline_train.txt","w") as btr:
    for file in os.listdir(train_imgs_path):
        if file not in dnt_names:
            btr.write(train_imgs_path+file+"\n")
with open("baseline_val.txt","w") as bv:
    for file in os.listdir(test_imgs_path):
        bv.write(test_imgs_path+file+"\n")
|
[
"os.listdir"
] |
[((326, 353), 'os.listdir', 'os.listdir', (['train_imgs_path'], {}), '(train_imgs_path)\n', (336, 353), False, 'import os\n'), ((506, 532), 'os.listdir', 'os.listdir', (['test_imgs_path'], {}), '(test_imgs_path)\n', (516, 532), False, 'import os\n')]
|
"""
General-purpose and HTML lexical preprocessors.
The `preprocessors <preprocessor>`:term: accept lines of text
(`Preprocessor.insert_lines`) and files (`Preprocessor.insert_file`). A
preprocessor remembers all the input files that it opens
(`Preprocessor.input_paths`).
Preprocessor `directives <preprocessor directive>`:term: and variables
(also known as `nodes <preprocessor node>`:term:) are distinguished from
the source text by customisable delimiters (`directive_delimiter`,
`node_delimiters`, `code_nodes`).
The HTML preprocessor recognises character references (also known as
character entities) and can replace them with characters, other
character references, etc (`HTMLPreprocessor.character_references`).
Warnings about unknown character references can be suppressed
(`HTMLPreprocessor.suppress_character_reference_warnings`).
Exports
-------
Preprocessor
A general-purpose lexical preprocessor.
HTMLPreprocessor
A lexical preprocessor for HTML.
directive_delimiter
Customise the opening delimiter of the preprocessor directives.
node_delimiters
Customise the delimiters of the preprocessor *nodes*.
code_nodes
Modify a preprocessor to use code-syntax-friendly *node* delimiters.
"""
import inspect
import re
import shlex
import doxhooks.console as console
__all__ = [
"HTMLPreprocessor",
"Preprocessor",
"code_nodes",
"directive_delimiter",
"node_delimiters",
]
def _compile_match_directive(opening_delimiter):
# Return a regex match method for a preprocessor directive pattern.
#
# A directive can be distinguished from a node. The keyword of a
# directive is immediately followed by whitespace, whereas
# whitespace is not allowed inside a node.
directive_pattern = "".join((
r"(?P<indentation>[ \t]*)", opening_delimiter,
r"(?P<keyword>\w+)(?:[ \t]+(?P<block>.+))?[ \t]*\n"))
# fullmatch is new in Python 3.4.
# return re.compile(directive_pattern).fullmatch
return re.compile(directive_pattern + "$").match
def _compile_replace_nodes(opening_delimiter, closing_delimiter=None):
# Return a regex substitution method for a 'node' pattern.
if closing_delimiter is None:
closing_delimiter = opening_delimiter
node_pattern = "".join((
opening_delimiter, r"(?P<identifier>(?:\w+\.)*\w+)",
closing_delimiter))
return re.compile(node_pattern).sub
class Preprocessor:
"""
A general-purpose lexical preprocessor.
The default opening delimiter for a preprocessor directive is
``##``. This delimiter can be customised with the
`directive_delimiter` class decorator. The closing delimiter is the
end of the line.
The default opening and closing delimiters for a preprocessor *node*
are both ``##``. These delimiters can be replaced with the
code-syntax-friendly `code_nodes` class decorator or customised with
the `node_delimiters` class decorator.
The `~doxhooks.preprocessor_factories.PreprocessorFactory` not only
hides the construction of a preprocessor from the caller, but also
parameterises the types of preprocessor and preprocessor context
that the caller receives.
Class Interface
---------------
insert_file
Push the contents of a file onto the preprocessor stack.
insert_lines
Push some lines of text onto the preprocessor stack.
"""
def __init__(self, context, input_file_domain, output_file):
"""
Initialise the preprocessor with a context and files.
Parameters
----------
context : ~doxhooks.preprocessor_contexts.BasePreprocessorContext
A context that defines and interprets the preprocessor
directives and variables in the input files.
input_file_domain : ~doxhooks.file_domains.InputFileDomain
The input-file domain.
output_file : TextIO
An open file object that the preprocessor writes its output
to.
Attributes
----------
input_paths : set
The path to each input file that has been preprocessed.
"""
self._context = context
self._input = input_file_domain
self._output = output_file
self._indentation = ""
self.input_paths = set()
_match_directive = _compile_match_directive("##")
def _eval_directive(self, indentation, directive):
# Tokenise a directive and interpret the tokens in the context.
self._indentation = indentation + directive.group("indentation")
keyword_token, block = directive.group("keyword", "block")
tokens = shlex.split(block, comments=True) if block is not None else ()
self._context.interpret(keyword_token, *tokens, preprocessor=self)
_replace_nodes = _compile_replace_nodes("##")
def _flatten_node(self, node):
# Recursively flatten a 'node' and return the output text.
identifier = node.group("identifier")
node_value = self._context.get(identifier)
try:
return self._replace_nodes(self._flatten_node, node_value)
except Exception:
console.error_trace("Node `{}`".format(identifier), node_value)
raise
def _eval_line(self, line):
# Evaluate a line of input text and return the output text.
return self._replace_nodes(self._flatten_node, line)
def insert_lines(self, lines, name=None):
"""
Push some lines of text onto the preprocessor stack.
Parameters
----------
lines : Iterable[str]
The lines of text.
name : str or None, optional
A name for the lines. The name is only used when tracing the
source of an error. If a name is not provided, the name of
the function that called `insert_lines` is used. Defaults to
``None``.
"""
indentation = self._indentation
# Silently fix a deceptive user error:
# lines should be Iterable[str], but not str[str].
if isinstance(lines, str):
lines = lines.splitlines(keepends=True)
for line_no, line in enumerate(lines, start=1):
directive = self._match_directive(line)
try:
if directive:
self._eval_directive(indentation, directive)
continue
output_line = self._eval_line(line)
except Exception:
# inspect.stack()[1][3] references the name
# of the function that called insert_lines:
name = name or inspect.stack()[1][3] + "()"
console.error_trace(
"In: {}\n >> line {:3}".format(name, line_no), line)
raise
if output_line == "\n":
# Do not write indentation without content.
self._output.write("\n")
elif output_line:
self._output.write(indentation + output_line)
self._indentation = indentation
def insert_file(self, filename, *, idempotent=False):
"""
Push the contents of a file onto the preprocessor stack.
The file path is added to the set of *input paths* opened by
this `Preprocessor`.
Parameters
----------
filename : str or None
The name of the file.
idempotent : bool, optional
Keyword-only. Whether to silently ignore the file if it has
already been preprocessed and written to the output file.
Defaults to ``False``.
Raises
------
~doxhooks.errors.DoxhooksFileError
If the file cannot be opened.
"""
if not filename:
return
path = self._input.path(filename)
if idempotent and path in self.input_paths:
return
self.input_paths.add(path)
with self._input.open(filename) as lines:
self.insert_lines(lines, filename)
class HTMLPreprocessor(Preprocessor):
"""
A lexical preprocessor for HTML.
The base class is `Preprocessor`.
Class Interface
---------------
character_references
A dictionary of character names and their replacement string
values.
suppress_character_reference_warnings
Suppress warning messages about unknown HTML character
references.
"""
character_references = {}
"""
A dictionary of character names and their replacement string values.
*dict*
Each dictionary item is a character name (also known as a character
'entity') paired with a replacement string value. The name does not
include the ``&`` and ``;`` delimiters.
The character name can also be a decimal or hexadecimal code point.
The code point does not include the preceding ``#``, but a
hexadecimal code point does require the ``x`` prefix.
Defaults to an empty dictionary.
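    For example, an illustrative mapping (an assumption, not shipped with
    the class)::
        character_references = {
            "nbsp": "\N{NO-BREAK SPACE}",
            "ldquo": "\N{LEFT DOUBLE QUOTATION MARK}",
        }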
"""
suppress_character_reference_warnings = False
"""
Suppress warning messages about unknown HTML character references.
*bool*
A warning message is written to stderr each time that an unknown
character reference (i.e. a reference that is not in
`self.character_references`) is written to the output file. These
warnings can be suppressed by setting
`self.suppress_character_reference_warnings` to ``True`` (or another
'truthy' value). Defaults to ``False``.
"""
_replace_character_references = re.compile(r"&#?(?P<reference>\w+);").sub
def _get_character(self, character_reference):
reference = character_reference.group("reference")
try:
character = self.character_references[reference]
except KeyError:
if not self.suppress_character_reference_warnings:
console.warning(
"Unknown HTML character reference:", reference)
character = character_reference.group()
return character
def _eval_line(self, line):
preprocessed_line = super()._eval_line(line)
return self._replace_character_references(
self._get_character, preprocessed_line)
def directive_delimiter(opening_delimiter):
"""
Customise the opening delimiter of the preprocessor directives.
It is usually desirable for the opening delimiter to start with the
line-comment delimiter of a particular language (often ``#`` or
``//``), followed by additional characters to distinguish the
directives from comments.
The closing delimiter is the end of the line.
The delimiters must be valid regular-expression patterns.
Parameters
----------
opening_delimiter : str
The opening-delimiter pattern.
Returns
-------
~collections.abc.Callable
A decorator for modifying a subclass of `Preprocessor`.
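    Examples
    --------
    An illustrative use (an assumption, not taken from the module's docs)::
        @directive_delimiter(r"//!!")
        class SlashPreprocessor(Preprocessor):
            pass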
"""
def decorator(preprocessor_class):
preprocessor_class._match_directive = _compile_match_directive(
opening_delimiter)
return preprocessor_class
return decorator
def node_delimiters(opening_delimiter, closing_delimiter=None):
"""
Customise the delimiters of the preprocessor *nodes*.
The delimiters must be valid regular-expression patterns.
Parameters
----------
opening_delimiter : str
The opening-delimiter pattern.
closing_delimiter : str or None, optional
The closing-delimiter pattern. The pattern is the same as
`opening_delimiter` if the argument is ``None`` or not provided.
Defaults to ``None``.
Returns
-------
~collections.abc.Callable
A decorator for modifying a subclass of `Preprocessor`.
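    Examples
    --------
    An illustrative use (an assumption, not taken from the module's docs)::
        @node_delimiters(r"\{\{", r"\}\}")
        class BracePreprocessor(Preprocessor):
            pass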
"""
def decorator(preprocessor_class):
preprocessor_class._replace_nodes = _compile_replace_nodes(
opening_delimiter, closing_delimiter)
return preprocessor_class
return decorator
_replace_code_nodes = _compile_replace_nodes(
r"(?:(##|\$\$)|(['\"])\+\+)", r"(?:\1|\+\+\2)")
def code_nodes(preprocessor_class):
"""
Modify a preprocessor to use code-syntax-friendly *node* delimiters.
These delimiters do not break the syntax of programming languages
for which ``$`` is a valid character in identifiers. (``#`` is
usually not a valid character in identifiers.)
===================== ====================== ===================
Preprocessor variable Preprocessor input Preprocessor output
===================== ====================== ===================
``my_var = "x"`` ``$$my_var$$ = 0`` ``x = 0``
``obj.$$my_var$$ = 0`` ``obj.x = 0``
``my_str = "abc"`` ``s = '##my_str##'`` ``s = 'abc'``
``s = "##my_str##"`` ``s = "abc"``
``my_num = 1.23`` ``i = '++my_num++'`` ``i = 1.23``
``i = "++my_num++"`` ``i = 1.23``
``my_bool = "true"`` ``ok = '++my_bool++'`` ``ok = true``
``ok = "++my_bool++"`` ``ok = true``
``my_bool = True`` ``ok = "++my_bool++"`` ``ok = true`` [1]_
``ok = "++my_bool++"`` ``ok = True`` [2]_
===================== ====================== ===================
.. [1] With `doxhooks.preprocessor_contexts.lowercase_booleans`.
.. [2] With `doxhooks.preprocessor_contexts.startcase_booleans`.
Parameters
----------
preprocessor_class : type
The subclass of `Preprocessor` to be modified.
Returns
-------
type
The modified subclass of `Preprocessor`.
"""
preprocessor_class._replace_nodes = _replace_code_nodes
return preprocessor_class
|
[
"doxhooks.console.warning",
"inspect.stack",
"shlex.split",
"re.compile"
] |
[((1989, 2024), 're.compile', 're.compile', (["(directive_pattern + '$')"], {}), "(directive_pattern + '$')\n", (1999, 2024), False, 'import re\n'), ((2378, 2402), 're.compile', 're.compile', (['node_pattern'], {}), '(node_pattern)\n', (2388, 2402), False, 'import re\n'), ((9544, 9581), 're.compile', 're.compile', (['"""&#?(?P<reference>\\\\w+);"""'], {}), "('&#?(?P<reference>\\\\w+);')\n", (9554, 9581), False, 'import re\n'), ((4653, 4686), 'shlex.split', 'shlex.split', (['block'], {'comments': '(True)'}), '(block, comments=True)\n', (4664, 4686), False, 'import shlex\n'), ((9875, 9938), 'doxhooks.console.warning', 'console.warning', (['"""Unknown HTML character reference:"""', 'reference'], {}), "('Unknown HTML character reference:', reference)\n", (9890, 9938), True, 'import doxhooks.console as console\n'), ((6633, 6648), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (6646, 6648), False, 'import inspect\n')]
|
import torch
import torchvision
from torch import nn
from torch import optim
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
from torch import autograd
from torchvision import transforms, utils
import os
def init_weights(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
init.xavier_normal_(m.weight.data, gain=0.02)
def conv_block(in_channels, out_channels, kernel_size, stride, padding, bias=False):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
)
def convt_block(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=False):
return nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
)
class ResidualBlock(nn.Module):
def __init__(self, in_channels):
super(ResidualBlock, self).__init__()
self.model = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(in_channels, in_channels, 3, stride=1, padding=0),
nn.BatchNorm2d(in_channels),
nn.ReLU(True),
nn.ReflectionPad2d(1),
nn.Conv2d(in_channels, in_channels, 3, stride=1, padding=0),
nn.BatchNorm2d(in_channels),
)
def forward(self, x):
#return F.relu(self.shortcut(x) + self.model(x))
return x + self.model(x)
class Generator(nn.Module):
def __init__(self, in_channels=3, out_channels=3, dim=64, n_blocks=6, use_bias=False):
super(Generator, self).__init__()
self.model = nn.Sequential(
nn.ReflectionPad2d(3),
conv_block(in_channels, dim, 7, stride=1, padding=0),
conv_block(dim, dim*2, 3, stride=2, padding=1, bias=use_bias),
conv_block(dim*2, dim*4, 3, stride=2, padding=1, bias=use_bias),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
ResidualBlock(dim * 4),
convt_block(dim*4, dim*2, 3, stride=2,padding=1, output_padding=1, bias=use_bias),
convt_block(dim*2, dim, 3, stride=2, padding=1, output_padding=1, bias=use_bias),
nn.ReflectionPad2d(3),
nn.Conv2d(dim, out_channels, 7, stride=1, padding=0),
nn.Tanh()
)
init_weights(self.model)
def forward(self, x):
return self.model(x)
class Discriminator(nn.Module):
def __init__(self, in_channels=3, dim=64):
super(Discriminator, self).__init__()
kw = 4
self.model = nn.Sequential(
nn.Conv2d(in_channels, dim, kw, stride=2, padding=1),
nn.LeakyReLU(0.2, True),
nn.Conv2d(dim, dim*2, kw, stride=2, padding=1, bias=False),
nn.BatchNorm2d(dim*2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(dim*2, dim*4, kw, stride=2, padding=(1,2), bias=False),
nn.BatchNorm2d(dim*4),
nn.LeakyReLU(0.2, True),
nn.Conv2d(dim*4, dim*8, kw, stride=1, padding=(2,1), bias=False),
nn.BatchNorm2d(dim*8),
nn.LeakyReLU(0.2, True),
#nn.Conv2d(dim*8, dim*8, kw, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(dim*8),
#nn.LeakyReLU(0.2, True),
nn.Conv2d(dim*8, 1, kw, stride=1, padding=1),
)
init_weights(self.model)
def forward(self, x):
return self.model(x)
class CycleGANLoss(nn.Module):
    def __init__(self):
super(CycleGANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(1.0).cuda())
self.register_buffer('fake_label', torch.tensor(0.0).cuda())
self.loss = nn.MSELoss()
def __call__(self, pred, target_is_real):
#if target_is_real:
# return -pred.mean()
#else:
# return pred.mean()
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
target_tensor = target_tensor.expand_as(pred)
#print("pred, target:", pred.shape, target_tensor.shape)
return self.loss(pred, target_tensor)
def calculate_gradient_penalty(netD, real_images, fake_images):
batch_size = real_images.size(0)
eta = torch.FloatTensor(batch_size,1,1,1).uniform_(0,1)
eta = eta.expand(batch_size, real_images.size(1), real_images.size(2), real_images.size(3)).cuda()
interpolated = eta * real_images + ((1 - eta) * fake_images)
interpolated = interpolated.cuda()
# define it to calculate gradient
interpolated = Variable(interpolated, requires_grad=True)
# calculate probability of interpolated examples
prob_interpolated = netD(interpolated)
# calculate gradients of probabilities with respect to examples
gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,
grad_outputs=torch.ones(
prob_interpolated.size()).cuda(),
create_graph=True, retain_graph=True)[0]
grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10.0
return grad_penalty
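# Illustrative discriminator step using the penalty above (a sketch; netD,
# optimizerD, criterion, real and fake are assumptions, not defined here):
#
#   d_loss = criterion(netD(real), True) + criterion(netD(fake.detach()), False)
#   d_loss = d_loss + calculate_gradient_penalty(netD, real, fake.detach())
#   optimizerD.zero_grad(); d_loss.backward(); optimizerD.step()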
|
[
"torch.nn.MSELoss",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.ReflectionPad2d",
"torch.autograd.Variable",
"torch.nn.Tanh",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.FloatTensor",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.tensor"
] |
[((5233, 5275), 'torch.autograd.Variable', 'Variable', (['interpolated'], {'requires_grad': '(True)'}), '(interpolated, requires_grad=True)\n', (5241, 5275), False, 'from torch.autograd import Variable\n'), ((432, 477), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['m.weight.data'], {'gain': '(0.02)'}), '(m.weight.data, gain=0.02)\n', (451, 477), False, 'from torch.nn import init\n'), ((602, 699), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, stride=stride, padding=\n padding, bias=bias)\n', (611, 699), False, 'from torch import nn\n'), ((705, 733), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (719, 733), False, 'from torch import nn\n'), ((744, 757), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (751, 757), False, 'from torch import nn\n'), ((907, 1043), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_channels', 'out_channels', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'output_padding': 'output_padding', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, stride=stride,\n padding=padding, output_padding=output_padding, bias=bias)\n', (925, 1043), False, 'from torch import nn\n'), ((1050, 1078), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1064, 1078), False, 'from torch import nn\n'), ((1089, 1102), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1096, 1102), False, 'from torch import nn\n'), ((4321, 4333), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4331, 4333), False, 'from torch import nn\n'), ((1281, 1302), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (1299, 1302), False, 'from torch import nn\n'), ((1317, 1376), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'in_channels', '(3)'], {'stride': '(1)', 'padding': '(0)'}), '(in_channels, in_channels, 3, stride=1, padding=0)\n', (1326, 1376), False, 'from torch import nn\n'), ((1391, 1418), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', (1405, 1418), False, 'from torch import nn\n'), ((1433, 1446), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1440, 1446), False, 'from torch import nn\n'), ((1461, 1482), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (1479, 1482), False, 'from torch import nn\n'), ((1497, 1556), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'in_channels', '(3)'], {'stride': '(1)', 'padding': '(0)'}), '(in_channels, in_channels, 3, stride=1, padding=0)\n', (1506, 1556), False, 'from torch import nn\n'), ((1571, 1598), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', (1585, 1598), False, 'from torch import nn\n'), ((1950, 1971), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (1968, 1971), False, 'from torch import nn\n'), ((2735, 2756), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (2753, 2756), False, 'from torch import nn\n'), ((2771, 2823), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'out_channels', '(7)'], {'stride': '(1)', 'padding': '(0)'}), '(dim, out_channels, 7, stride=1, padding=0)\n', (2780, 2823), False, 'from torch import nn\n'), ((2838, 2847), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2845, 2847), False, 'from torch import nn\n'), ((3164, 3216), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'dim', 'kw'], {'stride': '(2)', 'padding': '(1)'}), '(in_channels, dim, kw, stride=2, padding=1)\n', (3173, 3216), False, 'from torch import nn\n'), ((3231, 3254), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (3243, 3254), False, 'from torch import nn\n'), ((3271, 3331), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', '(dim * 2)', 'kw'], {'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(dim, dim * 2, kw, stride=2, padding=1, bias=False)\n', (3280, 3331), False, 'from torch import nn\n'), ((3344, 3367), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(dim * 2)'], {}), '(dim * 2)\n', (3358, 3367), False, 'from torch import nn\n'), ((3380, 3403), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (3392, 3403), False, 'from torch import nn\n'), ((3420, 3489), 'torch.nn.Conv2d', 'nn.Conv2d', (['(dim * 2)', '(dim * 4)', 'kw'], {'stride': '(2)', 'padding': '(1, 2)', 'bias': '(False)'}), '(dim * 2, dim * 4, kw, stride=2, padding=(1, 2), bias=False)\n', (3429, 3489), False, 'from torch import nn\n'), ((3499, 3522), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(dim * 4)'], {}), '(dim * 4)\n', (3513, 3522), False, 'from torch import nn\n'), ((3535, 3558), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (3547, 3558), False, 'from torch import nn\n'), ((3575, 3644), 'torch.nn.Conv2d', 'nn.Conv2d', (['(dim * 4)', '(dim * 8)', 'kw'], {'stride': '(1)', 'padding': '(2, 1)', 'bias': '(False)'}), '(dim * 4, dim * 8, kw, stride=1, padding=(2, 1), bias=False)\n', (3584, 3644), False, 'from torch import nn\n'), ((3654, 3677), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(dim * 8)'], {}), '(dim * 8)\n', (3668, 3677), False, 'from torch import nn\n'), ((3690, 3713), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (3702, 3713), False, 'from torch import nn\n'), ((3884, 3930), 'torch.nn.Conv2d', 'nn.Conv2d', (['(dim * 8)', '(1)', 'kw'], {'stride': '(1)', 'padding': '(1)'}), '(dim * 8, 1, kw, stride=1, padding=1)\n', (3893, 3930), False, 'from torch import nn\n'), ((4910, 4948), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_size', '(1)', '(1)', '(1)'], {}), '(batch_size, 1, 1, 1)\n', (4927, 4948), False, 'import torch\n'), ((4204, 4221), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (4216, 4221), False, 'import torch\n'), ((4274, 4291), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4286, 4291), False, 'import torch\n')]
|
from os.path import join
import cv2
import numpy as np
from numpy.random import uniform
from sys import exit
import tensorflow as tf
model_path = join('models', 'symbol_classifier', 'model.h5')
model = tf.keras.models.load_model(model_path)
path = join('data', 'raw', 'n', '1.jpeg')
image_name = "data"
drawing = False
pt1_x , pt1_y = None , None
img = None
color = None
thickness = None
def draw(event, x, y, flags, param):
global pt1_x, pt1_y, drawing, img, color
if event==cv2.EVENT_LBUTTONDOWN:
drawing=True
pt1_x,pt1_y=x,y
elif event==cv2.EVENT_LBUTTONUP:
drawing=False
cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
elif event==cv2.EVENT_MOUSEMOVE:
if drawing==True:
cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
pt1_x,pt1_y=x,y
elif event==cv2.EVENT_RBUTTONUP:
image = tf.convert_to_tensor(np.asarray(img, np.uint8), np.uint8)
tensor = tf.io.encode_jpeg(image)
print(predict(tensor))
new_image()
elif event==cv2.EVENT_MBUTTONUP:
new_image()
def new_image():
global img, color, thickness
w_on_b = round(uniform())
thickness = 5 + round(uniform(0, 255))
img = np.ones((512,512,3), np.uint8)
img *= round(uniform(0, 255))
color = (255,255,255) if w_on_b else (0,0,0)
def predict(image):
label = ['n', 'o', 'x']
blob = tf.io.decode_jpeg(image, channels=1)
blob = tf.image.convert_image_dtype(blob, tf.float32)
blob = tf.image.resize(blob, (32, 32))
blob = tf.reshape(blob, (1, 32, 32, 1))
pred = list(model.predict(blob, steps = 1)[0])
index = pred.index(max(pred))
return label[index]
new_image()
cv2.namedWindow(image_name)
cv2.setMouseCallback(image_name, draw)
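# Controls, as implemented in draw() above: hold the left button to draw,
# release the right button to classify the canvas, middle-click for a fresh
# canvas, and press Esc to quit.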
while True:
    cv2.imshow(image_name, img)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break
cv2.destroyAllWindows()
|
[
"cv2.line",
"numpy.random.uniform",
"tensorflow.keras.models.load_model",
"cv2.waitKey",
"tensorflow.io.encode_jpeg",
"numpy.asarray",
"tensorflow.reshape",
"cv2.imshow",
"numpy.ones",
"tensorflow.io.decode_jpeg",
"cv2.setMouseCallback",
"tensorflow.image.resize",
"cv2.destroyAllWindows",
"os.path.join",
"cv2.namedWindow",
"tensorflow.image.convert_image_dtype"
] |
[((148, 195), 'os.path.join', 'join', (['"""models"""', '"""symbol_classifier"""', '"""model.h5"""'], {}), "('models', 'symbol_classifier', 'model.h5')\n", (152, 195), False, 'from os.path import join\n'), ((204, 242), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (230, 242), True, 'import tensorflow as tf\n'), ((251, 285), 'os.path.join', 'join', (['"""data"""', '"""raw"""', '"""n"""', '"""1.jpeg"""'], {}), "('data', 'raw', 'n', '1.jpeg')\n", (255, 285), False, 'from os.path import join\n'), ((1728, 1755), 'cv2.namedWindow', 'cv2.namedWindow', (['image_name'], {}), '(image_name)\n', (1743, 1755), False, 'import cv2\n'), ((1756, 1794), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['image_name', 'draw'], {}), '(image_name, draw)\n', (1776, 1794), False, 'import cv2\n'), ((1884, 1907), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1905, 1907), False, 'import cv2\n'), ((1249, 1281), 'numpy.ones', 'np.ones', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (1256, 1281), True, 'import numpy as np\n'), ((1423, 1459), 'tensorflow.io.decode_jpeg', 'tf.io.decode_jpeg', (['image'], {'channels': '(1)'}), '(image, channels=1)\n', (1440, 1459), True, 'import tensorflow as tf\n'), ((1471, 1517), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['blob', 'tf.float32'], {}), '(blob, tf.float32)\n', (1499, 1517), True, 'import tensorflow as tf\n'), ((1529, 1560), 'tensorflow.image.resize', 'tf.image.resize', (['blob', '(32, 32)'], {}), '(blob, (32, 32))\n', (1544, 1560), True, 'import tensorflow as tf\n'), ((1572, 1604), 'tensorflow.reshape', 'tf.reshape', (['blob', '(1, 32, 32, 1)'], {}), '(blob, (1, 32, 32, 1))\n', (1582, 1604), True, 'import tensorflow as tf\n'), ((1810, 1837), 'cv2.imshow', 'cv2.imshow', (['image_name', 'img'], {}), '(image_name, img)\n', (1820, 1837), False, 'import cv2\n'), ((1185, 1194), 'numpy.random.uniform', 'uniform', ([], {}), '()\n', (1192, 1194), False, 'from numpy.random import uniform\n'), ((1297, 1312), 'numpy.random.uniform', 'uniform', (['(0)', '(255)'], {}), '(0, 255)\n', (1304, 1312), False, 'from numpy.random import uniform\n'), ((618, 689), 'cv2.line', 'cv2.line', (['img', '(pt1_x, pt1_y)', '(x, y)'], {'color': 'color', 'thickness': 'thickness'}), '(img, (pt1_x, pt1_y), (x, y), color=color, thickness=thickness)\n', (626, 689), False, 'import cv2\n'), ((1222, 1237), 'numpy.random.uniform', 'uniform', (['(0)', '(255)'], {}), '(0, 255)\n', (1229, 1237), False, 'from numpy.random import uniform\n'), ((1845, 1859), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1856, 1859), False, 'import cv2\n'), ((759, 830), 'cv2.line', 'cv2.line', (['img', '(pt1_x, pt1_y)', '(x, y)'], {'color': 'color', 'thickness': 'thickness'}), '(img, (pt1_x, pt1_y), (x, y), color=color, thickness=thickness)\n', (767, 830), False, 'import cv2\n'), ((982, 1006), 'tensorflow.io.encode_jpeg', 'tf.io.encode_jpeg', (['image'], {}), '(image)\n', (999, 1006), True, 'import tensorflow as tf\n'), ((928, 953), 'numpy.asarray', 'np.asarray', (['img', 'np.uint8'], {}), '(img, np.uint8)\n', (938, 953), True, 'import numpy as np\n')]
|
import numpy as np
import os
import os.path
from rechtschreib import correct
folder="../../write/data/"
def fixstring(qq,bef=None):
# print("fixing",qq)
intags=False
inhash=False
ac=""
ret=""
stopat=[".","(",")",">","\n"]
lq=len(qq)
for ii,zw in enumerate(qq):
basei=[intags,inhash]
if zw=="<":intags=True
if zw=="#" and inhash==False:inhash=True
ac+=zw
if zw==">":intags=False
if zw=="#" and inhash==True:inhash=False
if len(ret)>5:
if ret[-6:]=="<note ":
intags=False
if zw in stopat or zw==">" or (zw=="#" and inhash==False) or (ii==lq-1):
if len(ac)==0:continue
if len(ac)<5 or intags or inhash or zw==">" or (zw=="#" and not inhash):
ret+=ac
ac=""
continue
# print(intags,inhash,zw)
ac=correct(ac,bef)
# print(ac)
# exit()
ret+=ac
ac=""
return ret
def noignorefix2(q,befstr=None):
fix1="#"
fix2="#"
ret=""
while fix1 in q:
i1=q.find(fix1)
if i1<0:continue
bef=q[:i1]
q=q[i1:]
i2=q.find(fix2)
        if i2<0:q=bef+q;break # no closing marker: restore the text and let the tail handler fix it
mid=q[:i2+len(fix2)]
post=q[i2+len(fix2):]
# print("bef",bef)
# print("mid",mid)
# print("post",post)
bef=fixstring(bef,befstr)
# print("bef2",bef)
# exit()
ret+=bef
ret+=mid
q=post
if len(q)>0:ret+=fixstring(q,befstr)
# exit()
return ret
def noignorefix(q,befstr=None):
fix1="<ignore>"
fix2="</ignore>"
ret=""
while fix1 in q:
i1=q.find(fix1)
if i1<0:continue
bef=q[:i1]
q=q[i1:]
i2=q.find(fix2)
        if i2<0:q=bef+q;break # no closing marker: restore the text and let the tail handler fix it
mid=q[:i2+len(fix2)]
post=q[i2+len(fix2):]
# print("bef",bef)
# print("mid",mid)
# print("post",post)
bef=noignorefix2(bef,befstr)
# print("bef2",bef)
# exit()
ret+=bef
ret+=mid
q=post
if len(q)>0:ret+=noignorefix2(q,befstr)
# exit()
return ret
def correctfile(q):
print("correcting file",q)
with open(q,"r") as f:
qq=f.read()
ret=noignorefix(qq,"working on "+q)
with open(q,"w") as f:
f.write(ret)
for dirpath, dirnames, filenames in os.walk(folder):
for filename in [f for f in filenames]:
correctfile(dirpath+"/"+filename)
|
[
"rechtschreib.correct",
"os.walk"
] |
[((2225, 2240), 'os.walk', 'os.walk', (['folder'], {}), '(folder)\n', (2232, 2240), False, 'import os\n'), ((828, 844), 'rechtschreib.correct', 'correct', (['ac', 'bef'], {}), '(ac, bef)\n', (835, 844), False, 'from rechtschreib import correct\n')]
|
import numpy as np
import g2o
class MotionModel(object):
def __init__(self,
timestamp=None,
initial_position=np.zeros(3),
initial_orientation=g2o.Quaternion(),
initial_covariance=None):
self.timestamp = timestamp
self.position = initial_position
self.orientation = initial_orientation
self.covariance = initial_covariance # pose covariance
self.v_linear = np.zeros(3) # linear velocity
self.v_angular_angle = 0
self.v_angular_axis = np.array([1, 0, 0])
self.initialized = False
# damping factor
self.damp = 0.95
def current_pose(self):
'''
Get the current camera pose.
'''
return (g2o.Isometry3d(self.orientation, self.position),
self.covariance)
def predict_pose(self, timestamp):
'''
Predict the next camera pose.
'''
if not self.initialized:
return (g2o.Isometry3d(self.orientation, self.position),
self.covariance)
dt = timestamp - self.timestamp
delta_angle = g2o.AngleAxis(
self.v_angular_angle * dt * self.damp,
self.v_angular_axis)
delta_orientation = g2o.Quaternion(delta_angle)
position = self.position + self.v_linear * dt * self.damp
orientation = self.orientation * delta_orientation
return (g2o.Isometry3d(orientation, position), self.covariance)
def update_pose(self, timestamp,
new_position, new_orientation, new_covariance=None):
'''
Update the motion model when given a new camera pose.
'''
if self.initialized:
dt = timestamp - self.timestamp
assert dt != 0
v_linear = (new_position - self.position) / dt
self.v_linear = v_linear
delta_q = self.orientation.inverse() * new_orientation
delta_q.normalize()
delta_angle = g2o.AngleAxis(delta_q)
angle = delta_angle.angle()
axis = delta_angle.axis()
if angle > np.pi:
axis = axis * -1
angle = 2 * np.pi - angle
self.v_angular_axis = axis
self.v_angular_angle = angle / dt
self.timestamp = timestamp
self.position = new_position
self.orientation = new_orientation
self.covariance = new_covariance
self.initialized = True
def apply_correction(self, correction): # corr: g2o.Isometry3d or matrix44
'''
Reset the model given a new camera pose.
        Note: This method is called when an abrupt change in the pose occurs (LoopClosing)
'''
if not isinstance(correction, g2o.Isometry3d):
correction = g2o.Isometry3d(correction)
current = g2o.Isometry3d(self.orientation, self.position)
current = current * correction
self.position = current.position()
self.orientation = current.orientation()
self.v_linear = (
correction.inverse().orientation() * self.v_linear)
self.v_angular_axis = (
correction.inverse().orientation() * self.v_angular_axis)
|
[
"g2o.Quaternion",
"g2o.AngleAxis",
"g2o.Isometry3d",
"numpy.zeros",
"numpy.array"
] |
[((143, 154), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (151, 154), True, 'import numpy as np\n'), ((189, 205), 'g2o.Quaternion', 'g2o.Quaternion', ([], {}), '()\n', (203, 205), False, 'import g2o\n'), ((461, 472), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (469, 472), True, 'import numpy as np\n'), ((557, 576), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (565, 576), True, 'import numpy as np\n'), ((1156, 1229), 'g2o.AngleAxis', 'g2o.AngleAxis', (['(self.v_angular_angle * dt * self.damp)', 'self.v_angular_axis'], {}), '(self.v_angular_angle * dt * self.damp, self.v_angular_axis)\n', (1169, 1229), False, 'import g2o\n'), ((1284, 1311), 'g2o.Quaternion', 'g2o.Quaternion', (['delta_angle'], {}), '(delta_angle)\n', (1298, 1311), False, 'import g2o\n'), ((2902, 2949), 'g2o.Isometry3d', 'g2o.Isometry3d', (['self.orientation', 'self.position'], {}), '(self.orientation, self.position)\n', (2916, 2949), False, 'import g2o\n'), ((767, 814), 'g2o.Isometry3d', 'g2o.Isometry3d', (['self.orientation', 'self.position'], {}), '(self.orientation, self.position)\n', (781, 814), False, 'import g2o\n'), ((1455, 1492), 'g2o.Isometry3d', 'g2o.Isometry3d', (['orientation', 'position'], {}), '(orientation, position)\n', (1469, 1492), False, 'import g2o\n'), ((2025, 2047), 'g2o.AngleAxis', 'g2o.AngleAxis', (['delta_q'], {}), '(delta_q)\n', (2038, 2047), False, 'import g2o\n'), ((2856, 2882), 'g2o.Isometry3d', 'g2o.Isometry3d', (['correction'], {}), '(correction)\n', (2870, 2882), False, 'import g2o\n'), ((1001, 1048), 'g2o.Isometry3d', 'g2o.Isometry3d', (['self.orientation', 'self.position'], {}), '(self.orientation, self.position)\n', (1015, 1048), False, 'import g2o\n')]
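A minimal usage sketch of the MotionModel above, assuming the same g2o Python
bindings the class itself relies on; the timestamps and poses are made up for
illustration, and only constructors already used by the class appear here.

import numpy as np
import g2o

ident = g2o.Quaternion(g2o.AngleAxis(0.0, np.array([1.0, 0.0, 0.0])))  # identity rotation
model = MotionModel(timestamp=0.0)
# Feed two observed poses one second apart (constant orientation for simplicity)
model.update_pose(0.0, np.array([0.0, 0.0, 0.0]), ident)
model.update_pose(1.0, np.array([1.0, 0.0, 0.0]), ident)
# v_linear is now ~[1, 0, 0]; extrapolate half a second ahead
pose, _ = model.predict_pose(1.5)
print(pose.position())  # roughly [1.475, 0, 0] given damp = 0.95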
|
"""Added organization table
Revision ID: ea281d3f1673
Revises: <KEY>
Create Date: 2021-11-04 15:04:47.282526
"""
from uuid import uuid4
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'ea281d3f1673'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def new_uuid() -> str:
return uuid4().hex
def upgrade():
op.create_table(
'organization',
sa.Column('id', sa.String(32), primary_key=True, default=new_uuid),
sa.Column('name', sa.String(128), unique=True, nullable=False),
)
def downgrade():
op.drop_table('organization')
|
[
"alembic.op.drop_table",
"uuid.uuid4",
"sqlalchemy.String"
] |
[((602, 631), 'alembic.op.drop_table', 'op.drop_table', (['"""organization"""'], {}), "('organization')\n", (615, 631), False, 'from alembic import op\n'), ((352, 359), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (357, 359), False, 'from uuid import uuid4\n'), ((449, 462), 'sqlalchemy.String', 'sa.String', (['(32)'], {}), '(32)\n', (458, 462), True, 'import sqlalchemy as sa\n'), ((527, 541), 'sqlalchemy.String', 'sa.String', (['(128)'], {}), '(128)\n', (536, 541), True, 'import sqlalchemy as sa\n')]
|
# Generated by Django 3.0.8 on 2020-07-07 10:37
from django.db import migrations, models
import measurements.validators
class Migration(migrations.Migration):
dependencies = [
('measurements', '0002_auto_20200706_1258'),
]
operations = [
migrations.AlterField(
model_name='measurement',
name='diastolic_pressure',
field=models.SmallIntegerField(default=80, validators=[measurements.validators.max_diastolic_pressure, measurements.validators.min_diastolic_pressure], verbose_name='Ciśnienie rozkurczowe'),
),
migrations.AlterField(
model_name='measurement',
name='pulse',
field=models.SmallIntegerField(default=60, validators=[measurements.validators.max_pulse, measurements.validators.min_pulse], verbose_name='Tętno'),
),
migrations.AlterField(
model_name='measurement',
name='systolic_pressure',
field=models.SmallIntegerField(default=120, validators=[measurements.validators.max_systolic_pressure, measurements.validators.min_systolic_pressure], verbose_name='Ciśnienie skurczowe'),
),
]
|
[
"django.db.models.SmallIntegerField"
] |
[((389, 581), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(80)', 'validators': '[measurements.validators.max_diastolic_pressure, measurements.validators.\n min_diastolic_pressure]', 'verbose_name': '"""Ciśnienie rozkurczowe"""'}), "(default=80, validators=[measurements.validators.\n max_diastolic_pressure, measurements.validators.min_diastolic_pressure],\n verbose_name='Ciśnienie rozkurczowe')\n", (413, 581), False, 'from django.db import migrations, models\n'), ((698, 844), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(60)', 'validators': '[measurements.validators.max_pulse, measurements.validators.min_pulse]', 'verbose_name': '"""Tętno"""'}), "(default=60, validators=[measurements.validators.\n max_pulse, measurements.validators.min_pulse], verbose_name='Tętno')\n", (722, 844), False, 'from django.db import migrations, models\n'), ((977, 1166), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(120)', 'validators': '[measurements.validators.max_systolic_pressure, measurements.validators.\n min_systolic_pressure]', 'verbose_name': '"""Ciśnienie skurczowe"""'}), "(default=120, validators=[measurements.validators.\n max_systolic_pressure, measurements.validators.min_systolic_pressure],\n verbose_name='Ciśnienie skurczowe')\n", (1001, 1166), False, 'from django.db import migrations, models\n')]
|
import csv
import matplotlib.pyplot as plt
rawmeanbeforeNormFile = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/GBM_SlidingWindow_TextureMap/CE_slice22_T2_ROI_Texture_Map.csv'
rawmeanafterNormFile ='/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/addYlabel/GBM_SlidingWindow_TextureMap/CE_slice22_T2_ROI_Texture_Map.csv'
saveDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/Test/'
with open(rawmeanbeforeNormFile,'r') as featuresfile:
featuresfile.readline()
rowFile = csv.reader(featuresfile, delimiter=',')
xlist = list()
ylist = list()
rawmeanlist = list()
for row in rowFile:
if row[0] =='SPGRC':
xlist.append(int(row[2]))
ylist.append(int(row[3]))
rawmeanlist.append(float(row[42]))
cm = plt.cm.get_cmap('jet')
plt.scatter(xlist, ylist, c=rawmeanlist, cmap=cm)
plt.colorbar()
plt.savefig(saveDir + 'Raw Mean Before Normalization.png')
plt.cla()
plt.close()
# plt.show()
|
[
"csv.reader",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.savefig"
] |
[((831, 853), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (846, 853), True, 'import matplotlib.pyplot as plt\n'), ((854, 903), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xlist', 'ylist'], {'c': 'rawmeanlist', 'cmap': 'cm'}), '(xlist, ylist, c=rawmeanlist, cmap=cm)\n', (865, 903), True, 'import matplotlib.pyplot as plt\n'), ((904, 918), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (916, 918), True, 'import matplotlib.pyplot as plt\n'), ((920, 978), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(saveDir + 'Raw Mean Before Normalization.png')"], {}), "(saveDir + 'Raw Mean Before Normalization.png')\n", (931, 978), True, 'import matplotlib.pyplot as plt\n'), ((980, 989), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (987, 989), True, 'import matplotlib.pyplot as plt\n'), ((990, 1001), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (999, 1001), True, 'import matplotlib.pyplot as plt\n'), ((543, 582), 'csv.reader', 'csv.reader', (['featuresfile'], {'delimiter': '""","""'}), "(featuresfile, delimiter=',')\n", (553, 582), False, 'import csv\n')]
|
import numpy as np
import string
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
char_limit = 1014
def get_data(path):
labels = []
inputs = []
df = pd.read_csv(path, names=['one','second','third'])
df = df.drop('second', axis=1)
data = df.values
for label,text in data:
inputs.append(text.lower())
if label == 1:
labels.append([1, 0, 0, 0])
elif label == 2:
labels.append([0, 1, 0, 0])
elif label == 3:
labels.append([0, 0, 1, 0])
else:
labels.append([0, 0, 0, 1])
return inputs, np.array(labels, dtype=np.float32)
def create_vocab_set(inputs):
vocab = set()
for i in inputs:
for j in i.split(' '):
vocab.add(j)
vocab_size = len(vocab)
word2idx = {}
for i, c in enumerate(vocab):
word2idx[c] = i
return vocab, vocab_size, word2idx
def _encode_text(s, word2idx):
vec = []
for i in s.split(' '):
vec.append(word2idx[i])
return np.array(vec)
def get_encoded_text(text, word2idx, sent_limit):
encoded_text = []
for single_text in text:
encoded_text.append(_encode_text(single_text, word2idx))
encoded_text = pad_sequences(encoded_text, maxlen=sent_limit, value=0.)
return np.array(encoded_text)
def batch_gen(encoded_text, labels, batch_size):
for ii in range(0, len(encoded_text), batch_size):
x = encoded_text[ii:ii + batch_size]
y = labels[ii:ii + batch_size]
yield (x, y)
|
[
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"numpy.array"
] |
[((190, 241), 'pandas.read_csv', 'pd.read_csv', (['path'], {'names': "['one', 'second', 'third']"}), "(path, names=['one', 'second', 'third'])\n", (201, 241), True, 'import pandas as pd\n'), ((1047, 1060), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (1055, 1060), True, 'import numpy as np\n'), ((1248, 1305), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_text'], {'maxlen': 'sent_limit', 'value': '(0.0)'}), '(encoded_text, maxlen=sent_limit, value=0.0)\n', (1261, 1305), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1316, 1338), 'numpy.array', 'np.array', (['encoded_text'], {}), '(encoded_text)\n', (1324, 1338), True, 'import numpy as np\n'), ((626, 660), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.float32'}), '(labels, dtype=np.float32)\n', (634, 660), True, 'import numpy as np\n')]
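A small end-to-end run of the helpers above on toy data (no CSV needed); the
sentences and one-hot labels are invented. Note that pad_sequences pads with
0, which is also a valid word index here, so a real pipeline would want to
reserve index 0 for padding.

import numpy as np

inputs = ["good movie", "bad plot", "good plot"]
labels = np.eye(4, dtype=np.float32)[[0, 1, 0]]  # fake one-hot labels

vocab, vocab_size, word2idx = create_vocab_set(inputs)
encoded = get_encoded_text(inputs, word2idx, sent_limit=5)  # shape (3, 5)

for x, y in batch_gen(encoded, labels, batch_size=2):
    print(x.shape, y.shape)  # (2, 5) (2, 4), then (1, 5) (1, 4)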
|
#%%
import matplotlib.pyplot as plt
import numpy as np
Rload = 3300
R_25 = 10000
T_25 = 25 + 273.15 #Kelvin
Beta = 3434
Tmin = 0
Tmax = 140
temps = np.linspace(Tmin, Tmax, 1000)
tempsK = temps + 273.15
# https://en.wikipedia.org/wiki/Thermistor#B_or_%CE%B2_parameter_equation
r_inf = R_25 * np.exp(-Beta/T_25)
R_temps = r_inf * np.exp(Beta/tempsK)
V = Rload / (Rload + R_temps)
fit = np.polyfit(V, temps, 3)
p1 = np.poly1d(fit)
fit_temps = p1(V)
#%%
print(fit)
plt.plot(V, temps, label='actual')
plt.plot(V, fit_temps, label='fit')
plt.xlabel('normalized voltage')
plt.ylabel('Temp [C]')
plt.legend(loc=0)
plt.show()
|
[
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"matplotlib.pyplot.legend",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((150, 179), 'numpy.linspace', 'np.linspace', (['Tmin', 'Tmax', '(1000)'], {}), '(Tmin, Tmax, 1000)\n', (161, 179), True, 'import numpy as np\n'), ((388, 411), 'numpy.polyfit', 'np.polyfit', (['V', 'temps', '(3)'], {}), '(V, temps, 3)\n', (398, 411), True, 'import numpy as np\n'), ((417, 431), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (426, 431), True, 'import numpy as np\n'), ((467, 501), 'matplotlib.pyplot.plot', 'plt.plot', (['V', 'temps'], {'label': '"""actual"""'}), "(V, temps, label='actual')\n", (475, 501), True, 'import matplotlib.pyplot as plt\n'), ((502, 537), 'matplotlib.pyplot.plot', 'plt.plot', (['V', 'fit_temps'], {'label': '"""fit"""'}), "(V, fit_temps, label='fit')\n", (510, 537), True, 'import matplotlib.pyplot as plt\n'), ((538, 570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""normalized voltage"""'], {}), "('normalized voltage')\n", (548, 570), True, 'import matplotlib.pyplot as plt\n'), ((571, 593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temp [C]"""'], {}), "('Temp [C]')\n", (581, 593), True, 'import matplotlib.pyplot as plt\n'), ((594, 611), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (604, 611), True, 'import matplotlib.pyplot as plt\n'), ((612, 622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (620, 622), True, 'import matplotlib.pyplot as plt\n'), ((294, 314), 'numpy.exp', 'np.exp', (['(-Beta / T_25)'], {}), '(-Beta / T_25)\n', (300, 314), True, 'import numpy as np\n'), ((331, 352), 'numpy.exp', 'np.exp', (['(Beta / tempsK)'], {}), '(Beta / tempsK)\n', (337, 352), True, 'import numpy as np\n')]
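Once the script has run, the cubic fit converts a normalized ADC reading
straight to temperature; the reading below is a hypothetical value inside
the fitted 0-140 C range.

adc_reading = 0.5        # hypothetical normalized voltage (Vout / Vcc)
print(p1(adc_reading))   # estimated temperature in deg C from the cubic fit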
|
# -*- coding: utf-8 -*-
# @Date : 2020/5/24
# @Author: Luokun
# @Email : <EMAIL>
import sys
from os.path import dirname, abspath
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(dirname(dirname(abspath(__file__))))
def test_knn():
from models.knn import KNN
x, y = np.random.randn(3, 200, 2), np.zeros([3, 200])
    x[0] += np.array([2, 2])  # shift right by 2, up by 2
    x[1] += np.array([2, -2])  # shift right by 2, down by 2
y[1] = 1
y[2] = 2
plot_scatter(x, 'Real')
x = x.reshape(-1, 2)
y = y.flatten()
# train
knn = KNN(3)
knn.fit(x, y)
pred = knn.predict(x)
plot_scatter([x[pred == i] for i in [0, 1, 2]], 'Pred')
# print accuracy
acc = np.sum(pred == y) / len(pred)
print(f'Acc = {100 * acc:.2f}%')
def plot_scatter(xys, title):
plt.figure(figsize=(10, 10))
for xy, color in zip(xys, ['r', 'g', 'b']):
plt.scatter(xy[:, 0], xy[:, 1], color=color)
plt.title(title)
plt.show()
if __name__ == '__main__':
test_knn()
|
[
"matplotlib.pyplot.title",
"os.path.abspath",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"models.knn.KNN",
"matplotlib.pyplot.figure",
"numpy.array"
] |
[((357, 373), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (365, 373), True, 'import numpy as np\n'), ((399, 416), 'numpy.array', 'np.array', (['[2, -2]'], {}), '([2, -2])\n', (407, 416), True, 'import numpy as np\n'), ((553, 559), 'models.knn.KNN', 'KNN', (['(3)'], {}), '(3)\n', (556, 559), False, 'from models.knn import KNN\n'), ((800, 828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (810, 828), True, 'import matplotlib.pyplot as plt\n'), ((934, 950), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (943, 950), True, 'import matplotlib.pyplot as plt\n'), ((955, 965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (963, 965), True, 'import matplotlib.pyplot as plt\n'), ((298, 324), 'numpy.random.randn', 'np.random.randn', (['(3)', '(200)', '(2)'], {}), '(3, 200, 2)\n', (313, 324), True, 'import numpy as np\n'), ((326, 344), 'numpy.zeros', 'np.zeros', (['[3, 200]'], {}), '([3, 200])\n', (334, 344), True, 'import numpy as np\n'), ((697, 714), 'numpy.sum', 'np.sum', (['(pred == y)'], {}), '(pred == y)\n', (703, 714), True, 'import numpy as np\n'), ((885, 929), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xy[:, 0]', 'xy[:, 1]'], {'color': 'color'}), '(xy[:, 0], xy[:, 1], color=color)\n', (896, 929), True, 'import matplotlib.pyplot as plt\n'), ((216, 233), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (223, 233), False, 'from os.path import dirname, abspath\n')]
|
import os
import concurrent.futures
from tqdm import tqdm
from phi_angles import PhiDihedralAngleStatistics
import argparse
parser = argparse.ArgumentParser(description='To set to the path to the data')
parser.add_argument('-i', '--input_directory', help='An input directory for the phi angles must be named', required=True)
parser.add_argument('-a', '--amino_acid_input', help='An input directory for the amino acid tags must be named', required=True)
args = parser.parse_args()
phi_data_path = args.input_directory
amino_acid_data_path = args.amino_acid_input
# psi_data_path = '../dihedral_coordinates'
# amino_acid_data_path = '../../structure_prediction/output/final_features'
def main(phi_data_path, amino_acid_data_path):
for root, dirs, files in os.walk(phi_data_path, topdown=False):
for name in tqdm(files):
if 'phi' in name:
phi = PhiDihedralAngleStatistics(phi_data_path, amino_acid_data_path, name.split('_')[0])
phi.get_amino_acid_array()
phi.encode()
phi.get_phi()
phi.check_length()
phi.combine_amino_acid_phi()
phi.save_phi_angles()
if __name__ == '__main__':
main(phi_data_path, amino_acid_data_path)
|
[
"tqdm.tqdm",
"os.walk",
"argparse.ArgumentParser"
] |
[((135, 204), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""To set to the path to the data"""'}), "(description='To set to the path to the data')\n", (158, 204), False, 'import argparse\n'), ((763, 800), 'os.walk', 'os.walk', (['phi_data_path'], {'topdown': '(False)'}), '(phi_data_path, topdown=False)\n', (770, 800), False, 'import os\n'), ((822, 833), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (826, 833), False, 'from tqdm import tqdm\n')]
|
from dataclasses import dataclass
import numpy as np
@dataclass
class ObjectTrackingResult:
frame_index: int
tracking_id: int
class_id: int
class_name: str
xmin: int
ymin: int
xmax: int
ymax: int
confidence: float
is_active: bool
def to_txt(self):
return "{} {} {} {} {} {}".format(self.class_name,
self.confidence,
self.xmin,
self.ymin,
self.xmax,
self.ymax)
def to_dict(self):
return self.__dict__
def to_array(self):
return np.array([self.xmin,
self.ymin,
self.xmax,
self.ymax,
self.confidence,
])
def to_list(self):
return [self.xmin,
self.ymin,
self.xmax,
self.ymax,
self.confidence,
]
|
[
"numpy.array"
] |
[((718, 789), 'numpy.array', 'np.array', (['[self.xmin, self.ymin, self.xmax, self.ymax, self.confidence]'], {}), '([self.xmin, self.ymin, self.xmax, self.ymax, self.confidence])\n', (726, 789), True, 'import numpy as np\n')]
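A tiny illustration of the dataclass above, with made-up detection values:

r = ObjectTrackingResult(frame_index=0, tracking_id=1, class_id=2,
                         class_name="car", xmin=10, ymin=20, xmax=110,
                         ymax=220, confidence=0.93, is_active=True)
print(r.to_txt())    # car 0.93 10 20 110 220
print(r.to_array())  # [ 10.    20.   110.   220.     0.93]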
|
import tensorflow as tf
import os
from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback
from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp
OPTIMIZER_DIC = {'adam': tf.keras.optimizers.Adam,
'radam': RectifiedAdam,
}
class Trainer(object):
def __init__(self, hparams, strategy, model, task, train_params, log_dir, ckpt_dir):
self.hparams = hparams
self.model = model
self.task = task
self.train_params = train_params
self.strategy = strategy
lr_schedule = self.get_lr_schedule()
self.optimizer = OPTIMIZER_DIC[self.train_params.optimizer](learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
self.ckpt = tf.train.Checkpoint(step=tf.Variable(1, name='checkpoint_step'), optimizer=self.optimizer, net=self.model)
self.manager = tf.train.CheckpointManager(self.ckpt, ckpt_dir,
keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours,
max_to_keep=2)
with self.strategy.scope():
x, y = iter(self.task.valid_dataset).next()
model(x)
model.summary()
model.compile(
optimizer=self.optimizer,
loss=self.task.get_loss_fn(),
metrics=self.task.metrics())#[self.task.get_loss_fn()])
#tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),)
summary_dir = os.path.join(log_dir, 'summaries')
tf.io.gfile.makedirs(log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.optimizer.iterations)
ckpt_callback = CheckpointCallback(manager=self.manager, ckpt=self.ckpt)
summary_callback = SummaryCallback(summary_writer=self.summary_writer)
self.callbacks = [ckpt_callback, summary_callback]
def get_lr_schedule(self):
if 'crs' in self.train_params.schedule:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = (
tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate,
self.train_params.decay_steps,
t_mul=2.0,
m_mul=0.9,
alpha=0.001,
))
elif self.train_params.optimizer == 'radam':
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
decay_rate=0.96,
warmup_steps=0.0)
else:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
decay_rate=0.96,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
warmup_steps=self.train_params.warmup_steps)
return lr_schedule
def restore(self):
with self.strategy.scope():
self.ckpt.restore(self.manager.latest_checkpoint)
if self.manager.latest_checkpoint:
print("Restored from {}".format(self.manager.latest_checkpoint))
else:
print("Initializing from scratch.")
def train(self):
with self.strategy.scope():
with self.summary_writer.as_default():
print("initial learning rate:", self.model.optimizer.learning_rate(self.model.optimizer.iterations))
self.model.fit(self.task.train_dataset,
epochs=self.train_params.num_train_epochs,
steps_per_epoch=self.task.n_train_batches,
validation_steps=self.task.n_valid_batches,
callbacks=self.callbacks,
validation_data=self.task.valid_dataset,
verbose=2
)
|
[
"os.path.join",
"tf2_models.keras_callbacks.SummaryCallback",
"tensorflow.keras.experimental.CosineDecayRestarts",
"tensorflow.io.gfile.makedirs",
"tensorflow.compat.v2.summary.experimental.set_step",
"tensorflow.Variable",
"tf2_models.train_utils.ExponentialDecayWithWarmpUp",
"tf2_models.keras_callbacks.CheckpointCallback",
"tensorflow.train.CheckpointManager"
] |
[((853, 998), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['self.ckpt', 'ckpt_dir'], {'keep_checkpoint_every_n_hours': 'self.hparams.keep_checkpoint_every_n_hours', 'max_to_keep': '(2)'}), '(self.ckpt, ckpt_dir,\n keep_checkpoint_every_n_hours=self.hparams.\n keep_checkpoint_every_n_hours, max_to_keep=2)\n', (879, 998), True, 'import tensorflow as tf\n'), ((1458, 1492), 'os.path.join', 'os.path.join', (['log_dir', '"""summaries"""'], {}), "(log_dir, 'summaries')\n", (1470, 1492), False, 'import os\n'), ((1499, 1528), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['log_dir'], {}), '(log_dir)\n', (1519, 1528), True, 'import tensorflow as tf\n'), ((1639, 1708), 'tensorflow.compat.v2.summary.experimental.set_step', 'tf.compat.v2.summary.experimental.set_step', (['self.optimizer.iterations'], {}), '(self.optimizer.iterations)\n', (1681, 1708), True, 'import tensorflow as tf\n'), ((1732, 1788), 'tf2_models.keras_callbacks.CheckpointCallback', 'CheckpointCallback', ([], {'manager': 'self.manager', 'ckpt': 'self.ckpt'}), '(manager=self.manager, ckpt=self.ckpt)\n', (1750, 1788), False, 'from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback\n'), ((1814, 1865), 'tf2_models.keras_callbacks.SummaryCallback', 'SummaryCallback', ([], {'summary_writer': 'self.summary_writer'}), '(summary_writer=self.summary_writer)\n', (1829, 1865), False, 'from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback\n'), ((2090, 2225), 'tensorflow.keras.experimental.CosineDecayRestarts', 'tf.keras.experimental.CosineDecayRestarts', (['initial_learning_rate', 'self.train_params.decay_steps'], {'t_mul': '(2.0)', 'm_mul': '(0.9)', 'alpha': '(0.001)'}), '(initial_learning_rate, self.\n train_params.decay_steps, t_mul=2.0, m_mul=0.9, alpha=0.001)\n', (2131, 2225), True, 'import tensorflow as tf\n'), ((752, 790), 'tensorflow.Variable', 'tf.Variable', (['(1)'], {'name': '"""checkpoint_step"""'}), "(1, name='checkpoint_step')\n", (763, 790), True, 'import tensorflow as tf\n'), ((1597, 1631), 'os.path.join', 'os.path.join', (['summary_dir', '"""train"""'], {}), "(summary_dir, 'train')\n", (1609, 1631), False, 'import os\n'), ((2414, 2634), 'tf2_models.train_utils.ExponentialDecayWithWarmpUp', 'ExponentialDecayWithWarmpUp', ([], {'initial_learning_rate': 'initial_learning_rate', 'decay_steps': 'self.train_params.decay_steps', 'hold_base_rate_steps': 'self.train_params.hold_base_rate_steps', 'decay_rate': '(0.96)', 'warmup_steps': '(0.0)'}), '(initial_learning_rate=initial_learning_rate,\n decay_steps=self.train_params.decay_steps, hold_base_rate_steps=self.\n train_params.hold_base_rate_steps, decay_rate=0.96, warmup_steps=0.0)\n', (2441, 2634), False, 'from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp\n'), ((2759, 3009), 'tf2_models.train_utils.ExponentialDecayWithWarmpUp', 'ExponentialDecayWithWarmpUp', ([], {'initial_learning_rate': 'initial_learning_rate', 'decay_steps': 'self.train_params.decay_steps', 'decay_rate': '(0.96)', 'hold_base_rate_steps': 'self.train_params.hold_base_rate_steps', 'warmup_steps': 'self.train_params.warmup_steps'}), '(initial_learning_rate=initial_learning_rate,\n decay_steps=self.train_params.decay_steps, decay_rate=0.96,\n hold_base_rate_steps=self.train_params.hold_base_rate_steps,\n warmup_steps=self.train_params.warmup_steps)\n', (2786, 3009), False, 'from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp\n')]
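The schedule objects built in get_lr_schedule are plain callables on the
global step; a standalone look at the cosine-restart branch, using the same
tf.keras.experimental.CosineDecayRestarts API the class calls (the numbers
are illustrative):

import tensorflow as tf

lr = tf.keras.experimental.CosineDecayRestarts(
    0.001, 1000, t_mul=2.0, m_mul=0.9, alpha=0.001)
for step in (0, 500, 1000, 3000):
    print(step, float(lr(step)))  # decays within a cycle; restarts at steps 1000 and 3000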
|
#!/usr/bin/env python3
# import modules.
import sys; sys.path.append("..")
import logging
import math
import plac
import unittest
from tomes_tagger.lib.text_to_nlp import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_TextToNLP(unittest.TestCase):
def setUp(self):
# set attributes.
self.host = "http://localhost:-1"
self.t2n = TextToNLP(host=self.host)
def test__failed_annotate(self):
""" Since we can't connect to port -1, is a ConnectionError raised? """
# call CoreNLP instance's annotator.
try:
self.t2n.corenlp.annotate("")
is_connection_error = False
except ConnectionError as err:
is_connection_error = True
# check if result is as expected.
self.assertTrue(is_connection_error)
def test__gets_empty_list(self):
""" If we try and tag an empty string, is an empty list returned? """
results = self.t2n.get_NER("")
self.assertTrue(results == [])
# CLI.
def main(text="North Carolina.", host="http://localhost:9003"):
"Prints list of NER results.\
\nexample: `python3 test__text_to_nlp.py '<NAME>'`"
# print NER results.
t2n = TextToNLP(host=host)
ner = t2n.get_NER(text)
print(ner)
if __name__ == "__main__":
plac.call(main)
|
[
"sys.path.append",
"plac.call",
"logging.basicConfig"
] |
[((54, 75), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (69, 75), False, 'import sys\n'), ((193, 233), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (212, 233), False, 'import logging\n'), ((1363, 1378), 'plac.call', 'plac.call', (['main'], {}), '(main)\n', (1372, 1378), False, 'import plac\n')]
|
from threading import Thread
import time
from plugins.trivia.questions import QuestionGenerator
# This class will put itself in a pseudo-while loop that is non-blocking
# to the rest of the program.
class Question:
def __init__(self, q, a):
self.text = q
self.ans = a
class Trivia:
def __init__(self, ws, room, kind):
self.ws = ws
self.room = room
self.kind = kind
self.generator = QuestionGenerator()
self.question = None
self.solved = False
self.multiple = False
self.solver = ''
self.endSession = False
# Create the first thread that starts the loop
self.thread = Thread(target = self.customWait,
name = 'customWait',
args = (5,),
daemon = True)
self.thread.start()
def notify(self, msg):
self.ws.send('{room}|{msg}'.format(room = self.room, msg = msg))
def customWait(self, secondsToWait):
        ''' Wait between questions: 5 seconds before the first question, 10 seconds between later ones '''
secondsPassed = 0
        while secondsPassed <= secondsToWait and not self.endSession:
time.sleep(1)
secondsPassed += 1
if self.endSession:
# This breaks the pseudo-while loop and kills the Trivia session
return
newQ = self.generator.makeQuestion()
self.question = Question(newQ['q'], newQ['a'])
self.notify('Next question:')
self.notify(self.question.text)
        # Create the waiting thread that'll time out after 30 seconds
self.thread = Thread(target = self.wait30Sec,
name = 'longWait',
daemon = True)
self.thread.start()
def wait30Sec(self):
        ''' Every question has a 30 second answering period or until someone gets the question right '''
secondsPassed = 0
        while secondsPassed <= 30 and not self.endSession:
time.sleep(1)
secondsPassed += 1
if self.solved:
self.notify('{name} was correct{extra}!'.format(name = self.solver,
extra = ' first' if self.multiple else ''))
else:
self.notify('No one got it right')
self.notify('The answer was {ans}.'.format(ans = self.question.ans))
if self.endSession:
return
self.clear()
self.notify('Next round will start soon.')
self.thread = Thread(target = self.customWait,
name = 'customWait',
args = (10,),
daemon = True)
self.thread.start()
def tryAnswer(self, guess):
if guess.lower() == self.question.ans:
self.solved = True
return self.solved
def wasSolved(self, by):
self.solver = by
def clear(self):
self.solved = False
self.multiple = False
self.solver = ''
def status(self):
return self.thread.name
|
[
"threading.Thread",
"plugins.trivia.questions.QuestionGenerator",
"time.sleep"
] |
[((439, 458), 'plugins.trivia.questions.QuestionGenerator', 'QuestionGenerator', ([], {}), '()\n', (456, 458), False, 'from plugins.trivia.questions import QuestionGenerator\n'), ((680, 753), 'threading.Thread', 'Thread', ([], {'target': 'self.customWait', 'name': '"""customWait"""', 'args': '(5,)', 'daemon': '(True)'}), "(target=self.customWait, name='customWait', args=(5,), daemon=True)\n", (686, 753), False, 'from threading import Thread\n'), ((1635, 1694), 'threading.Thread', 'Thread', ([], {'target': 'self.wait30Sec', 'name': '"""longWait"""', 'daemon': '(True)'}), "(target=self.wait30Sec, name='longWait', daemon=True)\n", (1641, 1694), False, 'from threading import Thread\n'), ((2540, 2614), 'threading.Thread', 'Thread', ([], {'target': 'self.customWait', 'name': '"""customWait"""', 'args': '(10,)', 'daemon': '(True)'}), "(target=self.customWait, name='customWait', args=(10,), daemon=True)\n", (2546, 2614), False, 'from threading import Thread\n'), ((1196, 1209), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1206, 1209), False, 'import time\n'), ((2010, 2023), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2020, 2023), False, 'import time\n')]
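A dummy websocket object is enough to watch a session run in a terminal;
this assumes the plugins.trivia.questions package imported above is
available, and the sleep keeps the main thread alive for the daemon timers.

import time

class FakeWS:
    def send(self, msg):
        print(msg)

session = Trivia(FakeWS(), room='lobby', kind='default')
time.sleep(60)  # questions and answers are printed as '<room>|<message>'
# later, when a player answers:
#   if session.tryAnswer(player_guess):
#       session.wasSolved(player_name)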
|
"""Users Models."""
# Django
from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
"""Profile extended:
Proxy model that extends the base data with other information.
"""
# this links Profile with the User profile, one to one relationship
profile_user = models.OneToOneField(User, on_delete=models.CASCADE)
# Website filed
profile_website = models.URLField(max_length=200, blank=True)
# Biography
profile_biography = models.TextField(blank=True)
# Phone number
profile_phone_number = models.CharField(max_length=20, blank=True)
# Picture
profile_picture = models.ImageField(
upload_to = 'users/pictures',
blank = True,
null = True,)
profile_created_on = models.DateTimeField(auto_now_add = True)
profile_modified_on = models.DateTimeField(auto_now = True)
def __str__(self):
"""TODO: description."""
return self.profile_user.username
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.ImageField",
"django.db.models.DateTimeField"
] |
[((324, 376), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (344, 376), False, 'from django.db import models\n'), ((419, 462), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(200)', 'blank': '(True)'}), '(max_length=200, blank=True)\n', (434, 462), False, 'from django.db import models\n'), ((503, 531), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (519, 531), False, 'from django.db import models\n'), ((578, 621), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (594, 621), False, 'from django.db import models\n'), ((658, 726), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""users/pictures"""', 'blank': '(True)', 'null': '(True)'}), "(upload_to='users/pictures', blank=True, null=True)\n", (675, 726), False, 'from django.db import models\n'), ((793, 832), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (813, 832), False, 'from django.db import models\n'), ((861, 896), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (881, 896), False, 'from django.db import models\n')]
|
from flask import render_template, url_for
from app import app
script_list = ['demo',
'format_DNA',
'translate',
'extra_sites']
default_choice = 'format_DNA'
def render_index_template():
return render_template(
"index.html",
script_list = script_list,
default=default_choice)
# index shows form with scripts listed
@app.route('/', methods = ['GET'])
@app.route('/index', methods = ['GET'])
def index():
return render_index_template()
|
[
"app.app.route",
"flask.render_template"
] |
[((419, 450), 'app.app.route', 'app.route', (['"""/"""'], {'methods': "['GET']"}), "('/', methods=['GET'])\n", (428, 450), False, 'from app import app\n'), ((454, 490), 'app.app.route', 'app.route', (['"""/index"""'], {'methods': "['GET']"}), "('/index', methods=['GET'])\n", (463, 490), False, 'from app import app\n'), ((262, 340), 'flask.render_template', 'render_template', (['"""index.html"""'], {'script_list': 'script_list', 'default': 'default_choice'}), "('index.html', script_list=script_list, default=default_choice)\n", (277, 340), False, 'from flask import render_template, url_for\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
#%%
def tol2side_x_eq_y(x, y, tol_below=0.0, tol_above=0.0):
    '''Check whether x equals y within lower tolerance tol_below and upper tolerance tol_above'''
return y - tol_below <= x <= y + tol_above
def tol_eq(x, y, tol=0.0):
    '''Check whether x and y are equal within absolute tolerance tol'''
return abs(x - y) <= tol
def tol_x_big_y(x, y, tol=0.0):
    '''Check whether x is greater than y by more than absolute tolerance tol'''
return x > y and abs(x - y) > tol
def tol_x_big_eq_y(x, y, tol=0.0):
    '''Check whether x is greater than or equal to y within absolute tolerance tol'''
return tol_x_big_y(x, y, tol) or tol_eq(x, y, tol)
def tol_x_sml_y(x, y, tol=0.0):
    '''Check whether x is less than y by more than absolute tolerance tol'''
return x < y and abs(y - x) > tol
def tol_x_sml_eq_y(x, y, tol=0.0):
    '''Check whether x is less than or equal to y within absolute tolerance tol'''
return tol_x_sml_y(x, y, tol) or tol_eq(x, y, tol)
#%%
def get_alts_sml(tgt_sum, alts, sort_type='descend', tol=0.0, add_num=None):
    '''
    Pick the feasible candidate numbers from the given list alts for the
    target sum tgt_sum.
    Parameters
    ----------
    tgt_sum : float, int
        Target sum.
    alts : list
        List of candidate numbers.
    sort_type : str
        How to sort alts: 'descend' (default, descending), 'ascend'
        (ascending), or None for no sorting.
    tol : float
        Absolute tolerance used when comparing two numbers.
    add_num : int, None
        Number of extra candidates kept beyond the point where the cumulative
        sum first reaches tgt_sum; by default there is no limit.
    Returns
    -------
    alts : list
        List of feasible candidate numbers.
    '''
    # a candidate must not exceed the target sum
alts = [x for x in alts if tol_x_sml_eq_y(x, tgt_sum, tol)]
if len(alts) == 0:
return []
if sort_type == 'descend':
alts = sorted(alts, reverse=True)
if sort_type == 'ascend':
alts = sorted(alts, reverse=False)
if add_num is None or add_num >= len(alts):
return alts
cumSum = list(np.cumsum(alts))
tmp = [1 if s >= tgt_sum else 0 for s in cumSum]
try:
strt_idx = tmp.index(1)
if strt_idx+add_num+1 <= len(alts):
return alts[:strt_idx+add_num+1]
else:
return alts
except:
return alts
#%%
def backfind_sml1st_index(tgt_sum, alts, tol=0.0, loop_count=None):
'''
alts从后往前搜索,返回第一个小于等于tgt_sum的数的索引
Parameters
----------
tgt_sum : int, float
目标值
alts : list
待比较数列表
tol : float
两个数进行比较时的绝对误差控制范围
loop_count : int
初始迭代次数值,默认为None;若loop_count为None,则不记录迭代次数,
否则在loop_count基础上继续记录迭代次数
Returns
-------
idx : int
从后往前搜索,alts中小于等于tgt_sum的第一个数的索引
loop_count : int
搜索结束时的迭代次数
'''
if len(alts) == 0:
return -1, loop_count
idx = len(alts) - 1
if loop_count is None:
while idx >= 1 and tol_x_big_y(alts[idx], tgt_sum, tol):
idx -= 1
return idx, loop_count
else:
while idx >= 1 and tol_x_big_y(alts[idx], tgt_sum, tol):
idx -= 1
loop_count += 1
return idx, loop_count
#%%
if __name__ == '__main__':
tgt_sum = 10
alts = [2, 5, 12, 11, 7, 8, 6, 3, 1, 10, 13]
sort_type = 'descend'
tol = 1.0
add_num = None
alts_new = get_alts_sml(tgt_sum, alts, sort_type=sort_type, tol=tol,
add_num=add_num)
print(alts_new)
alts = sorted(alts, reverse=False)
idx, loop_count = backfind_sml1st_index(tgt_sum, alts, tol=tol,
loop_count=None)
print(alts)
print(idx, loop_count)
|
[
"numpy.cumsum"
] |
[((1570, 1585), 'numpy.cumsum', 'np.cumsum', (['alts'], {}), '(alts)\n', (1579, 1585), True, 'import numpy as np\n')]
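For quick reference, the basic tolerance predicates behave as follows:

print(tol_eq(1.00, 1.05, tol=0.1))          # True:  |x - y| <= tol
print(tol_x_big_y(1.2, 1.0, tol=0.1))       # True:  greater by more than tol
print(tol_x_big_y(1.05, 1.0, tol=0.1))      # False: inside the tolerance band
print(tol_x_big_eq_y(1.05, 1.0, tol=0.1))   # True:  treated as equal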
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-03 13:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('invoice', '0015_auto_20180102_0048'),
]
operations = [
migrations.CreateModel(
name='ExpenseGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('expense', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='group', to='invoice.Invoice')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((434, 527), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (450, 527), False, 'from django.db import migrations, models\n'), ((551, 582), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)'}), '(max_length=80)\n', (567, 582), False, 'from django.db import migrations, models\n'), ((613, 747), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""group"""', 'to': '"""invoice.Invoice"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='group', to='invoice.Invoice')\n", (630, 747), False, 'from django.db import migrations, models\n')]
|
# python3 yammler.py ~/Games/openxcom_71_40k/user/mods/ROSIGMA/Ruleset ~/Games/openxcom_71_40k/user/mods/40k/Ruleset/
import sys, os
import yaml
print(sys.argv)
# os.chdir(sys.argv[1])
paths = sys.argv[1:]
fileList = []
DEBUG = False
def debugPrint(debugText):
if DEBUG:
print(debugText)
def addTrailingSlash(path):
if path[-1] != "/":
return (path + "/")
else:
return path
def isFolder(path, fileName):
stMode = os.stat(path + fileName).st_mode
stMode //= 0x4000 # directory
if stMode == 1:
return True
return False
def isRulFile(path, fileName):
stMode = os.stat(path + fileName).st_mode
stMode //= 0x8000 # file
if ".rul" == fileName[-4:] and stMode == 1:
return True
return False
def populateFileList(path): # recursive
for x in os.listdir(path):
path = addTrailingSlash(path)
if isFolder(path, x):
populateFileList(path+x)
elif isRulFile(path, x):
fileList.append(path+x)
print("Searching for Ruleset Files in:")
for path in paths:
print(path)
populateFileList(path)
print("Number of Ruleset Files: " + str(len(fileList)))
def tryYamlSafeLoad(fileHandler):
try:
        return yaml.safe_load(fileHandler)  # use the argument, not the global yamlFile
except yaml.constructor.ConstructorError:
print("Constructor Error; Affected file: " + str(fileHandler.name))
return dict()
except yaml.composer.ComposerError:
print("Composer Error; Affected file: " + str(fileHandler.name))
return dict()
class yamlItemEntry:
def __init__(self, yamlEntry):
        # store the extracted fields on the instance
        self.itemName = self.safeInsert(yamlEntry, "type")
        self.battleType = self.safeInsert(yamlEntry, "battleType")
        self.tuAuto = self.safeInsert(yamlEntry, "tuAuto")
        self.tuSnap = self.safeInsert(yamlEntry, "tuSnap")
        self.tuAimed = self.safeInsert(yamlEntry, "tuAimed")
        self.rosigmaComment = self.safeInsert(yamlEntry, "rosigmaComment")
def safeInsert(self, yamlEntry, key):
try:
return yamlEntry[key]
except KeyError:
return ""
yamlEntries = []
for filePath in fileList:
yamlFile = open(filePath, 'r')
yamlContent = tryYamlSafeLoad(yamlFile)
debugPrint(filePath)
debugPrint(yamlContent.keys())
if "items" in yamlContent.keys():
print(filePath)
#print(yamlContent["items"])
#print(len(yamlContent["items"]))
#print(type(yamlContent))
#print(yamlContent.keys())
for x in yamlContent["items"]:
if "type" in x.keys():
print(x["type"])
print(x)
yamlEntries.append(yamlItemEntry(x))
break
yamlFile.close()
print(yamlEntries)
|
[
"yaml.safe_load",
"os.listdir",
"os.stat"
] |
[((830, 846), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (840, 846), False, 'import sys, os\n'), ((459, 483), 'os.stat', 'os.stat', (['(path + fileName)'], {}), '(path + fileName)\n', (466, 483), False, 'import sys, os\n'), ((628, 652), 'os.stat', 'os.stat', (['(path + fileName)'], {}), '(path + fileName)\n', (635, 652), False, 'import sys, os\n'), ((1242, 1266), 'yaml.safe_load', 'yaml.safe_load', (['yamlFile'], {}), '(yamlFile)\n', (1256, 1266), False, 'import yaml\n')]
|
from distutils.core import setup
classifiers = [
'Development Status :: 3 - Alpha'
, 'Intended Audience :: Developers'
, 'License :: OSI Approved :: BSD License'
, 'Natural Language :: English'
, 'Operating System :: MacOS :: MacOS X'
, 'Operating System :: Microsoft :: Windows'
, 'Operating System :: POSIX'
, 'Operating System :: Unix'
, 'Programming Language :: Python'
, 'Topic :: Internet :: WWW/HTTP'
, 'Topic :: Internet :: WWW/HTTP :: WSGI'
, 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware'
, 'Topic :: Software Development :: Libraries :: Python Modules'
]
setup( name='httpy'
, version='~~VERSION~~'
, package_dir = {'':'src'}
, py_modules=['httpy']
, description = "httpy smooths out a few of WSGI's most glaring warts."
, author = '<NAME>'
, author_email = '<EMAIL>'
, url = 'http://www.zetadev.com/software/httpy/'
, classifiers = classifiers
)
|
[
"distutils.core.setup"
] |
[((608, 897), 'distutils.core.setup', 'setup', ([], {'name': '"""httpy"""', 'version': '"""~~VERSION~~"""', 'package_dir': "{'': 'src'}", 'py_modules': "['httpy']", 'description': '"""httpy smooths out a few of WSGI\'s most glaring warts."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://www.zetadev.com/software/httpy/"""', 'classifiers': 'classifiers'}), '(name=\'httpy\', version=\'~~VERSION~~\', package_dir={\'\': \'src\'},\n py_modules=[\'httpy\'], description=\n "httpy smooths out a few of WSGI\'s most glaring warts.", author=\n \'<NAME>\', author_email=\'<EMAIL>\', url=\n \'http://www.zetadev.com/software/httpy/\', classifiers=classifiers)\n', (613, 897), False, 'from distutils.core import setup\n')]
|
import os
"""Plotly Dash HTML layout override."""
dir_path = os.getcwd()
with open(os.path.join(dir_path, 'main', 'templates', 'base.html'), 'r') as f:
rows = f.readlines()
rows = [row.strip() for row in rows]
nav_index = rows.index('</nav>')
dash_str = rows[:nav_index+1] + ['{%app_entry%}',
                                 '<footer>',
'{%config%}',
'{%scripts%}',
'{%renderer%}',
'</footer>',
'</body>',
'</html>']
html_layout = "\n".join(dash_str)
|
[
"os.getcwd",
"os.path.join"
] |
[((61, 72), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (70, 72), False, 'import os\n'), ((83, 139), 'os.path.join', 'os.path.join', (['dir_path', '"""main"""', '"""templates"""', '"""base.html"""'], {}), "(dir_path, 'main', 'templates', 'base.html')\n", (95, 139), False, 'import os\n')]
|
#!/usr/bin/python
# Copyright 2019 Fetch Robotics Inc.
# Author(s): <NAME>
# Python
from __future__ import print_function
from datetime import datetime
from datetime import timedelta
# ROS
import rospy
import actionlib
from fetchit_challenge.msg import SchunkMachineAction, SchunkMachineResult, SchunkMachineGoal
# # GPIO
# try:
# import RPi.GPIO as GPIO
# except ImportError:
# print("This script must be run on a Raspberry Pi. (RPi.GPIO import failed)")
# sys.exit(1)
class SchunkMachineServer(object):
"""Class for using Schunk Machine chuck."""
_result = SchunkMachineResult()
def __init__(self):
"""Specifies pin numbering schema and sets up GPIO channels"""
# # Setup GPIO
# GPIO.setwarnings(False)
# mode = GPIO.getmode()
# if mode is None:
# GPIO.setmode(GPIO.BOARD)
# elif mode == GPIO.BCM:
# GPIO.setup([], GPIO.OUT)
# GPIO.cleanup()
# GPIO.setmode(GPIO.BOARD)
# GPIO.setup(37, GPIO.OUT, initial=1)
# GPIO.setup(40, GPIO.OUT, initial=0)
self._current_state = SchunkMachineGoal.OPEN
# Minimum time for chuck to be closed
self._lock_time = 120.0
self.server = actionlib.SimpleActionServer('schunk_machine', SchunkMachineAction, self.callback, False)
self.server.start()
rospy.loginfo("Simulated SCHUNK machine is ready")
def callback(self, goal):
"""Action server callback."""
print("Received goal: " + str(goal))
if goal.state == self._current_state:
self._result.success = True
self._result.message = "Schunk Machine Chuck already in desired state."
self.server.set_succeeded(self._result)
elif goal.state == SchunkMachineGoal.CLOSE:
self._lock_until = datetime.now() + timedelta(seconds=self._lock_time)
self.close()
self._result.success = True
self._result.message = "Schunk Machine Chuck closed."
self.server.set_succeeded(self._result)
self._current_state = SchunkMachineGoal.CLOSE
elif goal.state == SchunkMachineGoal.OPEN:
time_left = self._lock_until - datetime.now()
if time_left.total_seconds() > 0.0:
self._result.success = False
self._result.message = "Schunk Machine Chuck must be closed for at least " + str(self._lock_time) +\
"s. Please wait " + str(time_left.total_seconds()) + "s."
self.server.set_aborted(self._result)
else:
self.open()
self._result.success = True
self._result.message = "Schunk Machine Chuck open."
self.server.set_succeeded(self._result)
self._current_state = SchunkMachineGoal.OPEN
else:
self._result.success = False
self._result.message = "Unknown goal type"
self.server.set_aborted(self._result)
def open(self):
"""Set Pi pins to open chuck."""
# GPIO.output(40, 0)
# GPIO.output(37, 1)
rospy.loginfo("Opening SCHUNK machine")
def close(self):
"""Set Pi pins to close chuck."""
# GPIO.output(40, 1)
# GPIO.output(37, 0)
rospy.loginfo("Closing SCHUNK machine")
if __name__ == "__main__":
rospy.init_node('schunk_machine_server')
machine = SchunkMachineServer()
rospy.spin()
|
[
"fetchit_challenge.msg.SchunkMachineResult",
"rospy.loginfo",
"datetime.timedelta",
"rospy.init_node",
"actionlib.SimpleActionServer",
"rospy.spin",
"datetime.datetime.now"
] |
[((585, 606), 'fetchit_challenge.msg.SchunkMachineResult', 'SchunkMachineResult', ([], {}), '()\n', (604, 606), False, 'from fetchit_challenge.msg import SchunkMachineAction, SchunkMachineResult, SchunkMachineGoal\n'), ((3395, 3435), 'rospy.init_node', 'rospy.init_node', (['"""schunk_machine_server"""'], {}), "('schunk_machine_server')\n", (3410, 3435), False, 'import rospy\n'), ((3476, 3488), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3486, 3488), False, 'import rospy\n'), ((1245, 1339), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['"""schunk_machine"""', 'SchunkMachineAction', 'self.callback', '(False)'], {}), "('schunk_machine', SchunkMachineAction, self.\n callback, False)\n", (1273, 1339), False, 'import actionlib\n'), ((1371, 1421), 'rospy.loginfo', 'rospy.loginfo', (['"""Simulated SCHUNK machine is ready"""'], {}), "('Simulated SCHUNK machine is ready')\n", (1384, 1421), False, 'import rospy\n'), ((3152, 3191), 'rospy.loginfo', 'rospy.loginfo', (['"""Opening SCHUNK machine"""'], {}), "('Opening SCHUNK machine')\n", (3165, 3191), False, 'import rospy\n'), ((3322, 3361), 'rospy.loginfo', 'rospy.loginfo', (['"""Closing SCHUNK machine"""'], {}), "('Closing SCHUNK machine')\n", (3335, 3361), False, 'import rospy\n'), ((1841, 1855), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1853, 1855), False, 'from datetime import datetime\n'), ((1858, 1892), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self._lock_time'}), '(seconds=self._lock_time)\n', (1867, 1892), False, 'from datetime import timedelta\n'), ((2228, 2242), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2240, 2242), False, 'from datetime import datetime\n')]
|
#! /usr/bin/env python
# title : Trellis.py
# description : This class generates a trellis based on a trellis definition class.
# Parameters such as reduction (radix) can be used to construct the trellis.
# author : <NAME>
# python_version : 3.5.2
import utils
class Trellis(object):
def __init__(self, trellisDefinition, reduction=1, merge_parallel=False):
self.tdef = trellisDefinition
self.reduction = reduction
self.merge_parallel = merge_parallel
self.radix = 2 ** reduction # relationship between reduction factor and radix
self.Ns = self.tdef.Ns # number of states
self.Nb = self.tdef.Nb * 2 ** (reduction - 1) # number of branches
self.wc = self.tdef.wc * reduction # number of coded bits
self.wu = self.tdef.wu * reduction # number of data bits
# empty precomputed lists
self.get_dat_pc = []
self.get_enc_bits_pc = []
self.get_next_state_pc = []
self.get_prev_state_pc = []
self.get_next_branches_pc = []
self.get_prev_branches_pc = []
# perform computation of trellis
if reduction == 1 and not self.merge_parallel:
self.pre_calc_reduction1()
else:
self.pre_calculation()
def get_rate(self):
return self.wc / self.wu
def pre_calc_reduction1(self):
"""
Pre calculate the functions of a trellis:
- get_dat, get_enc_bits, get_next_state, get_prev_state
- get_next_branches, get_prev_branches
"""
self.get_dat_pc = [self.tdef.get_dat(x) for x in range(self.Nb)]
self.get_enc_bits_pc = [self.tdef.get_enc_bits(x) for x in range(self.Nb)]
self.get_next_state_pc = [self.tdef.get_next_state(x) for x in range(self.Nb)]
self.get_prev_state_pc = [self.tdef.get_prev_state(x) for x in range(self.Nb)]
self.get_prev_branches_pc = [self.tdef.get_prev_branches(x) for x in range(self.Ns)]
# for the pre calculation of 'next branches' we, we do the same
# but additionally sort for the data output generated by this branch
# this way the encoder can use get the next branch via
# this code -> trellis.get_next_branches_pc[current_state][data_input]
get_next_branches_pc_unsorted = [self.tdef.get_next_branches(x) for x in range(self.Ns)]
self.get_next_branches_pc = []
for b in get_next_branches_pc_unsorted:
dat_b = [self.get_dat_pc[x] for x in b]
dat_d = [utils.bin2dec(x) for x in dat_b]
b_new = [x for _, x in sorted(zip(dat_d, b))]
self.get_next_branches_pc.append(b_new)
def pre_calculation(self):
"""
Pre calculate the functions of a trellis (with options):
- get_dat, get_enc_bits, get_next_state, get_prev_state
- get_next_branches, get_prev_branches
Options are:
- reduction = log2(radix)
- merge parallel branches
"""
all_u, all_c, all_s = [], [], []
for s in range(self.Ns):
u, c, s = self._get_all_paths(self.reduction, s)
all_u = all_u + u
all_c = all_c + c
all_s = all_s + s
# init tables
n_branches_per_state = int(self.Nb / self.Ns)
self.get_dat_pc = []
self.get_enc_bits_pc = []
self.get_next_state_pc = []
self.get_prev_state_pc = []
self.get_next_branches_pc = [[-1] * n_branches_per_state for i in range(self.Ns)]
self.get_prev_branches_pc = [[] for i in range(self.Ns)]
# loop through all paths and generate a new branch for each
for branch_index in range(len(all_u)):
u = all_u[branch_index]
c = all_c[branch_index]
s = all_s[branch_index]
# check if branch already exists
n_states = self.get_next_state_pc
p_states = self.get_prev_state_pc
branch_exists = True in [x == s[0] and y == s[-1] for x, y in zip(p_states, n_states)]
if self.merge_parallel and branch_exists:
pass
else:
dat_int = utils.bin2dec(u)
self.get_dat_pc.append(u)
self.get_enc_bits_pc.append(c)
self.get_next_state_pc.append(s[-1])
self.get_prev_state_pc.append(s[0])
self.get_next_branches_pc[s[0]][dat_int] = branch_index
self.get_prev_branches_pc[s[-1]].append(branch_index)
def _get_all_paths(self, depth, state):
"""
recursively get all paths form a start state with depth n
Parameters
----------
depth [int]: depths of recursion
state [int]: start state for paths to be returned
Returns
-------
pathlist_u, pathlist_c, pathlist_s: [list of lists] list of paths
"""
if depth == 0:
return [[]], [[]], [[state]]
else:
# for all next states (next_state
# get all paths with depth-1 from next_state,
# then add the path from state to next_state to all these paths
# add new paths to list
pathlist_u, pathlist_c, pathlist_s = [], [], []
# pathlist_* are lists of paths
for b in self.tdef.get_next_branches(state):
next_state = self.tdef.get_next_state(b)
u1, c1, s1 = self._get_all_paths(depth - 1, next_state)
# for all lists in u1 add the new element
pathlist_u += [self.tdef.get_dat(b) + x for x in u1]
pathlist_c += [self.tdef.get_enc_bits(b) + x for x in c1]
pathlist_s += [[state] + x for x in s1]
return pathlist_u, pathlist_c, pathlist_s
|
[
"utils.bin2dec"
] |
[((2562, 2578), 'utils.bin2dec', 'utils.bin2dec', (['x'], {}), '(x)\n', (2575, 2578), False, 'import utils\n'), ((4209, 4225), 'utils.bin2dec', 'utils.bin2dec', (['u'], {}), '(u)\n', (4222, 4225), False, 'import utils\n')]
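The trellisDefinition object is only touched through a small implicit
interface: the attributes Ns, Nb, wc, wu and the six get_* methods. A
hypothetical minimal definition for a rate-1/2, memory-1 convolutional code,
sketched here to make that contract concrete (it assumes utils.bin2dec turns
a bit list into an integer, as the Trellis code implies):

class TinyTrellisDef:
    """Hypothetical 2-state definition: branch index b = 2*state + input bit."""
    Ns, Nb, wu, wc = 2, 4, 1, 2

    def get_dat(self, b):         # data bit carried by branch b
        return [b % 2]

    def get_enc_bits(self, b):    # coded bits: [u, u XOR s]
        s, u = b // 2, b % 2
        return [u, u ^ s]

    def get_next_state(self, b):  # memory-1 shift register: next state = input
        return b % 2

    def get_prev_state(self, b):
        return b // 2

    def get_next_branches(self, s):
        return [2 * s, 2 * s + 1]

    def get_prev_branches(self, s):
        return [b for b in range(self.Nb) if b % 2 == s]

trellis = Trellis(TinyTrellisDef())               # plain radix-2 trellis
radix4 = Trellis(TinyTrellisDef(), reduction=2)  # merges two steps per stage
print(radix4.Nb)                                 # 8 branches per stage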
|
import os
import librosa
from torch.utils import data
from util.utils import sample_fixed_length_data_aligned
class Dataset(data.Dataset):
def __init__(self,
dataset,
limit=None,
offset=0,
sample_length=16384,
mode="train"):
"""Construct dataset for training and validation.
Args:
dataset (str): *.txt, the path of the dataset list file. See "Notes."
limit (int): Return at most limit files in the list. If None, all files are returned.
offset (int): Return files starting at an offset within the list. Use negative values to offset from the end of the list.
sample_length(int): The model only supports fixed-length input. Use sample_length to specify the feature size of the input.
mode(str): If mode is "train", return fixed-length signals. If mode is "validation", return original-length signals.
Notes:
dataset list file:
<noisy_1_path><space><clean_1_path>
<noisy_2_path><space><clean_2_path>
...
<noisy_n_path><space><clean_n_path>
e.g.
/train/noisy/a.wav /train/clean/a.wav
/train/noisy/b.wav /train/clean/b.wav
...
Return:
(mixture signals, clean signals, filename)
"""
super(Dataset, self).__init__()
        with open(os.path.abspath(os.path.expanduser(dataset)), "r") as f:
            dataset_list = [line.rstrip('\n') for line in f]
dataset_list = dataset_list[offset:]
if limit:
dataset_list = dataset_list[:limit]
assert mode in ("train", "validation"), "Mode must be one of 'train' or 'validation'."
self.length = len(dataset_list)
self.dataset_list = dataset_list
self.sample_length = sample_length
self.mode = mode
def __len__(self):
return self.length
def __getitem__(self, item):
mixture_path, clean_path = self.dataset_list[item].split(" ")
filename = os.path.splitext(os.path.basename(mixture_path))[0]
mixture, _ = librosa.load(os.path.abspath(os.path.expanduser(mixture_path)), sr=None)
clean, _ = librosa.load(os.path.abspath(os.path.expanduser(clean_path)), sr=None)
        if self.mode == "train":
            # The model only supports fixed-length input during training.
            mixture, clean = sample_fixed_length_data_aligned(mixture, clean, self.sample_length)
        return mixture.reshape(1, -1), clean.reshape(1, -1), filename
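# Hedged usage sketch (the paths below are hypothetical placeholders): write a
# list file in the "<noisy_path><space><clean_path>" format documented above,
# then load it. __getitem__ would additionally require the wav files on disk.
if __name__ == "__main__":
    with open("train_list.txt", "w") as f:
        f.write("/train/noisy/a.wav /train/clean/a.wav\n")
        f.write("/train/noisy/b.wav /train/clean/b.wav\n")
    train_set = Dataset("train_list.txt", sample_length=16384, mode="train")
    print(len(train_set))  # -> 2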
|
[
"os.path.expanduser",
"util.utils.sample_fixed_length_data_aligned",
"os.path.basename"
] |
[((2512, 2580), 'util.utils.sample_fixed_length_data_aligned', 'sample_fixed_length_data_aligned', (['mixture', 'clean', 'self.sample_length'], {}), '(mixture, clean, self.sample_length)\n', (2544, 2580), False, 'from util.utils import sample_fixed_length_data_aligned\n'), ((2151, 2181), 'os.path.basename', 'os.path.basename', (['mixture_path'], {}), '(mixture_path)\n', (2167, 2181), False, 'import os\n'), ((2237, 2269), 'os.path.expanduser', 'os.path.expanduser', (['mixture_path'], {}), '(mixture_path)\n', (2255, 2269), False, 'import os\n'), ((2330, 2360), 'os.path.expanduser', 'os.path.expanduser', (['clean_path'], {}), '(clean_path)\n', (2348, 2360), False, 'import os\n'), ((1548, 1575), 'os.path.expanduser', 'os.path.expanduser', (['dataset'], {}), '(dataset)\n', (1566, 1575), False, 'import os\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
from typing import Any, List, Tuple
def recv_from_connections_and_join_processes(
processes_and_connections: List[
Tuple[multiprocessing.Process, multiprocessing.connection.Connection]
],
) -> List[Any]:
"""
    Wait for processes to return a value via a connection and then terminate.
    Given a list of processes and, for each of them, (the reading end of) a
    connection on which the process will send its result, gather the results of
    all processes and then join them, taking extra care to handle any error
    (e.g., a process crashing without returning) by killing all processes in
    that case.
"""
results = [None] * len(processes_and_connections)
try:
connections = [c for _, c in processes_and_connections]
sentinels = [p.sentinel for p, _ in processes_and_connections]
not_ready = connections + sentinels
while len(not_ready) > 0:
ready = multiprocessing.connection.wait(not_ready)
for obj in ready:
if obj in connections:
idx = connections.index(obj)
try:
val = obj.recv()
except EOFError:
# We won't get any more values out of this connection.
not_ready.remove(obj)
else:
if results[idx] is not None:
raise RuntimeError(
f"Process {idx} returned more than one value"
)
# Wrap in a tuple so we can distinguish a process that
# returned None from one that didn't return yet.
results[idx] = (val,)
elif obj in sentinels:
idx = sentinels.index(obj)
proc, _ = processes_and_connections[idx]
proc.join()
if proc.exitcode != 0:
raise RuntimeError(
f"Process {idx} exited with status {proc.exitcode}"
)
not_ready.remove(obj)
else:
raise RuntimeError(f"Unexpected object: {obj}")
except Exception:
for p, _ in processes_and_connections:
p.kill()
for p, _ in processes_and_connections:
p.join()
raise
for idx, result in enumerate(results):
if result is None:
raise RuntimeError(f"Process {idx} exited without producing a result")
# Unwrap from the tuples.
return [r for r, in results]
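# Hedged usage sketch (the worker below is hypothetical, made up for
# illustration): each child sends one value through its pipe end and exits
# cleanly. Note that the parent must close its own copy of the write end after
# starting the child, or the reading end will never observe EOF.
def _square_worker(conn, value):
    conn.send(value * value)
    conn.close()


if __name__ == "__main__":
    procs = []
    for v in range(3):
        recv_end, send_end = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=_square_worker, args=(send_end, v))
        p.start()
        send_end.close()  # drop the parent's copy of the write end
        procs.append((p, recv_end))
    print(recv_from_connections_and_join_processes(procs))  # -> [0, 1, 4]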
|
[
"multiprocessing.connection.wait"
] |
[((1184, 1226), 'multiprocessing.connection.wait', 'multiprocessing.connection.wait', (['not_ready'], {}), '(not_ready)\n', (1215, 1226), False, 'import multiprocessing\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-08 22:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0022_auto_20161208_1740'),
]
operations = [
migrations.AddField(
model_name='atentionqueue',
name='max_capacity',
field=models.PositiveIntegerField(default=10),
),
]
|
[
"django.db.models.PositiveIntegerField"
] |
[((412, 451), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(10)'}), '(default=10)\n', (439, 451), False, 'from django.db import migrations, models\n')]
|
# -------------------------------------------------------------------------
# Copyright (c) 2017-2018 AT&T Intellectual Property
# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import unittest
from osdf.adapters.local_data import local_policies
from osdf.adapters.conductor import translation as tr
from osdf.utils.interfaces import json_from_file
class TestConductorTranslation(unittest.TestCase):
def setUp(self):
self.main_dir = ""
self.conductor_api_template = self.main_dir + "osdf/templates/conductor_interface.json"
self.local_config_file = self.main_dir + "config/common_config.yaml"
policy_data_path = self.main_dir + "test/policy-local-files"
valid_policies_list_file = policy_data_path + '/' + 'meta-valid-policies.txt'
valid_policies_files = local_policies.get_policy_names_from_file(valid_policies_list_file)
parameter_data_file = self.main_dir + "test/placement-tests/request.json"
self.request_json = json_from_file(parameter_data_file)
parameter_data_file = self.main_dir + "test/placement-tests/request_vfmod.json"
self.request_vfmod_json = json_from_file(parameter_data_file)
self.policies = [json_from_file(policy_data_path + '/' + name) for name in valid_policies_files]
self.optimization_policies = [json_from_file(policy_data_path + '/'
+ "slice-selection-files/opt_policy_nsi_reuse.json")]
def tearDown(self):
pass
def test_gen_demands(self):
# need to run this only on vnf policies
vnf_policies = [x for x in self.policies if x[list(x.keys())[0]]["type"]
== "onap.policies.optimization.VnfPolicy"]
res = tr.gen_demands(self.request_json['placementInfo']['placementDemands'], vnf_policies)
assert res is not None
def test_gen_vfmod_demands(self):
# need to run this only on vnf policies
vnf_policies = [x for x in self.policies if x[list(x.keys())[0]]["type"]
== "onap.policies.optimization.VnfPolicy"]
res = tr.gen_demands(self.request_vfmod_json['placementInfo']['placementDemands'], vnf_policies)
assert res is not None
def test_gen_optimization_policy(self):
expected = [{
"goal": "minimize",
"operation_function": {
"operator": "sum",
"operands": [
{
"function": "attribute",
"params": {
"attribute": "creation_cost",
"demand": "embb-nst"
}
}
]
}
}]
self.assertEqual(expected,
tr.gen_optimization_policy(self.request_vfmod_json['placementInfo']['placementDemands'],
self.optimization_policies))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"osdf.adapters.local_data.local_policies.get_policy_names_from_file",
"osdf.adapters.conductor.translation.gen_demands",
"osdf.utils.interfaces.json_from_file",
"osdf.adapters.conductor.translation.gen_optimization_policy"
] |
[((3646, 3661), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3659, 3661), False, 'import unittest\n'), ((1447, 1514), 'osdf.adapters.local_data.local_policies.get_policy_names_from_file', 'local_policies.get_policy_names_from_file', (['valid_policies_list_file'], {}), '(valid_policies_list_file)\n', (1488, 1514), False, 'from osdf.adapters.local_data import local_policies\n'), ((1626, 1661), 'osdf.utils.interfaces.json_from_file', 'json_from_file', (['parameter_data_file'], {}), '(parameter_data_file)\n', (1640, 1661), False, 'from osdf.utils.interfaces import json_from_file\n'), ((1784, 1819), 'osdf.utils.interfaces.json_from_file', 'json_from_file', (['parameter_data_file'], {}), '(parameter_data_file)\n', (1798, 1819), False, 'from osdf.utils.interfaces import json_from_file\n'), ((2390, 2478), 'osdf.adapters.conductor.translation.gen_demands', 'tr.gen_demands', (["self.request_json['placementInfo']['placementDemands']", 'vnf_policies'], {}), "(self.request_json['placementInfo']['placementDemands'],\n vnf_policies)\n", (2404, 2478), True, 'from osdf.adapters.conductor import translation as tr\n'), ((2756, 2850), 'osdf.adapters.conductor.translation.gen_demands', 'tr.gen_demands', (["self.request_vfmod_json['placementInfo']['placementDemands']", 'vnf_policies'], {}), "(self.request_vfmod_json['placementInfo']['placementDemands'],\n vnf_policies)\n", (2770, 2850), True, 'from osdf.adapters.conductor import translation as tr\n'), ((1845, 1890), 'osdf.utils.interfaces.json_from_file', 'json_from_file', (["(policy_data_path + '/' + name)"], {}), "(policy_data_path + '/' + name)\n", (1859, 1890), False, 'from osdf.utils.interfaces import json_from_file\n'), ((1964, 2058), 'osdf.utils.interfaces.json_from_file', 'json_from_file', (["(policy_data_path + '/' + 'slice-selection-files/opt_policy_nsi_reuse.json')"], {}), "(policy_data_path + '/' +\n 'slice-selection-files/opt_policy_nsi_reuse.json')\n", (1978, 2058), False, 'from osdf.utils.interfaces import json_from_file\n'), ((3443, 3564), 'osdf.adapters.conductor.translation.gen_optimization_policy', 'tr.gen_optimization_policy', (["self.request_vfmod_json['placementInfo']['placementDemands']", 'self.optimization_policies'], {}), "(self.request_vfmod_json['placementInfo'][\n 'placementDemands'], self.optimization_policies)\n", (3469, 3564), True, 'from osdf.adapters.conductor import translation as tr\n')]
|
from datetime import datetime, timedelta
import pytz
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.db import models
from django.urls import reverse
from rest_framework.request import Request
from garden.formatters import WateringStationFormatter
from .managers import TokenManager
def _default_moisture_threshold():
return 50
def _default_watering_duration():
return timedelta(minutes=1)
def _default_is_connected():
return False
def _default_update_frequency():
return timedelta(minutes=5)
def _default_status():
return True
def _default_garden_image():
return 'default_garden.jpg'
class Garden(models.Model):
OK = 'ok'
LOW = 'lo'
WATER_LEVEL_CHOICES = [
(OK, 'Ok'),
(LOW, 'Low'),
]
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='gardens', on_delete=models.CASCADE)
name = models.CharField(max_length=255, db_index=True)
image = models.ImageField(default=_default_garden_image)
is_connected = models.BooleanField(default=_default_is_connected)
last_connection_ip = models.GenericIPAddressField(null=True)
last_connection_time = models.DateTimeField(null=True)
update_frequency = models.DurationField(default=_default_update_frequency)
connection_strength = models.SmallIntegerField(null=True)
water_level = models.CharField(choices=WATER_LEVEL_CHOICES, max_length=2, null=True)
class Meta:
unique_together = ['owner', 'name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('garden-detail', kwargs={'pk': self.pk})
def get_watering_stations_url(self):
return reverse('watering-station-list', kwargs={'pk': self.pk})
def get_update_url(self):
return reverse('garden-update', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('garden-delete', kwargs={'pk': self.pk})
def calc_time_till_next_update(self):
if self.last_connection_time is None:
return None
factor = 1
next_update = self.last_connection_time + factor * self.update_frequency - datetime.now(pytz.UTC)
while next_update.total_seconds() < 0:
factor += 1
next_update = self.last_connection_time + factor * self.update_frequency - datetime.now(pytz.UTC)
return int(next_update.total_seconds())
def update_connection_status(self, request: Request):
self.is_connected = True
self.last_connection_ip = request.META.get('REMOTE_ADDR')
self.last_connection_time = datetime.now(pytz.UTC)
self.save()
def refresh_connection_status(self):
if self.last_connection_time is None:
return
time_next_update = self.last_connection_time + self.update_frequency - datetime.now(pytz.UTC)
if time_next_update.total_seconds() < 0:
self.is_connected = False
self.connection_strength = None
self.save()
def get_watering_station_formatters(self):
for watering_station in self.watering_stations.all():
yield WateringStationFormatter(watering_station)
def get_watering_station_idx(self, watering_station) -> int:
for i, station in enumerate(self.watering_stations.all()):
if station == watering_station:
return i
def get_watering_station_at_idx(self, idx):
for i, station in enumerate(self.watering_stations.all()):
if i == idx:
return station
def get_active_watering_stations(self):
return self.watering_stations.filter(status=True)
def get_num_active_watering_stations(self):
return self.get_active_watering_stations().count()
@property
def plant_types(self):
return self.watering_stations.exclude(plant_type__exact='').values_list('plant_type', flat=True)
@property
def time_since_last_connection(self):
if self.last_connection_time is None:
return None
return datetime.now(pytz.UTC) - self.last_connection_time
class Token(models.Model):
MAX_HASH_LENGTH = 128
garden = models.OneToOneField(Garden, on_delete=models.CASCADE)
uuid = models.CharField(max_length=MAX_HASH_LENGTH)
created = models.DateTimeField(auto_now_add=True)
objects = TokenManager()
def __str__(self):
return self.created.strftime('%B %-d, %Y %-I:%M %p')
def verify(self, uuid):
return check_password(uuid, self.uuid)
class WateringStation(models.Model):
garden = models.ForeignKey(Garden, related_name='watering_stations', on_delete=models.CASCADE)
image = models.ImageField(null=True, blank=True)
moisture_threshold = models.IntegerField(default=_default_moisture_threshold)
watering_duration = models.DurationField(default=_default_watering_duration)
plant_type = models.CharField(max_length=255, blank=True)
status = models.BooleanField(default=_default_status)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['created']
def __str__(self):
return f'{str(self.garden)} - {self.idx}'
def get_absolute_url(self):
return reverse('watering-station-detail', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
def get_update_url(self):
return reverse('watering-station-update', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
def get_delete_url(self):
return reverse('watering-station-delete', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
def get_records_url(self):
return reverse('watering-station-record-list', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
@property
def idx(self):
return self.garden.get_watering_station_idx(self)
def get_formatter(self):
return WateringStationFormatter(self)
class WateringStationRecord(models.Model):
watering_station = models.ForeignKey(WateringStation, related_name='records', on_delete=models.CASCADE)
moisture_level = models.FloatField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['created']
def __str__(self):
return f'{self.watering_station.garden}/{self.watering_station.idx}/{self.created}'
|
[
"django.db.models.OneToOneField",
"garden.formatters.WateringStationFormatter",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.DurationField",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.GenericIPAddressField",
"django.db.models.SmallIntegerField",
"django.db.models.IntegerField",
"datetime.timedelta",
"django.urls.reverse",
"django.db.models.DateTimeField",
"django.contrib.auth.hashers.check_password",
"datetime.datetime.now"
] |
[((436, 456), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (445, 456), False, 'from datetime import datetime, timedelta\n'), ((551, 571), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (560, 571), False, 'from datetime import datetime, timedelta\n'), ((824, 921), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""gardens"""', 'on_delete': 'models.CASCADE'}), "(settings.AUTH_USER_MODEL, related_name='gardens',\n on_delete=models.CASCADE)\n", (841, 921), False, 'from django.db import models\n'), ((929, 976), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'db_index': '(True)'}), '(max_length=255, db_index=True)\n', (945, 976), False, 'from django.db import models\n'), ((989, 1037), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '_default_garden_image'}), '(default=_default_garden_image)\n', (1006, 1037), False, 'from django.db import models\n'), ((1057, 1107), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '_default_is_connected'}), '(default=_default_is_connected)\n', (1076, 1107), False, 'from django.db import models\n'), ((1133, 1172), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'null': '(True)'}), '(null=True)\n', (1161, 1172), False, 'from django.db import models\n'), ((1200, 1231), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (1220, 1231), False, 'from django.db import models\n'), ((1255, 1310), 'django.db.models.DurationField', 'models.DurationField', ([], {'default': '_default_update_frequency'}), '(default=_default_update_frequency)\n', (1275, 1310), False, 'from django.db import models\n'), ((1337, 1372), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1361, 1372), False, 'from django.db import models\n'), ((1391, 1461), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'WATER_LEVEL_CHOICES', 'max_length': '(2)', 'null': '(True)'}), '(choices=WATER_LEVEL_CHOICES, max_length=2, null=True)\n', (1407, 1461), False, 'from django.db import models\n'), ((4208, 4262), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Garden'], {'on_delete': 'models.CASCADE'}), '(Garden, on_delete=models.CASCADE)\n', (4228, 4262), False, 'from django.db import models\n'), ((4274, 4318), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 'MAX_HASH_LENGTH'}), '(max_length=MAX_HASH_LENGTH)\n', (4290, 4318), False, 'from django.db import models\n'), ((4333, 4372), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4353, 4372), False, 'from django.db import models\n'), ((4616, 4706), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Garden'], {'related_name': '"""watering_stations"""', 'on_delete': 'models.CASCADE'}), "(Garden, related_name='watering_stations', on_delete=\n models.CASCADE)\n", (4633, 4706), False, 'from django.db import models\n'), ((4714, 4754), 'django.db.models.ImageField', 'models.ImageField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4731, 4754), False, 'from django.db import models\n'), ((4780, 4836), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '_default_moisture_threshold'}), '(default=_default_moisture_threshold)\n', (4799, 4836), False, 'from django.db import models\n'), ((4861, 4917), 'django.db.models.DurationField', 'models.DurationField', ([], {'default': '_default_watering_duration'}), '(default=_default_watering_duration)\n', (4881, 4917), False, 'from django.db import models\n'), ((4935, 4979), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (4951, 4979), False, 'from django.db import models\n'), ((4993, 5037), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '_default_status'}), '(default=_default_status)\n', (5012, 5037), False, 'from django.db import models\n'), ((5052, 5091), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5072, 5091), False, 'from django.db import models\n'), ((6006, 6095), 'django.db.models.ForeignKey', 'models.ForeignKey', (['WateringStation'], {'related_name': '"""records"""', 'on_delete': 'models.CASCADE'}), "(WateringStation, related_name='records', on_delete=models\n .CASCADE)\n", (6023, 6095), False, 'from django.db import models\n'), ((6112, 6131), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (6129, 6131), False, 'from django.db import models\n'), ((6146, 6185), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6166, 6185), False, 'from django.db import models\n'), ((1620, 1668), 'django.urls.reverse', 'reverse', (['"""garden-detail"""'], {'kwargs': "{'pk': self.pk}"}), "('garden-detail', kwargs={'pk': self.pk})\n", (1627, 1668), False, 'from django.urls import reverse\n'), ((1726, 1782), 'django.urls.reverse', 'reverse', (['"""watering-station-list"""'], {'kwargs': "{'pk': self.pk}"}), "('watering-station-list', kwargs={'pk': self.pk})\n", (1733, 1782), False, 'from django.urls import reverse\n'), ((1829, 1877), 'django.urls.reverse', 'reverse', (['"""garden-update"""'], {'kwargs': "{'pk': self.pk}"}), "('garden-update', kwargs={'pk': self.pk})\n", (1836, 1877), False, 'from django.urls import reverse\n'), ((1924, 1972), 'django.urls.reverse', 'reverse', (['"""garden-delete"""'], {'kwargs': "{'pk': self.pk}"}), "('garden-delete', kwargs={'pk': self.pk})\n", (1931, 1972), False, 'from django.urls import reverse\n'), ((2634, 2656), 'datetime.datetime.now', 'datetime.now', (['pytz.UTC'], {}), '(pytz.UTC)\n', (2646, 2656), False, 'from datetime import datetime, timedelta\n'), ((4532, 4563), 'django.contrib.auth.hashers.check_password', 'check_password', (['uuid', 'self.uuid'], {}), '(uuid, self.uuid)\n', (4546, 4563), False, 'from django.contrib.auth.hashers import check_password\n'), ((5262, 5356), 'django.urls.reverse', 'reverse', (['"""watering-station-detail"""'], {'kwargs': "{'garden_pk': self.garden.pk, 'ws_pk': self.pk}"}), "('watering-station-detail', kwargs={'garden_pk': self.garden.pk,\n 'ws_pk': self.pk})\n", (5269, 5356), False, 'from django.urls import reverse\n'), ((5399, 5493), 'django.urls.reverse', 'reverse', (['"""watering-station-update"""'], {'kwargs': "{'garden_pk': self.garden.pk, 'ws_pk': self.pk}"}), "('watering-station-update', kwargs={'garden_pk': self.garden.pk,\n 'ws_pk': self.pk})\n", (5406, 5493), False, 'from django.urls import reverse\n'), ((5536, 5630), 'django.urls.reverse', 'reverse', (['"""watering-station-delete"""'], {'kwargs': "{'garden_pk': self.garden.pk, 'ws_pk': self.pk}"}), "('watering-station-delete', kwargs={'garden_pk': self.garden.pk,\n 'ws_pk': self.pk})\n", (5543, 5630), False, 'from django.urls import reverse\n'), ((5674, 5773), 'django.urls.reverse', 'reverse', (['"""watering-station-record-list"""'], {'kwargs': "{'garden_pk': self.garden.pk, 'ws_pk': self.pk}"}), "('watering-station-record-list', kwargs={'garden_pk': self.garden.pk,\n 'ws_pk': self.pk})\n", (5681, 5773), False, 'from django.urls import reverse\n'), ((5907, 5937), 'garden.formatters.WateringStationFormatter', 'WateringStationFormatter', (['self'], {}), '(self)\n', (5931, 5937), False, 'from garden.formatters import WateringStationFormatter\n'), ((2188, 2210), 'datetime.datetime.now', 'datetime.now', (['pytz.UTC'], {}), '(pytz.UTC)\n', (2200, 2210), False, 'from datetime import datetime, timedelta\n'), ((2864, 2886), 'datetime.datetime.now', 'datetime.now', (['pytz.UTC'], {}), '(pytz.UTC)\n', (2876, 2886), False, 'from datetime import datetime, timedelta\n'), ((4088, 4110), 'datetime.datetime.now', 'datetime.now', (['pytz.UTC'], {}), '(pytz.UTC)\n', (4100, 4110), False, 'from datetime import datetime, timedelta\n'), ((2369, 2391), 'datetime.datetime.now', 'datetime.now', (['pytz.UTC'], {}), '(pytz.UTC)\n', (2381, 2391), False, 'from datetime import datetime, timedelta\n'), ((3170, 3212), 'garden.formatters.WateringStationFormatter', 'WateringStationFormatter', (['watering_station'], {}), '(watering_station)\n', (3194, 3212), False, 'from garden.formatters import WateringStationFormatter\n')]
|
import numpy as np
import cv2
from poisson_disk import PoissonDiskSampler
import skimage.morphology
import skimage.measure
import scipy.stats
class Box(object):
"""
This class represents a box in an image. This could be a bounding box of an object or part.
Internally each box is represented by a tuple of 4 integers: (xmin, xmax, ymin, ymax)
"""
    POINT_GENERATION_POLICIES = ['poisson_disk']
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __repr__(self):
return "%d - %d - %d - %d" % (self.xmin, self.xmax, self.ymin, self.ymax)
def is_valid(self):
return int(self.xmin) != -1
@staticmethod
def box_from_img(img):
"""
        Creates a box from the image
"""
height, width = img.shape[:2]
return Box(0, height, 0, width)
@staticmethod
def box_from_cendim(cen, dim):
"""
        Create a box from a pair of center and dimension. Each center or dimension is a tuple. For brevity we call the (center, dimension) pair the `cendim`
Center: (cenX, cenY)
Dimension: (height, width)
"""
cenX, cenY = cen
height, width = dim
height_2 = height / 2.
width_2 = width / 2.
xmin = int(round(cenX - height_2))
xmax = int(round(cenX + height_2))
ymin = int(round(cenY - width_2))
ymax = int(round(cenY + width_2))
return Box(xmin, xmax, ymin, ymax)
def cendim(self):
"""
Convert the box into cendim format. In cendim format the center and dimension are stored as floating point numbers.
"""
cenX = float(self.xmin + self.xmax) / 2
cenY = float(self.ymin + self.ymax) / 2
height = float(self.xmax - self.xmin)
width = float(self.ymax - self.ymin)
cen = (cenX, cenY)
dim = (height, width)
return cen, dim
def trim_to_borders(self, img_shape):
"""
Trims the box with respect to the image provided.
"""
img_h, img_w = img_shape[:2]
self.xmin = max(0, self.xmin)
self.xmax = min(img_h - 1, self.xmax)
self.ymin = max(0, self.ymin)
self.ymax = min(img_w - 1, self.ymax)
return self
def draw_box(self, img, color=(1, 0, 0), width=2):
"""
Annotate the `img` with this Box. This returns a new image with the box annotated on it.
"""
new_img = img.copy()
cv2.rectangle(new_img, (self.ymin, self.xmin), (self.ymax, self.xmax), color, width)
return new_img
def get_sub_image(self, img):
"""
Return a sub-image only containing information inside this Box.
"""
self.trim_to_borders(img.shape)
return img[self.xmin:self.xmax, self.ymin:self.ymax]
@staticmethod
def expand_cendim(cen, dim, alpha):
height, width = dim
height = (2 * alpha) * height
width = (2 * alpha) * width
dim = (height, width)
return cen, dim
def expand(self, alpha=0.666):
cen, dim = self.cendim()
cen, dim = Box.expand_cendim(cen, dim, alpha)
new_box = Box.box_from_cendim(cen, dim)
self.xmin = new_box.xmin
self.xmax = new_box.xmax
self.ymin = new_box.ymin
self.ymax = new_box.ymax
return self
def evalIOU(self, gt_box, source_shape):
# TODO
# making sure not to generate errors further down the line
self.trim_to_borders(source_shape)
gt_box.trim_to_borders(source_shape)
height, width = source_shape[:2]
gt_part = np.zeros((height, width), np.uint8)
gt_part[gt_box.xmin:gt_box.xmax, gt_box.ymin:gt_box.ymax] = 1
sl_part = np.zeros((height, width), np.uint8)
sl_part[self.xmin:self.xmax, self.ymin:self.ymax] = 1
intersection = (gt_part & sl_part).sum()
union = (gt_part | sl_part).sum()
return intersection / float(union)
def evalPCP(self, gt_box, source_shape, thresh=0.5):
iou = self.evalIOU(gt_box, source_shape)
if iou >= thresh:
return 1
else:
return 0
def generate_points_inside(self, policy='poisson_disk', param=None, img=None):
"""
        This function generates points inside this rectangle. By default it uses Poisson-disk sampling, but the generation policy is configurable.
        The optional `param` argument specifies the parameters of the chosen generation policy.
Different Policies:
- `poisson_disk`:
            The param is expected to be the radius of the Poisson-disk sampler.
            By default the radius is set to the average of 1/10 of the box's height and width.
        Each point is a row vector [x, y]. A set of `n` points is represented as a numpy array of shape (n, 2) with an integer dtype.
        An optional `img` argument may be supplied; its shape is used to prune points that fall outside the image boundary.
"""
        assert policy in self.POINT_GENERATION_POLICIES
cen, dim = self.cendim()
height, width = dim
if policy == 'poisson_disk':
if param is None:
radius = ((height / 10.) + (width / 10.)) / 2.
else:
radius = param
# please note that PoissonDiskSampler does use a flipped version of the axis
# also the algorithm generates points in the range [0, height] but we want [0, height) that is
# the reason behind the "-1".
pds = PoissonDiskSampler(height - 1, width - 1, radius)
samples = pds.get_sample()
            points = np.zeros((len(samples), 2), dtype=int)  # np.int was removed in NumPy 1.24
for i, s in enumerate(samples):
points[i, :] = [int(round(s[0])), int(round(s[1]))]
            points += np.array([self.xmin, self.ymin])
        if img is not None:
            # prune points that fall outside the image boundary (see docstring)
            h, w = img.shape[:2]
            keep = (points[:, 0] >= 0) & (points[:, 0] < h) & (points[:, 1] >= 0) & (points[:, 1] < w)
            points = points[keep]
        return points
def draw_points(points, ax, color=None):
if color is None:
color = 'red'
for p in points:
# Notice that in plt the axis are different from what we work with
# namely in plt the horizontal axis is x and vertical axis is y
# whereas in numpy and images that we work with the vertical axis is x
# this is the reason behind the flipping of points here.
ax.plot(p[1], p[0], 'o', color=color)
def filter_points(points, box):
"""
Remove points that lie inside the box from the set.
"""
new_points_ind = []
for i, p in enumerate(points):
if (box.xmin <= p[0] <= box.xmax and box.ymin <= p[1] <= box.ymax):
continue
else:
new_points_ind.append(i)
return points[new_points_ind, :]
def post_process_preds(preds):
preds = skimage.morphology.closing(preds, skimage.morphology.square(10))
preds = skimage.morphology.remove_small_objects(preds, min_size=10, connectivity=1)
return preds
def find_rect_from_preds(preds):
L, N = skimage.measure.label(preds, return_num=True, background=0)
if N > 0:
L_no_bg = L[L != 0].flatten()
vals, counts = scipy.stats.mode(L_no_bg)
part_label = int(vals[0])
indices = np.where(L == part_label)
xmin = indices[0].min()
xmax = indices[0].max()
ymin = indices[1].min()
ymax = indices[1].max()
return Box(xmin, xmax, ymin, ymax)
else:
return Box(-1, -1, -1, -1)
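# Hedged usage sketch (standalone numbers, assuming the imports above resolve):
# round-trip between the corner representation and the cendim form described in
# the docstrings above.
if __name__ == '__main__':
    b = Box(10, 50, 20, 80)
    cen, dim = b.cendim()               # ((30.0, 50.0), (40.0, 60.0))
    b2 = Box.box_from_cendim(cen, dim)
    assert (b2.xmin, b2.xmax, b2.ymin, b2.ymax) == (10, 50, 20, 80)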
|
[
"numpy.zeros",
"poisson_disk.PoissonDiskSampler",
"numpy.where",
"numpy.array",
"cv2.rectangle"
] |
[((2542, 2630), 'cv2.rectangle', 'cv2.rectangle', (['new_img', '(self.ymin, self.xmin)', '(self.ymax, self.xmax)', 'color', 'width'], {}), '(new_img, (self.ymin, self.xmin), (self.ymax, self.xmax),\n color, width)\n', (2555, 2630), False, 'import cv2\n'), ((3701, 3736), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (3709, 3736), True, 'import numpy as np\n'), ((3826, 3861), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (3834, 3861), True, 'import numpy as np\n'), ((7345, 7370), 'numpy.where', 'np.where', (['(L == part_label)'], {}), '(L == part_label)\n', (7353, 7370), True, 'import numpy as np\n'), ((5731, 5780), 'poisson_disk.PoissonDiskSampler', 'PoissonDiskSampler', (['(height - 1)', '(width - 1)', 'radius'], {}), '(height - 1, width - 1, radius)\n', (5749, 5780), False, 'from poisson_disk import PoissonDiskSampler\n'), ((6018, 6050), 'numpy.array', 'np.array', (['[self.xmin, self.ymin]'], {}), '([self.xmin, self.ymin])\n', (6026, 6050), True, 'import numpy as np\n')]
|
from setuptools import setup
import os
with open(os.devnull, 'w') as a:
print("If this raises an error, you're using python 2 - not supported.", file=a) #get rid of python 2 users
with open("README.md", "r") as file:
long_desc = file.read()
import sys
if sys.version_info < (3,7):
sys.exit('Sorry, Python < 3.7 is not supported')
setup(
name='snakeGit',
version='0.4.5',
description='the missing Python git module',
long_description=long_desc,
python_requires='>3.7.0',
license='Apache-2.0',
packages=['snakeGit'],
author='TheTechRobo',
author_email='<EMAIL>',
keywords=['git', 'easy', 'thetechrobo'],
url='https://github.com/TheTechRobo/snakegit',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3'
],
project_urls={
'Documentation': 'https://github.com/thetechrobo/snakegit/wiki',
'Source': 'https://github.com/thetechrobo/snakegit',
'Tracker': 'https://github.com/thetechrobo/snakegit/issues',
},
long_description_content_type='text/markdown',
)
|
[
"setuptools.setup",
"sys.exit"
] |
[((342, 1068), 'setuptools.setup', 'setup', ([], {'name': '"""snakeGit"""', 'version': '"""0.4.5"""', 'description': '"""the missing Python git module"""', 'long_description': 'long_desc', 'python_requires': '""">3.7.0"""', 'license': '"""Apache-2.0"""', 'packages': "['snakeGit']", 'author': '"""TheTechRobo"""', 'author_email': '"""<EMAIL>"""', 'keywords': "['git', 'easy', 'thetechrobo']", 'url': '"""https://github.com/TheTechRobo/snakegit"""', 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3']", 'project_urls': "{'Documentation': 'https://github.com/thetechrobo/snakegit/wiki', 'Source':\n 'https://github.com/thetechrobo/snakegit', 'Tracker':\n 'https://github.com/thetechrobo/snakegit/issues'}", 'long_description_content_type': '"""text/markdown"""'}), "(name='snakeGit', version='0.4.5', description=\n 'the missing Python git module', long_description=long_desc,\n python_requires='>3.7.0', license='Apache-2.0', packages=['snakeGit'],\n author='TheTechRobo', author_email='<EMAIL>', keywords=['git', 'easy',\n 'thetechrobo'], url='https://github.com/TheTechRobo/snakegit',\n classifiers=['Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3'], project_urls={'Documentation':\n 'https://github.com/thetechrobo/snakegit/wiki', 'Source':\n 'https://github.com/thetechrobo/snakegit', 'Tracker':\n 'https://github.com/thetechrobo/snakegit/issues'},\n long_description_content_type='text/markdown')\n", (347, 1068), False, 'from setuptools import setup\n'), ((293, 341), 'sys.exit', 'sys.exit', (['"""Sorry, Python < 3.7 is not supported"""'], {}), "('Sorry, Python < 3.7 is not supported')\n", (301, 341), False, 'import sys\n')]
|
import json
import sys
from dsm import dsm_looper
class Node():
DN = {}
head = None
def __init__(self, val, dn):
self.val = val
self.nodes = []
self.dn = dn
Node.DN[dn] = self
def connect(self, node):
self.nodes.append(node)
@classmethod
def get_node(cls, dn):
return cls.DN[dn]
@classmethod
def loop_over(cls):
return cls.head.loop()
def loop(self):
print('-'.join(self.dn), self.val)
for n in self.nodes:
n.loop()
print()
LABELS = {}
COLORS = [
"black",
"red",
"green",
"yellow",
"blue",
"purple",
"pink",
]
DN = []
def get_color(item):
n = len(item)
return COLORS[n % len(COLORS)]
def load_dns(model, dn):
if len(dn) >= 1:
DN.append(dn)
return model
if __name__ == "__main__":
filename = sys.argv[sys.argv.index('-f')+1] if '-f' in sys.argv else None
if filename is not None:
dsm_model = json.load(open("example/example_file.json"))
else:
data = sys.stdin.read()
#data = input()
dsm_model = json.loads(data)
result = dsm_looper(load_dns, dsm_model)
font = "ubuntu"
print("digraph G {")
print("rankdir=LR;")
print(f'node [shape=rectangle width=3 fontname="{font}"];')
    print(f'graph [fontname = "{font}"]')
    print(f'edge [fontname = "{font}"]')
node = Node("dsm_model", "()")
Node.head = node
for i, item in enumerate(DN, start=2):
node = Node(i, str(item))
parent_dn = "()" if len(item) == 1 else str(item[:-1])
        parent_node = Node.get_node(parent_dn)
parent_node.connect(node)
for i, item in enumerate(DN, start=2):
LABELS[str(item)] = i
label = item[0] if len(item) == 1 else item[-1]
print(f'\t{i} [label="{label}", style=filled color={get_color(item)}];')
SEEN = set()
for i, item in enumerate(DN, start=2):
parent_dn = "()" if len(item) == 1 else str(item[:-1])
if parent_dn in SEEN:
continue
node = Node.get_node(parent_dn)
for n in node.nodes:
print(f"{node.val} -> {n.val} [penwidth=1, arrowhead=none];")
SEEN.add(parent_dn)
print("}")
|
[
"sys.argv.index",
"sys.stdin.read",
"json.loads",
"dsm.dsm_looper"
] |
[((1284, 1315), 'dsm.dsm_looper', 'dsm_looper', (['load_dns', 'dsm_model'], {}), '(load_dns, dsm_model)\n', (1294, 1315), False, 'from dsm import dsm_looper\n'), ((1192, 1208), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (1206, 1208), False, 'import sys\n'), ((1253, 1269), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1263, 1269), False, 'import json\n'), ((1019, 1039), 'sys.argv.index', 'sys.argv.index', (['"""-f"""'], {}), "('-f')\n", (1033, 1039), False, 'import sys\n')]
|
import streamlit as st
import streamlit_book as stb
st.title("Multipage")
st.markdown("There are several user cases for having multipages on streamlit. We'll explore each one of those")
st.header("Basic or interactive single page")
st.markdown("""
You use only streamlit (no need to use streamlit_book).
Optionally, if you want to use any of the python functions for activities/questions, you can use streamlit_book. No need to initialize the library.
""")
st.header("Book: A single document with multiple connected pages")
st.markdown("""
You only need previous/next buttons.
Use `stb.set_book_config` to set the path and other book configurations.
""")
st.header("Library: several simple or multipaged books")
st.markdown("""
Requires a sidebar menu (like this demo), where each topic requires previous/next buttons.
Use `stb.set_library_config` to set the path and the configuration for the book.
""")
|
[
"streamlit.header",
"streamlit.markdown",
"streamlit.title"
] |
[((53, 74), 'streamlit.title', 'st.title', (['"""Multipage"""'], {}), "('Multipage')\n", (61, 74), True, 'import streamlit as st\n'), ((76, 197), 'streamlit.markdown', 'st.markdown', (['"""There are several user cases for having multipages on streamlit. We\'ll explore each one of those"""'], {}), '(\n "There are several user cases for having multipages on streamlit. We\'ll explore each one of those"\n )\n', (87, 197), True, 'import streamlit as st\n'), ((189, 234), 'streamlit.header', 'st.header', (['"""Basic or interactive single page"""'], {}), "('Basic or interactive single page')\n", (198, 234), True, 'import streamlit as st\n'), ((235, 472), 'streamlit.markdown', 'st.markdown', (['"""\nYou use only streamlit (no need can use streamlit_book). \n\nOptionally, if you want to use any of the python function for activities/questions, you can use streamlit_book. No need to initialize the library.\n"""'], {}), '(\n """\nYou use only streamlit (no need can use streamlit_book). \n\nOptionally, if you want to use any of the python function for activities/questions, you can use streamlit_book. No need to initialize the library.\n"""\n )\n', (246, 472), True, 'import streamlit as st\n'), ((464, 530), 'streamlit.header', 'st.header', (['"""Book: A single document with multiple connected pages"""'], {}), "('Book: A single document with multiple connected pages')\n", (473, 530), True, 'import streamlit as st\n'), ((531, 673), 'streamlit.markdown', 'st.markdown', (['"""\nYou only need previous/next buttons. \n\nUse `stb.set_book_config` to set the path and other book configurations.\n"""'], {}), '(\n """\nYou only need previous/next buttons. \n\nUse `stb.set_book_config` to set the path and other book configurations.\n"""\n )\n', (542, 673), True, 'import streamlit as st\n'), ((665, 721), 'streamlit.header', 'st.header', (['"""Library: several simple or multipaged books"""'], {}), "('Library: several simple or multipaged books')\n", (674, 721), True, 'import streamlit as st\n'), ((722, 928), 'streamlit.markdown', 'st.markdown', (['"""\nRequires a sidebar menu (like this demo), where each topic required a previous/next buttons. \n\nUse `stb.set_library_config` to set the path and the configuration for the book.\n"""'], {}), '(\n """\nRequires a sidebar menu (like this demo), where each topic required a previous/next buttons. \n\nUse `stb.set_library_config` to set the path and the configuration for the book.\n"""\n )\n', (733, 928), True, 'import streamlit as st\n')]
|
# template global functions
# make sure not to conflict with built-ins:
# http://jinja.pocoo.org/docs/2.9/templates/#list-of-global-functions
from flask.helpers import url_for as _url_for
from flask_paginate import Pagination
def paginate(page, total, per_page, config):
record_name = config['MOMO_PAGINATION_RECORD_NAME']
display_msg = config['MOMO_PAGINATION_DISPLAY_MSG']
pagination = _paginate(
page=page,
total=total,
per_page=per_page,
record_name=record_name,
display_msg=display_msg,
)
return pagination
def _paginate(page, total, per_page, record_name, display_msg):
pagination = Pagination(
page=page,
total=total,
per_page=per_page,
bs_version=3,
show_single_page=False,
record_name=record_name,
display_msg=display_msg,
)
return pagination
def get_page(request):
return request.args.get('page', default=1, type=int)
def toggle_arg(endpoint, request, arg, value, **kwargs):
"""Toggle request arguments.
:param endpoint: endpoint name.
:param request: request object.
:param arg: request argument name to toggle.
:param value: intial value for the toggled argument.
:param kwargs: keyword arguments to preserve.
"""
args = request.args.to_dict()
if arg in args:
args.pop(arg)
else:
args[arg] = value
args.update(request.view_args)
args.update(kwargs)
return _url_for(endpoint, **args)
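# Hedged usage sketch (the app and the 'notes' endpoint are hypothetical, made
# up for illustration): toggle_arg adds the flag when absent and drops it when
# present.
if __name__ == '__main__':
    from flask import Flask, request

    app = Flask(__name__)

    @app.route('/notes')
    def notes():
        return 'ok'

    with app.test_request_context('/notes?page=2'):
        print(toggle_arg('notes', request, 'reverse', '1'))  # e.g. /notes?page=2&reverse=1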
|
[
"flask_paginate.Pagination",
"flask.helpers.url_for"
] |
[((658, 803), 'flask_paginate.Pagination', 'Pagination', ([], {'page': 'page', 'total': 'total', 'per_page': 'per_page', 'bs_version': '(3)', 'show_single_page': '(False)', 'record_name': 'record_name', 'display_msg': 'display_msg'}), '(page=page, total=total, per_page=per_page, bs_version=3,\n show_single_page=False, record_name=record_name, display_msg=display_msg)\n', (668, 803), False, 'from flask_paginate import Pagination\n'), ((1478, 1504), 'flask.helpers.url_for', '_url_for', (['endpoint'], {}), '(endpoint, **args)\n', (1486, 1504), True, 'from flask.helpers import url_for as _url_for\n')]
|
__author__ = 'orhan'
from math import asin, sqrt, degrees
class Point:
def __init__(self, x, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
def angle_x(self, p2):
dy = self.y - p2.y
dx = self.x - p2.x
h = sqrt(dy ** 2 + dx ** 2)
if h == 0:
return 0
return degrees(asin(dx / h))
def __cmp__(self, other):
return (self.x - other.x) or (self.y - other.y) or (self.z - other.z)
def __eq__(self, other):
return self.__cmp__(other) == 0.0
def __ne__(self, other):
return self.__cmp__(other) != 0.0
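# Hedged usage sketch (coordinates are made up): asin(dx/h) yields 90 degrees
# when the displacement from p2 to self lies purely along x, and 0 when it lies
# purely along y.
if __name__ == '__main__':
    origin = Point(0.0, 0.0)
    print(Point(1.0, 0.0).angle_x(origin))  # -> 90.0
    print(Point(0.0, 1.0).angle_x(origin))  # -> 0.0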
|
[
"math.asin",
"math.sqrt"
] |
[((266, 289), 'math.sqrt', 'sqrt', (['(dy ** 2 + dx ** 2)'], {}), '(dy ** 2 + dx ** 2)\n', (270, 289), False, 'from math import asin, sqrt, degrees\n'), ((354, 366), 'math.asin', 'asin', (['(dx / h)'], {}), '(dx / h)\n', (358, 366), False, 'from math import asin, sqrt, degrees\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for AFL's engine implementation."""
import os
import shutil
import unittest
from clusterfuzz._internal.bot.fuzzers.afl import engine
from clusterfuzz._internal.bot.fuzzers.afl import launcher
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.tests.core.bot.fuzzers.afl import \
afl_launcher_integration_test
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
# TODO(mbarbella): Break dependency on afl_launcher_integration_test once
# everything has been fully converted to the new pipeline.
TEST_PATH = os.path.abspath(os.path.dirname(__file__))
TEMP_DIRECTORY = os.path.join(TEST_PATH, 'temp')
DATA_DIRECTORY = os.path.join(TEST_PATH, 'data')
CORPUS_DIRECTORY = os.path.join(TEMP_DIRECTORY, 'corpus')
OUTPUT_DIRECTORY = os.path.join(TEMP_DIRECTORY, 'output')
BASE_FUZZ_TIMEOUT = (
launcher.AflRunnerCommon.SIGTERM_WAIT_TIME +
launcher.AflRunnerCommon.AFL_CLEAN_EXIT_TIME)
FUZZ_TIMEOUT = 5 + BASE_FUZZ_TIMEOUT
LONG_FUZZ_TIMEOUT = 90 + BASE_FUZZ_TIMEOUT
def clear_temp_dir():
"""Clear temp directories."""
if os.path.exists(TEMP_DIRECTORY):
shutil.rmtree(TEMP_DIRECTORY)
def create_temp_dir():
"""Create temp directories."""
# Corpus directory will be created when preparing for fuzzing.
os.mkdir(TEMP_DIRECTORY)
os.mkdir(OUTPUT_DIRECTORY)
@unittest.skipIf(not environment.get_value('AFL_INTEGRATION_TESTS'),
'AFL_INTEGRATION_TESTS=1 must be set')
class AFLEngineTest(unittest.TestCase):
"""Tests for AFLEngine."""
def setUp(self):
clear_temp_dir()
create_temp_dir()
test_helpers.patch_environ(self)
afl_launcher_integration_test.dont_use_strategies(self)
def tearDown(self):
clear_temp_dir()
def test_fuzz(self):
"""Test for fuzz."""
engine_impl = engine.AFLEngine()
afl_launcher_integration_test.setup_testcase_and_corpus(
'empty', 'corpus', fuzz=True)
fuzzer_path = os.path.join(DATA_DIRECTORY, 'test_fuzzer')
options = engine_impl.prepare(CORPUS_DIRECTORY, fuzzer_path, DATA_DIRECTORY)
result = engine_impl.fuzz(fuzzer_path, options, OUTPUT_DIRECTORY,
FUZZ_TIMEOUT)
self.assertEqual('{0}/afl-fuzz'.format(DATA_DIRECTORY), result.command[0])
self.assertIn('-i{0}'.format(CORPUS_DIRECTORY), result.command)
# Ensure that we've added something other than the dummy file to the corpus.
self.assertTrue(os.listdir(CORPUS_DIRECTORY))
def test_reproduce(self):
"""Test for reproduce."""
engine_impl = engine.AFLEngine()
target_path = os.path.join(DATA_DIRECTORY, 'test_fuzzer')
testcase_path = afl_launcher_integration_test.setup_testcase_and_corpus(
'crash', 'empty_corpus')
timeout = 5
result = engine_impl.reproduce(target_path, testcase_path, [], timeout)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
result.output)
def test_fuzz_with_crash(self):
"""Tests that we detect crashes when fuzzing."""
engine_impl = engine.AFLEngine()
afl_launcher_integration_test.setup_testcase_and_corpus(
'empty', 'corpus', fuzz=True)
fuzzer_path = os.path.join(DATA_DIRECTORY, 'easy_crash_fuzzer')
options = engine_impl.prepare(CORPUS_DIRECTORY, fuzzer_path, DATA_DIRECTORY)
result = engine_impl.fuzz(fuzzer_path, options, OUTPUT_DIRECTORY,
LONG_FUZZ_TIMEOUT)
self.assertGreater(len(result.crashes), 0)
crash = result.crashes[0]
self.assertIn('ERROR: AddressSanitizer: heap-use-after-free',
crash.stacktrace)
# Testcase (non-zero size) should've been copied back.
self.assertNotEqual(os.path.getsize(crash.input_path), 0)
def test_startup_crash_not_reported(self):
"""Ensures that we properly handle startup crashes."""
engine_impl = engine.AFLEngine()
afl_launcher_integration_test.setup_testcase_and_corpus(
'empty', 'corpus', fuzz=True)
fuzzer_path = os.path.join(DATA_DIRECTORY, 'always_crash_fuzzer')
options = engine_impl.prepare(CORPUS_DIRECTORY, fuzzer_path, DATA_DIRECTORY)
result = engine_impl.fuzz(fuzzer_path, options, OUTPUT_DIRECTORY,
FUZZ_TIMEOUT)
self.assertFalse(result.crashes)
|
[
"clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.setup_testcase_and_corpus",
"os.mkdir",
"os.listdir",
"clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.dont_use_strategies",
"os.path.getsize",
"os.path.dirname",
"os.path.exists",
"shutil.rmtree",
"clusterfuzz._internal.system.environment.get_value",
"os.path.join",
"clusterfuzz._internal.tests.test_libs.helpers.patch_environ",
"clusterfuzz._internal.bot.fuzzers.afl.engine.AFLEngine"
] |
[((1209, 1240), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""temp"""'], {}), "(TEST_PATH, 'temp')\n", (1221, 1240), False, 'import os\n'), ((1258, 1289), 'os.path.join', 'os.path.join', (['TEST_PATH', '"""data"""'], {}), "(TEST_PATH, 'data')\n", (1270, 1289), False, 'import os\n'), ((1309, 1347), 'os.path.join', 'os.path.join', (['TEMP_DIRECTORY', '"""corpus"""'], {}), "(TEMP_DIRECTORY, 'corpus')\n", (1321, 1347), False, 'import os\n'), ((1367, 1405), 'os.path.join', 'os.path.join', (['TEMP_DIRECTORY', '"""output"""'], {}), "(TEMP_DIRECTORY, 'output')\n", (1379, 1405), False, 'import os\n'), ((1165, 1190), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1180, 1190), False, 'import os\n'), ((1669, 1699), 'os.path.exists', 'os.path.exists', (['TEMP_DIRECTORY'], {}), '(TEMP_DIRECTORY)\n', (1683, 1699), False, 'import os\n'), ((1860, 1884), 'os.mkdir', 'os.mkdir', (['TEMP_DIRECTORY'], {}), '(TEMP_DIRECTORY)\n', (1868, 1884), False, 'import os\n'), ((1887, 1913), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY)\n', (1895, 1913), False, 'import os\n'), ((1705, 1734), 'shutil.rmtree', 'shutil.rmtree', (['TEMP_DIRECTORY'], {}), '(TEMP_DIRECTORY)\n', (1718, 1734), False, 'import shutil\n'), ((2178, 2210), 'clusterfuzz._internal.tests.test_libs.helpers.patch_environ', 'test_helpers.patch_environ', (['self'], {}), '(self)\n', (2204, 2210), True, 'from clusterfuzz._internal.tests.test_libs import helpers as test_helpers\n'), ((2215, 2270), 'clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.dont_use_strategies', 'afl_launcher_integration_test.dont_use_strategies', (['self'], {}), '(self)\n', (2264, 2270), False, 'from clusterfuzz._internal.tests.core.bot.fuzzers.afl import afl_launcher_integration_test\n'), ((2382, 2400), 'clusterfuzz._internal.bot.fuzzers.afl.engine.AFLEngine', 'engine.AFLEngine', ([], {}), '()\n', (2398, 2400), False, 'from clusterfuzz._internal.bot.fuzzers.afl import engine\n'), ((2406, 2495), 'clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.setup_testcase_and_corpus', 'afl_launcher_integration_test.setup_testcase_and_corpus', (['"""empty"""', '"""corpus"""'], {'fuzz': '(True)'}), "('empty', 'corpus',\n fuzz=True)\n", (2461, 2495), False, 'from clusterfuzz._internal.tests.core.bot.fuzzers.afl import afl_launcher_integration_test\n'), ((2519, 2562), 'os.path.join', 'os.path.join', (['DATA_DIRECTORY', '"""test_fuzzer"""'], {}), "(DATA_DIRECTORY, 'test_fuzzer')\n", (2531, 2562), False, 'import os\n'), ((3116, 3134), 'clusterfuzz._internal.bot.fuzzers.afl.engine.AFLEngine', 'engine.AFLEngine', ([], {}), '()\n', (3132, 3134), False, 'from clusterfuzz._internal.bot.fuzzers.afl import engine\n'), ((3153, 3196), 'os.path.join', 'os.path.join', (['DATA_DIRECTORY', '"""test_fuzzer"""'], {}), "(DATA_DIRECTORY, 'test_fuzzer')\n", (3165, 3196), False, 'import os\n'), ((3217, 3302), 'clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.setup_testcase_and_corpus', 'afl_launcher_integration_test.setup_testcase_and_corpus', (['"""crash"""', '"""empty_corpus"""'], {}), "('crash', 'empty_corpus'\n )\n", (3272, 3302), False, 'from clusterfuzz._internal.tests.core.bot.fuzzers.afl import afl_launcher_integration_test\n'), ((3623, 3641), 'clusterfuzz._internal.bot.fuzzers.afl.engine.AFLEngine', 'engine.AFLEngine', ([], {}), '()\n', (3639, 3641), False, 'from clusterfuzz._internal.bot.fuzzers.afl import engine\n'), ((3647, 3736), 'clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.setup_testcase_and_corpus', 'afl_launcher_integration_test.setup_testcase_and_corpus', (['"""empty"""', '"""corpus"""'], {'fuzz': '(True)'}), "('empty', 'corpus',\n fuzz=True)\n", (3702, 3736), False, 'from clusterfuzz._internal.tests.core.bot.fuzzers.afl import afl_launcher_integration_test\n'), ((3760, 3809), 'os.path.join', 'os.path.join', (['DATA_DIRECTORY', '"""easy_crash_fuzzer"""'], {}), "(DATA_DIRECTORY, 'easy_crash_fuzzer')\n", (3772, 3809), False, 'import os\n'), ((4436, 4454), 'clusterfuzz._internal.bot.fuzzers.afl.engine.AFLEngine', 'engine.AFLEngine', ([], {}), '()\n', (4452, 4454), False, 'from clusterfuzz._internal.bot.fuzzers.afl import engine\n'), ((4460, 4549), 'clusterfuzz._internal.tests.core.bot.fuzzers.afl.afl_launcher_integration_test.setup_testcase_and_corpus', 'afl_launcher_integration_test.setup_testcase_and_corpus', (['"""empty"""', '"""corpus"""'], {'fuzz': '(True)'}), "('empty', 'corpus',\n fuzz=True)\n", (4515, 4549), False, 'from clusterfuzz._internal.tests.core.bot.fuzzers.afl import afl_launcher_integration_test\n'), ((4573, 4624), 'os.path.join', 'os.path.join', (['DATA_DIRECTORY', '"""always_crash_fuzzer"""'], {}), "(DATA_DIRECTORY, 'always_crash_fuzzer')\n", (4585, 4624), False, 'import os\n'), ((1937, 1983), 'clusterfuzz._internal.system.environment.get_value', 'environment.get_value', (['"""AFL_INTEGRATION_TESTS"""'], {}), "('AFL_INTEGRATION_TESTS')\n", (1958, 1983), False, 'from clusterfuzz._internal.system import environment\n'), ((3009, 3037), 'os.listdir', 'os.listdir', (['CORPUS_DIRECTORY'], {}), '(CORPUS_DIRECTORY)\n', (3019, 3037), False, 'import os\n'), ((4275, 4308), 'os.path.getsize', 'os.path.getsize', (['crash.input_path'], {}), '(crash.input_path)\n', (4290, 4308), False, 'import os\n')]
|
import numpy as np
# direct clustering on a fuzzy similarity matrix
class FCM(object):
def __init__(self, data):
self.lambd = 0
self.data = data
self.cluster = []
self.F_S = []
def standard(self):
data_min, data_max = np.min(self.data, axis=0), np.max(self.data, axis=0)
num_samples, num_shapes = np.shape(self.data)
for i in range(num_samples):
self.data[i, :] = (self.data[i, :])/data_max
for j in range(num_shapes):
self.data[i, j] = round(float(self.data[i, j]), 2)
def matrix_alike(self):
num_samples, num_shapes = np.shape(self.data)
data = self.data
r = np.zeros((num_samples, num_samples))
# using max min method
for i in range(num_samples):
for j in range(num_samples):
r[i, j] = np.sum(self.min(data[i, :], data[j, :]))/np.sum(self.max(data[i, :], data[j, :]))
r[i, j] = round(r[i, j], 2)
return r
def max(self, a, b):
a_or_b = []
for (i, j) in zip(a, b):
if i > j:
a_or_b.append(i)
else:
a_or_b.append(j)
return a_or_b
def min(self, a, b):
a_and_b = []
for (i, j) in zip(a, b):
if i < j:
a_and_b.append(i)
else:
a_and_b.append(j)
return a_and_b
def merge_alike_class(self, a):
b = []
for i in range(len(a)):
temp = []
sign = False
for j in range(len(a[i])):
if len(b) != 0:
for k in range(len(b)):
if a[i][j] in b[k]:
b[k].extend(a[i])
b[k] = list(np.unique(b[k]))
sign = True
break
if sign:
break
temp.append(a[i][j])
if sign:
continue
b.append(temp)
return b
def remove_same_cluster(self):
length = len(self.cluster)
temp = self.cluster.copy()
for i in range(length-1):
if self.cluster[i]['result'] == self.cluster[i+1]['result']:
index = 0
while True:
if temp[index]['lambd'] == self.cluster[i+1]['lambd']:
break
else:
index = index+1
temp.pop(index)
self.cluster = temp
def cluster_t(self, T, lam):
answer = T >= lam
num_i, num_j = answer.shape
x_index, y_index = [], []
for i in range(num_i):
for j in range(num_j):
if answer[i, j]:
x_index.append(i+1)
y_index.append(j+1)
num = list(np.unique(x_index))
result = []
for i in num:
temp = []
for j, k in zip(x_index, y_index):
if i == j:
temp.append(k)
result.append(temp)
        result = self.merge_alike_class(result)  # merge groups that share members
return result
# start cluster
def fcm(self):
self.standard() # data standardization
r = self.matrix_alike() # create fuzzy alike matrix
        lambd = np.unique(r)  # the distinct similarity values are the candidate confidence levels (lambda)
lambd_length = len(lambd)
for i in range(lambd_length):
temp = {}
temp['lambd'] = round(lambd[lambd_length-i-1], 2)
temp['result'] = self.cluster_t(r, lambd[lambd_length-i-1])
self.cluster.append(temp)
self.remove_same_cluster()
        print('The clustering results are', self.cluster)
self.select_lambda()
        # The F-statistic (F_S) is the validity measure for lambda; +1 realigns
        # the index because select_lambda skips the first and last partitions.
        best = self.F_S.index(min(self.F_S)) + 1
        print('The best lambda is', self.cluster[best]['lambd'])
        print('The best clustering result is', self.cluster[best]['result'])
def data_mean(self, data, index):
if len(index) == 1:
return data
else:
return np.mean(data, axis=0)
def select_lambda(self):
total_mean = np.mean(self.data, axis=0)
length = len(self.cluster)
for option in range(1, length-1):
F_S = 0
temp = 0
for i in self.cluster[option]['result']:
                i = [j-1 for j in i]  # convert 1-based sample labels to 0-based row indices
vi = self.data_mean(self.data[i, :], i)
temp = 0
for j in i:
temp = temp + (np.sum(np.square(self.data[j, :] - vi)) - np.sum(np.square(vi - total_mean)))
F_S = F_S + temp
self.F_S.append(F_S)
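# A minimal sketch (not part of the original module) of the lambda-cut idea
# used above: thresholding the fuzzy similarity matrix at a confidence level
# lambda yields a crisp 0/1 relation from which clusters are read off. The
# 3x3 matrix below is made-up illustration data.
def _lambda_cut_demo():
    r = np.array([[1.0, 0.8, 0.4],
                  [0.8, 1.0, 0.5],
                  [0.4, 0.5, 1.0]])
    for lam in (0.8, 0.5):
        print('lambda =', lam)
        print((r >= lam).astype(int))  # crisp relation at this cut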
def main():
data = np.array([[80., 10., 6., 2.],
[50., 1., 6., 4.],
[90., 6., 4., 6.],
[40., 5., 7., 3.],
[10., 1., 2., 4.]])
fcm = FCM(data)
fcm.fcm()
if __name__ == '__main__':
main()
|
[
"numpy.square",
"numpy.zeros",
"numpy.shape",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.max",
"numpy.unique"
] |
[((4941, 5072), 'numpy.array', 'np.array', (['[[80.0, 10.0, 6.0, 2.0], [50.0, 1.0, 6.0, 4.0], [90.0, 6.0, 4.0, 6.0], [\n 40.0, 5.0, 7.0, 3.0], [10.0, 1.0, 2.0, 4.0]]'], {}), '([[80.0, 10.0, 6.0, 2.0], [50.0, 1.0, 6.0, 4.0], [90.0, 6.0, 4.0, \n 6.0], [40.0, 5.0, 7.0, 3.0], [10.0, 1.0, 2.0, 4.0]])\n', (4949, 5072), True, 'import numpy as np\n'), ((337, 356), 'numpy.shape', 'np.shape', (['self.data'], {}), '(self.data)\n', (345, 356), True, 'import numpy as np\n'), ((628, 647), 'numpy.shape', 'np.shape', (['self.data'], {}), '(self.data)\n', (636, 647), True, 'import numpy as np\n'), ((687, 723), 'numpy.zeros', 'np.zeros', (['(num_samples, num_samples)'], {}), '((num_samples, num_samples))\n', (695, 723), True, 'import numpy as np\n'), ((3459, 3471), 'numpy.unique', 'np.unique', (['r'], {}), '(r)\n', (3468, 3471), True, 'import numpy as np\n'), ((4363, 4389), 'numpy.mean', 'np.mean', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (4370, 4389), True, 'import numpy as np\n'), ((249, 274), 'numpy.min', 'np.min', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (255, 274), True, 'import numpy as np\n'), ((276, 301), 'numpy.max', 'np.max', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (282, 301), True, 'import numpy as np\n'), ((2961, 2979), 'numpy.unique', 'np.unique', (['x_index'], {}), '(x_index)\n', (2970, 2979), True, 'import numpy as np\n'), ((4287, 4308), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (4294, 4308), True, 'import numpy as np\n'), ((1832, 1847), 'numpy.unique', 'np.unique', (['b[k]'], {}), '(b[k])\n', (1841, 1847), True, 'import numpy as np\n'), ((4777, 4808), 'numpy.square', 'np.square', (['(self.data[j, :] - vi)'], {}), '(self.data[j, :] - vi)\n', (4786, 4808), True, 'import numpy as np\n'), ((4819, 4845), 'numpy.square', 'np.square', (['(vi - total_mean)'], {}), '(vi - total_mean)\n', (4828, 4845), True, 'import numpy as np\n')]
|
"""
Some useful functions for file management.
Functions:
    copytree(src, dst, symlinks=False, ignore=None):
        Copy all the contents of directory src into directory dst.
    empty_folder(folder):
        Remove all subfolders and files from the directory folder.
"""
import os
import shutil
def copytree(src, dst, symlinks=False, ignore=None):
"""
    Copy all the contents of directory src into directory dst.
:param src: String
Source directory.
:param dst: String
Destination directory.
:param symlinks: default False
:param ignore: default None
:return: None
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def empty_folder(folder):
"""
    Remove all subfolders and files from the directory folder.
    Print a diagnostic message for any entry that cannot be deleted.
    :param folder: string
        The dir to be emptied.
    :return: None
"""
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
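# Usage sketch (the paths below are hypothetical):
#   copytree('/tmp/staging', '/tmp/backup')  # mirror staging into backup
#   empty_folder('/tmp/staging')             # then clear staging for reuse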
|
[
"os.unlink",
"shutil.rmtree",
"os.path.isdir",
"shutil.copy2",
"os.path.isfile",
"os.path.islink",
"shutil.copytree",
"os.path.join",
"os.listdir"
] |
[((638, 653), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (648, 653), False, 'import os\n'), ((1133, 1151), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1143, 1151), False, 'import os\n'), ((667, 690), 'os.path.join', 'os.path.join', (['src', 'item'], {}), '(src, item)\n', (679, 690), False, 'import os\n'), ((703, 726), 'os.path.join', 'os.path.join', (['dst', 'item'], {}), '(dst, item)\n', (715, 726), False, 'import os\n'), ((738, 754), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (751, 754), False, 'import os\n'), ((1173, 1203), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (1185, 1203), False, 'import os\n'), ((768, 807), 'shutil.copytree', 'shutil.copytree', (['s', 'd', 'symlinks', 'ignore'], {}), '(s, d, symlinks, ignore)\n', (783, 807), False, 'import shutil\n'), ((834, 852), 'shutil.copy2', 'shutil.copy2', (['s', 'd'], {}), '(s, d)\n', (846, 852), False, 'import shutil\n'), ((1232, 1257), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1246, 1257), False, 'import os\n'), ((1261, 1286), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (1275, 1286), False, 'import os\n'), ((1304, 1324), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (1313, 1324), False, 'import os\n'), ((1342, 1366), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (1355, 1366), False, 'import os\n'), ((1384, 1408), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (1397, 1408), False, 'import shutil\n')]
|
"""Version's attribute test.
"""
import pytest
import fairytool
def test_version():
assert hasattr(fairytool, "__version__")
if __name__ == "__main__":
pytest.main(["--capture=no"])
|
[
"pytest.main"
] |
[((165, 194), 'pytest.main', 'pytest.main', (["['--capture=no']"], {}), "(['--capture=no'])\n", (176, 194), False, 'import pytest\n')]
|
__all__ = ["CeParser"]
from copy import deepcopy
from decimal import Decimal
from typing import Callable, Dict, Set, Union
import simplejson as json
from boto3.dynamodb.types import (
BINARY,
BINARY_SET,
BOOLEAN,
LIST,
MAP,
NULL,
NUMBER,
NUMBER_SET,
STRING,
STRING_SET,
Binary,
TypeDeserializer,
TypeSerializer,
)
from sly.yacc import Parser
from .celexer import CeLexer
class CeTypeDeserializer(TypeDeserializer):
def deserialize(self, value):
if value and isinstance(value, dict):
            # A dict whose first key is a DynamoDB type tag is a typed value;
            # any other dict is a plain attribute map, deserialized recursively.
            if list(value)[0] in (
BINARY,
BINARY_SET,
BOOLEAN,
LIST,
MAP,
NULL,
NUMBER,
NUMBER_SET,
STRING,
STRING_SET,
):
value = super().deserialize(value)
else:
value = {k: self.deserialize(v) for k, v in value.items()}
return value.value if isinstance(value, Binary) else value
_TYPE_DESERIALIZER = CeTypeDeserializer()
_TYPE_SERIALIZER = TypeSerializer()
Dynamo = Union[
Binary, bool, Decimal, dict, list, None, str, Set[Binary], Set[Decimal], Set[str]
]
ExpressionAttributeNames = Dict[str, str]
ExpressionAttributeValues = DynamoItem = Dict[str, Union[Dynamo, Dict[str, Dynamo]]]
class CeParser(Parser):
_expression_cache: Dict[int, Callable[[DynamoItem], bool]] = dict()
def __init__(
self,
*,
expression_attribute_names: ExpressionAttributeNames = None,
expression_attribute_values: ExpressionAttributeValues = None,
):
self._expression_attribute_names: ExpressionAttributeNames = dict()
self._expression_attribute_values: ExpressionAttributeValues = dict()
self.expression_attribute_names = expression_attribute_names or dict()
self.expression_attribute_values = expression_attribute_values or dict()
self._set_expression_attribute_json()
super().__init__()
def _set_expression_attribute_json(self) -> None:
self._expression_attribute_json = json.dumps(
self._expression_attribute_names, separators=(",", ":"), use_decimal=True
) + json.dumps(
self._expression_attribute_values, separators=(",", ":"), use_decimal=True
)
@property
def expression_attribute_names(self) -> ExpressionAttributeNames:
return deepcopy(self._expression_attribute_names)
@expression_attribute_names.setter
def expression_attribute_names(
self, expression_attribute_names: ExpressionAttributeNames
) -> None:
self._expression_attribute_names = (
deepcopy(expression_attribute_names) or dict()
)
self._set_expression_attribute_json()
@expression_attribute_names.deleter
def expression_attribute_names(self) -> None:
self._expression_attribute_names: ExpressionAttributeNames = dict()
self._set_expression_attribute_json()
@property
def expression_attribute_values(self) -> ExpressionAttributeValues:
return deepcopy(self._expression_attribute_values)
@expression_attribute_values.setter
def expression_attribute_values(
self, expression_attribute_values: ExpressionAttributeValues
) -> None:
self._expression_attribute_values: ExpressionAttributeValues = (
_TYPE_DESERIALIZER.deserialize(expression_attribute_values) or dict()
)
self._set_expression_attribute_json()
@expression_attribute_values.deleter
def expression_attribute_values(self) -> None:
self._expression_attribute_values: ExpressionAttributeValues = dict()
self._set_expression_attribute_json()
def evaluate(self, /, expression: str, item: DynamoItem) -> bool:
return self.parse(expression)(item)
@classmethod
def flush_cache(cls) -> None:
cls._expression_cache: Dict[int, Callable[[DynamoItem], bool]] = dict()
def parse(self, expression: str) -> Callable[[DynamoItem], bool]:
        # The cache key covers both the expression text and the current
        # name/value substitutions, so changing either invalidates the entry.
        expression_hash = hash(expression + self._expression_attribute_json)
if expression_hash not in self._expression_cache:
compiled_expression: Callable[[DynamoItem], bool] = super().parse(
CeLexer().tokenize(expression)
)
def truthy(item: DynamoItem) -> bool:
item = _TYPE_DESERIALIZER.deserialize(item)
return compiled_expression(item)
self._expression_cache[expression_hash] = lambda m: truthy(m)
return self._expression_cache[expression_hash]
# Get the token list from the lexer (required)
tokens = CeLexer.tokens
precedence = (
("left", OR),
("left", AND),
("right", NOT),
("right", PARENS),
("left", ATTRIBUTE_EXISTS, ATTRIBUTE_NOT_EXISTS, BEGINS_WITH, CONTAINS),
("left", BETWEEN),
("left", IN),
("left", EQ, NE, LT, LTE, GT, GTE),
)
# Grammar rules and actions
@_("operand EQ operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) == operand1(m)
@_("operand NE operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) != operand1(m)
@_("operand GT operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) > operand1(m)
@_("operand GTE operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) >= operand1(m)
@_("operand LT operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) < operand1(m)
@_("operand LTE operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) <= operand1(m)
@_("operand BETWEEN operand AND operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
operand2 = p.operand2
return lambda m: operand1(m) <= operand0(m) <= operand2(m)
@_('operand IN "(" in_list ")"')
def condition(self, p):
operand = p.operand
in_list = p.in_list
return lambda m: operand(m) in in_list(m)
@_("function")
def condition(self, p):
function = p.function
return lambda m: function(m)
@_("condition AND condition")
def condition(self, p):
condition0 = p.condition0
condition1 = p.condition1
return lambda m: condition0(m) and condition1(m)
@_("condition OR condition")
def condition(self, p):
condition0 = p.condition0
condition1 = p.condition1
return lambda m: condition0(m) or condition1(m)
@_("NOT condition")
def condition(self, p):
condition = p.condition
return lambda m: not condition(m)
@_('"(" condition ")" %prec PARENS')
def condition(self, p):
condition = p.condition
return lambda m: condition(m)
@_('ATTRIBUTE_EXISTS "(" path ")"')
def function(self, p):
path = p.path
return lambda m: path(m) is not None
@_('ATTRIBUTE_NOT_EXISTS "(" path ")"')
def function(self, p):
path = p.path
return lambda m: path(m) is None
@_('ATTRIBUTE_TYPE "(" path "," operand ")"')
def function(self, p):
path = p.path
operand = p.operand
return lambda m: list(_TYPE_SERIALIZER.serialize(path(m)))[0] == operand(m)
@_('BEGINS_WITH "(" path "," operand ")"')
def function(self, p):
path = p.path
operand = p.operand
return (
lambda m: path(m).startswith(operand(m))
if isinstance(path(m), str)
else False
)
@_('CONTAINS "(" path "," operand ")"')
def function(self, p):
path = p.path
operand = p.operand
return (
lambda m: operand(m) in path(m)
if isinstance(path(m), (str, set))
else False
)
@_('SIZE "(" path ")"')
def operand(self, p):
path = p.path
return (
lambda m: len(path(m))
if isinstance(path(m), (str, set, dict, bytearray, bytes, list))
else -1
)
@_('in_list "," operand')
def in_list(self, p):
in_list = p.in_list
operand = p.operand
return lambda m: [*in_list(m), operand(m)]
@_('operand "," operand')
def in_list(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: [operand0(m), operand1(m)]
@_("path")
def operand(self, p):
return p.path
@_("VALUE")
def operand(self, p):
VALUE = p.VALUE
expression_attribute_values = self._expression_attribute_values
return lambda m: expression_attribute_values.get(VALUE)
@_('path "." NAME')
def path(self, p):
path = p.path
NAME = p.NAME
return lambda m: path(m).get(NAME) if path(m) else None
@_('path "." NAME_REF')
def path(self, p):
path = p.path
NAME_REF = p.NAME_REF
expression_attribute_names = self._expression_attribute_names
return (
lambda m: path(m).get(expression_attribute_names.get(NAME_REF))
if path(m)
else None
)
@_('path "[" INDEX "]"')
def path(self, p):
path = p.path
INDEX = p.INDEX
return (
lambda m: path(m)[INDEX]
if isinstance(path(m), list) and len(path(m)) > INDEX
else None
)
@_("NAME")
def path(self, p):
NAME = p.NAME
return lambda m: m.get(NAME)
@_("NAME_REF")
def path(self, p):
NAME_REF = p.NAME_REF
expression_attribute_names = self._expression_attribute_names
return lambda m: m.get(expression_attribute_names.get(NAME_REF))
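# Usage sketch (the expression, names, and item below are made up; the exact
# token syntax depends on CeLexer, assumed here to follow DynamoDB conventions):
#   parser = CeParser(expression_attribute_values={':min': {'N': '2'}})
#   parser.evaluate(expression='size(tags) > :min',
#                   item={'tags': {'SS': ['a', 'b', 'c']}})  # -> True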
|
[
"simplejson.dumps",
"copy.deepcopy",
"boto3.dynamodb.types.TypeSerializer"
] |
[((1122, 1138), 'boto3.dynamodb.types.TypeSerializer', 'TypeSerializer', ([], {}), '()\n', (1136, 1138), False, 'from boto3.dynamodb.types import BINARY, BINARY_SET, BOOLEAN, LIST, MAP, NULL, NUMBER, NUMBER_SET, STRING, STRING_SET, Binary, TypeDeserializer, TypeSerializer\n'), ((2464, 2506), 'copy.deepcopy', 'deepcopy', (['self._expression_attribute_names'], {}), '(self._expression_attribute_names)\n', (2472, 2506), False, 'from copy import deepcopy\n'), ((3140, 3183), 'copy.deepcopy', 'deepcopy', (['self._expression_attribute_values'], {}), '(self._expression_attribute_values)\n', (3148, 3183), False, 'from copy import deepcopy\n'), ((2145, 2234), 'simplejson.dumps', 'json.dumps', (['self._expression_attribute_names'], {'separators': "(',', ':')", 'use_decimal': '(True)'}), "(self._expression_attribute_names, separators=(',', ':'),\n use_decimal=True)\n", (2155, 2234), True, 'import simplejson as json\n'), ((2255, 2345), 'simplejson.dumps', 'json.dumps', (['self._expression_attribute_values'], {'separators': "(',', ':')", 'use_decimal': '(True)'}), "(self._expression_attribute_values, separators=(',', ':'),\n use_decimal=True)\n", (2265, 2345), True, 'import simplejson as json\n'), ((2722, 2758), 'copy.deepcopy', 'deepcopy', (['expression_attribute_names'], {}), '(expression_attribute_names)\n', (2730, 2758), False, 'from copy import deepcopy\n')]
|
# SPDX-FileCopyrightText: 2021 easyDiffraction contributors <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = "github.com/AndrewSazonov"
__version__ = '0.0.1'
import os, sys
import ftplib
import pathlib
import Functions, Config
CONFIG = Config.Config()
def connect(ftp, host, port):
try:
message = f'connect to ftp server'
ftp.connect(host, port)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def login(ftp, user, password):
try:
message = f'login to ftp server'
ftp.login(user, password)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def winToLin(path):
return path.replace('\\', '/')
def makeDir(ftp, path):
if pathExists(ftp, path):
Functions.printNeutralMessage(f'Directory exists: {path}')
return
try:
path = winToLin(path)
message = f'create directory {path}'
ftp.mkd(path)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def uploadFile(ftp, source, destination):
try:
destination = winToLin(destination)
message = f'upload file {source} to {destination}'
dir_name = os.path.basename(destination)
dir_names = ftp.nlst(os.path.dirname(destination))
if dir_name not in dir_names:
makeDir(ftp, destination)
destination = f'{destination}/{os.path.basename(source)}'
with open(source, 'rb') as fb:
ftp.storbinary(f'STOR {destination}', fb)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def uploadDir(ftp, source, destination):
try:
message = f'upload dir {source} to {destination}'
root_dir_name = os.path.basename(source)
for dir_path, _, file_names in os.walk(source):
for file_name in file_names:
source_file = os.path.join(dir_path, file_name)
parent_path = os.path.relpath(source_file, source)
parent_dir = os.path.dirname(parent_path)
destination_dir = os.path.join(destination, root_dir_name, parent_dir).rstrip(os.path.sep)
uploadFile(ftp, source_file, destination_dir)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def upload(ftp, source, destination):
try:
message = f'upload {source} to {destination}'
if os.path.isfile(source):
uploadFile(ftp, source, destination)
elif os.path.isdir(source):
uploadDir(ftp, source, destination)
else:
Functions.printFailMessage(message)
sys.exit(1)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def pathExists(ftp, path):
try:
message = f'find path {path}'
ftp.nlst(path)
except Exception as exception:
Functions.printFailMessage(message, exception)
return False
else:
Functions.printSuccessMessage(message)
return True
def removeDir(ftp, path):
if not pathExists(ftp, path):
Functions.printNeutralMessage(f"Directory doesn't exists: {path}")
return
try:
path = winToLin(path)
message = f'remove directory {path}'
for (name, properties) in ftp.mlsd(path=path):
if name in ['.', '..']:
continue
elif properties['type'] == 'file':
ftp.delete(f'{path}/{name}')
elif properties['type'] == 'dir':
removeDir(ftp, f'{path}/{name}')
ftp.rmd(path)
except Exception as exception:
        Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def deploy():
branch = sys.argv[1]
if branch != 'master':
Functions.printNeutralMessage(f'No ftp upload for branch {branch}')
return
password = sys.argv[2]
host = CONFIG['ci']['app']['setup']['ftp']['host']
port = CONFIG['ci']['app']['setup']['ftp']['port']
user = CONFIG['ci']['app']['setup']['ftp']['user']
prefix = CONFIG['ci']['app']['setup']['ftp']['prefix']
repo_subdir = CONFIG['ci']['app']['setup']['ftp']['repo_subdir']
local_repository_dir_name = f'{CONFIG.app_name}{CONFIG.repository_dir_suffix}'
local_repository_dir_path = os.path.join(CONFIG.dist_dir, local_repository_dir_name, CONFIG.setup_os)
online_repository_subdir_path = f'{prefix}/{repo_subdir}'
online_repository_dir_path = f'{online_repository_subdir_path}/{CONFIG.setup_os}'
ftp = ftplib.FTP()
connect(ftp, host, port)
login(ftp, user, password)
removeDir(ftp, online_repository_dir_path)
makeDir(ftp, online_repository_dir_path)
upload(ftp, local_repository_dir_path, online_repository_subdir_path)
ftp.quit()
if __name__ == "__main__":
deploy()
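# Invocation sketch: the CI pipeline runs this script as
#   python <this_script> <branch> <ftp_password>
# and only pushes to the 'master' branch trigger an upload.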
|
[
"Functions.printFailMessage",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"os.walk",
"Functions.printSuccessMessage",
"os.path.isfile",
"os.path.relpath",
"Config.Config",
"ftplib.FTP",
"sys.exit",
"os.path.join",
"Functions.printNeutralMessage"
] |
[((359, 374), 'Config.Config', 'Config.Config', ([], {}), '()\n', (372, 374), False, 'import Functions, Config\n'), ((4986, 5059), 'os.path.join', 'os.path.join', (['CONFIG.dist_dir', 'local_repository_dir_name', 'CONFIG.setup_os'], {}), '(CONFIG.dist_dir, local_repository_dir_name, CONFIG.setup_os)\n', (4998, 5059), False, 'import os, sys\n'), ((5219, 5231), 'ftplib.FTP', 'ftplib.FTP', ([], {}), '()\n', (5229, 5231), False, 'import ftplib\n'), ((618, 656), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (647, 656), False, 'import Functions, Config\n'), ((902, 940), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (931, 940), False, 'import Functions, Config\n'), ((1060, 1118), 'Functions.printNeutralMessage', 'Functions.printNeutralMessage', (['f"""Directory exists: {path}"""'], {}), "(f'Directory exists: {path}')\n", (1089, 1118), False, 'import Functions, Config\n'), ((1368, 1406), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (1397, 1406), False, 'import Functions, Config\n'), ((1581, 1610), 'os.path.basename', 'os.path.basename', (['destination'], {}), '(destination)\n', (1597, 1610), False, 'import os, sys\n'), ((2033, 2071), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (2062, 2071), False, 'import Functions, Config\n'), ((2205, 2229), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (2221, 2229), False, 'import os, sys\n'), ((2269, 2284), 'os.walk', 'os.walk', (['source'], {}), '(source)\n', (2276, 2284), False, 'import os, sys\n'), ((2813, 2851), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (2842, 2851), False, 'import Functions, Config\n'), ((2965, 2987), 'os.path.isfile', 'os.path.isfile', (['source'], {}), '(source)\n', (2979, 2987), False, 'import os, sys\n'), ((3336, 3374), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (3365, 3374), False, 'import Functions, Config\n'), ((3602, 3640), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (3631, 3640), False, 'import Functions, Config\n'), ((3730, 3796), 'Functions.printNeutralMessage', 'Functions.printNeutralMessage', (['f"""Directory doesn\'t exists: {path}"""'], {}), '(f"Directory doesn\'t exists: {path}")\n', (3759, 3796), False, 'import Functions, Config\n'), ((4352, 4390), 'Functions.printSuccessMessage', 'Functions.printSuccessMessage', (['message'], {}), '(message)\n', (4381, 4390), False, 'import Functions, Config\n'), ((4466, 4533), 'Functions.printNeutralMessage', 'Functions.printNeutralMessage', (['f"""No ftp upload for branch {branch}"""'], {}), "(f'No ftp upload for branch {branch}')\n", (4495, 4533), False, 'import Functions, Config\n'), ((533, 579), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (559, 579), False, 'import Functions, Config\n'), ((588, 599), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (596, 599), False, 'import os, sys\n'), ((817, 863), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (843, 863), False, 'import Functions, Config\n'), ((872, 883), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (880, 883), False, 'import os, sys\n'), ((1283, 1329), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (1309, 1329), False, 'import Functions, Config\n'), ((1338, 1349), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1346, 1349), False, 'import os, sys\n'), ((1640, 1668), 'os.path.dirname', 'os.path.dirname', (['destination'], {}), '(destination)\n', (1655, 1668), False, 'import os, sys\n'), ((1948, 1994), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (1974, 1994), False, 'import Functions, Config\n'), ((2003, 2014), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2011, 2014), False, 'import os, sys\n'), ((2728, 2774), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (2754, 2774), False, 'import Functions, Config\n'), ((2783, 2794), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2791, 2794), False, 'import os, sys\n'), ((3051, 3072), 'os.path.isdir', 'os.path.isdir', (['source'], {}), '(source)\n', (3064, 3072), False, 'import os, sys\n'), ((3251, 3297), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (3277, 3297), False, 'import Functions, Config\n'), ((3306, 3317), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3314, 3317), False, 'import os, sys\n'), ((3516, 3562), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message', 'exception'], {}), '(message, exception)\n', (3542, 3562), False, 'import Functions, Config\n'), ((4264, 4313), 'Functions.printNeutralMessage', 'Functions.printNeutralMessage', (['message', 'exception'], {}), '(message, exception)\n', (4293, 4313), False, 'import Functions, Config\n'), ((4322, 4333), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4330, 4333), False, 'import os, sys\n'), ((1785, 1809), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (1801, 1809), False, 'import os, sys\n'), ((2357, 2390), 'os.path.join', 'os.path.join', (['dir_path', 'file_name'], {}), '(dir_path, file_name)\n', (2369, 2390), False, 'import os, sys\n'), ((2421, 2457), 'os.path.relpath', 'os.path.relpath', (['source_file', 'source'], {}), '(source_file, source)\n', (2436, 2457), False, 'import os, sys\n'), ((2487, 2515), 'os.path.dirname', 'os.path.dirname', (['parent_path'], {}), '(parent_path)\n', (2502, 2515), False, 'import os, sys\n'), ((3148, 3183), 'Functions.printFailMessage', 'Functions.printFailMessage', (['message'], {}), '(message)\n', (3174, 3183), False, 'import Functions, Config\n'), ((3196, 3207), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3204, 3207), False, 'import os, sys\n'), ((2550, 2602), 'os.path.join', 'os.path.join', (['destination', 'root_dir_name', 'parent_dir'], {}), '(destination, root_dir_name, parent_dir)\n', (2562, 2602), False, 'import os, sys\n')]
|
"""
Module with useful functions.
"""
from typing import Union, List
import ast
def parse_code(input: Union[str, List[str]]) -> str:
"""Tries to parse code represented as string or list of strings
Parameters
----------
input : Union[str, List[str]]
either a str or a list of str
Returns
-------
str
the formatted string
Raises
------
SyntaxError
if there are any parsing issues
"""
if input is None:
return input
try:
simple = "".join(input)
ast.parse(simple)
return simple
    except SyntaxError as e:
        # An "unexpected EOF" usually means the fragments are separate
        # statements, so retry with newline joins. This only makes sense for
        # list input; joining a plain str would interleave its characters.
        if "EOF" in str(e) and not isinstance(input, str):
            return "\n".join(input)
        raise SyntaxError("Problem parsing your code!")
|
[
"ast.parse"
] |
[((547, 564), 'ast.parse', 'ast.parse', (['simple'], {}), '(simple)\n', (556, 564), False, 'import ast\n')]
|
##############################################################################
#
# Copyright (c) 2016 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Helpers for drivers
"""
from __future__ import print_function
import importlib
import sys
import os
from zope.interface import directlyProvides
from zope.interface import implementer
from .._compat import PYPY
from .._compat import PY3
from .._compat import casefold
from .._util import positive_integer
from .._util import consume
from .interfaces import IDBDriver
from .interfaces import IDBDriverFactory
from .interfaces import IDBDriverOptions
from .interfaces import DriverNotAvailableError
from .interfaces import NoDriversAvailableError
from .interfaces import ReplicaClosedException
from .interfaces import UnknownDriverError
logger = __import__('logging').getLogger(__name__)
def _select_driver(options, driver_options):
driver = _select_driver_by_name(options.driver, driver_options)
driver.configure_from_options(options)
return driver
def _select_driver_by_name(driver_name, driver_options):
driver_name = driver_name or 'auto'
driver_name = casefold(driver_name)
accept_any_driver = driver_name == 'auto'
# XXX: For testing, we'd like to be able to prohibit the use of auto.
for factory in driver_options.known_driver_factories():
exact_match = casefold(factory.driver_name) == driver_name
if accept_any_driver or exact_match:
try:
return factory()
except DriverNotAvailableError as e:
if not accept_any_driver:
e.driver_options = driver_options
raise
# Well snap, no driver. Either we would take any driver,
# and none were available, or we needed an exact driver that
# wasn't found
error = NoDriversAvailableError if accept_any_driver else UnknownDriverError
raise error(driver_name, driver_options)
class DriverNotImportableError(DriverNotAvailableError,
ImportError):
"When the module can't be imported."
class AbstractModuleDriver(object):
"""
Base implementation of a driver, based on a module, as used in DBAPI.
Subclasses must provide:
- ``MODULE_NAME`` property.
- ``__name__`` property
- Implementation of ``get_driver_module``; this should import the
module at runtime.
"""
#: The name of the DB-API module to import.
MODULE_NAME = None
#: The name written in config files
__name__ = None
#: Can this module be used on PyPy?
AVAILABLE_ON_PYPY = True
#: Set this to false if your subclass can do static checks
#: at import time to determine it should not be used.
#: Helpful for things like Python version detection.
STATIC_AVAILABLE = True
#: Priority of this driver, when available. Lower is better.
#: (That is, first choice should have value 1, and second choice value
#: 2, and so on.)
PRIORITY = 100
#: Priority of this driver when running on PyPy. Lower is better.
PRIORITY_PYPY = 100
#: Class attribute. If set to a true value (not the default),
#: ask the underlying driver to work in as strict a mode as possible
#: when it comes to detecting programming errors.
#:
#: Typically set by tests. Most drivers do not have a stricter mode
#: that can be enabled.
STRICT = False
# Can this driver work with gevent?
_GEVENT_CAPABLE = False
# Does this driver need the socket module patched?
# Only checked if _GEVENT_CAPABLE is set to True.
_GEVENT_NEEDS_SOCKET_PATCH = True
#: The size we request cursor's from our :meth:`cursor` method
#: to fetch from ``fetchmany`` and (hopefully) iteration (which is a
#: DB-API extension. We default to 1024, but the environment variable
#: RS_CURSOR_ARRAYSIZE can be set to an int to change this default.
#: Individual drivers *might* choose a different default.
cursor_arraysize = positive_integer(
os.environ.get('RS_CURSOR_ARRAYSIZE', '1024')
)
DriverNotAvailableError = DriverNotAvailableError
# Can the driver support the full range of a 64-bit unsigned ID for
# OID and TID parameters?
supports_64bit_unsigned_id = True
def __init__(self):
if PYPY and not self.AVAILABLE_ON_PYPY:
raise self.DriverNotAvailableError(self.__name__)
if not self.STATIC_AVAILABLE:
raise self.DriverNotAvailableError(self.__name__)
try:
self.driver_module = mod = self.get_driver_module()
except ImportError:
logger.debug("Unable to import driver", exc_info=True)
raise DriverNotImportableError(self.__name__)
self.disconnected_exceptions = (mod.OperationalError,
mod.InterfaceError,
ReplicaClosedException)
self.close_exceptions = self.disconnected_exceptions + (mod.ProgrammingError,)
self.lock_exceptions = (mod.DatabaseError,)
# If we try to do something very wrong, a bug in our code,
# we *should* get a ProgrammingError. Unfortunately, some drivers
# raise ProgrammingError for other things, such as failing to get a lock.
self.illegal_operation_exceptions = (mod.ProgrammingError,)
self.use_replica_exceptions = (mod.OperationalError,)
self.Binary = mod.Binary
self._connect = mod.connect
self.priority = self.PRIORITY if not PYPY else self.PRIORITY_PYPY
def connect(self, *args, **kwargs):
return self._connect(*args, **kwargs)
def get_driver_module(self):
"""Import and return the driver module."""
return importlib.import_module(self.MODULE_NAME)
def gevent_cooperative(self):
# Return whether this driver is cooperative with gevent.
# This takes into account whether the system is
# and/or needs to be monkey-patched
if not self._GEVENT_CAPABLE:
return False
if self._GEVENT_NEEDS_SOCKET_PATCH:
return self._sockets_gevent_monkey_patched()
return True
def configure_from_options(self, options): # pylint:disable=unused-argument
"""Default implementation; does nothing."""
def _sockets_gevent_monkey_patched(self):
# Return whether the socket module has been monkey-patched
# by gevent
try:
from gevent import monkey
except ImportError: # pragma: no cover
return False
else:
# some versions of gevent have a bug where if we're monkey-patched
# on the command line using python -m gevent.monkey /path/to/testrunner ...
# it doesn't report being monkey-patched.
import socket
return monkey.is_module_patched('socket') or 'gevent' in repr(socket.socket)
    # Common compatibility shims, overridden as needed.
def set_autocommit(self, conn, value):
conn.autocommit(value)
def cursor(self, conn, server_side=False): # pylint:disable=unused-argument
cur = conn.cursor()
cur.arraysize = self.cursor_arraysize
return cur
def debug_connection(self, conn, *extra): # pragma: no cover
print(conn, *extra)
def get_messages(self, conn): # pragma: no cover pylint:disable=unused-argument
return ()
def __transaction_boundary(self, conn, meth):
meth()
messages = self.get_messages(conn)
for msg in messages:
logger.debug(msg.strip())
def commit(self, conn, cursor=None): # pylint:disable=unused-argument
self.__transaction_boundary(conn, conn.commit)
def rollback(self, conn):
self.__transaction_boundary(conn, conn.rollback)
def connection_may_need_rollback(self, conn): # pylint:disable=unused-argument
return True
connection_may_need_commit = connection_may_need_rollback
def synchronize_cursor_for_rollback(self, cursor):
"""Exceptions here are ignored, we don't know what state the cursor is in."""
# psycopg2 raises ProgrammingError if we rollback when no results
# are present on the cursor. mysql-connector-python raises
# InterfaceError. OTOH, mysqlclient raises nothing and even wants
# it in certain circumstances.
if cursor is not None:
try:
consume(cursor)
except Exception: # pylint:disable=broad-except
pass
# Things that can be recognized as a pickled state,
# passed to an io.BytesIO reader, and unpickled.
# Py MySQL Connector/Python returns a bytearray, whereas
# C MySQL Connector/Python returns bytes.
# sqlite uses buffer on Py2 and memoryview on Py3.
# Keep these ordered with the most common at the front;
# Python does a linear traversal of type checks.
state_types = (bytes, bytearray)
def binary_column_as_state_type(self, data):
if isinstance(data, self.state_types) or data is None:
return data
__traceback_info__ = type(data), data
raise TypeError("Unknown binary state column")
def binary_column_as_bytes(self, data):
# Take the same inputs as `as_state_type`, but turn them into
        # actual bytes. This includes None and empty bytes, both of which
        # become the literal b''.
# XXX: TODO: We don't need all these checks up here. Just the common ones,
# move everything else to specific drivers.
if data is None or not data:
return b''
if isinstance(data, bytes):
return data
if isinstance(data, memoryview):
return data.tobytes()
        # Everything left we convert with the bytes() constructor.
        # That covers buffer and bytearray.
__traceback_info__ = data, type(data)
return bytes(data)
def enter_critical_phase_until_transaction_end(self, connection, cursor):
"""Default implementation; does nothing."""
def is_in_critical_phase(self, connection, cursor):
"""Default implementation; returns a false value."""
def exit_critical_phase(self, connection, cursor):
"Default implementation; does nothing."
class MemoryViewBlobDriverMixin(object):
# psycopg2 is smart enough to return memoryview or buffer on
# Py3/Py2, respectively, for BYTEa columns. sqlite3 does exactly
# the same for BLOB columns (on Python 2; on Python 3 it returns
# bytes instead of buffer), and defines ``Binary`` that way as
# well.
# memoryview can't be passed to bytes() on Py2 or Py3, but it can
# be passed to cStringIO.StringIO() or io.BytesIO() ---
# unfortunately, memoryviews, at least, don't like going to
# io.BytesIO() on Python 3, and that's how we unpickle states. So
# while ideally we'd like to keep it that way, to save a copy, we
# are forced to make the copy. Plus there are tests that like to
# directly compare bytes.
if PY3:
def binary_column_as_state_type(self, data):
if data:
# Calling 'bytes()' on a memoryview in Python 3 does
# nothing useful.
data = data.tobytes()
return data
else:
def binary_column_as_state_type(self, data):
if data:
data = bytes(data)
return data
@implementer(IDBDriverFactory)
class _ClassDriverFactory(object):
def __init__(self, driver_type):
self.driver_type = driver_type
# Getting the name is tricky, the class wants to shadow it.
self.driver_name = driver_type.__dict__.get('__name__') or driver_type.__name__
def check_availability(self):
try:
self.driver_type()
except DriverNotAvailableError:
return False
return True
def __call__(self):
return self.driver_type()
def __eq__(self, other):
return (casefold(self.driver_name), self.driver_type) == (
casefold(other.driver_name), other.driver_type)
def __hash__(self):
return hash((casefold(self.driver_name), self.driver_type))
def __getattr__(self, name):
return getattr(self.driver_type, name)
def implement_db_driver_options(name, *driver_modules):
"""
Helper function to be called at a module scope to
make it implement ``IDBDriverOptions``.
:param str name: The value of ``__name__``.
:param driver_modules: Each of these names a module that has
one or more implementations of ``IDBDriver`` in it,
as named in their ``__all__`` attribute.
"""
module = sys.modules[name]
driver_factories = set()
for driver_module in driver_modules:
driver_module = importlib.import_module('.' + driver_module,
name)
for factory in driver_module.__all__:
factory = getattr(driver_module, factory)
if IDBDriver.implementedBy(factory): # pylint:disable=no-value-for-parameter
driver_factories.add(_ClassDriverFactory(factory))
module.known_driver_factories = lambda: sorted(
driver_factories,
key=lambda factory: factory.PRIORITY if not PYPY else factory.PRIORITY_PYPY,
)
directlyProvides(module, IDBDriverOptions)
module.select_driver = lambda driver_name=None: _select_driver_by_name(driver_name,
sys.modules[name])
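# Usage sketch (the submodule names are illustrative): a database adapter
# package calls this at module scope,
#   implement_db_driver_options(__name__, 'driver_a', 'driver_b')
# after which the module gains known_driver_factories() and select_driver().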
class _NoGeventDriverMixin(object):
    import time as gevent  # stand-in module: time.sleep keeps .sleep access working
def get_driver_module(self):
raise ImportError("Could not import gevent")
class _NoGeventConnectionMixin(object):
gevent_hub = None
gevent_read_watcher = None
gevent_write_watcher = None
gevent_sleep = None
try:
import gevent
except ImportError:
GeventDriverMixin = _NoGeventDriverMixin
GeventConnectionMixin = _NoGeventConnectionMixin
else:
import select
from gevent.socket import wait
get_hub = gevent.get_hub
class GeventDriverMixin(object):
gevent = gevent
class GeventConnectionMixin(_NoGeventConnectionMixin):
"""
Helper for a connection that waits using gevent.
Subclasses must provide a ``fileno()`` method. The usual
pattern for executing a query would then be something like
this::
query = format_query_to_bytes(...)
self.gevent_wait_write()
self.send_query()
self.gevent_wait_read()
self.read_results()
It is important that ``send_query`` do nothing but put bytes
on the wire. It must not include any attempt to wait for a
response from the database, especially if that response could
take an arbitrary amount of time or block. (Of course, if
``send_query`` and ``read_results`` can arrange to use gevent
waiting functions too, you'll have finer control. This example
        is all-or-nothing. Sometimes it's easy to handle
``read_results`` in a looping function using a server-side
cursor.)
The ``gevent_wait_read`` and ``gevent_wait_write`` functions
are implemented using :func:`gevent.socket.wait`. That
function always takes a full iteration of the event loop to
determine whether a file descriptor is ready; it always yields
control to other greenlets immediately. gevent's own sockets
don't work that way; instead they try to read/write and catch
the resulting EAGAIN exception. Only after that do they yield
to the event loop. This is for good reason: eliminating
unnecessary switches can lead to higher throughput.
Here, a pass through the event loop can be risky. If we send a
request that establishes database locks that will require
further action from the greenlet to relinquish, those will
come into being (potentially blocking other greenlets in the
same or different processes) sometime between when
``send_query`` is entered and when ``gevent_wait_read`` exits.
If, for any reason, a different greenlet runs while we have
yielded to the event loop and blocks on a resource we own that
is not gevent cooperative (a non-monkey-patched lock, a
different database) we'll never regain control. And thus we'll
never be able to make forward progress and release those
locks. Since they're shared locks, that could harm arbitrary
machines in the cluster.
Thus, we perform a similar optimization as gevent sockets: we
first check to see if the file descriptor is ready and only
yield to the event loop if it isn't. The cost is an extra
        system call to ``select``. For write requests, we may be
        able to assume that they are always ready (depending on the
nature of the protocol); if that's so, override
:meth:`gevent_check_write`. The same goes for
:meth:`gevent_check_read`. This doesn't eliminate the problem,
but it should substantially reduce the chances of it
happening.
"""
gevent_sleep = staticmethod(gevent.sleep)
def close(self):
self.__close_watchers()
super(GeventConnectionMixin, self).close()
def __check_watchers(self):
# We can be used from more than one thread in a sequential
# fashion.
hub = get_hub()
if hub is not self.gevent_hub:
self.__close_watchers()
fileno = self.fileno()
hub = self.gevent_hub = get_hub()
self.gevent_read_watcher = hub.loop.io(fileno, 1)
self.gevent_write_watcher = hub.loop.io(fileno, 2)
def __close_watchers(self):
if self.gevent_read_watcher is not None:
self.gevent_read_watcher.close()
self.gevent_write_watcher.close()
self.gevent_hub = None
        def gevent_check_read(self):
if select.select([self], (), (), 0)[0]:
return True
return False
def gevent_wait_read(self):
if not self.gevent_check_read():
self.__check_watchers()
wait(self.gevent_read_watcher,
hub=self.gevent_hub)
def gevent_check_write(self):
if select.select((), [self], (), 0)[1]:
return True
return False
def gevent_wait_write(self):
if not self.gevent_check_write():
self.__check_watchers()
wait(self.gevent_write_watcher,
hub=self.gevent_hub)
|
[
"importlib.import_module",
"zope.interface.implementer",
"gevent.monkey.is_module_patched",
"os.environ.get",
"select.select",
"zope.interface.directlyProvides",
"gevent.socket.wait"
] |
[((11918, 11947), 'zope.interface.implementer', 'implementer', (['IDBDriverFactory'], {}), '(IDBDriverFactory)\n', (11929, 11947), False, 'from zope.interface import implementer\n'), ((13823, 13865), 'zope.interface.directlyProvides', 'directlyProvides', (['module', 'IDBDriverOptions'], {}), '(module, IDBDriverOptions)\n', (13839, 13865), False, 'from zope.interface import directlyProvides\n'), ((4506, 4551), 'os.environ.get', 'os.environ.get', (['"""RS_CURSOR_ARRAYSIZE"""', '"""1024"""'], {}), "('RS_CURSOR_ARRAYSIZE', '1024')\n", (4520, 4551), False, 'import os\n'), ((6229, 6270), 'importlib.import_module', 'importlib.import_module', (['self.MODULE_NAME'], {}), '(self.MODULE_NAME)\n', (6252, 6270), False, 'import importlib\n'), ((13293, 13343), 'importlib.import_module', 'importlib.import_module', (["('.' + driver_module)", 'name'], {}), "('.' + driver_module, name)\n", (13316, 13343), False, 'import importlib\n'), ((7324, 7358), 'gevent.monkey.is_module_patched', 'monkey.is_module_patched', (['"""socket"""'], {}), "('socket')\n", (7348, 7358), False, 'from gevent import monkey\n'), ((18634, 18666), 'select.select', 'select.select', (['[self]', '()', '()', '(0)'], {}), '([self], (), (), 0)\n', (18647, 18666), False, 'import select\n'), ((18862, 18913), 'gevent.socket.wait', 'wait', (['self.gevent_read_watcher'], {'hub': 'self.gevent_hub'}), '(self.gevent_read_watcher, hub=self.gevent_hub)\n', (18866, 18913), False, 'from gevent.socket import wait\n'), ((18989, 19021), 'select.select', 'select.select', (['()', '[self]', '()', '(0)'], {}), '((), [self], (), 0)\n', (19002, 19021), False, 'import select\n'), ((19219, 19271), 'gevent.socket.wait', 'wait', (['self.gevent_write_watcher'], {'hub': 'self.gevent_hub'}), '(self.gevent_write_watcher, hub=self.gevent_hub)\n', (19223, 19271), False, 'from gevent.socket import wait\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# This maintains a global state.
# Two expressions are considered the same einsum expression only if they
# have the same subscripts and operand shapes.
class _EinsumPathCached:
def __init__(self):
self.path = {}
def __call__(self, *args, **kwargs):
subscript = args[0]
operands = args[1:]
key = subscript
key += '|'
for operand in operands:
key += '-'.join([str(dim) for dim in operand.shape])
key += '|'
if key not in self.path:
self.path[key] = np.einsum_path(*args,
**kwargs,
optimize='optimal')[0]
kwargs['optimize'] = self.path[key]
return np.einsum(*args, **kwargs)
einsum_pc = _EinsumPathCached()
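# Drop-in replacement for np.einsum: einsum_pc('ij,jk->ik', a, b) computes the
# optimal contraction path once per (subscripts, operand shapes) signature and
# reuses it on later calls.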
# import time
# N = 10
# C = np.random.rand(N, N)
# I = np.random.rand(N, N, N, N)
# begin = time.time()
# for i in range(10):
# einsum_pc('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C)
# einsum_pc('pi,qj,ijko,rk,so->pqrs', C, C, I, C, C)
# end = time.time()
# print(einsum_pc.path)
# print(f'{end - begin}')
# begin = time.time()
# for i in range(10):
# np.einsum('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C, optimize='optimal')
# end = time.time()
# print(f'{end - begin}')
|
[
"numpy.einsum",
"numpy.einsum_path"
] |
[((1342, 1368), 'numpy.einsum', 'np.einsum', (['*args'], {}), '(*args, **kwargs)\n', (1351, 1368), True, 'import numpy as np\n'), ((1140, 1191), 'numpy.einsum_path', 'np.einsum_path', (['*args'], {'optimize': '"""optimal"""'}), "(*args, **kwargs, optimize='optimal')\n", (1154, 1191), True, 'import numpy as np\n')]
|
# Copy this to urls.py. Most sites can leave this as-is. If you have custom
# apps which need routing, modify this file to include those urlconfs.
from django.conf.urls import url, include
urlpatterns = [
url('', include("core.urls")),
# If you were to add a plugin app that handles its own URLs, you might do
# something like this:
#
# url(r'^map/', include("onisite.plugins.map.urls")),
]
|
[
"django.conf.urls.include"
] |
[((219, 239), 'django.conf.urls.include', 'include', (['"""core.urls"""'], {}), "('core.urls')\n", (226, 239), False, 'from django.conf.urls import url, include\n')]
|
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
import random
async def reset(dut):
dut.reset <= 1
await ClockCycles(dut.clk, 5)
    dut.reset <= 0
@cocotb.test()
async def test_pwm(dut):
clock = Clock(dut.clk, 10, units="us")
cocotb.fork(clock.start())
# test a range of values
for i in range(10, 255, 20):
# set pwm to this level
dut.level <= i
await reset(dut)
        # wait for `level` clock cycles; the output should stay high throughout
await ClockCycles(dut.clk, i)
# assert still high
assert(dut.out)
# wait for next rising clk edge
await RisingEdge(dut.clk)
# assert pwm goes low
assert(dut.out == 0)
|
[
"cocotb.clock.Clock",
"cocotb.test",
"cocotb.triggers.RisingEdge",
"cocotb.triggers.ClockCycles"
] |
[((223, 236), 'cocotb.test', 'cocotb.test', ([], {}), '()\n', (234, 236), False, 'import cocotb\n'), ((274, 304), 'cocotb.clock.Clock', 'Clock', (['dut.clk', '(10)'], {'units': '"""us"""'}), "(dut.clk, 10, units='us')\n", (279, 304), False, 'from cocotb.clock import Clock\n'), ((177, 200), 'cocotb.triggers.ClockCycles', 'ClockCycles', (['dut.clk', '(5)'], {}), '(dut.clk, 5)\n', (188, 200), False, 'from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles\n'), ((536, 559), 'cocotb.triggers.ClockCycles', 'ClockCycles', (['dut.clk', 'i'], {}), '(dut.clk, i)\n', (547, 559), False, 'from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles\n'), ((668, 687), 'cocotb.triggers.RisingEdge', 'RisingEdge', (['dut.clk'], {}), '(dut.clk)\n', (678, 687), False, 'from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles\n')]
|
import random
from typing import List
def selection_sort(numbers: List[int]) -> List[int]:
len_numbers = len(numbers)
for i in range(len_numbers):
min_idx = i
for j in range(i + 1, len_numbers):
if numbers[min_idx] > numbers[j]:
min_idx = j
numbers[i], numbers[min_idx] = numbers[min_idx], numbers[i]
return numbers
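# Selection sort repeatedly finds the minimum of the unsorted suffix and swaps
# it into place: O(n^2) comparisons but at most n-1 swaps, done in place.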
nums = [random.randint(0, 100) for _ in range(10)]
#nums = [2, 5, 1, 8, 7, 3]
print(selection_sort(nums))
|
[
"random.randint"
] |
[((394, 416), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (408, 416), False, 'import random\n')]
|
import codecademylib
import pandas as pd
inventory = pd.read_csv('inventory.csv')
print(inventory.head(10))
staten_island = inventory.head(10)
product_request = staten_island.product_description
seed_request = inventory[(inventory.location == 'Brooklyn') & (inventory.product_type == 'seeds')]
inventory['in_stock'] = inventory.quantity.apply(lambda q: q > 0)  # each value is a scalar quantity, not a row
inventory['total_value'] = inventory.price * inventory.quantity
combine_lambda = lambda row: '{}-{}'.format(row.product_type, row.product_description)
inventory['full_description'] = inventory.apply(combine_lambda, axis=1)
print(inventory)
|
[
"pandas.read_csv"
] |
[((54, 82), 'pandas.read_csv', 'pd.read_csv', (['"""inventory.csv"""'], {}), "('inventory.csv')\n", (65, 82), True, 'import pandas as pd\n')]
|
from random import randint
from src.randomExpression.RandomOperand import RandomOperand
from src.randomExpression.RandomOperator import RandomOperator
class ExpressionBranch:
def __init__(self, size):
"""
        This class creates a random expression of a given length. The operands
will only be 0-9 or x and the operators will only be '+', '-', '/', '*'.
:param size: Determines how long the expression can be
"""
self._branch_expression = ""
self._branch_length = 1 if size is None else size
self.operator = RandomOperator()
self.operand = RandomOperand(9)
self.create_branch()
def __str__(self):
"""
:return: String the most recent constructed expression with a title
"""
return "Expression: " + self._branch_expression
def get_branch(self):
"""
:return: String The most recent constructed expression
"""
return self._branch_expression
def create_branch(self):
"""
Creates a new math expression within the provided length.
The expression can contain operands 0-9 and operators +, -, *, /
:return: A random mathematical expression
"""
constructed_expression = ""
for i in range(self._ensure_odd_length()):
if i % 2 == 0:
constructed_expression += self.operand.generate_operand()
else:
constructed_expression += self.operator.generate_operator()
self._branch_expression = constructed_expression
return self._branch_expression
def _ensure_odd_length(self):
"""
        An equation must have an odd number of tokens (e.g. "1 + 2"). If we
        end up with an even number, we will have an invalid equation,
        so this ensures that our equation will be of the correct length.
        1 is added so that a randomly selected even length (including 0) becomes odd.
:return: An odd number within a given range
"""
eq_length = randint(0, self._branch_length - 1)
return eq_length + 1 if eq_length % 2 == 0 else eq_length
def is_valid_branch(self, branch):
"""
Loops through the supplied equation and determines if the equation
is of an odd length and alternates operands and operators.
:return: Whether the entire equation is valid or not
"""
if len(branch) % 2 == 0:
return False
for i in range(len(branch)):
if i % 2 == 0:
if not self.operand.valid_operand(branch[i]):
return False
else:
if not self.operator.valid_operator(branch[i]):
return False
return True
if __name__ == '__main__':
exp = ExpressionBranch(10)
print(exp)
print(exp.is_valid_branch(exp.get_branch()))
|
[
"src.randomExpression.RandomOperand.RandomOperand",
"src.randomExpression.RandomOperator.RandomOperator",
"random.randint"
] |
[((576, 592), 'src.randomExpression.RandomOperator.RandomOperator', 'RandomOperator', ([], {}), '()\n', (590, 592), False, 'from src.randomExpression.RandomOperator import RandomOperator\n'), ((616, 632), 'src.randomExpression.RandomOperand.RandomOperand', 'RandomOperand', (['(9)'], {}), '(9)\n', (629, 632), False, 'from src.randomExpression.RandomOperand import RandomOperand\n'), ((2020, 2055), 'random.randint', 'randint', (['(0)', '(self._branch_length - 1)'], {}), '(0, self._branch_length - 1)\n', (2027, 2055), False, 'from random import randint\n')]
|
# import os
import sys
from multiprocessing import Pool
# import time
# from concurrent import futures
import test4
print("test2 run")
class MyLocker:
def __init__(self):
print("mylocker.__init__() called.")
@staticmethod
def acquire():
print("mylocker.acquire() called.")
@staticmethod
def unlock():
print(" mylocker.unlock() called.")
class Lockerex(MyLocker):
@staticmethod
def acquire():
print("lockerex.acquire() called.")
@staticmethod
def unlock():
print(" lockerex.unlock() called.")
def lockhelper(cls):
"""
    cls must implement the acquire() and unlock() static methods.
    :param cls: locker class whose acquire/unlock bracket the wrapped call
    :return: the configured decorator
"""
def _deco(func):
def __deco(*args, **kwargs):
print("before %s called." % func.__name__)
cls.acquire()
try:
return func(*args, **kwargs)
finally:
cls.unlock()
return __deco
return _deco
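# Usage sketch: the returned decorator brackets each call with the locker's
# acquire()/unlock() hooks, e.g.
#   @lockhelper(MyLocker)
#   def work():
#       print("working")
# Calling work() prints "before work called.", acquires the locker, runs the
# body, then unlocks.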
abc = 1
import logging
def test(value):
pid = 123
global abc
abc += 1
info = "Value=%s, Pid=%s, abc=%s" % (value, pid, abc)
logging.info(info)
return info, 5
if __name__ == '__main__':
print("test2")
|
[
"logging.info"
] |
[((1115, 1133), 'logging.info', 'logging.info', (['info'], {}), '(info)\n', (1127, 1133), False, 'import logging\n')]
|
import random as rnd
EMPTY = ' '
DEAD = 'X'
HIT = '+'
MISSED = '-'
SHIP = 'O'
LETTERKEYS = [
'A',
'B',
'C',
'D',
'E',
'F',
'G',
'H',
'I',
'J'
]
def digit(key):
if key in LETTERKEYS:
return LETTERKEYS.index(key) + 1
elif 1 <= key <= 10:
return key
else:
raise ValueError
def letter(key):
if key in LETTERKEYS:
return key
elif 1 <= key <= 10:
return LETTERKEYS[key-1]
else:
raise ValueError
def area(posx, posy):
res = []
for i in range(3):
for j in range(3):
res.append((posx - 1 + i, posy - 1 + j))
return res
def around(posx, posy):
return [a for a in area(posx, posy) if
not (a[0] == posx and a[1] == posy) and (1 <= a[0] <= 10 and 1 <= a[1] <= 10)]
def cross_around(posx, posy):
res = []
for i in range(2):
res.append((posx, posy - 1 + i * 2))
res.append((posx - 1 + i * 2, posy))
return [a for a in res if (1 <= a[0] <= 10 and 1 <= a[1] <= 10)]
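# Worked examples: around(2, 2) returns the eight neighbours of (2, 2);
# cross_around(1, 1) returns only the in-board orthogonal cells [(1, 2), (2, 1)].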
# Parent class for ships
class SBGameShip:
def __init__(self, field, angle = None, length = None, posx = None, posy = None):
self.angle = 0 if angle is None else angle
self.length = 0 if length is None else length
self.posx = 0 if not posx else posx
self.posy = 0 if not posy else posy
self.field = field
self.cells = list()
def __randomcoords(self):
posx = 0
posy = 0
self.angle = rnd.randint(0, 3)
if self.angle == 0:
posx = rnd.randint(self.length, self.field.xlength)
posy = rnd.randint(1, self.field.ylength)
elif self.angle == 1:
posx = rnd.randint(1, self.field.xlength)
posy = rnd.randint(1, self.field.ylength - self.length)
elif self.angle == 2:
posx = rnd.randint(1, self.field.xlength - self.length)
posy = rnd.randint(1, self.field.ylength)
elif self.angle == 3:
posx = rnd.randint(1, self.field.xlength)
posy = rnd.randint(self.length, self.field.ylength)
self.posx = posx
self.posy = posy
self.gen_cells()
def gen_cells(self):
if self.angle == 0:
self.cells = [[self.posx - cell, self.posy, SHIP] for cell in range(self.length)]
elif self.angle == 1:
self.cells = [[self.posx, self.posy + cell, SHIP] for cell in range(self.length)]
elif self.angle == 2:
self.cells = [[self.posx + cell, self.posy, SHIP] for cell in range(self.length)]
elif self.angle == 3:
self.cells = [[self.posx, self.posy - cell, SHIP] for cell in range(self.length)]
def randompos(self):
ship_not_put = True
        while ship_not_put:
try:
self.__randomcoords()
self.field.put_ship(self)
ship_not_put = False
except ValueError:
pass
def isdead(self):
is_d = False not in [a[2] == HIT for a in self.cells]
if is_d:
for i, cell in enumerate(self.cells):
self.cells[i][2] = DEAD
return is_d
def around(self):
res = set()
own_cells = [(cell[0], cell[1]) for cell in self.cells]
for cell in own_cells:
for a in around(cell[0], cell[1]):
if a not in own_cells:
res.add(a)
res = list(res)
res = [a for a in res if 1 <= a[0] <= 10 and 1 <= a[1] <= 10]
return res
def __str__(self):
return str(self.cells)
class SBGameField:
def __init__(self):
self.ships = list()
self.hitten = list()
self.ship_hitten = list()
self.xlength = self.ylength = 10
def clean(self):
self.ships = list()
self.hitten = list()
self.ship_hitten = list()
self.xlength = self.ylength = 10
def get_all_ship_cells(self):
cells = []
for ship in self.ships:
for cell in ship.cells:
cells.append((cell[0], cell[1]))
return cells
def __getitem__(self, key):
key = digit(key)
rowitems = [None, ] + [EMPTY for cell in range(10)]
for cell in self.hitten:
if cell[0] == key:
rowitems[cell[1]] = cell[2]
for ship in self.ships:
for cell in ship.cells:
if cell[0] == key:
rowitems[cell[1]] = cell[2]
return rowitems
def field(self):
return {key: self[key] for key in LETTERKEYS}
def opfield(self):
return {key: [(cell if cell != SHIP else EMPTY) for cell in self[key]] for key in LETTERKEYS}
def __str__(self):
field = self.field()
header = ' | ' + ' | '.join(key for key in field) + ' |'
border = '-' * len(header)
content = '\n'.join(
['{:<2}'.format(str(i)) + ' |' + '|'.join(
'{:^5}'.format(field[key][i]) for key in field.keys()) + '|\n' + border for i in range(1, 11)])
return header + '\n' + border + '\n' + content
def oneline(self):
field = self.field()
content = ''.join([''.join(field[key][i] for key in field.keys()) for i in range(1, 11)])
        content = ''.join(map(lambda a: a if a != ' ' else '_', content))  # compare by value, not identity
return content
def op_oneline(self):
field = self.opfield()
content = ''.join([''.join(field[key][i] for key in field.keys()) for i in range(1, 11)])
        content = ''.join(map(lambda a: a if a != ' ' else '_', content))  # compare by value, not identity
return content
def as_opposite(self):
field = self.opfield()
header = ' | ' + ' | '.join(key for key in field) + ' |'
border = '-' * len(header)
content = '\n'.join(
['{:<2}'.format(str(i)) + ' |' + '|'.join(
'{:^5}'.format(field[key][i]) for key in field.keys()) + '|\n' + border for i in range(1, 11)])
return header + '\n' + border + '\n' + content
def can_ship(self, posx, posy):
all_cells = self.get_all_ship_cells()
banned_cells = []
for cell in all_cells:
banned_cells += area(cell[0], cell[1])
return (posx, posy) not in banned_cells
def put_ship(self, ship):
crossing = False not in [self.can_ship(cell[0], cell[1]) for cell in ship.cells]
if not crossing:
            raise ValueError('A ship cannot be placed here')
self.ships.append(ship)
def get_insulted(self):
return [a for a in self.ship_hitten if a[2] == HIT]
def hit(self, posx, posy):
success = False
res = None
for ship in self.ships:
for cell in ship.cells:
if (cell[0], cell[1]) == (posx, posy):
cell[2] = HIT
res = HIT
self.hitten.append([posx, posy, HIT])
success = True
if ship.isdead():
for a in ship.around():
self.hitten.append([a[0], a[1], MISSED])
res = DEAD
for cell in self.hitten:
for c in ship.cells:
c[2] = DEAD
if (cell[0], cell[1]) == (c[0], c[1]):
cell[2] = c[2]
return res
def ai_hit(self):
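        # Targeting strategy: with no wounded ship, fire at a random untried
        # cell; with exactly one hit, probe its orthogonal neighbours; with
        # two or more hits, keep extending along the established row/column.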
field = self.opfield()
        # A set, not a one-shot map iterator: the membership tests in the loop
        # below would otherwise exhaust the iterator after the first check.
        hc = {(a[0], a[1]) for a in self.hitten}
insulted = self.get_insulted()
if len(insulted) == 0:
target_chosen = False
while not target_chosen:
target = (rnd.randint(1, 10), rnd.randint(1, 10))
target_chosen = target not in hc
elif len(insulted) == 1:
target = rnd.choice(cross_around(insulted[0][0], insulted[0][1]))
elif len(insulted) > 1:
xses = [cell[0] for cell in insulted]
yses = [cell[1] for cell in insulted]
last = next(a for a in reversed(self.ship_hitten) if a[2] == HIT)
ca = cross_around(last[0], last[1])
print(last, ca)
if len(set(xses)) == 1:
ca = [a for a in ca if
a[0] == xses[0]]
print(last, ca)
ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
print(last, ca)
if not ca:
last = next(a for a in self.ship_hitten if a[2] == HIT)
ca = cross_around(last[0], last[1])
print(last, ca)
ca = [a for a in ca if
a[0] == xses[0]]
print(last, ca)
ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
print(last, ca)
try:
target = ca[0]
except IndexError:
print(self.as_opposite(), '\n', self)
elif len(set(yses)) == 1:
ca = [a for a in ca if
a[1] == yses[0]]
print(last, ca)
ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
print(last, ca)
if not ca:
last = next(a for a in self.ship_hitten if a[2] == HIT)
ca = cross_around(last[0], last[1])
print(last, ca)
ca = [a for a in ca if
a[1] == yses[1]]
print(last, ca)
ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
print(last, ca)
try:
target = ca[0]
except IndexError:
print(self.as_opposite(), '\n', self)
res = self.hit(*target)
return res
|
[
"random.randint"
] |
[((1514, 1531), 'random.randint', 'rnd.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (1525, 1531), True, 'import random as rnd\n'), ((1579, 1623), 'random.randint', 'rnd.randint', (['self.length', 'self.field.xlength'], {}), '(self.length, self.field.xlength)\n', (1590, 1623), True, 'import random as rnd\n'), ((1643, 1677), 'random.randint', 'rnd.randint', (['(1)', 'self.field.ylength'], {}), '(1, self.field.ylength)\n', (1654, 1677), True, 'import random as rnd\n'), ((1727, 1761), 'random.randint', 'rnd.randint', (['(1)', 'self.field.xlength'], {}), '(1, self.field.xlength)\n', (1738, 1761), True, 'import random as rnd\n'), ((1781, 1829), 'random.randint', 'rnd.randint', (['(1)', '(self.field.ylength - self.length)'], {}), '(1, self.field.ylength - self.length)\n', (1792, 1829), True, 'import random as rnd\n'), ((1879, 1927), 'random.randint', 'rnd.randint', (['(1)', '(self.field.xlength - self.length)'], {}), '(1, self.field.xlength - self.length)\n', (1890, 1927), True, 'import random as rnd\n'), ((1947, 1981), 'random.randint', 'rnd.randint', (['(1)', 'self.field.ylength'], {}), '(1, self.field.ylength)\n', (1958, 1981), True, 'import random as rnd\n'), ((7701, 7719), 'random.randint', 'rnd.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (7712, 7719), True, 'import random as rnd\n'), ((7721, 7739), 'random.randint', 'rnd.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (7732, 7739), True, 'import random as rnd\n'), ((2031, 2065), 'random.randint', 'rnd.randint', (['(1)', 'self.field.xlength'], {}), '(1, self.field.xlength)\n', (2042, 2065), True, 'import random as rnd\n'), ((2085, 2129), 'random.randint', 'rnd.randint', (['self.length', 'self.field.ylength'], {}), '(self.length, self.field.ylength)\n', (2096, 2129), True, 'import random as rnd\n')]
|
groups = [
{
"img": "groups/images/vegan.png",
"name": "Vegan Group",
},
{
"img": "groups/images/ketogenic-diet.png",
"name": "Keto Group",
},
{
"img": "groups/images/vegetables.png",
"name": "Vegetarian Group",
},
{
"img": "groups/images/gluten-free.png",
"name": "Gluten Free Group",
},
{
"img": "groups/images/sushi.png",
"name": "Raw Diet",
},
{
"img": "groups/images/sardine.png",
"name": "Pescatarian Group",
},
{
"img": "groups/images/fruits.png",
"name": "Paleo Group",
},
{
"img": "groups/images/low-carb-diet.png",
"name": "Low Carb Group",
}
]
def initialize_groups():
from groups.models import Group
for group in groups:
new_group = Group(img_path=group["img"], name=group["name"])
new_group.save()
print(f'Added group - {new_group.name}')
|
[
"groups.models.Group"
] |
[((898, 946), 'groups.models.Group', 'Group', ([], {'img_path': "group['img']", 'name': "group['name']"}), "(img_path=group['img'], name=group['name'])\n", (903, 946), False, 'from groups.models import Group\n')]
|
# Data
# OBJECT 'SERIALIZATION' (to manage data saving)
try:
import cPickle as pickle
except ImportError:
import pickle
# mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
# FUNCTIONS FOR CONTROL AND MANAGEMENT OF DATA FILES
# mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
def salvar_Backup_datos(informacion_para_salvar, nombre_con_ruta):
    ''' Saves the data and automatically generates a copy with the same received name but a BAK extension '''
try:
ficheroDatos = open(nombre_con_ruta, "wb")
        pickle.dump(informacion_para_salvar, ficheroDatos, protocol=-1) # -1, automatically select the highest protocol available
ficheroDatos.close()
        #AUTOMATIC CREATION OF .bak COPIES
        #split the name and the extension of the path passed to the function
longitud_extension = len(nombre_con_ruta.split(".")[-1])
nombre_con_ruta_backup = nombre_con_ruta[:-longitud_extension] + "bak"
ficheroDatos_backup = open(nombre_con_ruta_backup, "wb")
        pickle.dump(informacion_para_salvar, ficheroDatos_backup, protocol=-1) # -1, automatically select the highest protocol available
        ficheroDatos_backup.close()
return(True)
except:
print ("---------------------------")
print ("Error Guardando backup >> ", nombre_con_ruta)
return(False)
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def cargar_datos_desde_fichero(nombre_con_ruta):
    ''' Restores the backup data from file when restarting '''
datos = []
try:
nombreDatosFile = nombre_con_ruta
ficheroDatos = open(nombreDatosFile,"rb")
datos = pickle.load(ficheroDatos)
ficheroDatos.close()
return True, datos
except:
print ("---------------------------")
print ("error con la carga de registros de backup")
return False , []
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def ListaUnica (lista, destino):
for n in range(len(lista)):
if isinstance(lista[n],list):
ListaUnica(lista[n], destino)
else:
destino.append(lista[n])
return destino
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def convertir_Datos_to_TXT(datos, nombreDatosFile, cabecera=""):
'''
    RECEIVES A LIST or A PACKED LIST (a list of lists) and the name under which we want to save the TXT.
    Function for converting the data from a series of lists
    into a plain-text format separated into columns.
    Optionally we can supply a text header for that data.
    This is the information that is emailed to subscribers.
'''
    #datos = a flat list or a list of lists
nombreFileSalida = nombreDatosFile
numeroDatos = len(datos)
    outfile = open(nombreFileSalida, 'w') # We pass 'w' to open the file for writing.
    if cabecera != "": #if there is header information it is added before the data so that line is not numbered
        outfile.write(cabecera)
        outfile.write("\n\n")
    #and we leave the file open to keep writing the information corresponding to the data
    if datos == []: #If an empty list arrives (which can happen) errors would be raised,
        #so we add a line to report it, close the file and return
        outfile.write("\nNo information available\n")
outfile.close()
return (True)
try:
for x in range(len(datos)):
lista_unica=[]
indice = "00000"+ str(x)
indice = indice[-5:]
linea = indice + "\t"
lista_unica = ListaUnica(datos[x], lista_unica)
for elemento in lista_unica:
if str(type(elemento))== "<class 'int'>" or str(type(elemento))== "<class 'float'>":
dato = float(elemento)
dato = "%.2f" % (dato)
linea += str(dato) + "\t"
else:
linea += elemento + "\t"
linea += "\n"
outfile.write(linea)
outfile.close()
return (True)
except:
print ("---------------------------")
        outfile.close() #Close it in case it was left open
        outfile = open(nombreFileSalida, 'w') #Reopen in text mode and write an error message
        linea = "\n\nThere was an error while converting the data\n\nContact EXPERIMENTO BIO on telegram with the command /DATA_ERROR_"+nombreDatosFile[:-4]+" and request the data in RAW format if you wish \n"
outfile.write(linea)
outfile.close()
return (False)
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
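if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the original module); the file
    # name is illustrative only.
    demo = [["sensor1", 23.5], ["sensor2", 19.0]]
    salvar_Backup_datos(demo, "demo.dat")  # also writes demo.bak
    ok, recuperados = cargar_datos_desde_fichero("demo.dat")
    print(ok, recuperados)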
|
[
"pickle.dump",
"pickle.load"
] |
[((566, 629), 'pickle.dump', 'pickle.dump', (['informacion_para_salvar', 'ficheroDatos'], {'protocol': '(-1)'}), '(informacion_para_salvar, ficheroDatos, protocol=-1)\n', (577, 629), False, 'import pickle\n'), ((1073, 1143), 'pickle.dump', 'pickle.dump', (['informacion_para_salvar', 'ficheroDatos_backup'], {'protocol': '(-1)'}), '(informacion_para_salvar, ficheroDatos_backup, protocol=-1)\n', (1084, 1143), False, 'import pickle\n'), ((1872, 1897), 'pickle.load', 'pickle.load', (['ficheroDatos'], {}), '(ficheroDatos)\n', (1883, 1897), False, 'import pickle\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import unittest
from amaascore.market_data.fx_rate import FXRate
from amaascore.tools.generate_market_data import generate_fx_rate
class FXRateTest(unittest.TestCase):
def setUp(self):
self.longMessage = True # Print complete error message on failure
self.fx_rate = generate_fx_rate()
self.asset_id = self.fx_rate.asset_id
def tearDown(self):
pass
def test_FXRate(self):
self.assertEqual(type(self.fx_rate), FXRate)
def test_FXRateToDict(self):
fx_rate_dict = self.fx_rate.__dict__
self.assertEqual(type(fx_rate_dict), dict)
self.assertEqual(fx_rate_dict.get('asset_id'), self.asset_id)
def test_FXRateToJSON(self):
fx_rate_json = self.fx_rate.to_json()
self.assertEqual(fx_rate_json.get('asset_id'), self.asset_id)
        # If fx_rate_json is valid JSON, this will run without serialisation errors
json_asset_id = json.loads(json.dumps(fx_rate_json, ensure_ascii=False)).get('asset_id')
self.assertEqual(json_asset_id, self.asset_id)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"amaascore.tools.generate_market_data.generate_fx_rate",
"json.dumps"
] |
[((1187, 1202), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1200, 1202), False, 'import unittest\n'), ((387, 405), 'amaascore.tools.generate_market_data.generate_fx_rate', 'generate_fx_rate', ([], {}), '()\n', (403, 405), False, 'from amaascore.tools.generate_market_data import generate_fx_rate\n'), ((1038, 1082), 'json.dumps', 'json.dumps', (['fx_rate_json'], {'ensure_ascii': '(False)'}), '(fx_rate_json, ensure_ascii=False)\n', (1048, 1082), False, 'import json\n')]
|
# https://spotipy.readthedocs.io/en/2.13.0/
# pip install spotipy --upgrade
# pipenv install python-dotenv
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import sys
import time
from flask import Flask, jsonify, Response, render_template, request
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
import numpy as np
from os import getenv
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
market = ["us"]
client_id = getenv('SPOTIPY_CLIENT_ID')
client_secret = getenv('SPOTIPY_CLIENT_SECRET')
credentials = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
token = credentials.get_access_token()
spotify = spotipy.Spotify(auth=token)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/output', methods=['POST'])
def output():
    # Read the song name the user submitted via the HTML form
user_input_song = request.form['user_input_song']
# spotify search params
results = spotify.search(str(user_input_song), type="track", limit=1)
    return jsonify(results)  # serialise the Spotify response for the client
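# Minimal dev-server entry point (an assumption; not in the original file).
if __name__ == '__main__':
    app.run(debug=True)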
|
[
"flask.Flask",
"dotenv.load_dotenv",
"flask.render_template",
"spotipy.Spotify",
"spotipy.oauth2.SpotifyClientCredentials",
"os.getenv"
] |
[((399, 412), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (410, 412), False, 'from dotenv import load_dotenv\n'), ((420, 435), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (425, 435), False, 'from flask import Flask, jsonify, Response, render_template, request\n'), ((466, 493), 'os.getenv', 'getenv', (['"""SPOTIPY_CLIENT_ID"""'], {}), "('SPOTIPY_CLIENT_ID')\n", (472, 493), False, 'from os import getenv\n'), ((510, 541), 'os.getenv', 'getenv', (['"""SPOTIPY_CLIENT_SECRET"""'], {}), "('SPOTIPY_CLIENT_SECRET')\n", (516, 541), False, 'from os import getenv\n'), ((558, 632), 'spotipy.oauth2.SpotifyClientCredentials', 'SpotifyClientCredentials', ([], {'client_id': 'client_id', 'client_secret': 'client_secret'}), '(client_id=client_id, client_secret=client_secret)\n', (582, 632), False, 'from spotipy.oauth2 import SpotifyClientCredentials\n'), ((683, 710), 'spotipy.Spotify', 'spotipy.Spotify', ([], {'auth': 'token'}), '(auth=token)\n', (698, 710), False, 'import spotipy\n'), ((752, 781), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (767, 781), False, 'from flask import Flask, jsonify, Response, render_template, request\n')]
|
# # -*- coding: utf-8 -*-
# from chatterbot import ChatBot
# bot = ChatBot(
# "Math & Time Bot",
# logic_adapters=[
# "chatterbot.logic.MathematicalEvaluation",
# "chatterbot.logic.TimeLogicAdapter"
# ],
# input_adapter="chatterbot.input.VariableInputTypeAdapter",
# output_adapter="chatterbot.output.OutputAdapter",
# trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
# )
# # Print an example of getting one math based response
# response = bot.get_response("What is 4 + 9?")
# print(response)
# # Print an example of getting one time based response
# response = bot.get_response("What time is it?")
# print(response)
import numpy as np
from matplotlib import pyplot as plt
import scipy.io.wavfile as wav
from numpy.lib import stride_tricks
import sys
import os
import pickle
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
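    # Frame the signal into half-overlapping windows, apply the window
    # function to each frame, and return the one-sided FFT of every frame.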
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
samples = np.append(np.zeros(int(np.floor(frameSize/2.0))), sig)
cols = np.ceil( (len(samples) - frameSize) / float(hopSize)) + 1
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(samples, shape=(int(cols), frameSize), strides=(samples.strides[0]*hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
def logscale_spec(spec, sr=22000, factor=20.):
timebins, freqbins = np.shape(spec)
scale = np.linspace(0, 1, freqbins) ** factor
scale *= (freqbins-1)/max(scale)
scale = np.unique(np.round(scale))
newspec = np.complex128(np.zeros([timebins, len(scale)]))
for i in range(0, len(scale)):
if i == len(scale)-1:
newspec[:,i] = np.sum(spec[:,int(scale[i]):], axis=1)
else:
newspec[:,i] = np.sum(spec[:,int(scale[i]):int(scale[i+1])], axis=1)
allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
freqs = []
for i in range(0, len(scale)):
if i == len(scale)-1:
freqs += [np.mean(allfreqs[int(scale[i]):])]
else:
freqs += [np.mean(allfreqs[int(scale[i]):int(scale[i+1])])]
return newspec, freqs
def plotstft(audiopath, binsize=2**10, plotpath=None, colormap="jet"):
samplerate, samples = wav.read(audiopath)
s = stft(samples, binsize)
sshow, freq = logscale_spec(s, factor=1.0, sr=samplerate)
ims = 20.*np.log10(np.abs(sshow)/10e-6)
timebins, freqbins = np.shape(ims)
freqbins=freqbins/2
print("timebins: ", timebins)
print("freqbins: ", freqbins)
# plt.title('Spectrogram')
# plt.imshow(np.transpose(ims), origin="lower", aspect="auto", cmap=colormap, interpolation="none")
arr=[]
fingerprint = []
min_var=np.median(ims[0])
for i in range(0,timebins,3):
temp=np.median(ims[i])
arr.append(temp)
plt.plot(temp)
if min_var > temp and temp>0:
min_var = temp
fingerprint.append(temp)
if min_var<0:
min_var = 0
# plt.colorbar()
# plt.xlabel("timebins ")
# plt.ylabel("frequency (hz)")
# plt.xlim([0, timebins-1])
# plt.ylim([0, int(freqbins)])
# plt.plot(arr,'.',color='b')
# plt.show()
# xlocs = np.float32(np.linspace(0, timebins-1, 5))
# plt.xticks(xlocs, ["%.02f" % l for l in ((xlocs*len(samples)/timebins)+(0.5*binsize))/samplerate])
# ylocs = np.int16(np.round(np.linspace(0, freqbins-1, 10)))
# plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])
# if plotpath:
# plt.savefig(plotpath, bbox_inches="tight")
# plt.clf()
return ims,arr,fingerprint
filename1='test.wav'
#ims2,arr2,fingerprint2=plotstft('newSong.wav')
def check_song(filename1,ims2,arr2,fingerprint2):
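    # Slide the shorter fingerprint over the longer one; each offset is scored
    # by the median absolute difference, and a score below `err` is a match.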
ims,arr,fingerprint1 = plotstft(filename1)
# ims2,arr2,fingerprint2 = plotstft(filename2)
arrBig = fingerprint1
arrSmall = fingerprint2
l1 = len(fingerprint1)
l2 = len(fingerprint2)
err = 1000
subsong = False
sum1=0
min_sum=20000
newarr=[]
for i in range(0,l1-l2+1):
subArr = np.array(arrBig[i:i+l2])
for j in range(0,l2):
            dummy = abs(subArr[j]-arrSmall[j])  # absolute difference per bin
newarr.append(dummy)
newarr=np.array(newarr)
sum1 = np.median(newarr)
if sum1<=0:
sum1 = sum1*(-1)
if sum1<err:
subsong=True
newarr=[]
if(min_sum>sum1):
min_sum=sum1
return subsong,min_sum
song_files = os.listdir('./songs')
main_lis={}
#############################
filename1='test.wav'
ims2,arr2,fingerprint1=plotstft(sys.argv[1])
fingerprint1=np.array(fingerprint1[20:])
filename2='db.pkl'
main_dir={}
def check_song1(fingerprint1):
with open(filename2,'rb') as inp:
main_lis = pickle.load(inp)
for fprint in main_lis:
arrBig = main_lis[fprint]
arrSmall = fingerprint1
l1 = len(arrBig)
l2 = len(arrSmall)
err = 1000
subsong = False
sum1=0
min_sum=20000
newarr=[]
for i in range(0,l1-l2+1):
subArr = np.array(arrBig[i:i+l2])
for j in range(0,l2):
                dummy = abs(subArr[j]-arrSmall[j])  # absolute difference per bin
newarr.append(dummy)
newarr=np.array(newarr)
sum1 = np.median(newarr)
if sum1<=0:
sum1 = sum1*(-1)
if sum1<err:
subsong=True
newarr=[]
if(min_sum>sum1):
min_sum=sum1
main_dir[fprint]=min_sum
check_song1(fingerprint1)
# print(main_dir)
main_dir = sorted(main_dir.items(),key = lambda x:x[1])
print(main_dir)
|
[
"numpy.fft.rfft",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.median",
"numpy.floor",
"numpy.zeros",
"scipy.io.wavfile.read",
"numpy.shape",
"numpy.fft.fftfreq",
"pickle.load",
"numpy.array",
"numpy.linspace",
"numpy.round",
"os.listdir"
] |
[((4136, 4157), 'os.listdir', 'os.listdir', (['"""./songs"""'], {}), "('./songs')\n", (4146, 4157), False, 'import os\n'), ((4281, 4308), 'numpy.array', 'np.array', (['fingerprint1[20:]'], {}), '(fingerprint1[20:])\n', (4289, 4308), True, 'import numpy as np\n'), ((1324, 1343), 'numpy.fft.rfft', 'np.fft.rfft', (['frames'], {}), '(frames)\n', (1335, 1343), True, 'import numpy as np\n'), ((1418, 1432), 'numpy.shape', 'np.shape', (['spec'], {}), '(spec)\n', (1426, 1432), True, 'import numpy as np\n'), ((2174, 2193), 'scipy.io.wavfile.read', 'wav.read', (['audiopath'], {}), '(audiopath)\n', (2182, 2193), True, 'import scipy.io.wavfile as wav\n'), ((2344, 2357), 'numpy.shape', 'np.shape', (['ims'], {}), '(ims)\n', (2352, 2357), True, 'import numpy as np\n'), ((2605, 2622), 'numpy.median', 'np.median', (['ims[0]'], {}), '(ims[0])\n', (2614, 2622), True, 'import numpy as np\n'), ((1141, 1160), 'numpy.zeros', 'np.zeros', (['frameSize'], {}), '(frameSize)\n', (1149, 1160), True, 'import numpy as np\n'), ((1442, 1469), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'freqbins'], {}), '(0, 1, freqbins)\n', (1453, 1469), True, 'import numpy as np\n'), ((1533, 1548), 'numpy.round', 'np.round', (['scale'], {}), '(scale)\n', (1541, 1548), True, 'import numpy as np\n'), ((2661, 2678), 'numpy.median', 'np.median', (['ims[i]'], {}), '(ims[i])\n', (2670, 2678), True, 'import numpy as np\n'), ((2700, 2714), 'matplotlib.pyplot.plot', 'plt.plot', (['temp'], {}), '(temp)\n', (2708, 2714), True, 'from matplotlib import pyplot as plt\n'), ((3793, 3819), 'numpy.array', 'np.array', (['arrBig[i:i + l2]'], {}), '(arrBig[i:i + l2])\n', (3801, 3819), True, 'import numpy as np\n'), ((3941, 3957), 'numpy.array', 'np.array', (['newarr'], {}), '(newarr)\n', (3949, 3957), True, 'import numpy as np\n'), ((3967, 3984), 'numpy.median', 'np.median', (['newarr'], {}), '(newarr)\n', (3976, 3984), True, 'import numpy as np\n'), ((4421, 4437), 'pickle.load', 'pickle.load', (['inp'], {}), '(inp)\n', (4432, 4437), False, 'import pickle\n'), ((941, 973), 'numpy.floor', 'np.floor', (['(overlapFac * frameSize)'], {}), '(overlapFac * frameSize)\n', (949, 973), True, 'import numpy as np\n'), ((1821, 1859), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['(freqbins * 2)', '(1.0 / sr)'], {}), '(freqbins * 2, 1.0 / sr)\n', (1835, 1859), True, 'import numpy as np\n'), ((1009, 1034), 'numpy.floor', 'np.floor', (['(frameSize / 2.0)'], {}), '(frameSize / 2.0)\n', (1017, 1034), True, 'import numpy as np\n'), ((2301, 2314), 'numpy.abs', 'np.abs', (['sshow'], {}), '(sshow)\n', (2307, 2314), True, 'import numpy as np\n'), ((4678, 4704), 'numpy.array', 'np.array', (['arrBig[i:i + l2]'], {}), '(arrBig[i:i + l2])\n', (4686, 4704), True, 'import numpy as np\n'), ((4836, 4852), 'numpy.array', 'np.array', (['newarr'], {}), '(newarr)\n', (4844, 4852), True, 'import numpy as np\n'), ((4864, 4881), 'numpy.median', 'np.median', (['newarr'], {}), '(newarr)\n', (4873, 4881), True, 'import numpy as np\n')]
|
from nxt.tokens import register_token
PREFIX = 'ex::'
def detect_token_type(value):
return value.startswith(PREFIX)
def resolve_token(stage, node, value, layer, **kwargs):
value = stage.resolve(node, value, layer, **kwargs)
# Reverses given value
return value[::-1]
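# Illustration (an assumption about what nxt passes in): if the resolved
# value is "abc", this token type returns "cba".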
register_token(PREFIX, detect_token_type, resolve_token)
|
[
"nxt.tokens.register_token"
] |
[((289, 345), 'nxt.tokens.register_token', 'register_token', (['PREFIX', 'detect_token_type', 'resolve_token'], {}), '(PREFIX, detect_token_type, resolve_token)\n', (303, 345), False, 'from nxt.tokens import register_token\n')]
|
import loaders
import xarray as xr
import numpy as np
from loaders._utils import SAMPLE_DIM_NAME
import pytest
def test_multiple_unstacked_dims():
na, nb, nc, nd = 2, 3, 4, 5
ds = xr.Dataset(
data_vars={
"var1": xr.DataArray(
np.zeros([na, nb, nc, nd]), dims=["a", "b", "c", "d"],
),
"var2": xr.DataArray(np.zeros([na, nb, nc]), dims=["a", "b", "c"],),
}
)
unstacked_dims = ["c", "d"]
expected = xr.Dataset(
data_vars={
"var1": xr.DataArray(
np.zeros([na * nb, nc, nd]), dims=[SAMPLE_DIM_NAME, "c", "d"],
),
"var2": xr.DataArray(np.zeros([na * nb, nc]), dims=[SAMPLE_DIM_NAME, "c"],),
}
)
result = loaders.stack(ds=ds, unstacked_dims=unstacked_dims)
xr.testing.assert_identical(result.drop(result.coords.keys()), expected)
@pytest.fixture
def gridded_dataset(request):
num_nans, zdim, ydim, xdim = request.param
coords = {"z": range(zdim), "y": range(ydim), "x": range(xdim)}
# unique values for ease of set comparison in test
var = xr.DataArray(
[
[[(100 * k) + (10 * j) + i for i in range(10)] for j in range(10)]
for k in range(zdim)
],
dims=["z", "y", "x"],
coords=coords,
)
var = var.where(var >= num_nans) # assign nan values
return xr.Dataset({"var": var})
@pytest.mark.parametrize(
"gridded_dataset", [(0, 1, 10, 10), (0, 10, 10, 10)], indirect=True,
)
def test_stack_dims(gridded_dataset):
s_dim = SAMPLE_DIM_NAME
ds_train = loaders.stack(["z"], gridded_dataset)
assert set(ds_train.dims) == {s_dim, "z"}
assert len(ds_train["z"]) == len(gridded_dataset.z)
assert ds_train["var"].dims[0] == s_dim
|
[
"loaders.stack",
"pytest.mark.parametrize",
"numpy.zeros",
"xarray.Dataset"
] |
[((1424, 1521), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gridded_dataset"""', '[(0, 1, 10, 10), (0, 10, 10, 10)]'], {'indirect': '(True)'}), "('gridded_dataset', [(0, 1, 10, 10), (0, 10, 10, 10)\n ], indirect=True)\n", (1447, 1521), False, 'import pytest\n'), ((764, 815), 'loaders.stack', 'loaders.stack', ([], {'ds': 'ds', 'unstacked_dims': 'unstacked_dims'}), '(ds=ds, unstacked_dims=unstacked_dims)\n', (777, 815), False, 'import loaders\n'), ((1396, 1420), 'xarray.Dataset', 'xr.Dataset', (["{'var': var}"], {}), "({'var': var})\n", (1406, 1420), True, 'import xarray as xr\n'), ((1605, 1642), 'loaders.stack', 'loaders.stack', (["['z']", 'gridded_dataset'], {}), "(['z'], gridded_dataset)\n", (1618, 1642), False, 'import loaders\n'), ((272, 298), 'numpy.zeros', 'np.zeros', (['[na, nb, nc, nd]'], {}), '([na, nb, nc, nd])\n', (280, 298), True, 'import numpy as np\n'), ((375, 397), 'numpy.zeros', 'np.zeros', (['[na, nb, nc]'], {}), '([na, nb, nc])\n', (383, 397), True, 'import numpy as np\n'), ((568, 595), 'numpy.zeros', 'np.zeros', (['[na * nb, nc, nd]'], {}), '([na * nb, nc, nd])\n', (576, 595), True, 'import numpy as np\n'), ((679, 702), 'numpy.zeros', 'np.zeros', (['[na * nb, nc]'], {}), '([na * nb, nc])\n', (687, 702), True, 'import numpy as np\n')]
|
from paretoarchive.pandas import pareto
import pandas as pd
def test_df():
df = pd.DataFrame(
[[1, 3, 3], [1, 2, 3], [1, 1, 2]], columns=["a", "b", "c"]
)
assert (pareto(df, ["a", "b"]).index == [2]).all()
assert (pareto(df, ["a", "b", "c"]).index == [2]).all()
assert (pareto(df, ["a", "b", "c"], minimizeObjective2=False).index == [0, 2]).all()
if __name__ == "__main__":
test_df()
|
[
"pandas.DataFrame",
"paretoarchive.pandas.pareto"
] |
[((85, 157), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 3, 3], [1, 2, 3], [1, 1, 2]]'], {'columns': "['a', 'b', 'c']"}), "([[1, 3, 3], [1, 2, 3], [1, 1, 2]], columns=['a', 'b', 'c'])\n", (97, 157), True, 'import pandas as pd\n'), ((186, 208), 'paretoarchive.pandas.pareto', 'pareto', (['df', "['a', 'b']"], {}), "(df, ['a', 'b'])\n", (192, 208), False, 'from paretoarchive.pandas import pareto\n'), ((242, 269), 'paretoarchive.pandas.pareto', 'pareto', (['df', "['a', 'b', 'c']"], {}), "(df, ['a', 'b', 'c'])\n", (248, 269), False, 'from paretoarchive.pandas import pareto\n'), ((302, 355), 'paretoarchive.pandas.pareto', 'pareto', (['df', "['a', 'b', 'c']"], {'minimizeObjective2': '(False)'}), "(df, ['a', 'b', 'c'], minimizeObjective2=False)\n", (308, 355), False, 'from paretoarchive.pandas import pareto\n')]
|
from torch.utils.data import Dataset
from utils import load_data, get_labels
class SGEDDataset(Dataset):
def __init__(self, file_path, mode):
src_lst, trg_lst = load_data(file_path, mode)
self.src_lst = src_lst
self.trg_lst = trg_lst
self.labels = get_labels(src_lst, trg_lst)
def __len__(self):
return len(self.labels)
def __getitem__(self, item):
return self.src_lst[item], self.trg_lst[item], self.labels[item]
|
[
"utils.get_labels",
"utils.load_data"
] |
[((174, 200), 'utils.load_data', 'load_data', (['file_path', 'mode'], {}), '(file_path, mode)\n', (183, 200), False, 'from utils import load_data, get_labels\n'), ((285, 313), 'utils.get_labels', 'get_labels', (['src_lst', 'trg_lst'], {}), '(src_lst, trg_lst)\n', (295, 313), False, 'from utils import load_data, get_labels\n')]
|
# type: ignore
"""
A Tensor module on top of Numpy arrays.
TODO: Implement the reverse mode autodiff to compute gradients. It will have
to go backward through the computation graph.
"""
from __future__ import annotations
from typing import Union
import os
import pkgutil
import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
import pyopencl.clmath as clmath
import pyopencl.clrandom as clrandom
import pyopencl.bitonic_sort as clbitonicsort
# Initialize the context
CONTEXT: cl.Context = cl.create_some_context(answers=[0, 1])
# Instantiate a queue
QUEUE: cl.CommandQueue = cl.CommandQueue(CONTEXT)
# OpenCL options
CLOPTS: str = "-cl-mad-enable -cl-fast-relaxed-math"
# Scalar type
Scalar = Union[float, int, np.float32]
def readcl(filename: str) -> str:
"""Read an OpenCL file and return it as a string."""
return pkgutil.get_data("miniml", f"opencl/{filename}").decode()
class Tensor:
"""A tensor class. Computations can be delegated to the GPU."""
def __init__(
self, data: Union[cl.array.Array, list, np.ndarray], gpu: bool = False
) -> None:
"""Initialize variables."""
self._gpu: bool = gpu
if isinstance(data, list):
self._data: np.ndarray = np.array(data, dtype=np.float32)
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, np.ndarray):
if data.dtype != np.float32:
# NOTE: The NumPy array has to be converted into a list first.
# Otherwise, the operations on cpu and gpu produce
# different results. This behavior can be caused by many
# reasons including OpenCL and even the operating system
# itself. Some research is needed to figure out cause and
# eliminate extra work for rebuilding the array.
self._data: np.ndarray = np.array(data.tolist(), np.float32)
else:
self._data: np.ndarray = data
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, cl.array.Array):
self._data: cl.array.Array = data
self._gpu: bool = True
else:
raise TypeError(
"Expected `list`, `np.ndarray`, or `pyopencl.array.Array` got "
f"`{type(data)}`"
)
@property
def data(self) -> Union[np.ndarray, cl.array.Array]:
"""The data inside of a tensor."""
return self._data
@data.setter
def data(self, data: Union[cl.array.Array, list, np.ndarray]) -> None:
"""Set the data inside of a tensor."""
if isinstance(data, list):
self._data: np.ndarray = np.array(data, dtype=np.float32)
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, np.ndarray):
if data.dtype != np.dtype("float32"):
self._data: np.ndarray = data.astype(np.float32)
else:
self._data: np.ndarray = data
if self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
elif isinstance(data, cl.array.Array):
self._data: cl.array.Array = data
self._gpu: bool = True
else:
raise TypeError(
"Expected `list`, `np.ndarray`, or `pyopencl.array.Array` got "
f"`{type(data)}`"
)
def to_cpu(self) -> Tensor:
"""Load the data into CPU."""
if self._gpu:
self._data = self._data.get()
self._gpu = False
return self
def to_gpu(self) -> Tensor:
"""Load the data into GPU."""
if not self._gpu:
self._data = clarray.to_device(QUEUE, self._data)
self._gpu = True
return self
def to_numpy(self) -> np.ndarray:
"""Return a numpy ndarray."""
if self._gpu:
return self._data.get()
return self._data
@property
def gpu(self) -> bool:
"""Return the state of the GPU."""
return self._gpu
def __repr__(self) -> str:
"""A representation of a tensor."""
state: str = "GPU" if self._gpu else "CPU"
return f"{self._data}\n\nTensor[{state}]"
def __iter__(self) -> Union[np.ndarray, cl.array.Array]:
"""An iterator for tensors."""
for i in self._data:
yield i
def __len__(self) -> int:
"""Return a length of tensors."""
return len(self._data)
def __getitem__(self, idx: int) -> Union[np.ndarray, cl.array.Array]:
"""Return a length of tensors."""
return self._data[idx]
def __setitem__(
self, idx: int, item: Union[np.ndarray, cl.array.Array]
) -> None:
"""Return a length of tensors."""
self._data[idx] = item
def __add__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Add two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data + other, gpu=self._gpu)
return Tensor(self._data + other._data, gpu=self._gpu or other._gpu)
__radd__ = __add__
def __iadd__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Add two tensors in-place."""
if not isinstance(other, Tensor):
self._data += other
else:
self._data += other._data
return self
def __sub__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Subtract two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data - other, gpu=self._gpu)
return Tensor(self._data - other._data, gpu=self._gpu or other._gpu)
__rsub__ = __sub__
def __isub__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Subtract two tensors in-place."""
if not isinstance(other, Tensor):
self._data -= other
else:
self._data -= other._data
return self
def __mul__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Multiply two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data * other, gpu=self._gpu)
return Tensor(self._data * other._data, gpu=self._gpu or other._gpu)
__rmul__ = __mul__
def __imul__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Multiply two tensors in-place."""
if not isinstance(other, Tensor):
self._data *= other
else:
self._data *= other._data
return self
def __truediv__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Divide two tensors."""
if not isinstance(other, Tensor):
return Tensor(self._data / other, gpu=self._gpu)
return Tensor(self._data / other._data, gpu=self._gpu or other._gpu)
__rtruediv__ = __truediv__
def __itruediv__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Divide two tensors in-place."""
if not isinstance(other, Tensor):
self._data /= other
else:
self._data /= other._data
return self
def __lt__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Less than operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data < other, gpu=self._gpu)
return Tensor(self._data < other._data, gpu=self._gpu or other._gpu)
def __le__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Less than or equal operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data <= other, gpu=self._gpu)
return Tensor(self._data <= other._data, gpu=self._gpu or other._gpu)
def __eq__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Equal to operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data == other, gpu=self._gpu)
return Tensor(self._data == other._data, gpu=self._gpu or other._gpu)
def __ne__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Not equal to operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data != other, gpu=self._gpu)
return Tensor(self._data != other._data, gpu=self._gpu or other._gpu)
def __ge__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Greater than or equal operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data >= other, gpu=self._gpu)
return Tensor(self._data >= other._data, gpu=self._gpu or other._gpu)
def __gt__(self, other: Union[Tensor, Scalar]) -> Tensor:
"""Greater than operation for a tensor and a tensor/scalar."""
if not isinstance(other, Tensor):
return Tensor(self._data > other, gpu=self._gpu)
return Tensor(self._data > other._data, gpu=self._gpu or other._gpu)
def __neg__(self) -> Tensor:
"""Return a negated tensor."""
return Tensor(-self._data, gpu=self._gpu)
def all(self) -> bool:
"""Returns the true value if all values of a tensor are true."""
return self._data.all()
def any(self) -> bool:
"""Returns the true value if at least one value of a tensor is true."""
return self._data.any()
    def view(self, dtype: np.dtype) -> Tensor:
"""Returns the view of a tensor with the same data. If dtype is
different from current dtype, the actual bytes of memory will be
reinterpreted.
"""
return Tensor(self._data.view(dtype), gpu=self._gpu)
    def astype(self, dtype: np.dtype) -> Tensor:
"""Return a copy of self, cast to dtype."""
return Tensor(self._data.astype(dtype), gpu=self._gpu)
    def squeeze(self) -> Tensor:
"""Returns a view of the tensor with dimensions of length 1 removed."""
return Tensor(self._data.squeeze(), gpu=self._gpu)
def sort(self) -> None:
"""Sorts a tensor, uses the parallel bitonic sort when on GPU."""
if self._gpu:
sorter = clbitonicsort.BitonicSort(CONTEXT)
sorter(self._data)
else:
self._data.sort()
@property
def T(self) -> Tensor:
"""Returns a transpose of a tensor."""
return Tensor(self._data.T, gpu=self._gpu)
@property
def dtype(self) -> np.dtype:
"""The data type of a tensor."""
return self._data.dtype
@property
def flags(self) -> Union[cl.compyte.array.ArrayFlags, np.flagsobj]:
"""Return an object with attributes `c_contiguous`, `f_contiguous` and
`forc`, which may be used to query contiguity properties in analogy
to `numpy.ndarray.flags`.
"""
        return self._data.flags
@property
def ndim(self) -> int:
"""The dimensions of a tensor."""
return self._data.ndim
@property
def nbytes(self) -> int:
"""Return the number of bytes."""
return self._data.nbytes
@property
def shape(self) -> tuple[int, ...]:
"""The tuple of lengths of each dimension in the tensor."""
return self._data.shape
@property
def strides(self) -> tuple[int, ...]:
"""tuple of bytes to step in each dimension."""
self._data.strides
@property
def size(self) -> int:
"""The number of meaningful entries in the tensor."""
        return self._data.size
class Ops:
"""Tensor operations."""
@staticmethod
def dot(t1: Tensor, t2: Tensor, gpu=False) -> Tensor:
"""Returns a dot product (matrix multiplication) of two tensors."""
if gpu:
# Convert back to numpy ndarrays
t1 = t1.data.get().astype(np.float32)
t2 = t2.data.get().astype(np.float32)
t1_w = np.int32(t1.shape[1])
t1_h = np.int32(t1.shape[0])
t2_w = np.int32(t2.shape[1])
t2_h = np.int32(t2.shape[0])
rt_h = t1_h
rt_w = t2_w
rt = np.empty((rt_h, rt_w)).astype(np.float32)
# Mem flags
mf = cl.mem_flags
# Buffer variables
t1_buf = cl.Buffer(
CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t1
)
t2_buf = cl.Buffer(
CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t2
)
rt_buf = cl.Buffer(CONTEXT, mf.WRITE_ONLY, size=rt.nbytes)
# OpenCL program for computing a matrix multiply
prg = cl.Program(CONTEXT, readcl("matmul.cl")).build(
options=CLOPTS
)
# Perform the matrix multiplication and return the resulting tensor
prg.matmul(
QUEUE, rt.shape, None, t1_buf, t2_buf, rt_buf, t1_h, t2_w, t1_w
)
cl.enqueue_copy(QUEUE, rt, rt_buf)
return Tensor(rt, gpu=True)
return Tensor(np.dot(t1.data, t2.data))
@staticmethod
def vdot(m1: Tensor, m2: Tensor) -> Tensor:
"""Returns a dot product of two tensors."""
if m1.gpu or m2.gpu:
return Tensor(clarray.dot(m1.data, m2.data), gpu=True)
return Tensor(np.vdot(m1.data, m2.data))
@staticmethod
def flatten(t: Tensor) -> Tensor:
"""Returns flattened tensor containing the same data."""
return Tensor(t._data.ravel(), gpu=t.gpu)
@staticmethod
def fill(shape: tuple[int, ...], val: np.float32, gpu=False) -> Tensor:
"""Fill the tensor with scalar."""
if gpu:
return Tensor(
clarray.empty(QUEUE, shape, dtype=np.float32).fill(val),
gpu=True,
)
return Tensor(np.full(shape, val))
@staticmethod
def where(
cond: Tensor,
fst: Union[Tensor, Scalar],
snd: Union[Tensor, Scalar],
) -> Tensor:
"""Fill the tensor based on a condition."""
if cond.gpu:
if isinstance(fst, Tensor) and isinstance(snd, Tensor):
return Tensor(
clarray.if_positive(cond._data, fst._data, snd._data),
gpu=True,
)
shape: tuple[int, ...] = cond._data.shape
if not isinstance(fst, Tensor) and isinstance(snd, Tensor):
snd = snd._data
fst = clarray.empty(QUEUE, shape, dtype=np.float32).fill(fst)
elif isinstance(fst, Tensor) and not isinstance(snd, Tensor):
fst = fst._data
snd = clarray.empty(QUEUE, shape, dtype=np.float32).fill(snd)
elif not isinstance(fst, Tensor) and not isinstance(snd, Tensor):
fst = clarray.empty(QUEUE, shape, dtype=np.float32).fill(fst)
snd = clarray.empty(QUEUE, shape, dtype=np.float32).fill(snd)
return Tensor(clarray.if_positive(cond._data, fst, snd), gpu=True)
if not isinstance(fst, Tensor) and isinstance(snd, Tensor):
return Tensor(np.where(cond._data, fst, snd._data))
if isinstance(fst, Tensor) and not isinstance(snd, Tensor):
return Tensor(np.where(cond._data, fst._data, snd))
if not isinstance(fst, Tensor) and not isinstance(snd, Tensor):
return Tensor(np.where(cond._data, fst, snd))
return Tensor(np.where(cond._data, fst._data, snd._data))
@staticmethod
def reshape(t: Tensor, shape: tuple) -> Tensor:
"""Returns a tensor containing the same data with a new shape."""
if t.gpu:
return Tensor(clarray.reshape(t._data, shape), gpu=True)
return Tensor(np.reshape(t._data, shape))
@staticmethod
def log(t: Tensor) -> Tensor:
"""Returns a natural logarithm of a tensor."""
if t.gpu:
return Tensor(clmath.log(t._data), gpu=True)
return Tensor(np.log(t._data))
@staticmethod
def tanh(t: Tensor) -> Tensor:
"""Returns a tanh of a tensor."""
if t.gpu:
return Tensor(clmath.tanh(t._data), gpu=True)
return Tensor(np.tanh(t._data))
@staticmethod
def exp(t: Tensor) -> Tensor:
"""Returns a natural exponent of a tensor."""
if t.gpu:
return Tensor(clmath.exp(t._data), gpu=True)
return Tensor(np.exp(t._data))
@staticmethod
def maximum(t: Tensor, uts: Union[Tensor, Scalar]) -> Tensor:
"""Returns the maximum of a tensor."""
if t.gpu:
if not isinstance(uts, Tensor):
ot: cl.array.Array = clarray.empty(
QUEUE, t.shape, dtype=np.float32
).fill(uts)
return Tensor(clarray.maximum(t._data, ot), gpu=True)
return Tensor(clarray.maximum(t._data, uts._data), gpu=True)
if not isinstance(uts, Tensor):
return Tensor(np.maximum(t._data, uts))
return Tensor(np.maximum(t._data, uts._data))
@staticmethod
def minimum(t: Tensor, uts: Union[Tensor, Scalar]) -> Tensor:
"""Returns the minimum of a tensor."""
if t.gpu:
if not isinstance(uts, Tensor):
ot: cl.array.Array = clarray.empty(
QUEUE, t.shape, dtype=np.float32
).fill(uts)
return Tensor(clarray.minimum(t._data, ot), gpu=True)
return Tensor(clarray.minimum(t._data, uts._data), gpu=True)
if not isinstance(uts, Tensor):
return Tensor(np.minimum(t._data, uts))
return Tensor(np.minimum(t._data, uts._data))
@staticmethod
def power(t: Tensor, exponent: Union[Tensor, Scalar]) -> Tensor:
"""Raise all elements of the tensor to the specified power."""
if not isinstance(exponent, Tensor):
return Tensor(t._data ** exponent, gpu=t.gpu)
return Tensor(t._data ** exponent._data, gpu=t.gpu or exponent.gpu)
@staticmethod
def square(t: Tensor) -> Tensor:
"""Return a square-valued tensor."""
return Tensor(t._data ** 2, gpu=t.gpu)
@staticmethod
def transpose(t: Tensor) -> Tensor:
"""Returns a transpose of a tensor."""
if t.gpu:
return Tensor(clarray.transpose(t._data), gpu=True)
return Tensor(np.transpose(t._data), gpu=t.gpu)
@staticmethod
def zeros(shape: tuple = (1, 1), gpu=False) -> Tensor:
"""Return a new tensor of given shape and type, filled with zeros."""
if gpu:
return Tensor(clarray.zeros(QUEUE, shape, np.float32), gpu=True)
return Tensor(np.zeros(shape, dtype=np.float32))
@staticmethod
def zeros_like(t: Tensor, gpu=False) -> Tensor:
"""Return a tensor of zeros with the same shape and type as a given
tensor.
"""
if gpu:
return Tensor(clarray.zeros_like(t._data), gpu=True)
return Tensor(np.zeros_like(t._data, dtype=np.float32))
class Random:
"""Random number generation for tensors."""
@staticmethod
def normal(
shape: Union[tuple[int, ...], int] = (1, 1), gpu=False
) -> Tensor:
"""Draw random samples from a normal (Gaussian) distribution."""
if gpu:
return Tensor(
clrandom.PhiloxGenerator(CONTEXT).normal(
cq=QUEUE, shape=shape, dtype=np.float32
),
gpu=True,
)
return Tensor(np.random.normal(size=shape).astype(np.float32))
@staticmethod
def rand(shape: Union[tuple[int, ...], int] = (1, 1), gpu=False) -> Tensor:
"""Returns a tensor of random values in a given shape."""
if gpu:
return Tensor(clrandom.rand(QUEUE, shape, np.float32), gpu=True)
if isinstance(shape, tuple):
return Tensor(np.random.rand(*shape).astype(np.float32))
return Tensor(np.random.rand(shape).astype(np.float32))
@staticmethod
def uniform(
shape: Union[tuple[int, ...], int] = (1, 1),
min: float = 0.0,
max: float = 1.0,
gpu=False,
) -> Tensor:
"""Draw samples from a uniform distribution."""
if gpu:
return Tensor(
clrandom.PhiloxGenerator(CONTEXT).uniform(
cq=QUEUE, shape=shape, dtype=np.float32, a=min, b=max
),
gpu=True,
)
return Tensor(
np.random.uniform(min, max, size=shape).astype(np.float32)
)
class Reduce:
"""Reduction operations on tensors."""
@staticmethod
def max(t: Tensor) -> np.float32:
"""The maximum of the values in a tensor."""
if t.gpu:
return clarray.max(t._data).get().flat[0]
return np.max(t._data)
@staticmethod
def min(t: Tensor) -> np.float32:
"""The minimum of the values in a tensor."""
if t.gpu:
return clarray.min(t._data).get().flat[0]
return np.min(t._data)
@staticmethod
def sum(t: Tensor) -> np.float32:
"""The sum of the values in a tensor."""
if t.gpu:
return clarray.sum(t._data).get().flat[0]
return np.sum(t._data)
@staticmethod
def mean(t: Tensor) -> np.float32:
"""The mean of the values in a tensor."""
if t.gpu:
return clarray.sum(t._data).get().flat[0] / t._data.size
return np.mean(t._data)
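if __name__ == "__main__":
    # CPU-only smoke test (a sketch; not part of the original module).
    a = Tensor([[1.0, 2.0], [3.0, 4.0]])
    b = Tensor([[5.0, 6.0], [7.0, 8.0]])
    print(Ops.dot(a, b))      # matrix product via the NumPy path
    print(Reduce.sum(a + b))  # elementwise add, then a full reduction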
|
[
"pyopencl.array.sum",
"numpy.sum",
"numpy.maximum",
"pyopencl.clmath.exp",
"pyopencl.enqueue_copy",
"pyopencl.array.transpose",
"numpy.empty",
"pyopencl.array.empty",
"pyopencl.array.minimum",
"pyopencl.Buffer",
"numpy.mean",
"numpy.exp",
"numpy.random.normal",
"pyopencl.array.reshape",
"pyopencl.array.zeros_like",
"pyopencl.clmath.log",
"numpy.full",
"numpy.zeros_like",
"pyopencl.clmath.tanh",
"numpy.transpose",
"pyopencl.CommandQueue",
"pyopencl.array.if_positive",
"pyopencl.array.max",
"pyopencl.bitonic_sort.BitonicSort",
"numpy.max",
"numpy.reshape",
"numpy.int32",
"pyopencl.array.maximum",
"pkgutil.get_data",
"numpy.minimum",
"numpy.tanh",
"pyopencl.create_some_context",
"numpy.min",
"pyopencl.array.min",
"numpy.dot",
"numpy.random.uniform",
"numpy.log",
"pyopencl.array.dot",
"pyopencl.clrandom.PhiloxGenerator",
"numpy.dtype",
"numpy.vdot",
"numpy.zeros",
"pyopencl.array.to_device",
"numpy.where",
"numpy.array",
"numpy.random.rand",
"pyopencl.clrandom.rand",
"pyopencl.array.zeros"
] |
[((521, 559), 'pyopencl.create_some_context', 'cl.create_some_context', ([], {'answers': '[0, 1]'}), '(answers=[0, 1])\n', (543, 559), True, 'import pyopencl as cl\n'), ((608, 632), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['CONTEXT'], {}), '(CONTEXT)\n', (623, 632), True, 'import pyopencl as cl\n'), ((20996, 21011), 'numpy.max', 'np.max', (['t._data'], {}), '(t._data)\n', (21002, 21011), True, 'import numpy as np\n'), ((21211, 21226), 'numpy.min', 'np.min', (['t._data'], {}), '(t._data)\n', (21217, 21226), True, 'import numpy as np\n'), ((21422, 21437), 'numpy.sum', 'np.sum', (['t._data'], {}), '(t._data)\n', (21428, 21437), True, 'import numpy as np\n'), ((21650, 21666), 'numpy.mean', 'np.mean', (['t._data'], {}), '(t._data)\n', (21657, 21666), True, 'import numpy as np\n'), ((863, 911), 'pkgutil.get_data', 'pkgutil.get_data', (['"""miniml"""', 'f"""opencl/{filename}"""'], {}), "('miniml', f'opencl/{filename}')\n", (879, 911), False, 'import pkgutil\n'), ((1258, 1290), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1266, 1290), True, 'import numpy as np\n'), ((2820, 2852), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (2828, 2852), True, 'import numpy as np\n'), ((3873, 3909), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (3890, 3909), True, 'import pyopencl.array as clarray\n'), ((10335, 10369), 'pyopencl.bitonic_sort.BitonicSort', 'clbitonicsort.BitonicSort', (['CONTEXT'], {}), '(CONTEXT)\n', (10360, 10369), True, 'import pyopencl.bitonic_sort as clbitonicsort\n'), ((12067, 12088), 'numpy.int32', 'np.int32', (['t1.shape[1]'], {}), '(t1.shape[1])\n', (12075, 12088), True, 'import numpy as np\n'), ((12108, 12129), 'numpy.int32', 'np.int32', (['t1.shape[0]'], {}), '(t1.shape[0])\n', (12116, 12129), True, 'import numpy as np\n'), ((12150, 12171), 'numpy.int32', 'np.int32', (['t2.shape[1]'], {}), '(t2.shape[1])\n', (12158, 12171), True, 'import numpy as np\n'), ((12191, 12212), 'numpy.int32', 'np.int32', (['t2.shape[0]'], {}), '(t2.shape[0])\n', (12199, 12212), True, 'import numpy as np\n'), ((12430, 12493), 'pyopencl.Buffer', 'cl.Buffer', (['CONTEXT', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 't1'}), '(CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t1)\n', (12439, 12493), True, 'import pyopencl as cl\n'), ((12545, 12608), 'pyopencl.Buffer', 'cl.Buffer', (['CONTEXT', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 't2'}), '(CONTEXT, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t2)\n', (12554, 12608), True, 'import pyopencl as cl\n'), ((12660, 12709), 'pyopencl.Buffer', 'cl.Buffer', (['CONTEXT', 'mf.WRITE_ONLY'], {'size': 'rt.nbytes'}), '(CONTEXT, mf.WRITE_ONLY, size=rt.nbytes)\n', (12669, 12709), True, 'import pyopencl as cl\n'), ((13094, 13128), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['QUEUE', 'rt', 'rt_buf'], {}), '(QUEUE, rt, rt_buf)\n', (13109, 13128), True, 'import pyopencl as cl\n'), ((13192, 13216), 'numpy.dot', 'np.dot', (['t1.data', 't2.data'], {}), '(t1.data, t2.data)\n', (13198, 13216), True, 'import numpy as np\n'), ((13457, 13482), 'numpy.vdot', 'np.vdot', (['m1.data', 'm2.data'], {}), '(m1.data, m2.data)\n', (13464, 13482), True, 'import numpy as np\n'), ((13975, 13994), 'numpy.full', 'np.full', (['shape', 'val'], {}), '(shape, val)\n', (13982, 13994), True, 'import numpy as np\n'), ((15594, 15636), 'numpy.where', 'np.where', (['cond._data', 'fst._data', 'snd._data'], {}), '(cond._data, fst._data, snd._data)\n', (15602, 15636), True, 'import numpy as np\n'), ((15894, 15920), 'numpy.reshape', 'np.reshape', (['t._data', 'shape'], {}), '(t._data, shape)\n', (15904, 15920), True, 'import numpy as np\n'), ((16129, 16144), 'numpy.log', 'np.log', (['t._data'], {}), '(t._data)\n', (16135, 16144), True, 'import numpy as np\n'), ((16342, 16358), 'numpy.tanh', 'np.tanh', (['t._data'], {}), '(t._data)\n', (16349, 16358), True, 'import numpy as np\n'), ((16566, 16581), 'numpy.exp', 'np.exp', (['t._data'], {}), '(t._data)\n', (16572, 16581), True, 'import numpy as np\n'), ((17171, 17201), 'numpy.maximum', 'np.maximum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17181, 17201), True, 'import numpy as np\n'), ((17791, 17821), 'numpy.minimum', 'np.minimum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17801, 17821), True, 'import numpy as np\n'), ((18524, 18545), 'numpy.transpose', 'np.transpose', (['t._data'], {}), '(t._data)\n', (18536, 18545), True, 'import numpy as np\n'), ((18831, 18864), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (18839, 18864), True, 'import numpy as np\n'), ((19146, 19186), 'numpy.zeros_like', 'np.zeros_like', (['t._data'], {'dtype': 'np.float32'}), '(t._data, dtype=np.float32)\n', (19159, 19186), True, 'import numpy as np\n'), ((1347, 1383), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (1364, 1383), True, 'import pyopencl.array as clarray\n'), ((2909, 2945), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (2926, 2945), True, 'import pyopencl.array as clarray\n'), ((13393, 13422), 'pyopencl.array.dot', 'clarray.dot', (['m1.data', 'm2.data'], {}), '(m1.data, m2.data)\n', (13404, 13422), True, 'import pyopencl.array as clarray\n'), ((15121, 15162), 'pyopencl.array.if_positive', 'clarray.if_positive', (['cond._data', 'fst', 'snd'], {}), '(cond._data, fst, snd)\n', (15140, 15162), True, 'import pyopencl.array as clarray\n'), ((15269, 15305), 'numpy.where', 'np.where', (['cond._data', 'fst', 'snd._data'], {}), '(cond._data, fst, snd._data)\n', (15277, 15305), True, 'import numpy as np\n'), ((15402, 15438), 'numpy.where', 'np.where', (['cond._data', 'fst._data', 'snd'], {}), '(cond._data, fst._data, snd)\n', (15410, 15438), True, 'import numpy as np\n'), ((15539, 15569), 'numpy.where', 'np.where', (['cond._data', 'fst', 'snd'], {}), '(cond._data, fst, snd)\n', (15547, 15569), True, 'import numpy as np\n'), ((15828, 15859), 'pyopencl.array.reshape', 'clarray.reshape', (['t._data', 'shape'], {}), '(t._data, shape)\n', (15843, 15859), True, 'import pyopencl.array as clarray\n'), ((16075, 16094), 'pyopencl.clmath.log', 'clmath.log', (['t._data'], {}), '(t._data)\n', (16085, 16094), True, 'import pyopencl.clmath as clmath\n'), ((16287, 16307), 'pyopencl.clmath.tanh', 'clmath.tanh', (['t._data'], {}), '(t._data)\n', (16298, 16307), True, 'import pyopencl.clmath as clmath\n'), ((16512, 16531), 'pyopencl.clmath.exp', 'clmath.exp', (['t._data'], {}), '(t._data)\n', (16522, 16531), True, 'import pyopencl.clmath as clmath\n'), ((17008, 17043), 'pyopencl.array.maximum', 'clarray.maximum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17023, 17043), True, 'import pyopencl.array as clarray\n'), ((17122, 17146), 'numpy.maximum', 'np.maximum', (['t._data', 'uts'], {}), '(t._data, uts)\n', (17132, 17146), True, 'import numpy as np\n'), ((17628, 17663), 'pyopencl.array.minimum', 'clarray.minimum', (['t._data', 'uts._data'], {}), '(t._data, uts._data)\n', (17643, 17663), True, 'import pyopencl.array as clarray\n'), ((17742, 17766), 'numpy.minimum', 'np.minimum', (['t._data', 'uts'], {}), '(t._data, uts)\n', (17752, 17766), True, 'import numpy as np\n'), ((18463, 18489), 'pyopencl.array.transpose', 'clarray.transpose', (['t._data'], {}), '(t._data)\n', (18480, 18489), True, 'import pyopencl.array as clarray\n'), ((18757, 18796), 'pyopencl.array.zeros', 'clarray.zeros', (['QUEUE', 'shape', 'np.float32'], {}), '(QUEUE, shape, np.float32)\n', (18770, 18796), True, 'import pyopencl.array as clarray\n'), ((19084, 19111), 'pyopencl.array.zeros_like', 'clarray.zeros_like', (['t._data'], {}), '(t._data)\n', (19102, 19111), True, 'import pyopencl.array as clarray\n'), ((19941, 19980), 'pyopencl.clrandom.rand', 'clrandom.rand', (['QUEUE', 'shape', 'np.float32'], {}), '(QUEUE, shape, np.float32)\n', (19954, 19980), True, 'import pyopencl.clrandom as clrandom\n'), ((2127, 2163), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (2144, 2163), True, 'import pyopencl.array as clarray\n'), ((3019, 3038), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3027, 3038), True, 'import numpy as np\n'), ((3225, 3261), 'pyopencl.array.to_device', 'clarray.to_device', (['QUEUE', 'self._data'], {}), '(QUEUE, self._data)\n', (3242, 3261), True, 'import pyopencl.array as clarray\n'), ((12280, 12302), 'numpy.empty', 'np.empty', (['(rt_h, rt_w)'], {}), '((rt_h, rt_w))\n', (12288, 12302), True, 'import numpy as np\n'), ((14334, 14387), 'pyopencl.array.if_positive', 'clarray.if_positive', (['cond._data', 'fst._data', 'snd._data'], {}), '(cond._data, fst._data, snd._data)\n', (14353, 14387), True, 'import pyopencl.array as clarray\n'), ((16941, 16969), 'pyopencl.array.maximum', 'clarray.maximum', (['t._data', 'ot'], {}), '(t._data, ot)\n', (16956, 16969), True, 'import pyopencl.array as clarray\n'), ((17561, 17589), 'pyopencl.array.minimum', 'clarray.minimum', (['t._data', 'ot'], {}), '(t._data, ot)\n', (17576, 17589), True, 'import pyopencl.array as clarray\n'), ((19684, 19712), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'shape'}), '(size=shape)\n', (19700, 19712), True, 'import numpy as np\n'), ((20122, 20143), 'numpy.random.rand', 'np.random.rand', (['shape'], {}), '(shape)\n', (20136, 20143), True, 'import numpy as np\n'), ((20669, 20708), 'numpy.random.uniform', 'np.random.uniform', (['min', 'max'], {'size': 'shape'}), '(min, max, size=shape)\n', (20686, 20708), True, 'import numpy as np\n'), ((13855, 13900), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (13868, 13900), True, 'import pyopencl.array as clarray\n'), ((14618, 14663), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (14631, 14663), True, 'import pyopencl.array as clarray\n'), ((16815, 16862), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 't.shape'], {'dtype': 'np.float32'}), '(QUEUE, t.shape, dtype=np.float32)\n', (16828, 16862), True, 'import pyopencl.array as clarray\n'), ((17435, 17482), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 't.shape'], {'dtype': 'np.float32'}), '(QUEUE, t.shape, dtype=np.float32)\n', (17448, 17482), True, 'import pyopencl.array as clarray\n'), ((19500, 19533), 'pyopencl.clrandom.PhiloxGenerator', 'clrandom.PhiloxGenerator', (['CONTEXT'], {}), '(CONTEXT)\n', (19524, 19533), True, 'import pyopencl.clrandom as clrandom\n'), ((20056, 20078), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (20070, 20078), True, 'import numpy as np\n'), ((20457, 20490), 'pyopencl.clrandom.PhiloxGenerator', 'clrandom.PhiloxGenerator', (['CONTEXT'], {}), '(CONTEXT)\n', (20481, 20490), True, 'import pyopencl.clrandom as clrandom\n'), ((14803, 14848), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (14816, 14848), True, 'import pyopencl.array as clarray\n'), ((20945, 20965), 'pyopencl.array.max', 'clarray.max', (['t._data'], {}), '(t._data)\n', (20956, 20965), True, 'import pyopencl.array as clarray\n'), ((21160, 21180), 'pyopencl.array.min', 'clarray.min', (['t._data'], {}), '(t._data)\n', (21171, 21180), True, 'import pyopencl.array as clarray\n'), ((21371, 21391), 'pyopencl.array.sum', 'clarray.sum', (['t._data'], {}), '(t._data)\n', (21382, 21391), True, 'import pyopencl.array as clarray\n'), ((14960, 15005), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (14973, 15005), True, 'import pyopencl.array as clarray\n'), ((15038, 15083), 'pyopencl.array.empty', 'clarray.empty', (['QUEUE', 'shape'], {'dtype': 'np.float32'}), '(QUEUE, shape, dtype=np.float32)\n', (15051, 15083), True, 'import pyopencl.array as clarray\n'), ((21584, 21604), 'pyopencl.array.sum', 'clarray.sum', (['t._data'], {}), '(t._data)\n', (21595, 21604), True, 'import pyopencl.array as clarray\n')]
|
import json
import logging
from datetime import datetime
import requests
from fftbg.config import FFTBG_API_URL, TOURNAMENTS_ROOT
LOG = logging.getLogger(__name__)
def get_tournament_list():
j = requests.get(f'{FFTBG_API_URL}/api/tournaments?limit=6000').json()
return [(t['ID'], datetime.fromisoformat(t['LastMod'])) for t in j]
def get_tournament(tid):
return requests.get(f'{FFTBG_API_URL}/tournament/{tid}/json').text
def get_latest_tournament():
LOG.info('Retrieving latest tournament json')
return requests.get(f'{FFTBG_API_URL}/tournament/latest/json').text
def tournament_sync():
LOG.info('Beginning tournament sync')
TOURNAMENTS_ROOT.mkdir(exist_ok=True)
changed = False
for (tid, last_mod) in get_tournament_list():
t_path = TOURNAMENTS_ROOT / f'{tid}.json'
if t_path.exists():
text = t_path.read_text()
tournament_json = json.loads(text)
modified = datetime.fromisoformat(tournament_json['LastMod'])
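            # Skip the download when the cached copy is at least as recent
            # as the server's last-modified timestamp.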
if last_mod <= modified:
continue
LOG.info(f'Downloading tournament {tid} modified {last_mod.isoformat()}')
t_path.write_text(get_tournament(tid))
changed = True
return changed
|
[
"datetime.datetime.fromisoformat",
"json.loads",
"fftbg.config.TOURNAMENTS_ROOT.mkdir",
"requests.get",
"logging.getLogger"
] |
[((139, 166), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (156, 166), False, 'import logging\n'), ((665, 702), 'fftbg.config.TOURNAMENTS_ROOT.mkdir', 'TOURNAMENTS_ROOT.mkdir', ([], {'exist_ok': '(True)'}), '(exist_ok=True)\n', (687, 702), False, 'from fftbg.config import FFTBG_API_URL, TOURNAMENTS_ROOT\n'), ((381, 435), 'requests.get', 'requests.get', (['f"""{FFTBG_API_URL}/tournament/{tid}/json"""'], {}), "(f'{FFTBG_API_URL}/tournament/{tid}/json')\n", (393, 435), False, 'import requests\n'), ((533, 588), 'requests.get', 'requests.get', (['f"""{FFTBG_API_URL}/tournament/latest/json"""'], {}), "(f'{FFTBG_API_URL}/tournament/latest/json')\n", (545, 588), False, 'import requests\n'), ((204, 263), 'requests.get', 'requests.get', (['f"""{FFTBG_API_URL}/api/tournaments?limit=6000"""'], {}), "(f'{FFTBG_API_URL}/api/tournaments?limit=6000')\n", (216, 263), False, 'import requests\n'), ((293, 329), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["t['LastMod']"], {}), "(t['LastMod'])\n", (315, 329), False, 'from datetime import datetime\n'), ((920, 936), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (930, 936), False, 'import json\n'), ((960, 1010), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["tournament_json['LastMod']"], {}), "(tournament_json['LastMod'])\n", (982, 1010), False, 'from datetime import datetime\n')]
|
__author__ = '<NAME>'
from craps import CrapsGame
aCrapsGame = CrapsGame()
print(aCrapsGame.getCurrentBank())
aCrapsGame.placeBet(50)
aCrapsGame.throwDice()
aCrapsGame.throwDice()
print(aCrapsGame.getCurrentBank())
|
[
"craps.CrapsGame"
] |
[((69, 80), 'craps.CrapsGame', 'CrapsGame', ([], {}), '()\n', (78, 80), False, 'from craps import CrapsGame\n')]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from opacus.data_loader import DPDataLoader
from torch.utils.data import TensorDataset
class DPDataLoaderTest(unittest.TestCase):
def setUp(self):
self.data_size = 10
self.dimension = 7
self.num_classes = 11
def test_collate_classes(self):
x = torch.randn(self.data_size, self.dimension)
y = torch.randint(low=0, high=self.num_classes, size=(self.data_size,))
dataset = TensorDataset(x, y)
data_loader = DPDataLoader(dataset, sample_rate=1e-5)
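        # With sample_rate=1e-5 over only 10 examples, the Poisson-sampled
        # batch is almost surely empty, so the collate function must return
        # zero-length tensors rather than raising.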
x_b, y_b = next(iter(data_loader))
self.assertEqual(x_b.size(0), 0)
self.assertEqual(y_b.size(0), 0)
def test_collate_tensor(self):
x = torch.randn(self.data_size, self.dimension)
dataset = TensorDataset(x)
data_loader = DPDataLoader(dataset, sample_rate=1e-5)
(s,) = next(iter(data_loader))
self.assertEqual(s.size(0), 0)
|
[
"torch.utils.data.TensorDataset",
"torch.randint",
"opacus.data_loader.DPDataLoader",
"torch.randn"
] |
[((917, 960), 'torch.randn', 'torch.randn', (['self.data_size', 'self.dimension'], {}), '(self.data_size, self.dimension)\n', (928, 960), False, 'import torch\n'), ((973, 1040), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': 'self.num_classes', 'size': '(self.data_size,)'}), '(low=0, high=self.num_classes, size=(self.data_size,))\n', (986, 1040), False, 'import torch\n'), ((1060, 1079), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x', 'y'], {}), '(x, y)\n', (1073, 1079), False, 'from torch.utils.data import TensorDataset\n'), ((1102, 1142), 'opacus.data_loader.DPDataLoader', 'DPDataLoader', (['dataset'], {'sample_rate': '(1e-05)'}), '(dataset, sample_rate=1e-05)\n', (1114, 1142), False, 'from opacus.data_loader import DPDataLoader\n'), ((1316, 1359), 'torch.randn', 'torch.randn', (['self.data_size', 'self.dimension'], {}), '(self.data_size, self.dimension)\n', (1327, 1359), False, 'import torch\n'), ((1379, 1395), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x'], {}), '(x)\n', (1392, 1395), False, 'from torch.utils.data import TensorDataset\n'), ((1418, 1458), 'opacus.data_loader.DPDataLoader', 'DPDataLoader', (['dataset'], {'sample_rate': '(1e-05)'}), '(dataset, sample_rate=1e-05)\n', (1430, 1458), False, 'from opacus.data_loader import DPDataLoader\n')]
|
import json
import logging
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as vtransforms
from torchvision.models import squeezenet1_0, squeezenet1_1
RESCALE_SIZE = 256
CROP_SIZE = 224
IMAGENET_CLASS_MAP = 'imagenet_class_index.json'
logger = logging.getLogger('app')
def _fetch_imagenet_class_map():
"""Parse ImageNet Class Index JSON"""
try:
with open(IMAGENET_CLASS_MAP, 'r') as f:
class_map = json.load(f)
logger.info('successfully loaded imagenet class map')
except Exception:
        raise RuntimeError(f'unable to retrieve class map from {IMAGENET_CLASS_MAP}')
class_map = {int(i): str(j[1]) for i, j in class_map.items()}
return class_map
def _maybe_optimize(model):
try:
from torch.jit import trace
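        # Trace with a dummy ImageNet-shaped batch so inference runs through
        # the compiled TorchScript graph.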
model = trace(model, example_inputs=torch.rand(1, 3, 224, 224))
logger.info('successfully optimized PyTorch model using JIT tracing')
except ImportError:
logger.warning('unable to leverage torch.jit.trace optimizations')
pass
return model
class ImageNetEvaluator(nn.Module):
"""Evaluator of ImageNet Classes"""
def __init__(self, device, optimize=False):
super().__init__()
self.device = device
self.optimize = optimize
self.transform = vtransforms.Compose([
vtransforms.Resize(RESCALE_SIZE),
vtransforms.CenterCrop((CROP_SIZE, CROP_SIZE)),
vtransforms.ToTensor(),
vtransforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
model = self._fetch_model()
self.model = model.to(self.device).eval()
if self.optimize:
self.model = _maybe_optimize(model)
self.class_map = _fetch_imagenet_class_map()
def _fetch_model(self):
raise NotImplementedError
def forward(self, x):
x = self.transform(x).to(self.device)
num_dims = len(x.size())
if num_dims != 3:
raise ValueError('number dimensions of x must be 3')
with torch.no_grad():
pred_tensor = self.model(x.unsqueeze(0))
pred_logproba = F.log_softmax(pred_tensor, dim=1)
pred_proba, pred_label = torch.max(pred_logproba.detach().exp(), dim=1)
pred_proba, pred_label = pred_proba.item(), pred_label.item()
pred_class = self.class_map[pred_label]
return pred_class, pred_proba
class SqueezeNetV1Evaluator(ImageNetEvaluator):
"""SqueezeNet V1 Evaluator of ImageNet Classes"""
def _fetch_model(self):
model = squeezenet1_0(pretrained=True)
return model
class SqueezeNetV2Evaluator(ImageNetEvaluator):
"""SqueezeNet V2 Evaluator of ImageNet Classes"""
def _fetch_model(self):
model = squeezenet1_1(pretrained=True)
return model
|
[
"json.load",
"torchvision.transforms.ToTensor",
"torchvision.models.squeezenet1_0",
"torchvision.models.squeezenet1_1",
"torch.nn.functional.log_softmax",
"torch.rand",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"torch.no_grad",
"logging.getLogger",
"torchvision.transforms.Resize"
] |
[((312, 336), 'logging.getLogger', 'logging.getLogger', (['"""app"""'], {}), "('app')\n", (329, 336), False, 'import logging\n'), ((2239, 2272), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred_tensor'], {'dim': '(1)'}), '(pred_tensor, dim=1)\n', (2252, 2272), True, 'import torch.nn.functional as F\n'), ((2657, 2687), 'torchvision.models.squeezenet1_0', 'squeezenet1_0', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2670, 2687), False, 'from torchvision.models import squeezenet1_0, squeezenet1_1\n'), ((2857, 2887), 'torchvision.models.squeezenet1_1', 'squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2870, 2887), False, 'from torchvision.models import squeezenet1_0, squeezenet1_1\n'), ((496, 508), 'json.load', 'json.load', (['f'], {}), '(f)\n', (505, 508), False, 'import json\n'), ((2145, 2160), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2158, 2160), False, 'import torch\n'), ((872, 898), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (882, 898), False, 'import torch\n'), ((1381, 1413), 'torchvision.transforms.Resize', 'vtransforms.Resize', (['RESCALE_SIZE'], {}), '(RESCALE_SIZE)\n', (1399, 1413), True, 'import torchvision.transforms as vtransforms\n'), ((1427, 1473), 'torchvision.transforms.CenterCrop', 'vtransforms.CenterCrop', (['(CROP_SIZE, CROP_SIZE)'], {}), '((CROP_SIZE, CROP_SIZE))\n', (1449, 1473), True, 'import torchvision.transforms as vtransforms\n'), ((1487, 1509), 'torchvision.transforms.ToTensor', 'vtransforms.ToTensor', ([], {}), '()\n', (1507, 1509), True, 'import torchvision.transforms as vtransforms\n'), ((1523, 1599), 'torchvision.transforms.Normalize', 'vtransforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1544, 1599), True, 'import torchvision.transforms as vtransforms\n')]
|
# -*- coding: utf-8 -*-
"""SPARC4 spectral response tests.
This script tests the operation of the SPARC4 spectral response classes.
"""
import os
import numpy as np
import pandas as pd
import pytest
from AIS.SPARC4_Spectral_Response import (
Abstract_SPARC4_Spectral_Response,
Concrete_SPARC4_Spectral_Response_1,
Concrete_SPARC4_Spectral_Response_2,
Concrete_SPARC4_Spectral_Response_3,
Concrete_SPARC4_Spectral_Response_4,
)
wavelength_interval = range(350, 1150, 50)
n = len(wavelength_interval)
specific_flux = np.ones((4, n))
ccd_transmitance_c1 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 1", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c1 = np.asarray([float(value) for value in ccd_transmitance_c1])
ccd_transmitance_c2 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 2", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c2 = np.asarray([float(value) for value in ccd_transmitance_c2])
ccd_transmitance_c3 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 3", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c3 = np.asarray([float(value) for value in ccd_transmitance_c3])
ccd_transmitance_c4 = np.asarray(
pd.read_excel(os.path.join("SPARC4_Spectral_Response", "Channel 4", "ccd.xlsx"))
)[1:, 1]
ccd_transmitance_c4 = np.asarray([float(value) for value in ccd_transmitance_c4])
# -------------------------------------------------------------------------------------------------------------
@pytest.fixture
def abs_s4_sr():
chc = Abstract_SPARC4_Spectral_Response()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c1_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_1()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c2_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_2()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c3_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_3()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
@pytest.fixture
def c4_s4_sr():
chc = Concrete_SPARC4_Spectral_Response_4()
chc.write_specific_flux(specific_flux, wavelength_interval)
return chc
# -------------------- Initialize the class -----------------------
def test_specific_flux_abs(abs_s4_sr):
vec = abs_s4_sr.get_specific_flux()
boolean_test = vec == specific_flux
assert boolean_test.all()
def test_specific_flux_c1(c1_s4_sr):
vec = c1_s4_sr.get_specific_flux()
boolean_test = vec == specific_flux
assert boolean_test.all()
# -------------------- Channel ID -----------------------
def test_channel_ID_abs(abs_s4_sr):
assert abs_s4_sr.get_channel_ID() == 0
def test_channel_ID_c1(c1_s4_sr):
assert c1_s4_sr.get_channel_ID() == 1
def test_channel_ID_c2(c2_s4_sr):
assert c2_s4_sr.get_channel_ID() == 2
def test_channel_ID_c3(c3_s4_sr):
assert c3_s4_sr.get_channel_ID() == 3
def test_channel_ID_c4(c4_s4_sr):
assert c4_s4_sr.get_channel_ID() == 4
# -------------------- Apply spectral response -----------------------
# def test_calibration_wheel(abs_s4_sr):
# abs_s4_sr.apply_calibration_wheel()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_retarder(abs_s4_sr):
# abs_s4_sr.apply_retarder()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_analyzer(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# vec = abs_s4_sr.get_specific_flux()
# boolean_test = vec == specific_flux
# assert boolean_test.all()
# def test_collimator(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_collimator()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_dichroic_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_dichroic()
# def test_dichroic_c1(c1_s4_sr):
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_dichroic()
# def test_dichroic_c2(c2_s4_sr):
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_dichroic()
# def test_dichroic_c3(c3_s4_sr):
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_dichroic()
# def test_dichroic_c4(c4_s4_sr):
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_dichroic()
# def test_camera_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_camera()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c1(c1_s4_sr):
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_camera()
# assert np.allclose(c1_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c1_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c2(c2_s4_sr):
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_camera()
# assert np.allclose(c2_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c2_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c3(c3_s4_sr):
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_camera()
# assert np.allclose(c3_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c3_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_camera_c4(c4_s4_sr):
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_camera()
# assert np.allclose(c4_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(c4_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_ccd_abs(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# abs_s4_sr.apply_ccd()
# assert np.allclose(abs_s4_sr.specific_ordinary_ray, specific_flux[0, :])
# assert np.allclose(abs_s4_sr.specific_extra_ordinary_ray, specific_flux[0, :])
# def test_ccd_c1(c1_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c1 / 100
# c1_s4_sr.apply_analyser()
# c1_s4_sr.apply_ccd()
# assert np.allclose(c1_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c1_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c2(c2_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c2 / 100
# c2_s4_sr.apply_analyser()
# c2_s4_sr.apply_ccd()
# assert np.allclose(c2_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c2_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c3(c3_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c3 / 100
# c3_s4_sr.apply_analyser()
# c3_s4_sr.apply_ccd()
# assert np.allclose(c3_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c3_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# def test_ccd_c4(c4_s4_sr):
# new_specific_flux = specific_flux[0, :] * ccd_transmitance_c4 / 100
# c4_s4_sr.apply_analyser()
# c4_s4_sr.apply_ccd()
# assert np.allclose(c4_s4_sr.specific_ordinary_ray, new_specific_flux)
# assert np.allclose(c4_s4_sr.specific_extra_ordinary_ray, new_specific_flux)
# --------------------write specific_flux--------------------
def test_write_specific_flux():
specific_flux = np.asanyarray(
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
)
wavelength_interval = range(350, 1150, 50)
s4_sr = Abstract_SPARC4_Spectral_Response()
s4_sr.write_specific_flux(specific_flux, wavelength_interval)
boolean_test = s4_sr.specific_flux == specific_flux
assert boolean_test.all()
# ---------------------- get_specific_flux -----------------------------
def test_get_specific_flux(abs_s4_sr):
vec = abs_s4_sr.get_specific_flux()
boolean_test = vec.all() == specific_flux.all()
assert boolean_test.all()
# ----------------------- read_spreadsheet---------------------------
def test_read_spreadsheet_calibration_wheel(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "calibration_wheel.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_retarder(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "retarder.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_analyser_ordinary(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "analyser_ordinary.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_analyser_extra_ordinary(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "analyser_extra_ordinary.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_collimator(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "collimator.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 0", "ccd.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_1_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 1", "ccd.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2_1(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "dichroic 1.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_2_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "dichroic 2.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_2(abs_s4_sr):
file = os.path.join("SPARC4_Spectral_Response", "Channel 2", "camera.xlsx")
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 2/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_3_1(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_3_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_3(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/camera.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_3(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 3/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_4_1(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/dichroic 1.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_dichroic_4_2(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/dichroic 2.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_camera_4(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/camera.xlsx"
abs_s4_sr._read_spreadsheet(file)
def test_read_spreadsheet_ccd_4(abs_s4_sr):
file = "./SPARC4_Spectral_Response/Channel 4/ccd.xlsx"
abs_s4_sr._read_spreadsheet(file)
# ----------------------- miscellaneous ----------------------------
def test_multiply_matrices(abs_s4_sr):
a = np.ones((4, 4))
specific_flux = abs_s4_sr._multiply_matrices(a, a)
boolean_test = specific_flux == a
assert boolean_test.all()
def test_calculate_spline():
transmitance = np.ones((1, n))[0]
chc = Abstract_SPARC4_Spectral_Response()
chc.write_specific_flux(specific_flux, wavelength_interval)
new_transmitance = chc._calculate_spline(transmitance, wavelength_interval)
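    # Spline interpolation of a constant curve, evaluated at the original
    # sample points, should reproduce the input to floating-point tolerance.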
assert np.allclose(new_transmitance, transmitance)
# def test_get_specific_ordinary_ray(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# ord_ray = abs_s4_sr.get_specific_ordinary_ray()
# assert np.allclose(ord_ray, specific_flux[0, :])
# def test_get_specific_extra_ordinary_ray(abs_s4_sr):
# abs_s4_sr.apply_analyser()
# eord_ray = abs_s4_sr.get_specific_extra_ordinary_ray()
# assert np.allclose(eord_ray, specific_flux[0, :])
|
[
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_3",
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_4",
"numpy.allclose",
"numpy.asanyarray",
"numpy.ones",
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_1",
"AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_2",
"os.path.join",
"AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response"
] |
[((539, 554), 'numpy.ones', 'np.ones', (['(4, n)'], {}), '((4, n))\n', (546, 554), True, 'import numpy as np\n'), ((1556, 1591), 'AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response', 'Abstract_SPARC4_Spectral_Response', ([], {}), '()\n', (1589, 1591), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((1715, 1752), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_1', 'Concrete_SPARC4_Spectral_Response_1', ([], {}), '()\n', (1750, 1752), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((1876, 1913), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_2', 'Concrete_SPARC4_Spectral_Response_2', ([], {}), '()\n', (1911, 1913), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((2037, 2074), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_3', 'Concrete_SPARC4_Spectral_Response_3', ([], {}), '()\n', (2072, 2074), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((2198, 2235), 'AIS.SPARC4_Spectral_Response.Concrete_SPARC4_Spectral_Response_4', 'Concrete_SPARC4_Spectral_Response_4', ([], {}), '()\n', (2233, 2235), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((7512, 7583), 'numpy.asanyarray', 'np.asanyarray', (['[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]'], {}), '([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])\n', (7525, 7583), True, 'import numpy as np\n'), ((7657, 7692), 'AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response', 'Abstract_SPARC4_Spectral_Response', ([], {}), '()\n', (7690, 7692), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((8224, 8290), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""calibration_wheel.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'calibration_wheel.xlsx')\n", (8236, 8290), False, 'import os\n'), ((8389, 8446), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""retarder.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'retarder.xlsx')\n", (8401, 8446), False, 'import os\n'), ((8554, 8620), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""analyser_ordinary.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'analyser_ordinary.xlsx')\n", (8566, 8620), False, 'import os\n'), ((8734, 8806), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""analyser_extra_ordinary.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'analyser_extra_ordinary.xlsx')\n", (8746, 8806), False, 'import os\n'), ((8907, 
8966), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""collimator.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'collimator.xlsx')\n", (8919, 8966), False, 'import os\n'), ((9067, 9139), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""dichroic 1.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'dichroic 1.xlsx')\n", (9079, 9139), False, 'import os\n'), ((9240, 9312), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""dichroic 2.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'dichroic 2.xlsx')\n", (9252, 9312), False, 'import os\n'), ((9409, 9477), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""camera.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'camera.xlsx')\n", (9421, 9477), False, 'import os\n'), ((9571, 9636), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 0"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 0', 'ccd.xlsx')\n", (9583, 9636), False, 'import os\n'), ((9739, 9811), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""dichroic 1.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'dichroic 1.xlsx')\n", (9751, 9811), False, 'import os\n'), ((9914, 9986), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""dichroic 2.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'dichroic 2.xlsx')\n", (9926, 9986), False, 'import os\n'), ((10085, 10153), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""camera.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'camera.xlsx')\n", (10097, 10153), False, 'import os\n'), ((10249, 10314), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'ccd.xlsx')\n", (10261, 10314), False, 'import os\n'), ((10417, 10489), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""dichroic 1.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'dichroic 1.xlsx')\n", (10429, 10489), False, 'import os\n'), ((10592, 10664), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""dichroic 2.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'dichroic 2.xlsx')\n", (10604, 10664), False, 'import os\n'), ((10763, 10831), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""camera.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'camera.xlsx')\n", (10775, 10831), False, 'import os\n'), ((12344, 12359), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (12351, 12359), True, 'import numpy as np\n'), ((12562, 12597), 'AIS.SPARC4_Spectral_Response.Abstract_SPARC4_Spectral_Response', 'Abstract_SPARC4_Spectral_Response', ([], {}), '()\n', (12595, 12597), False, 'from AIS.SPARC4_Spectral_Response import Abstract_SPARC4_Spectral_Response, Concrete_SPARC4_Spectral_Response_1, Concrete_SPARC4_Spectral_Response_2, Concrete_SPARC4_Spectral_Response_3, Concrete_SPARC4_Spectral_Response_4\n'), ((12753, 12796), 'numpy.allclose', 'np.allclose', (['new_transmitance', 'transmitance'], {}), '(new_transmitance, transmitance)\n', (12764, 12796), True, 'import numpy as np\n'), ((12533, 12548), 'numpy.ones', 'np.ones', (['(1, n)'], {}), '((1, n))\n', (12540, 12548), True, 'import 
numpy as np\n'), ((608, 673), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 1"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 1', 'ccd.xlsx')\n", (620, 673), False, 'import os\n'), ((819, 884), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 2"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 2', 'ccd.xlsx')\n", (831, 884), False, 'import os\n'), ((1030, 1095), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 3"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 3', 'ccd.xlsx')\n", (1042, 1095), False, 'import os\n'), ((1241, 1306), 'os.path.join', 'os.path.join', (['"""SPARC4_Spectral_Response"""', '"""Channel 4"""', '"""ccd.xlsx"""'], {}), "('SPARC4_Spectral_Response', 'Channel 4', 'ccd.xlsx')\n", (1253, 1306), False, 'import os\n')]
|
from functools import partial, reduce
from math import sqrt
import inspect
def nargs(function):
print(inspect.getfullargspec(function))
def inc(x):
return x + 1
def compose(f, g):
return lambda x: f(g(x))
x = compose(inc, inc)
print(x(0))
def partial(f, arg0):
return lambda *args: f(arg0, *args)
def add(a,b):
return a+b
inc = partial(add, 1)
print(inc(0))
points = [(-0.3,0.4), (-0.3, -0.2),
(0.6,-0.4), (1, 1)]
def norm(N, point):
coords = map(lambda c: c ** N, point)
return sum(coords) ** (1/N)
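# Map each point to its Euclidean (2-)norm, keep only the points inside the
# unit circle, and reduce to the largest remaining distance.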
max_distance = \
reduce(max,
filter(lambda d: d <= 1.0,
map(partial(norm, 2),
points)))
print(max_distance)
|
[
"functools.partial",
"inspect.getfullargspec"
] |
[((354, 369), 'functools.partial', 'partial', (['add', '(1)'], {}), '(add, 1)\n', (361, 369), False, 'from functools import partial, reduce\n'), ((106, 138), 'inspect.getfullargspec', 'inspect.getfullargspec', (['function'], {}), '(function)\n', (128, 138), False, 'import inspect\n'), ((624, 640), 'functools.partial', 'partial', (['norm', '(2)'], {}), '(norm, 2)\n', (631, 640), False, 'from functools import partial, reduce\n')]
|
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import boot_data
import os
import unittest
from boot_data import _SSH_CONFIG_DIR, _SSH_DIR
class TestBootData(unittest.TestCase):
def testProvisionSSHGeneratesFiles(self):
fuchsia_authorized_keys_path = os.path.join(_SSH_DIR,
'fuchsia_authorized_keys')
fuchsia_id_key_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519')
pub_keys_path = os.path.join(_SSH_DIR, 'fuchsia_ed25519.pub')
ssh_config_path = os.path.join(_SSH_CONFIG_DIR, 'ssh_config')
# Check if the keys exists before generating. If they do, delete them
# afterwards before asserting if ProvisionSSH works.
authorized_key_before = os.path.exists(fuchsia_authorized_keys_path)
id_keys_before = os.path.exists(fuchsia_id_key_path)
pub_keys_before = os.path.exists(pub_keys_path)
ssh_config_before = os.path.exists(ssh_config_path)
ssh_dir_before = os.path.exists(_SSH_CONFIG_DIR)
boot_data.ProvisionSSH()
authorized_key_after = os.path.exists(fuchsia_authorized_keys_path)
id_keys_after = os.path.exists(fuchsia_id_key_path)
ssh_config_after = os.path.exists(ssh_config_path)
if not authorized_key_before:
os.remove(fuchsia_authorized_keys_path)
if not id_keys_before:
os.remove(fuchsia_id_key_path)
if not pub_keys_before:
os.remove(pub_keys_path)
if not ssh_config_before:
os.remove(ssh_config_path)
if not ssh_dir_before:
os.rmdir(_SSH_CONFIG_DIR)
    # The *_after variables already hold os.path.exists() results captured
    # before cleanup, so assert the booleans directly.
    self.assertTrue(authorized_key_after)
    self.assertTrue(id_keys_after)
    self.assertTrue(ssh_config_after)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.remove",
"boot_data.ProvisionSSH",
"os.path.exists",
"os.rmdir",
"os.path.join"
] |
[((1848, 1863), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1861, 1863), False, 'import unittest\n'), ((397, 446), 'os.path.join', 'os.path.join', (['_SSH_DIR', '"""fuchsia_authorized_keys"""'], {}), "(_SSH_DIR, 'fuchsia_authorized_keys')\n", (409, 446), False, 'import os\n'), ((521, 562), 'os.path.join', 'os.path.join', (['_SSH_DIR', '"""fuchsia_ed25519"""'], {}), "(_SSH_DIR, 'fuchsia_ed25519')\n", (533, 562), False, 'import os\n'), ((583, 628), 'os.path.join', 'os.path.join', (['_SSH_DIR', '"""fuchsia_ed25519.pub"""'], {}), "(_SSH_DIR, 'fuchsia_ed25519.pub')\n", (595, 628), False, 'import os\n'), ((651, 694), 'os.path.join', 'os.path.join', (['_SSH_CONFIG_DIR', '"""ssh_config"""'], {}), "(_SSH_CONFIG_DIR, 'ssh_config')\n", (663, 694), False, 'import os\n'), ((854, 898), 'os.path.exists', 'os.path.exists', (['fuchsia_authorized_keys_path'], {}), '(fuchsia_authorized_keys_path)\n', (868, 898), False, 'import os\n'), ((920, 955), 'os.path.exists', 'os.path.exists', (['fuchsia_id_key_path'], {}), '(fuchsia_id_key_path)\n', (934, 955), False, 'import os\n'), ((978, 1007), 'os.path.exists', 'os.path.exists', (['pub_keys_path'], {}), '(pub_keys_path)\n', (992, 1007), False, 'import os\n'), ((1032, 1063), 'os.path.exists', 'os.path.exists', (['ssh_config_path'], {}), '(ssh_config_path)\n', (1046, 1063), False, 'import os\n'), ((1085, 1116), 'os.path.exists', 'os.path.exists', (['_SSH_CONFIG_DIR'], {}), '(_SSH_CONFIG_DIR)\n', (1099, 1116), False, 'import os\n'), ((1121, 1145), 'boot_data.ProvisionSSH', 'boot_data.ProvisionSSH', ([], {}), '()\n', (1143, 1145), False, 'import boot_data\n'), ((1173, 1217), 'os.path.exists', 'os.path.exists', (['fuchsia_authorized_keys_path'], {}), '(fuchsia_authorized_keys_path)\n', (1187, 1217), False, 'import os\n'), ((1238, 1273), 'os.path.exists', 'os.path.exists', (['fuchsia_id_key_path'], {}), '(fuchsia_id_key_path)\n', (1252, 1273), False, 'import os\n'), ((1297, 1328), 'os.path.exists', 'os.path.exists', (['ssh_config_path'], {}), '(ssh_config_path)\n', (1311, 1328), False, 'import os\n'), ((1369, 1408), 'os.remove', 'os.remove', (['fuchsia_authorized_keys_path'], {}), '(fuchsia_authorized_keys_path)\n', (1378, 1408), False, 'import os\n'), ((1442, 1472), 'os.remove', 'os.remove', (['fuchsia_id_key_path'], {}), '(fuchsia_id_key_path)\n', (1451, 1472), False, 'import os\n'), ((1507, 1531), 'os.remove', 'os.remove', (['pub_keys_path'], {}), '(pub_keys_path)\n', (1516, 1531), False, 'import os\n'), ((1568, 1594), 'os.remove', 'os.remove', (['ssh_config_path'], {}), '(ssh_config_path)\n', (1577, 1594), False, 'import os\n'), ((1628, 1653), 'os.rmdir', 'os.rmdir', (['_SSH_CONFIG_DIR'], {}), '(_SSH_CONFIG_DIR)\n', (1636, 1653), False, 'import os\n'), ((1674, 1710), 'os.path.exists', 'os.path.exists', (['authorized_key_after'], {}), '(authorized_key_after)\n', (1688, 1710), False, 'import os\n'), ((1732, 1761), 'os.path.exists', 'os.path.exists', (['id_keys_after'], {}), '(id_keys_after)\n', (1746, 1761), False, 'import os\n'), ((1783, 1815), 'os.path.exists', 'os.path.exists', (['ssh_config_after'], {}), '(ssh_config_after)\n', (1797, 1815), False, 'import os\n')]
|
import numpy
import pandas
import requests
from bs4 import BeautifulSoup as bsoup
from time import sleep
from random import randint
# start and end of urls for imbd top 1000 movies site
URL_START = "https://www.imdb.com/search/title/?groups=top_1000&start="
URL_END = "&ref_=adv_nxt"
# data for each movie
titles = []
years = []
runtimes = []
ratings = []
metascores = []
votes = []
grosses = []
headers = {"Accept-Language": "en-US, en;q=0.5"}
pages = numpy.arange(1, 1001, 50)
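# IMDb's top-1000 listing shows 50 titles per page, so stepping by 50
# covers all 1000 movies.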
for page in pages:
cur_page = requests.get(URL_START + str(page) + URL_END, headers = headers)
soup = bsoup(cur_page.text, "html.parser")
# find all divs containing data for each movie
movie_divs = soup.find_all('div', class_='lister-item mode-advanced')
for div in movie_divs:
name = div.h3.a.text
titles.append(name)
year = div.h3.find('span', class_='lister-item-year').text
years.append(year)
runtime = div.p.find('span', class_='runtime').text
runtimes.append(runtime)
rating = float(div.strong.text)
ratings.append(rating)
score = div.find('span', class_='metascore').text if div.find('span', class_='metascore') else '-'
metascores.append(score)
# nv contains the class for both the votes and gross (if it is present) <span> tags
nv = div.find_all('span', attrs={'name': 'nv'})
vote = nv[0].text
votes.append(vote)
gross = nv[1].text if len(nv) > 1 else '-'
grosses.append(gross)
# slow down crawling of imbd site to avoid disrupting website activity
    sleep(randint(2, 8))
movies = pandas.DataFrame({
'movie': titles,
'year': years,
'runtime': runtimes,
'imdb': ratings,
'metascore': metascores,
'votes': votes,
'grossMillions': grosses,
})
# CLEANING DATA
# remove brackets from year and cast string to int
movies['year'] = movies['year'].str.extract(r'(\d+)').astype(int)
# remove ' min' from runtime and cast string to int
movies['runtime'] = movies['runtime'].str.extract(r'(\d+)').astype(int)
# convert metascore to numeric (int) and transform dashes into NaN values
movies['metascore'] = pandas.to_numeric(movies['metascore'], errors='coerce')
# remove commas from votes and cast string to int
movies['votes'] = movies['votes'].str.replace(',', '').astype(int)
# remove '$' and 'M' from grossMillions and cast string to int
movies['grossMillions'] = movies['grossMillions'].map(lambda x: x.lstrip('$').rstrip('M'))
# convert grossMillions to numeric (float) and transform dashes into NaN values
movies['grossMillions'] = pandas.to_numeric(movies['grossMillions'], errors='coerce')
movies.to_csv('movies.csv')
|
[
"pandas.DataFrame",
"random.randint",
"numpy.arange",
"bs4.BeautifulSoup",
"pandas.to_numeric"
] |
[((456, 481), 'numpy.arange', 'numpy.arange', (['(1)', '(1001)', '(50)'], {}), '(1, 1001, 50)\n', (468, 481), False, 'import numpy\n'), ((1634, 1797), 'pandas.DataFrame', 'pandas.DataFrame', (["{'movie': titles, 'year': years, 'runtime': runtimes, 'imdb': ratings,\n 'metascore': metascores, 'votes': votes, 'grossMillions': grosses}"], {}), "({'movie': titles, 'year': years, 'runtime': runtimes,\n 'imdb': ratings, 'metascore': metascores, 'votes': votes,\n 'grossMillions': grosses})\n", (1650, 1797), False, 'import pandas\n'), ((2180, 2235), 'pandas.to_numeric', 'pandas.to_numeric', (["movies['metascore']"], {'errors': '"""coerce"""'}), "(movies['metascore'], errors='coerce')\n", (2197, 2235), False, 'import pandas\n'), ((2616, 2675), 'pandas.to_numeric', 'pandas.to_numeric', (["movies['grossMillions']"], {'errors': '"""coerce"""'}), "(movies['grossMillions'], errors='coerce')\n", (2633, 2675), False, 'import pandas\n'), ((591, 626), 'bs4.BeautifulSoup', 'bsoup', (['cur_page.text', '"""html.parser"""'], {}), "(cur_page.text, 'html.parser')\n", (596, 626), True, 'from bs4 import BeautifulSoup as bsoup\n'), ((1610, 1623), 'random.randint', 'randint', (['(2)', '(8)'], {}), '(2, 8)\n', (1617, 1623), False, 'from random import randint\n')]
|
import socket
import select
import logging
import binascii
from os import system, path
import sys
import signal
from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger
import constants
signal.signal(signal.SIGINT, kill_signal_handler)
class Connector():
'''
Class that handles the network connection for breach.
'''
def __init__(self, args_dict):
'''
Initialize loggers and arguments dictionary.
'''
self.args_dict = args_dict
if 'full_logger' not in args_dict:
if args_dict['verbose'] < 4:
setup_logger('full_logger', 'full_breach.log', args_dict, logging.ERROR)
else:
setup_logger('full_logger', 'full_breach.log', args_dict)
self.full_logger = logging.getLogger('full_logger')
self.args_dict['full_logger'] = self.full_logger
else:
self.full_logger = args_dict['full_logger']
if 'basic_logger' not in args_dict:
if args_dict['verbose'] < 3:
setup_logger('basic_logger', 'basic_breach.log', args_dict, logging.ERROR)
else:
setup_logger('basic_logger', 'basic_breach.log', args_dict)
self.basic_logger = logging.getLogger('basic_logger')
self.args_dict['basic_logger'] = self.basic_logger
else:
self.basic_logger = args_dict['basic_logger']
if 'debug_logger' not in args_dict:
if args_dict['verbose'] < 2:
setup_logger('debug_logger', 'debug.log', args_dict, logging.ERROR)
else:
setup_logger('debug_logger', 'debug.log', args_dict)
self.debug_logger = logging.getLogger('debug_logger')
self.args_dict['debug_logger'] = self.debug_logger
else:
self.debug_logger = args_dict['debug_logger']
return
def log_data(self, data):
'''
Print hexadecimal and ASCII representation of data
'''
pad = 0
output = []
buff = '' # Buffer of 16 chars
for i in xrange(0, len(data), constants.LOG_BUFFER):
buff = data[i:i+constants.LOG_BUFFER]
hex = binascii.hexlify(buff) # Hex representation of data
pad = 32 - len(hex)
txt = '' # ASCII representation of data
for ch in buff:
if ord(ch)>126 or ord(ch)<33:
txt = txt + '.'
else:
txt = txt + chr(ord(ch))
output.append('%2d\t %s%s\t %s' % (i, hex, pad*' ', txt))
return '\n'.join(output)
def parse(self, data, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, is_response = False):
'''
Parse data and print header information and payload.
'''
lg = ['\n']
downgrade = False
# Check for defragmentation between packets
if is_response:
# Check if TLS record header was chunked between packets and append it to the beginning
if chunked_endpoint_header:
data = chunked_endpoint_header + data
chunked_endpoint_header = None
# Check if there are any remaining bytes from previous record
if past_bytes_endpoint:
lg.append('Data from previous TLS record: Endpoint\n')
if past_bytes_endpoint >= len(data):
lg.append(self.log_data(data))
lg.append('\n')
past_bytes_endpoint = past_bytes_endpoint - len(data)
return ('\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)
else:
lg.append(self.log_data(data[0:past_bytes_endpoint]))
lg.append('\n')
data = data[past_bytes_endpoint:]
past_bytes_endpoint = 0
else:
if chunked_user_header:
data = chunked_user_header + data
chunked_user_header = None
if past_bytes_user:
lg.append('Data from previous TLS record: User\n')
if past_bytes_user >= len(data):
lg.append(self.log_data(data))
lg.append('\n')
past_bytes_user = past_bytes_user - len(data)
return ('\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)
else:
lg.append(self.log_data(data[0:past_bytes_user]))
lg.append('\n')
data = data[past_bytes_user:]
past_bytes_user = 0
try:
cont_type = ord(data[constants.TLS_CONTENT_TYPE])
version = (ord(data[constants.TLS_VERSION_MAJOR]), ord(data[constants.TLS_VERSION_MINOR]))
length = 256*ord(data[constants.TLS_LENGTH_MAJOR]) + ord(data[constants.TLS_LENGTH_MINOR])
except Exception as exc:
self.full_logger.debug('Only %d remaining for next record, TLS header gets chunked' % len(data))
self.full_logger.debug(exc)
if is_response:
chunked_endpoint_header = data
else:
chunked_user_header = data
return ('', past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)
if is_response:
if cont_type in constants.TLS_CONTENT:
self.basic_logger.debug('Endpoint %s Length: %d' % (constants.TLS_CONTENT[cont_type], length))
if cont_type == 23:
with open('out.out', 'a') as f:
f.write('Endpoint application payload: %d\n' % length)
f.close()
else:
self.basic_logger.debug('Unassigned Content Type record (len = %d)' % len(data))
lg.append('Source : Endpoint')
else:
if cont_type in constants.TLS_CONTENT:
self.basic_logger.debug('User %s Length: %d' % (constants.TLS_CONTENT[cont_type], length))
if cont_type == 22:
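                    # A handshake record advertising a TLS version above the
                    # configured maximum marks this flow for a downgrade attempt.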
if ord(data[constants.MAX_TLS_POSITION]) > constants.MAX_TLS_ALLOWED:
downgrade = True
if cont_type == 23:
with open('out.out', 'a') as f:
f.write('User application payload: %d\n' % length)
f.close()
else:
self.basic_logger.debug('Unassigned Content Type record (len = %d)' % len(data))
lg.append('Source : User')
try:
lg.append('Content Type : ' + constants.TLS_CONTENT[cont_type])
except:
lg.append('Content Type: Unassigned %d' % cont_type)
try:
lg.append('TLS Version : ' + constants.TLS_VERSION[(version[0], version[1])])
except:
lg.append('TLS Version: Uknown %d %d' % (version[0], version[1]))
lg.append('TLS Payload Length: %d' % length)
lg.append('(Remaining) Packet Data length: %d\n' % len(data))
# Check if TLS record spans to next TCP segment
if len(data) - constants.TLS_HEADER_LENGTH < length:
if is_response:
past_bytes_endpoint = length + constants.TLS_HEADER_LENGTH - len(data)
else:
past_bytes_user = length + constants.TLS_HEADER_LENGTH - len(data)
lg.append(self.log_data(data[0:constants.TLS_HEADER_LENGTH]))
lg.append(self.log_data(data[constants.TLS_HEADER_LENGTH:constants.TLS_HEADER_LENGTH+length]))
lg.append('\n')
# Check if packet has more than one TLS records
if length < len(data) - constants.TLS_HEADER_LENGTH:
more_records, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, _ = self.parse(
data[constants.TLS_HEADER_LENGTH+length:],
past_bytes_endpoint,
past_bytes_user,
chunked_endpoint_header,
chunked_user_header,
is_response
)
lg.append(more_records)
return ('\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)
def start(self):
'''
Start sockets on user side (proxy as server) and endpoint side (proxy as client).
'''
self.full_logger.info('Starting Proxy')
try:
self.user_setup()
self.endpoint_setup()
except:
pass
self.full_logger.info('Proxy is set up')
return
def restart(self, attempt_counter = 0):
'''
Restart sockets in case of error.
'''
self.full_logger.info('Restarting Proxy')
try:
self.user_socket.close()
self.endpoint_socket.close()
except:
pass
try:
self.user_setup()
self.endpoint_setup()
except:
if attempt_counter < 3:
self.full_logger.debug('Reattempting restart')
self.restart(attempt_counter+1)
else:
self.full_logger.debug('Multiple failed attempts to restart')
self.stop(-9)
sys.exit(-1)
self.full_logger.info('Proxy has restarted')
return
def stop(self, exit_code = 0):
'''
Shutdown sockets and terminate connection.
'''
try:
self.user_connection.close()
self.endpoint_socket.close()
except:
pass
self.full_logger.info('Connection closed')
self.debug_logger.debug('Stopping breach object with code: %d' % exit_code)
return
def user_setup(self):
'''
Create and configure user side socket.
'''
try:
self.full_logger.info('Setting up user socket')
user_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
user_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Set options to reuse socket
user_socket.bind((constants.USER, constants.USER_PORT))
self.full_logger.info('User socket bind complete')
user_socket.listen(1)
self.full_logger.info('User socket listen complete')
self.user_connection, self.address = user_socket.accept()
self.user_socket = user_socket
self.full_logger.info('User socket is set up')
except:
self.stop(-8)
sys.exit(-1)
return
def endpoint_setup(self):
'''
Create and configure endpoint side socket
'''
try:
self.full_logger.info('Setting up endpoint socket')
endpoint_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.full_logger.info('Connecting endpoint socket')
endpoint_socket.connect((constants.ENDPOINT, constants.ENDPOINT_PORT))
endpoint_socket.setblocking(0) # Set non-blocking, i.e. raise exception if send/recv is not completed
self.endpoint_socket = endpoint_socket
self.full_logger.info('Endpoint socket is set up')
except:
self.stop(-7)
sys.exit(-1)
return
def execute_breach(self):
'''
Start proxy and execute main loop
'''
# Initialize parameters for execution.
past_bytes_user = 0 # Number of bytes expanding to future user packets
past_bytes_endpoint = 0 # Number of bytes expanding to future endpoint packets
chunked_user_header = None # TLS user header portion that gets stuck between packets
chunked_endpoint_header = None # TLS endpoint header portion that gets stuck between packets
self.start()
self.full_logger.info('Starting main proxy loop')
try:
while 1:
ready_to_read, ready_to_write, in_error = select.select(
[self.user_connection, self.endpoint_socket],
[],
[],
5
)
if self.user_connection in ready_to_read: # If user side socket is ready to read...
data = ''
try:
data = self.user_connection.recv(constants.SOCKET_BUFFER) # ...receive data from user...
except Exception as exc:
self.full_logger.debug('User connection error')
self.full_logger.debug(exc)
self.stop(-6)
break
if len(data) == 0:
self.full_logger.info('User connection closed')
self.stop(-5)
else:
self.basic_logger.debug('User Packet Length: %d' % len(data))
output, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade = self.parse(
data,
past_bytes_endpoint,
past_bytes_user,
chunked_endpoint_header,
chunked_user_header
) # ...parse it...
self.full_logger.debug(output)
try:
if downgrade and constants.ATTEMPT_DOWNGRADE:
alert = 'HANDSHAKE_FAILURE'
                                # The alert is a self-contained record, so pass
                                # empty chunked-header state and is_response=True.
                                output, _, _, _, _, _ = self.parse(
                                    constants.ALERT_MESSAGES[alert],
                                    past_bytes_endpoint,
                                    past_bytes_user,
                                    None,
                                    None,
                                    True
                                )
self.full_logger.debug('\n\n' + 'Downgrade Attempt' + output)
self.user_connection.sendall(constants.ALERT_MESSAGES[alert]) # if we are trying to downgrade, send fatal alert to user
continue
self.endpoint_socket.sendall(data) # ...and send it to endpoint
except Exception as exc:
self.full_logger.debug('User data forwarding error')
self.full_logger.debug(exc)
self.stop(-4)
break
if self.endpoint_socket in ready_to_read: # Same for the endpoint side
data = ''
try:
data = self.endpoint_socket.recv(constants.SOCKET_BUFFER)
except Exception as exc:
self.full_logger.debug('Endpoint connection error')
self.full_logger.debug(exc)
self.stop(-3)
break
if len(data) == 0:
self.full_logger.info('Endpoint connection closed')
self.stop(5)
break
else:
self.basic_logger.debug('Endpoint Packet Length: %d' % len(data))
output, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, _ = self.parse(
data,
past_bytes_endpoint,
past_bytes_user,
chunked_endpoint_header,
chunked_user_header,
True
)
self.full_logger.debug(output)
try:
self.user_connection.sendall(data)
except Exception as exc:
self.full_logger.debug('Endpoint data forwarding error')
self.full_logger.debug(exc)
self.stop(-2)
break
except Exception as e:
self.stop(-1)
return
if __name__ == '__main__':
args_dict = get_arguments_dict(sys.argv)
conn = Connector(args_dict)
conn.full_logger.info('Hillclimbing parameters file created')
conn.execute_breach()
|
[
"iolibrary.get_arguments_dict",
"binascii.hexlify",
"socket.socket",
"iolibrary.setup_logger",
"select.select",
"sys.exit",
"signal.signal",
"logging.getLogger"
] |
[((206, 255), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'kill_signal_handler'], {}), '(signal.SIGINT, kill_signal_handler)\n', (219, 255), False, 'import signal\n'), ((19732, 19760), 'iolibrary.get_arguments_dict', 'get_arguments_dict', (['sys.argv'], {}), '(sys.argv)\n', (19750, 19760), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((793, 825), 'logging.getLogger', 'logging.getLogger', (['"""full_logger"""'], {}), "('full_logger')\n", (810, 825), False, 'import logging\n'), ((1259, 1292), 'logging.getLogger', 'logging.getLogger', (['"""basic_logger"""'], {}), "('basic_logger')\n", (1276, 1292), False, 'import logging\n'), ((1716, 1749), 'logging.getLogger', 'logging.getLogger', (['"""debug_logger"""'], {}), "('debug_logger')\n", (1733, 1749), False, 'import logging\n'), ((2227, 2249), 'binascii.hexlify', 'binascii.hexlify', (['buff'], {}), '(buff)\n', (2243, 2249), False, 'import binascii\n'), ((11121, 11170), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (11134, 11170), False, 'import socket\n'), ((11975, 12024), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (11988, 12024), False, 'import socket\n'), ((597, 669), 'iolibrary.setup_logger', 'setup_logger', (['"""full_logger"""', '"""full_breach.log"""', 'args_dict', 'logging.ERROR'], {}), "('full_logger', 'full_breach.log', args_dict, logging.ERROR)\n", (609, 669), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((704, 761), 'iolibrary.setup_logger', 'setup_logger', (['"""full_logger"""', '"""full_breach.log"""', 'args_dict'], {}), "('full_logger', 'full_breach.log', args_dict)\n", (716, 761), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((1058, 1132), 'iolibrary.setup_logger', 'setup_logger', (['"""basic_logger"""', '"""basic_breach.log"""', 'args_dict', 'logging.ERROR'], {}), "('basic_logger', 'basic_breach.log', args_dict, logging.ERROR)\n", (1070, 1132), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((1167, 1226), 'iolibrary.setup_logger', 'setup_logger', (['"""basic_logger"""', '"""basic_breach.log"""', 'args_dict'], {}), "('basic_logger', 'basic_breach.log', args_dict)\n", (1179, 1226), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((1529, 1596), 'iolibrary.setup_logger', 'setup_logger', (['"""debug_logger"""', '"""debug.log"""', 'args_dict', 'logging.ERROR'], {}), "('debug_logger', 'debug.log', args_dict, logging.ERROR)\n", (1541, 1596), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((1631, 1683), 'iolibrary.setup_logger', 'setup_logger', (['"""debug_logger"""', '"""debug.log"""', 'args_dict'], {}), "('debug_logger', 'debug.log', args_dict)\n", (1643, 1683), False, 'from iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\n'), ((11735, 11747), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (11743, 11747), False, 'import sys\n'), ((12454, 12466), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (12462, 12466), False, 'import sys\n'), ((13158, 13228), 'select.select', 'select.select', (['[self.user_connection, self.endpoint_socket]', '[]', '[]', '(5)'], {}), '([self.user_connection, self.endpoint_socket], [], [], 5)\n', (13171, 13228), False, 'import select\n'), ((10453, 10465), 'sys.exit', 
'sys.exit', (['(-1)'], {}), '(-1)\n', (10461, 10465), False, 'import sys\n')]
|
#!/usr/bin/python3
import netmiko,time
#multi vendor library
device1={
'username' : 'lalit',
'password' : '<PASSWORD>',
'device_type' : 'cisco_ios',
'host' : '192.168.234.131'
}
#to connect to target device
#by checking couple of things connect handler will allow you to connect
device_connect=netmiko.ConnectHandler(**device1)
#print([i for i in dir(device_connect) if 'send' in i])
#now sending configuration for device
conf=["hostname pyrouter1","username hello privi 10 password <PASSWORD>","end"]
#output=device_connect.send_config_set(conf)
#print(output)
#sending configuration from file
output1=device_connect.send_config_from_file('myrouter.txt')
print(output1)
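#close the SSH session cleanly when finished (netmiko connection objects provide disconnect())
device_connect.disconnect()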
|
[
"netmiko.ConnectHandler"
] |
[((303, 336), 'netmiko.ConnectHandler', 'netmiko.ConnectHandler', ([], {}), '(**device1)\n', (325, 336), False, 'import netmiko, time\n')]
|
#########################################################################
### Program clean tweets ###
### 1. spaCy POS tagging for relevant tweets (apple fruit vs iphone) ###
### 2. Sentiment analysis of tweets ###
### 3. Group tweets by date ###
### 4. Process tweets by removing URLs, hashtags, emoticons ###
### 5. Feature engineering ###
### 6. Tokenise, remove stopwords, lemmatise tweets ###
### 7. Join with prices, derive price features and target label ###
### Output 1 pickle per ticker ###
#########################################################################
""" Copyright 2017, <NAME>, All rights reserved. """
## Credit for NLP cleaning portion
import pandas as pd
import numpy as np
import json
import string
import ast
from datetime import timedelta
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
# nltk.download('stopwords')
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('averaged_perceptron_tagger')
stoplist = stopwords.words('english')
my_stopwords = "multiExclamation multiQuestion multiStop url atUser st rd nd th am pm" # my extra stopwords
stoplist = stoplist + my_stopwords.split()
lemmatizer = WordNetLemmatizer() # set lemmatizer
from techniques import *
import spacy
from spacy import displacy
import en_core_web_sm
nlp = en_core_web_sm.load()
from nltk.sentiment.vader import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
# Remove 5 companies: CAT, DIS, DOW, TRV, WBA
ticker = ["MMM OR 3M", "AXP OR American Express", "AAPL OR Apple", "BA OR Boeing", \
"CVX OR Chevron", "CSCO OR Cisco", "KO OR Coca-Cola", "XOM OR Exxon Mobil", \
"GS OR Goldman Sachs", "HD OR Home Depot", "IBM", "INTC OR Intel", \
"JNJ OR Johnson & Johnson", "JPM OR JPMorgan Chase", "MCD OR McDonald's", \
"MRK OR Merck", "MSFT OR Microsoft", "NKE OR Nike", "PFE OR Pfizer", \
"PG OR Procter & Gamble", "UTX OR United Technologies", "UNH OR UnitedHealth", \
"VZ OR Verizon", "V OR Visa", "WMT OR Wal-Mart"]
ticker_symbol = ["MMM", "AXP", "AAPL", "BA", \
"CVX", "CSCO", "KO", "XOM", \
"GS", "HD", "IBM", "INTC", \
"JNJ", "JPM", "MCD", \
"MRK", "MSFT", "NKE", "PFE", \
"PG", "UTX", "UNH",
"VZ", "V", "WMT"]
########################################################################
### 1. spaCy POS tagging for relevant tweets (apple fruit vs iphone) ###
########################################################################
def spacy_pos(df, name):
'''
POS-tag each token and filter for texts with "ORG" label
Parameters
----------
df (pandas DataFrame)
name (string) ticker name
Returns
-------
the processed pandas DataFrame
'''
def find_org(text, name):
doc = nlp(text)
for ent in doc.ents:
# print(ent.text, ent.label_)
if (ent.text.lower()==name.lower()) & (ent.label_=='ORG'):
return True
return False
df['relevant'] = [find_org(text,name) for text in df['text']]
print("Before:", df.shape)
df = df[(df['relevant']==True)]
print("After:", df.shape)
return df
########################################################################
### 2. Sentiment analysis of tweets ###
### 3. Group tweets by date ###
########################################################################
def group_tweets_by_date(df, symbol, name):
'''
Aggregate all columns after grouping rows by dates.
Shift weekend tweets to following Monday.
Parameters
----------
df (pandas DataFrame)
symbol (string) ticker symbol eg. AAPL
name (string) ticker name eg. Apple
Returns
-------
the processed pandas DataFrame
'''
df_filter = df[["text", "hashtags", "likes", "replies", "parent_tweet_id", "timestamp"]]
df_filter.likes = df.likes.astype('int64')
df_filter.replies = df.replies.astype('int64')
# remove retweets
df_filter = df_filter[df_filter.parent_tweet_id.isnull()]
df_filter['hashtags'] = df_filter['hashtags'].apply(ast.literal_eval)
df_filter['hashtags'] = df_filter['hashtags'].apply(lambda x : ','.join(x))
df_filter['timestamp'] = pd.to_datetime(df_filter['timestamp'])
df_filter['day'] = df_filter['timestamp'].dt.dayofweek
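    # VADER compound polarity in [-1, 1] per tweet; averaged per trading day in the groupby below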
df_filter['vader'] = [analyser.polarity_scores(tweet)['compound'] for tweet in df_filter['text']]
# carry forward weekend tweets to following Monday (1 or 2 days)
df_filter['stock_date'] = np.where(df_filter['day']>4,
df_filter['timestamp'] + pd.to_timedelta(7-df_filter['day'], unit='d'),
df_filter['timestamp']
)
# group tweets by dates
df_filter['stock_date'] = df_filter['stock_date'].dt.date
df_filter = df_filter.groupby(df_filter['stock_date']).agg({'text': lambda x: ','.join(x),
'hashtags': lambda x: ','.join(x),
'likes':'sum',
'replies': 'sum',
'vader': 'mean'
})
df_filter['hashtags'] = df_filter['hashtags'].apply(lambda hashtags: list(filter(None, hashtags.split(','))))
df_filter['text_removeCompany'] = df_filter.text.str.replace(symbol+' ','')
name = name.lower()
df_filter['text_removeCompany'] = df_filter.text_removeCompany.str.lower().str.replace(name+" ",'')
df_filter = df_filter.reset_index(drop=False)
return df_filter
########################################################################
### 6. Tokenise, remove stopwords, lemmatise tweets ###
########################################################################
def tokenize(text):
'''
Tokenise texts, remove stopwords, lemmatise word.
Parameters
----------
text (string)
Returns
-------
list of tokens (string)
'''
onlyOneSentenceTokens = [] # tokens of one sentence each time
tokens = word_tokenize(text)
tokens = replaceNegations(tokens)
translator = str.maketrans('', '', string.punctuation)
text = text.translate(translator) # Remove punctuation
tokens = nltk.word_tokenize(text)
for w in tokens:
if (w not in stoplist):
final_word = w.lower()
final_word = replaceElongated(final_word)
final_word = lemmatizer.lemmatize(final_word)
onlyOneSentenceTokens.append(final_word)
onlyOneSentence = " ".join(onlyOneSentenceTokens) # form again the sentence from the list of tokens
return onlyOneSentenceTokens
########################################################################
### 4. Process tweets by removing URLs, hashtags, emoticons ###
### 5. Feature engineering of numerical features ###
########################################################################
# A clean tweet should not contain URLs, hashtags (i.e. #happy) or mentions (i.e. @BarackObama)
def clean_dirty_tweets(text_series):
'''
Clean tweets before tokenisation.
Parameters
----------
text_series (pandas Series)
Returns
-------
the pandas DataFrame containing processed text
and other engineered features
'''
clean_tweets = []
for text in text_series:
totalEmoticons = 0
totalSlangs = 0
totalSlangsFound = []
totalElongated = 0
totalMultiExclamationMarks = 0
totalMultiQuestionMarks = 0
totalMultiStopMarks = 0
totalAllCaps = 0
text = removeUnicode(text)
text = replaceURL(text)
text = replaceAtUser(text)
text = removeWholeHashtag(text)
temp_slangs, temp_slangsFound = countSlang(text)
totalSlangs += temp_slangs
for word in temp_slangsFound:
totalSlangsFound.append(word) # all the slangs found in all sentences
text = replaceSlang(text)
text = replaceContraction(text)
text = removeNumbers(text)
emoticons = countEmoticons(text)
totalEmoticons += emoticons
text = removeEmoticons(text)
totalAllCaps += countAllCaps(text)
totalMultiExclamationMarks += countMultiExclamationMarks(text)
totalMultiQuestionMarks += countMultiQuestionMarks(text)
totalMultiStopMarks += countMultiStopMarks(text)
text = replaceMultiExclamationMark(text)
text = replaceMultiQuestionMark(text)
text = replaceMultiStopMark(text)
totalElongated += countElongated(text)
tokenized_tweet = tokenize(text)
clean_tweets.append([tokenized_tweet, totalEmoticons, totalSlangs,
totalSlangsFound, totalElongated, totalMultiExclamationMarks,
totalMultiQuestionMarks, totalMultiStopMarks, totalAllCaps])
# form new dataframe
df_clean_tweets = pd.DataFrame(clean_tweets,columns=['tokenized_tweet', 'totalEmoticons', 'totalSlangs',
'totalSlangsFound', 'totalElongated', 'totalMultiExclamationMarks',
'totalMultiQuestionMarks', 'totalMultiStopMarks', 'totalAllCaps'])
return df_clean_tweets
# def spellcheck(tweet):
# tweet_spellchecked = []
# print(len(tweet))
# for word in tweet:
# if len(word)>1:
# word = spellCorrection(word) # Technique 12: correction of spelling errors
# tweet_spellchecked.append(word)
# return tweet_spellchecked
price_labels = pd.read_csv("../../Raw Data/Price/price_labels.csv")
for i in range(len(ticker_symbol)):
df = pd.read_csv('../Raw Data/Tweets/'+ticker_symbol[i]+'_tweets.csv')
print("Now cleaning:", ticker_symbol[i])
print("Check pos tag...")
if ticker_symbol[i] in ['JPM', "MMM", "KO", "JNJ", "PFE", "TRV", "V", "UNH"]:
df_filter = df
else:
df_filter = spacy_pos(df, ticker_name[i])
print("Group tweets by date...")
    df_filter = group_tweets_by_date(df_filter, ticker_symbol[i], ticker_name[i])
print("Number of records (weekdays):", df_filter.shape)
print("Process raw tweets...")
df_clean_tweets = clean_dirty_tweets(df_filter.text_removeCompany)
# # spell_check_col = [spellcheck(tweet) for tweet in df_clean_tweets['tokenized_tweet']]
# # print("spell check")
# # df_clean_tweets['tokenized_tweet_spellcheck'] = spell_check_col
# Join original df with df from tokenising + results
df_tweets_final = pd.concat([df_filter, df_clean_tweets], axis = 1)
####################################################################
### 7. Join with prices, derive price features and target label ###
####################################################################
price_labels_xticker = price_labels[price_labels['Ticker']==ticker_symbol[i]][['Date', "Adj Close"]]
print("Number of business days:", price_labels_xticker.shape)
price_labels_xticker.loc[:,'Date'] = pd.to_datetime(price_labels_xticker['Date']).dt.date
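    # hist_returns: 1-day trailing log10 return; returns5: 5-day forward log10 return;
    # label5: +1 if the 5-day forward return is non-negative, else -1 (the classification target)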
price_labels_xticker.loc[:,'hist_returns'] = np.log10(price_labels_xticker['Adj Close']/price_labels_xticker['Adj Close'].shift())
price_labels_xticker.loc[:,'returns5'] = np.log10(price_labels_xticker['Adj Close'].shift(-5)/price_labels_xticker['Adj Close'])
price_labels_xticker.loc[:,'label5'] = np.where(price_labels_xticker['returns5']>=0,1,-1)
joined_df = price_labels_xticker.join(df_tweets_final.set_index("stock_date"), on='Date', how='left')
print("Longest NaN period:", joined_df.text.isnull().astype(int).groupby(joined_df.text.notnull().astype(int).cumsum()).sum().max())
# joined_df = joined_df.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1)
joined_df['Date'] = pd.to_datetime(joined_df['Date'])
joined_df['Year'] = joined_df.Date.dt.year
joined_df['Month'] = joined_df.Date.dt.month
joined_df['vader_standardise'] = (joined_df['vader']-joined_df['vader'].expanding().mean())/joined_df['vader'].expanding().std()
joined_df['vader3'] = joined_df['vader_standardise'].rolling(window=3, min_periods=2).sum()
joined_df.to_pickle("../../Processed Data/Tweets/"+ticker_symbol[i]+"_df.pkl")
|
[
"pandas.DataFrame",
"nltk.stem.WordNetLemmatizer",
"nltk.sentiment.vader.SentimentIntensityAnalyzer",
"pandas.read_csv",
"numpy.where",
"pandas.to_datetime",
"pandas.to_timedelta",
"nltk.corpus.stopwords.words",
"en_core_web_sm.load",
"pandas.concat",
"nltk.word_tokenize"
] |
[((1269, 1295), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1284, 1295), False, 'from nltk.corpus import stopwords\n'), ((1460, 1479), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1477, 1479), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1592, 1613), 'en_core_web_sm.load', 'en_core_web_sm.load', ([], {}), '()\n', (1611, 1613), False, 'import en_core_web_sm\n'), ((1686, 1714), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (1712, 1714), False, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer\n'), ((10368, 10420), 'pandas.read_csv', 'pd.read_csv', (['"""../../Raw Data/Price/price_labels.csv"""'], {}), "('../../Raw Data/Price/price_labels.csv')\n", (10379, 10420), True, 'import pandas as pd\n'), ((4735, 4773), 'pandas.to_datetime', 'pd.to_datetime', (["df_filter['timestamp']"], {}), "(df_filter['timestamp'])\n", (4749, 4773), True, 'import pandas as pd\n'), ((6792, 6811), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (6805, 6811), False, 'from nltk import word_tokenize\n'), ((6984, 7008), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (7002, 7008), False, 'import nltk\n'), ((9751, 9985), 'pandas.DataFrame', 'pd.DataFrame', (['clean_tweets'], {'columns': "['tokenized_tweet', 'totalEmoticons', 'totalSlangs', 'totalSlangsFound',\n 'totalElongated', 'totalMultiExclamationMarks',\n 'totalMultiQuestionMarks', 'totalMultiStopMarks', 'totalAllCaps']"}), "(clean_tweets, columns=['tokenized_tweet', 'totalEmoticons',\n 'totalSlangs', 'totalSlangsFound', 'totalElongated',\n 'totalMultiExclamationMarks', 'totalMultiQuestionMarks',\n 'totalMultiStopMarks', 'totalAllCaps'])\n", (9763, 9985), True, 'import pandas as pd\n'), ((10467, 10536), 'pandas.read_csv', 'pd.read_csv', (["('../Raw Data/Tweets/' + ticker_symbol[i] + '_tweets.csv')"], {}), "('../Raw Data/Tweets/' + ticker_symbol[i] + '_tweets.csv')\n", (10478, 10536), True, 'import pandas as pd\n'), ((11350, 11397), 'pandas.concat', 'pd.concat', (['[df_filter, df_clean_tweets]'], {'axis': '(1)'}), '([df_filter, df_clean_tweets], axis=1)\n', (11359, 11397), True, 'import pandas as pd\n'), ((12200, 12254), 'numpy.where', 'np.where', (["(price_labels_xticker['returns5'] >= 0)", '(1)', '(-1)'], {}), "(price_labels_xticker['returns5'] >= 0, 1, -1)\n", (12208, 12254), True, 'import numpy as np\n'), ((12596, 12629), 'pandas.to_datetime', 'pd.to_datetime', (["joined_df['Date']"], {}), "(joined_df['Date'])\n", (12610, 12629), True, 'import pandas as pd\n'), ((5131, 5178), 'pandas.to_timedelta', 'pd.to_timedelta', (["(7 - df_filter['day'])"], {'unit': '"""d"""'}), "(7 - df_filter['day'], unit='d')\n", (5146, 5178), True, 'import pandas as pd\n'), ((11836, 11880), 'pandas.to_datetime', 'pd.to_datetime', (["price_labels_xticker['Date']"], {}), "(price_labels_xticker['Date'])\n", (11850, 11880), True, 'import pandas as pd\n')]
|
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Generate MP4 videos with map entities rendered on top of sensor imagery, for all cameras, for a single log.
We use an inferred depth map from LiDAR to render only visible map entities (lanes and pedestrian crossings).
"""
import logging
import os
import sys
import time
from pathlib import Path
from typing import Final, List, Tuple
import click
import numpy as np
import av2.geometry.interpolate as interp_utils
import av2.rendering.video as video_utils
import av2.utils.io as io_utils
import av2.utils.raster as raster_utils
from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader
from av2.datasets.sensor.constants import RingCameras
from av2.map.map_api import ArgoverseStaticMap
from av2.rendering.color import BLUE_BGR
from av2.rendering.map import EgoViewMapRenderer
from av2.utils.typing import NDArrayByte
RING_CAMERA_FPS: Final[int] = 20
logger = logging.getLogger(__name__)
def generate_egoview_overlaid_map(
data_root: Path,
output_dir: Path,
log_id: str,
max_range_m: float,
use_depth_map_for_occlusion: bool,
dump_single_frames: bool,
cam_names: List[RingCameras],
) -> None:
"""Render the map from a particular camera's viewpoint for each camera frame.
Args:
data_root: path to where the AV2 logs live.
output_dir: path to directory where renderings will be saved.
log_id: unique ID for AV2 scenario/log.
max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
use_depth_map_for_occlusion: whether to use an inferred depth map for rendering occluded elements.
dump_single_frames: Whether to save to disk individual RGB frames of the rendering, in addition to generating
the mp4 file.
cam_names: list of camera names. For each camera, its viewport will be used to render the map.
"""
loader = AV2SensorDataLoader(data_dir=data_root, labels_dir=data_root)
log_map_dirpath = data_root / log_id / "map"
avm = ArgoverseStaticMap.from_map_dir(log_map_dirpath, build_raster=True)
for _, cam_enum in enumerate(cam_names):
cam_name = cam_enum.value
pinhole_cam = loader.get_log_pinhole_camera(log_id, cam_name)
cam_im_fpaths = loader.get_ordered_log_cam_fpaths(log_id, cam_name)
num_cam_imgs = len(cam_im_fpaths)
video_list = []
for i, img_fpath in enumerate(cam_im_fpaths):
if i % 50 == 0:
logging.info(f"\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}")
cam_timestamp_ns = int(img_fpath.stem)
city_SE3_ego = loader.get_city_SE3_ego(log_id, cam_timestamp_ns)
if city_SE3_ego is None:
logger.info("missing LiDAR pose")
continue
            # load feather file path, e.g. '315978406032859416.feather'
lidar_fpath = loader.get_closest_lidar_fpath(log_id, cam_timestamp_ns)
if lidar_fpath is None:
# without depth map, can't do this accurately
continue
lidar_points = io_utils.read_lidar_sweep(lidar_fpath, attrib_spec="xyz")
lidar_timestamp_ns = int(lidar_fpath.stem)
if use_depth_map_for_occlusion:
depth_map = loader.get_depth_map_from_lidar(
lidar_points=lidar_points,
cam_name=cam_name,
log_id=log_id,
cam_timestamp_ns=cam_timestamp_ns,
lidar_timestamp_ns=lidar_timestamp_ns,
)
else:
depth_map = None
egoview_renderer = EgoViewMapRenderer(
depth_map=depth_map, city_SE3_ego=city_SE3_ego, pinhole_cam=pinhole_cam, avm=avm
)
frame_rgb = render_egoview(
output_dir=output_dir,
img_fpath=img_fpath,
egoview_renderer=egoview_renderer,
cam_timestamp_ns=cam_timestamp_ns,
log_id=log_id,
max_range_m=max_range_m,
dump_single_frames=dump_single_frames,
)
video_list.append(frame_rgb)
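        # stack per-frame (H,W,3) arrays into an (N,H,W,3) uint8 tensor for mp4 encoding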
video: NDArrayByte = np.stack(video_list).astype(np.uint8)
video_output_dir = output_dir / "videos"
video_utils.write_video(
video=video,
dst=video_output_dir / f"{log_id}_{cam_name}.mp4",
fps=RING_CAMERA_FPS,
preset="medium",
)
def render_egoview(
output_dir: Path,
img_fpath: Path,
egoview_renderer: EgoViewMapRenderer,
cam_timestamp_ns: int,
log_id: str,
max_range_m: float,
dump_single_frames: bool,
) -> NDArrayByte:
"""Synthetically manipulate a vector map, render the map in the ego-view, and save rendering to disk.
Args:
output_dir: path to directory where renderings will be saved.
img_fpath: path to RGB image, from one of the ring or stereo cameras.
egoview_renderer: rendering engine for map elements in the ego-view.
cam_timestamp_ns: nanosecond camera timestamp when image was captured.
log_id: unique ID for AV2 scenario/log.
max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
dump_single_frames: Whether to save to disk individual RGB frames of the rendering, in addition to generating
the mp4 file.
Returns:
array of shape (H,W,3) and type uint8 representing a RGB image.
"""
save_dir = output_dir / log_id
if dump_single_frames:
# we only create log-specific directories, if dumping individual frames.
save_dir.mkdir(exist_ok=True, parents=True)
img_fname = f"{egoview_renderer.pinhole_cam.cam_name}_{cam_timestamp_ns}_vectormap.jpg"
save_fpath = save_dir / img_fname
if save_fpath.exists():
logger.info("Rendered image already exists, skipping")
img: NDArrayByte = io_utils.read_img(save_fpath)
return img
start = time.time()
img_rgb: NDArrayByte = io_utils.read_img(img_fpath)
# to prevent washing out, can pass in black image, and get just mask back, or can overlay directly.
img_h, img_w, _ = img_rgb.shape
img_empty: NDArrayByte = np.full(
(img_h, img_w, 3), fill_value=128, dtype=np.uint8
) # pure white polylines will disappear @ 255
img_empty = render_egoview_with_occlusion_checks(
img_canvas=img_empty,
egoview_renderer=egoview_renderer,
max_range_m=max_range_m,
)
end = time.time()
duration = end - start
logger.info(f"Rendering single image took {duration:.2f} sec.")
frame_rgb = raster_utils.blend_images(img_rgb, img_empty, alpha=0.45)
if dump_single_frames:
io_utils.write_img(save_fpath, frame_rgb, channel_order="RGB")
return frame_rgb
def render_egoview_with_occlusion_checks(
img_canvas: NDArrayByte, egoview_renderer: EgoViewMapRenderer, max_range_m: float, line_width_px: int = 10
) -> NDArrayByte:
"""Render pedestrian crossings and lane segments in the ego-view.
Pedestrian crossings (crosswalks) will be rendered in blue, and lane markings will be colored according to their
marking color, or otherwise red, if markings are implicit.
Args:
img_canvas: array of shape (H,W,3) representing BGR canvas to rasterize map elements onto.
egoview_renderer: rendering engine for map elements in the ego-view.
max_range_m: maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).
line_width_px: thickness (in pixels) to use for rendering each polyline.
Returns:
array of shape (H,W,3) and type uint8 representing a RGB image.
"""
for ls in egoview_renderer.avm.get_scenario_lane_segments():
img_canvas = egoview_renderer.render_lane_boundary_egoview(img_canvas, ls, "right", line_width_px)
img_canvas = egoview_renderer.render_lane_boundary_egoview(img_canvas, ls, "left", line_width_px)
for pc in egoview_renderer.avm.get_scenario_ped_crossings():
EPS = 1e-5
crosswalk_color = BLUE_BGR
# render ped crossings (pc's)
xwalk_polygon = pc.polygon
# prevent duplicate first and last coords
xwalk_polygon[:-1] += EPS
N_INTERP_PTS = 100
# For pixel-perfect rendering, querying crosswalk boundary ground height at waypoints throughout
# the street is much more accurate than 3d linear interpolation using only the 4 annotated corners.
polygon_city_frame = interp_utils.interp_arc(t=N_INTERP_PTS, points=xwalk_polygon[:, :2])
polygon_city_frame = egoview_renderer.avm.append_height_to_2d_city_pt_cloud(points_xy=polygon_city_frame)
egoview_renderer.render_polyline_egoview(
polygon_city_frame,
img_canvas,
crosswalk_color,
thickness_px=line_width_px,
)
# convert BGR to RGB
img_rgb: NDArrayByte = img_canvas[:, :, ::-1]
return img_rgb
def parse_camera_enum_types(cam_names: Tuple[str, ...]) -> List[RingCameras]:
"""Convert a list of CLI string types, to enums of type RingCameras, and validate each input.
Args:
cam_names: Tuple of camera names to use for rendering the map.
Returns:
List of camera enums to use for rendering the map.
Raises:
ValueError: If an invalid camera name is provided.
"""
valid_ring_cams = set([x.value for x in list(RingCameras)])
cam_enums: List[RingCameras] = []
for cam_name in list(cam_names):
if cam_name in valid_ring_cams:
cam_enums.append(RingCameras(cam_name))
else:
raise ValueError("Must provide _valid_ camera names!")
return cam_enums
@click.command(help="Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets.")
@click.option(
"-d",
"--data-root",
required=True,
help="Path to local directory where the Argoverse 2 Sensor Dataset or TbV logs are stored.",
type=click.Path(exists=True),
)
@click.option(
"-o",
"--output-dir",
required=True,
help="Path to local directory where renderings will be saved.",
type=str,
)
@click.option(
"-l",
"--log-id",
default="00a6ffc1-6ce9-3bc3-a060-6006e9893a1a",
help="unique log identifier.",
type=str,
)
@click.option(
"-r",
"--max-range-m",
type=float,
default=100,
help="Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).",
)
@click.option(
"-d",
"--use-depth-map-for_occlusion",
default=True,
help="Whether to use an inferred depth map for rendering occluded elements (defaults to True).",
type=bool,
)
@click.option(
"-s",
"--dump-single-frames",
default=False,
help="Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file"
"(defaults to False). Note: can quickly generate 100s of MBs, for 200 KB frames.",
type=bool,
)
@click.option(
"-c",
"--cam-names",
default=tuple(x.value for x in list(RingCameras)),
help="List of camera viewpoints to render the map from.",
multiple=True,
type=str,
)
def run_generate_egoview_overlaid_map(
data_root: "os.PathLike[str]",
output_dir: "os.PathLike[str]",
log_id: str,
max_range_m: float,
use_depth_map_for_occlusion: bool,
dump_single_frames: bool,
cam_names: Tuple[str, ...],
) -> None:
"""Click entry point for visualizing map entities rendered on top of sensor imagery."""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
data_root = Path(data_root)
output_dir = Path(output_dir)
logger.info(
"data_root: %s, output_dir: %s, log_id: %s, max_range_m: %f, "
"use_depth_map_for_occlusion: %s, dump_single_frames %s",
data_root,
output_dir,
log_id,
max_range_m,
use_depth_map_for_occlusion,
dump_single_frames,
)
generate_egoview_overlaid_map(
data_root=data_root,
output_dir=output_dir,
log_id=log_id,
max_range_m=max_range_m,
use_depth_map_for_occlusion=use_depth_map_for_occlusion,
dump_single_frames=dump_single_frames,
cam_names=parse_camera_enum_types(cam_names),
)
if __name__ == "__main__":
run_generate_egoview_overlaid_map()
|
[
"av2.map.map_api.ArgoverseStaticMap.from_map_dir",
"av2.utils.io.read_img",
"av2.utils.io.write_img",
"av2.geometry.interpolate.interp_arc",
"click.option",
"av2.rendering.video.write_video",
"pathlib.Path",
"click.Path",
"numpy.full",
"click.command",
"av2.rendering.map.EgoViewMapRenderer",
"numpy.stack",
"av2.datasets.sensor.constants.RingCameras",
"logging.basicConfig",
"av2.utils.io.read_lidar_sweep",
"av2.datasets.sensor.av2_sensor_dataloader.AV2SensorDataLoader",
"time.time",
"logging.info",
"av2.utils.raster.blend_images",
"logging.getLogger"
] |
[((950, 977), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (967, 977), False, 'import logging\n'), ((9898, 10022), 'click.command', 'click.command', ([], {'help': '"""Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets."""'}), "(help=\n 'Generate map visualizations on ego-view imagery from the Argoverse 2 Sensor or TbV Datasets.'\n )\n", (9911, 10022), False, 'import click\n'), ((10210, 10338), 'click.option', 'click.option', (['"""-o"""', '"""--output-dir"""'], {'required': '(True)', 'help': '"""Path to local directory where renderings will be saved."""', 'type': 'str'}), "('-o', '--output-dir', required=True, help=\n 'Path to local directory where renderings will be saved.', type=str)\n", (10222, 10338), False, 'import click\n'), ((10358, 10486), 'click.option', 'click.option', (['"""-l"""', '"""--log-id"""'], {'default': '"""00a6ffc1-6ce9-3bc3-a060-6006e9893a1a"""', 'help': '"""unique log identifier."""', 'type': 'str'}), "('-l', '--log-id', default=\n '00a6ffc1-6ce9-3bc3-a060-6006e9893a1a', help='unique log identifier.',\n type=str)\n", (10370, 10486), False, 'import click\n'), ((10502, 10674), 'click.option', 'click.option', (['"""-r"""', '"""--max-range-m"""'], {'type': 'float', 'default': '(100)', 'help': '"""Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm)."""'}), "('-r', '--max-range-m', type=float, default=100, help=\n 'Maximum range of map entities from egovehicle to consider for rendering (by l-infinity norm).'\n )\n", (10514, 10674), False, 'import click\n'), ((10689, 10872), 'click.option', 'click.option', (['"""-d"""', '"""--use-depth-map-for_occlusion"""'], {'default': '(True)', 'help': '"""Whether to use an inferred depth map for rendering occluded elements (defaults to True)."""', 'type': 'bool'}), "('-d', '--use-depth-map-for_occlusion', default=True, help=\n 'Whether to use an inferred depth map for rendering occluded elements (defaults to True).'\n , type=bool)\n", (10701, 10872), False, 'import click\n'), ((10887, 11155), 'click.option', 'click.option', (['"""-s"""', '"""--dump-single-frames"""'], {'default': '(False)', 'help': '"""Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file(defaults to False). Note: can quickly generate 100s of MBs, for 200 KB frames."""', 'type': 'bool'}), "('-s', '--dump-single-frames', default=False, help=\n 'Whether to save to disk individual RGB frames of the rendering, in addition to generating the mp4 file(defaults to False). 
Note: can quickly generate 100s of MBs, for 200 KB frames.'\n , type=bool)\n", (10899, 11155), False, 'import click\n'), ((1966, 2027), 'av2.datasets.sensor.av2_sensor_dataloader.AV2SensorDataLoader', 'AV2SensorDataLoader', ([], {'data_dir': 'data_root', 'labels_dir': 'data_root'}), '(data_dir=data_root, labels_dir=data_root)\n', (1985, 2027), False, 'from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader\n'), ((2088, 2155), 'av2.map.map_api.ArgoverseStaticMap.from_map_dir', 'ArgoverseStaticMap.from_map_dir', (['log_map_dirpath'], {'build_raster': '(True)'}), '(log_map_dirpath, build_raster=True)\n', (2119, 2155), False, 'from av2.map.map_api import ArgoverseStaticMap\n'), ((6123, 6134), 'time.time', 'time.time', ([], {}), '()\n', (6132, 6134), False, 'import time\n'), ((6163, 6191), 'av2.utils.io.read_img', 'io_utils.read_img', (['img_fpath'], {}), '(img_fpath)\n', (6180, 6191), True, 'import av2.utils.io as io_utils\n'), ((6362, 6420), 'numpy.full', 'np.full', (['(img_h, img_w, 3)'], {'fill_value': '(128)', 'dtype': 'np.uint8'}), '((img_h, img_w, 3), fill_value=128, dtype=np.uint8)\n', (6369, 6420), True, 'import numpy as np\n'), ((6657, 6668), 'time.time', 'time.time', ([], {}), '()\n', (6666, 6668), False, 'import time\n'), ((6781, 6838), 'av2.utils.raster.blend_images', 'raster_utils.blend_images', (['img_rgb', 'img_empty'], {'alpha': '(0.45)'}), '(img_rgb, img_empty, alpha=0.45)\n', (6806, 6838), True, 'import av2.utils.raster as raster_utils\n'), ((11731, 11789), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (11750, 11789), False, 'import logging\n'), ((11806, 11821), 'pathlib.Path', 'Path', (['data_root'], {}), '(data_root)\n', (11810, 11821), False, 'from pathlib import Path\n'), ((11839, 11855), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (11843, 11855), False, 'from pathlib import Path\n'), ((4386, 4515), 'av2.rendering.video.write_video', 'video_utils.write_video', ([], {'video': 'video', 'dst': "(video_output_dir / f'{log_id}_{cam_name}.mp4')", 'fps': 'RING_CAMERA_FPS', 'preset': '"""medium"""'}), "(video=video, dst=video_output_dir /\n f'{log_id}_{cam_name}.mp4', fps=RING_CAMERA_FPS, preset='medium')\n", (4409, 4515), True, 'import av2.rendering.video as video_utils\n'), ((6061, 6090), 'av2.utils.io.read_img', 'io_utils.read_img', (['save_fpath'], {}), '(save_fpath)\n', (6078, 6090), True, 'import av2.utils.io as io_utils\n'), ((6875, 6937), 'av2.utils.io.write_img', 'io_utils.write_img', (['save_fpath', 'frame_rgb'], {'channel_order': '"""RGB"""'}), "(save_fpath, frame_rgb, channel_order='RGB')\n", (6893, 6937), True, 'import av2.utils.io as io_utils\n'), ((8685, 8753), 'av2.geometry.interpolate.interp_arc', 'interp_utils.interp_arc', ([], {'t': 'N_INTERP_PTS', 'points': 'xwalk_polygon[:, :2]'}), '(t=N_INTERP_PTS, points=xwalk_polygon[:, :2])\n', (8708, 8753), True, 'import av2.geometry.interpolate as interp_utils\n'), ((10182, 10205), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (10192, 10205), False, 'import click\n'), ((3175, 3232), 'av2.utils.io.read_lidar_sweep', 'io_utils.read_lidar_sweep', (['lidar_fpath'], {'attrib_spec': '"""xyz"""'}), "(lidar_fpath, attrib_spec='xyz')\n", (3200, 3232), True, 'import av2.utils.io as io_utils\n'), ((3730, 3834), 'av2.rendering.map.EgoViewMapRenderer', 'EgoViewMapRenderer', ([], {'depth_map': 'depth_map', 'city_SE3_ego': 'city_SE3_ego', 'pinhole_cam': 'pinhole_cam', 
'avm': 'avm'}), '(depth_map=depth_map, city_SE3_ego=city_SE3_ego,\n pinhole_cam=pinhole_cam, avm=avm)\n', (3748, 3834), False, 'from av2.rendering.map import EgoViewMapRenderer\n'), ((2548, 2626), 'logging.info', 'logging.info', (['f"""\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}"""'], {}), "(f'\\tOn file {i}/{num_cam_imgs} of camera {cam_name} of {log_id}')\n", (2560, 2626), False, 'import logging\n'), ((4291, 4311), 'numpy.stack', 'np.stack', (['video_list'], {}), '(video_list)\n', (4299, 4311), True, 'import numpy as np\n'), ((9770, 9791), 'av2.datasets.sensor.constants.RingCameras', 'RingCameras', (['cam_name'], {}), '(cam_name)\n', (9781, 9791), False, 'from av2.datasets.sensor.constants import RingCameras\n')]
|
from gpiozero import LEDBoard
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26, pwm=True)
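# pwm=True enables fractional brightness; values ramp from 20% to 100% across the five LEDs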
leds.value = (0.2, 0.4, 0.6, 0.8, 1.0)
pause()
|
[
"signal.pause",
"gpiozero.LEDBoard"
] |
[((63, 99), 'gpiozero.LEDBoard', 'LEDBoard', (['(5)', '(6)', '(13)', '(19)', '(26)'], {'pwm': '(True)'}), '(5, 6, 13, 19, 26, pwm=True)\n', (71, 99), False, 'from gpiozero import LEDBoard\n'), ((141, 148), 'signal.pause', 'pause', ([], {}), '()\n', (146, 148), False, 'from signal import pause\n')]
|
#!/usr/bin/env python3
from numba import njit, typeof, typed, types
import rasterio
import numpy as np
import argparse
import os
from osgeo import ogr, gdal
def rel_dem(dem_fileName, pixel_watersheds_fileName, rem_fileName, thalweg_raster):
"""
Calculates REM/HAND/Detrended DEM
Parameters
----------
dem_fileName : str
File name of pit filled DEM raster.
pixel_watersheds_fileName : str
File name of stream pixel watersheds raster.
rem_fileName : str
File name of output relative elevation raster.
"""
# ------------------------------------------- Get catchment_min_dict --------------------------------------------------- #
# The following creates a dictionary of the catchment ids (key) and their elevation along the thalweg (value).
@njit
def make_catchment_min_dict(flat_dem, catchment_min_dict, flat_catchments, thalweg_window):
for i,cm in enumerate(flat_catchments):
if thalweg_window[i] == 1: # Only allow reference elevation to be within thalweg.
# If the catchment really exists in the dictionary, compare elevation values.
if (cm in catchment_min_dict):
if (flat_dem[i] < catchment_min_dict[cm]):
# If the flat_dem's elevation value is less than the catchment_min_dict min, update the catchment_min_dict min.
catchment_min_dict[cm] = flat_dem[i]
else:
catchment_min_dict[cm] = flat_dem[i]
return(catchment_min_dict)
# Open the masked gw_catchments_pixels_masked and dem_thalwegCond_masked.
gw_catchments_pixels_masked_object = rasterio.open(pixel_watersheds_fileName)
dem_thalwegCond_masked_object = rasterio.open(dem_fileName)
thalweg_raster_object = rasterio.open(thalweg_raster)
# Specify raster object metadata.
meta = dem_thalwegCond_masked_object.meta.copy()
meta['tiled'], meta['compress'] = True, 'lzw'
# -- Create catchment_min_dict -- #
catchment_min_dict = typed.Dict.empty(types.int32,types.float32) # Initialize an empty dictionary to store the catchment minimums.
# Update catchment_min_dict with pixel sheds minimum.
for ji, window in dem_thalwegCond_masked_object.block_windows(1): # Iterate over windows, using dem_rasterio_object as template.
dem_window = dem_thalwegCond_masked_object.read(1,window=window).ravel() # Define dem_window.
catchments_window = gw_catchments_pixels_masked_object.read(1,window=window).ravel() # Define catchments_window.
thalweg_window = thalweg_raster_object.read(1, window=window).ravel() # Define cost_window.
# Call numba-optimized function to update catchment_min_dict with pixel sheds minimum.
catchment_min_dict = make_catchment_min_dict(dem_window, catchment_min_dict, catchments_window, thalweg_window)
dem_thalwegCond_masked_object.close()
gw_catchments_pixels_masked_object.close()
thalweg_raster_object.close()
# ------------------------------------------------------------------------------------------------------------------------ #
# ------------------------------------------- Produce relative elevation model ------------------------------------------- #
@njit
def calculate_rem(flat_dem,catchmentMinDict,flat_catchments,ndv):
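        # Subtract each pixel catchment's thalweg minimum from the DEM, propagating nodata values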
rem_window = np.zeros(len(flat_dem),dtype=np.float32)
for i,cm in enumerate(flat_catchments):
if cm in catchmentMinDict:
if catchmentMinDict[cm] == ndv:
rem_window[i] = ndv
else:
rem_window[i] = flat_dem[i] - catchmentMinDict[cm]
return(rem_window)
rem_rasterio_object = rasterio.open(rem_fileName,'w',**meta) # Open rem_rasterio_object for writing to rem_fileName.
pixel_catchments_rasterio_object = rasterio.open(pixel_watersheds_fileName) # Open pixel_catchments_rasterio_object
dem_rasterio_object = rasterio.open(dem_fileName)
for ji, window in dem_rasterio_object.block_windows(1):
dem_window = dem_rasterio_object.read(1,window=window)
window_shape = dem_window.shape
dem_window = dem_window.ravel()
catchments_window = pixel_catchments_rasterio_object.read(1,window=window).ravel()
rem_window = calculate_rem(dem_window, catchment_min_dict, catchments_window, meta['nodata'])
rem_window = rem_window.reshape(window_shape).astype(np.float32)
rem_rasterio_object.write(rem_window, window=window, indexes=1)
dem_rasterio_object.close()
pixel_catchments_rasterio_object.close()
rem_rasterio_object.close()
# ------------------------------------------------------------------------------------------------------------------------ #
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(description='Relative elevation from pixel based watersheds')
parser.add_argument('-d','--dem', help='DEM to use within project path', required=True)
parser.add_argument('-w','--watersheds',help='Pixel based watersheds raster to use within project path',required=True)
parser.add_argument('-t','--thalweg-raster',help='A binary raster representing the thalweg. 1 for thalweg, 0 for non-thalweg.',required=True)
parser.add_argument('-o','--rem',help='Output REM raster',required=True)
# extract to dictionary
args = vars(parser.parse_args())
# rename variable inputs
dem_fileName = args['dem']
pixel_watersheds_fileName = args['watersheds']
rem_fileName = args['rem']
thalweg_raster = args['thalweg_raster']
rel_dem(dem_fileName, pixel_watersheds_fileName, rem_fileName, thalweg_raster)
|
[
"numba.typed.Dict.empty",
"rasterio.open",
"argparse.ArgumentParser"
] |
[((1807, 1847), 'rasterio.open', 'rasterio.open', (['pixel_watersheds_fileName'], {}), '(pixel_watersheds_fileName)\n', (1820, 1847), False, 'import rasterio\n'), ((1884, 1911), 'rasterio.open', 'rasterio.open', (['dem_fileName'], {}), '(dem_fileName)\n', (1897, 1911), False, 'import rasterio\n'), ((1940, 1969), 'rasterio.open', 'rasterio.open', (['thalweg_raster'], {}), '(thalweg_raster)\n', (1953, 1969), False, 'import rasterio\n'), ((2186, 2230), 'numba.typed.Dict.empty', 'typed.Dict.empty', (['types.int32', 'types.float32'], {}), '(types.int32, types.float32)\n', (2202, 2230), False, 'from numba import njit, typeof, typed, types\n'), ((3897, 3937), 'rasterio.open', 'rasterio.open', (['rem_fileName', '"""w"""'], {}), "(rem_fileName, 'w', **meta)\n", (3910, 3937), False, 'import rasterio\n'), ((4032, 4072), 'rasterio.open', 'rasterio.open', (['pixel_watersheds_fileName'], {}), '(pixel_watersheds_fileName)\n', (4045, 4072), False, 'import rasterio\n'), ((4140, 4167), 'rasterio.open', 'rasterio.open', (['dem_fileName'], {}), '(dem_fileName)\n', (4153, 4167), False, 'import rasterio\n'), ((5049, 5139), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Relative elevation from pixel based watersheds"""'}), "(description=\n 'Relative elevation from pixel based watersheds')\n", (5072, 5139), False, 'import argparse\n')]
|
from sqlalchemy import create_engine
import os
FLASK_DB_URI = os.environ.get("FLASK_DB_URI")
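# expected to hold a SQLAlchemy URL, e.g. "postgresql://user:password@host/dbname" (illustrative value)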
# Create database connection
engine = create_engine(FLASK_DB_URI)
|
[
"os.environ.get",
"sqlalchemy.create_engine"
] |
[((66, 96), 'os.environ.get', 'os.environ.get', (['"""FLASK_DB_URI"""'], {}), "('FLASK_DB_URI')\n", (80, 96), False, 'import os\n'), ((139, 166), 'sqlalchemy.create_engine', 'create_engine', (['FLASK_DB_URI'], {}), '(FLASK_DB_URI)\n', (152, 166), False, 'from sqlalchemy import create_engine\n')]
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import RedirectView
from profiles.views import SignupView
from . import views
urlpatterns = [
url(r'^$', views.HomePage.as_view(), name='home'),
url(r'^about/$', views.AboutPage.as_view(), name='about'),
url(r'^users/', include('profiles.urls', namespace='profiles')),
url(r'^admin/', include(admin.site.urls)),
url(r"^account/signup/$", SignupView.as_view(), name="account_signup"),
# redirect unneeded/unused social accounts page to settings page
url(r"account/social/accounts/", RedirectView.as_view(url='/account/settings/')),
url(r"^account/", include("account.urls")),
]
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
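# Note: static() returns an empty pattern list when DEBUG is False, so this is a no-op in production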
# Include django debug toolbar if DEBUG is on
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
[
"django.views.generic.RedirectView.as_view",
"profiles.views.SignupView.as_view",
"django.conf.urls.static.static",
"django.conf.urls.include"
] |
[((878, 939), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (884, 939), False, 'from django.conf.urls.static import static\n'), ((412, 458), 'django.conf.urls.include', 'include', (['"""profiles.urls"""'], {'namespace': '"""profiles"""'}), "('profiles.urls', namespace='profiles')\n", (419, 458), False, 'from django.conf.urls import include, url\n'), ((481, 505), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (488, 505), False, 'from django.conf.urls import include, url\n'), ((538, 558), 'profiles.views.SignupView.as_view', 'SignupView.as_view', ([], {}), '()\n', (556, 558), False, 'from profiles.views import SignupView\n'), ((690, 736), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/account/settings/"""'}), "(url='/account/settings/')\n", (710, 736), False, 'from django.views.generic import RedirectView\n'), ((761, 784), 'django.conf.urls.include', 'include', (['"""account.urls"""'], {}), "('account.urls')\n", (768, 784), False, 'from django.conf.urls import include, url\n'), ((1080, 1107), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (1087, 1107), False, 'from django.conf.urls import include, url\n')]
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse, Http404,HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from .models import Profile,OrderItem, Order, Transaction,Product, Category, Comment, Rate,Delivery
from django.contrib.auth import login, authenticate
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth.models import User
from django.urls import reverse
from .forms import SignUpForm, UpdateUserProfileForm,CommentForm,RateForm,DeliveryForm
from .decorators import admin_only,allowed_users
from django.contrib import messages
import datetime
# import stripe
# Create your views here.
# @login_required(login_url='login')
def home(request):
object_list = Product.objects.all()
categorys = Category.get_category()
return render(request, 'home.html',{'object_list':object_list,'categorys':categorys})
def search_product(request):
categorys = Category.get_category()
if 'searchproject' in request.GET and request.GET["searchproject"]:
search_term = request.GET.get("searchproject")
searched_project = Product.search_by_name(search_term)
message = f"{search_term}"
context = {'object_list':searched_project,'message': message,'categorys':categorys}
return render(request, "search.html",context)
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
@login_required(login_url='login')
def search_products(request):
categorys = Category.get_category()
filtered_orders = Order.objects.filter(owner=request.user.profile, is_ordered=False)
current_order_products = []
if filtered_orders.exists():
user_order = filtered_orders[0]
user_order_items = user_order.items.all()
current_order_products = [product.product for product in user_order_items]
if 'searchproduct' in request.GET and request.GET["searchproduct"]:
search_term = request.GET.get("searchproduct")
searched_project = Product.search_by_name(search_term)
message = f"{search_term}"
context = {'object_list':searched_project,'message': message,'categorys':categorys,'current_order_products': current_order_products,}
return render(request, "searching.html",context)
else:
message = "You haven't searched for any term"
return render(request, 'searching.html',{"message":message})
def product_category(request, category):
object_list = Product.filter_by_category(category)
categorys = Category.get_category()
context = {'object_list':object_list,'categorys': categorys}
return render(request,'category/notlogged.html',context)
# @login_required(login_url='login')
def comment(request, pk):
image = get_object_or_404(Product, pk=pk)
    product = image  # same object fetched above; avoids a second query
    rates = Rate.objects.filter(product=image).order_by('-date')  # ratings for this product only
current_user = request.user
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.product = image
comment.user = request.user.profile
comment.save()
return HttpResponseRedirect(request.path_info)
else:
form = CommentForm()
if request.method == 'POST':
form_rate = RateForm(request.POST)
if form_rate.is_valid():
test = form_rate.cleaned_data['test']
price = form_rate.cleaned_data['price']
durability = form_rate.cleaned_data['durability']
rate = Rate()
rate.product = image
rate.user = current_user
rate.test = test
rate.price = price
rate.durability = durability
rate.average = (rate.test + rate.price + rate.durability)/3
rate.save()
return HttpResponseRedirect(request.path_info)
else:
form_rate = RateForm()
context = {
'image': image,
'form': form,
'form_rate':form_rate,
'rates':rates,
'product':product,
}
return render(request, 'product.html', context)
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
group = Group.objects.get(name = 'customer')
user.groups.add(group)
messages.info(request, "Your account has been Created successfully.")
return redirect("/login")
else:
form = SignUpForm()
return render(request, 'register/register.html', {'form': form})
@login_required(login_url='login')
def profile(request, username):
my_user_profile = Profile.objects.filter(user=request.user).first()
my_orders = Order.objects.filter(is_ordered=True, owner=my_user_profile)
if request.method == 'POST':
prof_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)
if prof_form.is_valid():
prof_form.save()
return redirect(request.path_info)
else:
prof_form = UpdateUserProfileForm(instance=request.user.profile)
context = {
'prof_form': prof_form,
'my_orders':my_orders,
}
return render(request, 'profile.html', context)
@login_required(login_url='login')
def product_list(request):
object_list = Product.objects.all()
categorys = Category.get_category()
filtered_orders = Order.objects.filter(owner=request.user.profile, is_ordered=False)
current_order_products = []
if filtered_orders.exists():
user_order = filtered_orders[0]
user_order_items = user_order.items.all()
current_order_products = [product.product for product in user_order_items]
context = {
'object_list': object_list,
'current_order_products': current_order_products,
'categorys':categorys
}
return render(request, "products/product_list.html", context)
@login_required(login_url='login')
def products_category(request, category):
object_list = Product.filter_by_category(category)
categorys = Category.get_category()
filtered_orders = Order.objects.filter(owner=request.user.profile, is_ordered=False)
current_order_products = []
if filtered_orders.exists():
user_order = filtered_orders[0]
user_order_items = user_order.items.all()
current_order_products = [product.product for product in user_order_items]
context = {'object_list':object_list,'categorys': categorys,'current_order_products':current_order_products}
return render(request,'category/logedin.html',context)
def get_user_pending_order(request):
# get order for the correct user
user_profile = get_object_or_404(Profile, user=request.user)
order = Order.objects.filter(owner=user_profile, is_ordered=False)
if order.exists():
# get the only order in the list of filtered orders
return order[0]
return 0
@login_required(login_url='login')
def add_to_cart(request, **kwargs):
# get the user profile
user_profile = get_object_or_404(Profile, user=request.user)
# filter products by id
product = Product.objects.filter(id=kwargs.get('item_id', "")).first()
# check if the user already owns this product
# if product in request.user.profile.ebooks.all():
# messages.info(request, 'You already own this ebook')
# return redirect(reverse('product_list'))
# create orderItem of the selected product
    order_item, created = OrderItem.objects.get_or_create(product=product)
    # create order associated with the user
    user_order, created = Order.objects.get_or_create(owner=user_profile, is_ordered=False)
    user_order.items.add(order_item)
    if created:  # renamed from `status`, which shadowed the rest_framework import
# generate a reference code
user_order.ref_code = 221
user_order.save()
# show confirmation message and redirect back to the same page
messages.info(request, "item added to cart")
return redirect(reverse('product_list'))
@login_required(login_url='login')
def delete_from_cart(request, item_id):
item_to_delete = OrderItem.objects.filter(pk=item_id)
if item_to_delete.exists():
item_to_delete[0].delete()
messages.info(request, "Item has been deleted")
return redirect(reverse('order_summary'))
@login_required(login_url='login')
def order_details(request, **kwargs):
existing_order = get_user_pending_order(request)
context = {
'order': existing_order
}
return render(request, 'shopping_cart/order_summary.html', context)
@login_required(login_url='login')
def checkout(request, **kwargs):
client_token = 222
current_user = request.user
existing_order = get_user_pending_order(request)
publishKey = 111
if request.method == 'POST':
form = DeliveryForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.user = current_user
comment.save()
clear_from_cart(request)
return redirect('product_list')
else:
form = DeliveryForm()
context = {
'order': existing_order,
'client_token': client_token,
'form':form,
}
return render(request, 'shopping_cart/checkout.html', context)
@login_required(login_url='login')
def clear_from_cart(request):
current_user = request.user
    order = get_object_or_404(Order, owner=current_user.profile)  # Order.owner is a Profile, not a User id
    order.delete()
messages.info(request, "Thanks for shopping with us")
return redirect('product_list')
def admin_page(request):
return render(request,'admin_page.html')
def about(request):
return render(request,'about.html')
|
[
"django.contrib.auth.decorators.login_required",
"django.shortcuts.redirect",
"django.urls.reverse",
"django.shortcuts.get_object_or_404",
"django.contrib.messages.info",
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"django.contrib.auth.models.Group.objects.get"
] |
[((5471, 5504), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login"""'}), "(login_url='login')\n", (5485, 5504), False, 'from django.contrib.auth.decorators import login_required\n'), ((7111, 7127), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (7125, 7127), False, 'from django.contrib.auth.decorators import login_required\n'), ((8148, 8181), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login"""'}), "(login_url='login')\n", (8162, 8181), False, 'from django.contrib.auth.decorators import login_required\n'), ((8452, 8485), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login"""'}), "(login_url='login')\n", (8466, 8485), False, 'from django.contrib.auth.decorators import login_required\n'), ((8706, 8739), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login"""'}), "(login_url='login')\n", (8720, 8739), False, 'from django.contrib.auth.decorators import login_required\n'), ((9443, 9476), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login"""'}), "(login_url='login')\n", (9457, 9476), False, 'from django.contrib.auth.decorators import login_required\n'), ((1015, 1101), 'django.shortcuts.render', 'render', (['request', '"""home.html"""', "{'object_list': object_list, 'categorys': categorys}"], {}), "(request, 'home.html', {'object_list': object_list, 'categorys':\n categorys})\n", (1021, 1101), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2832, 2883), 'django.shortcuts.render', 'render', (['request', '"""category/notlogged.html"""', 'context'], {}), "(request, 'category/notlogged.html', context)\n", (2838, 2883), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2958, 2991), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Product'], {'pk': 'pk'}), '(Product, pk=pk)\n', (2975, 2991), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((4300, 4340), 'django.shortcuts.render', 'render', (['request', '"""product.html"""', 'context'], {}), "(request, 'product.html', context)\n", (4306, 4340), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((4762, 4819), 'django.shortcuts.render', 'render', (['request', '"""register/register.html"""', "{'form': form}"], {}), "(request, 'register/register.html', {'form': form})\n", (4768, 4819), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((5428, 5468), 'django.shortcuts.render', 'render', (['request', '"""profile.html"""', 'context'], {}), "(request, 'profile.html', context)\n", (5434, 5468), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((6089, 6143), 'django.shortcuts.render', 'render', (['request', '"""products/product_list.html"""', 'context'], {}), "(request, 'products/product_list.html', context)\n", (6095, 6143), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((6724, 6773), 'django.shortcuts.render', 'render', (['request', '"""category/logedin.html"""', 'context'], {}), "(request, 'category/logedin.html', context)\n", (6730, 6773), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((6871, 6916), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Profile'], {'user': 'request.user'}), '(Profile, user=request.user)\n', (6888, 6916), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((7210, 7255), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Profile'], {'user': 'request.user'}), '(Profile, user=request.user)\n', (7227, 7255), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((8055, 8099), 'django.contrib.messages.info', 'messages.info', (['request', '"""item added to cart"""'], {}), "(request, 'item added to cart')\n", (8068, 8099), False, 'from django.contrib import messages\n'), ((8642, 8702), 'django.shortcuts.render', 'render', (['request', '"""shopping_cart/order_summary.html"""', 'context'], {}), "(request, 'shopping_cart/order_summary.html', context)\n", (8648, 8702), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((9378, 9433), 'django.shortcuts.render', 'render', (['request', '"""shopping_cart/checkout.html"""', 'context'], {}), "(request, 'shopping_cart/checkout.html', context)\n", (9384, 9433), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((9549, 9596), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Order'], {'owner': 'current_user.id'}), '(Order, owner=current_user.id)\n', (9566, 9596), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((9618, 9671), 'django.contrib.messages.info', 'messages.info', (['request', '"""Thanks for shopping with us"""'], {}), "(request, 'Thanks for shopping with us')\n", (9631, 9671), False, 'from django.contrib import messages\n'), ((9683, 9707), 'django.shortcuts.redirect', 'redirect', (['"""product_list"""'], {}), "('product_list')\n", (9691, 9707), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((9750, 9784), 'django.shortcuts.render', 'render', (['request', '"""admin_page.html"""'], {}), "(request, 'admin_page.html')\n", (9756, 9784), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((9816, 9845), 'django.shortcuts.render', 'render', (['request', '"""about.html"""'], {}), "(request, 'about.html')\n", (9822, 9845), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((1497, 1536), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', 'context'], {}), "(request, 'search.html', context)\n", (1503, 1536), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((1611, 1663), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', "{'message': message}"], {}), "(request, 'search.html', {'message': message})\n", (1617, 1663), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2441, 2483), 'django.shortcuts.render', 'render', (['request', '"""searching.html"""', 'context'], {}), "(request, 'searching.html', context)\n", (2447, 2483), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2558, 2613), 'django.shortcuts.render', 'render', (['request', '"""searching.html"""', "{'message': message}"], {}), "(request, 'searching.html', {'message': message})\n", (2564, 2613), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((8120, 8143), 'django.urls.reverse', 'reverse', (['"""product_list"""'], {}), "('product_list')\n", (8127, 8143), False, 'from django.urls import reverse\n'), ((8355, 8402), 'django.contrib.messages.info', 'messages.info', (['request', '"""Item has been deleted"""'], {}), "(request, 'Item has been deleted')\n", (8368, 8402), False, 'from django.contrib import messages\n'), ((8423, 8447), 'django.urls.reverse', 'reverse', (['"""order_summary"""'], {}), "('order_summary')\n", (8430, 8447), False, 'from django.urls import reverse\n'), ((3389, 3428), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['request.path_info'], {}), '(request.path_info)\n', (3409, 3428), False, 'from django.http import HttpResponse, Http404, HttpResponseRedirect\n'), ((4054, 4093), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['request.path_info'], {}), '(request.path_info)\n', (4074, 4093), False, 'from django.http import HttpResponse, Http404, HttpResponseRedirect\n'), ((4521, 4555), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': '"""customer"""'}), "(name='customer')\n", (4538, 4555), False, 'from django.contrib.auth.models import Group\n'), ((4605, 4674), 'django.contrib.messages.info', 'messages.info', (['request', '"""Your account has been Created successfully."""'], {}), "(request, 'Your account has been Created successfully.')\n", (4618, 4674), False, 'from django.contrib import messages\n'), ((4694, 4712), 'django.shortcuts.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (4702, 4712), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((5219, 5246), 'django.shortcuts.redirect', 'redirect', (['request.path_info'], {}), '(request.path_info)\n', (5227, 5246), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((9174, 9198), 'django.shortcuts.redirect', 'redirect', (['"""product_list"""'], {}), "('product_list')\n", (9182, 9198), False, 'from django.shortcuts import render, redirect, get_object_or_404\n')]
|