code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import tqdm
import torch
from lav.lav_privileged import LAV
from lav.utils.datasets import get_data_loader
from lav.utils.logger import Logger
def main(args):
    """Train the BEV (privileged) model, logging periodically and saving checkpoints.

    :param args: parsed CLI namespace (seed, num_epoch, num_per_log,
                 num_per_save, plus model/dataloader options).
    """
    # BUG FIX: seed *before* building the model and data loader so that
    # weight initialization and shuffling are reproducible.  Previously the
    # seed was set after LAV(args)/get_data_loader(), making it ineffective
    # for those components.  (Still not fully deterministic on CUDA/CuDNN.)
    torch.manual_seed(args.seed)

    dmd = LAV(args)
    data_loader = get_data_loader('bev', args)
    logger = Logger('lav_bev', args)
    save_dir = logger.save_dir
    # logger.watch_model(dmd.uniplanner)

    global_it = 0
    for epoch in range(args.num_epoch):
        for data in tqdm.tqdm(data_loader, desc=f'Epoch {epoch}'):
            opt_info = dmd.train_bev(*data)
            # Log every `num_per_log` iterations (including iteration 0).
            if global_it % args.num_per_log == 0:
                logger.log_bev_info(global_it, opt_info)
            global_it += 1

        dmd.bev_scheduler.step()

        # Checkpoint every `num_per_save` epochs.
        if (epoch+1) % args.num_per_save == 0:
            bev_path = f'{save_dir}/bev_{epoch+1}.th'
            torch.save(dmd.state_dict('bev'), bev_path)
            print(f'save to {bev_path}')
            logger.save([bev_path])
if __name__ == '__main__':
    import argparse

    def _build_parser():
        """Assemble the command-line interface for BEV training."""
        p = argparse.ArgumentParser()
        p.add_argument('--config-path', default='config.yaml')
        p.add_argument('--device', default='cuda', choices=['cuda', 'cpu'])
        # Training misc
        p.add_argument('--num-epoch', type=int, default=160)
        p.add_argument('--num-per-log', type=int, default=100, help='log per iter')
        p.add_argument('--num-per-save', type=int, default=10, help='save per epoch')
        p.add_argument('--batch-size', type=int, default=512)
        p.add_argument('--lr', type=float, default=3e-4)
        p.add_argument('--num-workers', type=int, default=16)
        # Reproducibility (still not fully deterministic due to CUDA/CuDNN)
        p.add_argument('--seed', type=int, default=2021)
        return p

    main(_build_parser().parse_args())
| [
"torch.manual_seed",
"argparse.ArgumentParser",
"tqdm.tqdm",
"lav.utils.logger.Logger",
"lav.lav_privileged.LAV",
"lav.utils.datasets.get_data_loader"
] | [((171, 180), 'lav.lav_privileged.LAV', 'LAV', (['args'], {}), '(args)\n', (174, 180), False, 'from lav.lav_privileged import LAV\n'), ((199, 227), 'lav.utils.datasets.get_data_loader', 'get_data_loader', (['"""bev"""', 'args'], {}), "('bev', args)\n", (214, 227), False, 'from lav.utils.datasets import get_data_loader\n'), ((241, 264), 'lav.utils.logger.Logger', 'Logger', (['"""lav_bev"""', 'args'], {}), "('lav_bev', args)\n", (247, 264), False, 'from lav.utils.logger import Logger\n'), ((301, 329), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (318, 329), False, 'import torch\n'), ((1012, 1037), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1035, 1037), False, 'import argparse\n'), ((451, 496), 'tqdm.tqdm', 'tqdm.tqdm', (['data_loader'], {'desc': 'f"""Epoch {epoch}"""'}), "(data_loader, desc=f'Epoch {epoch}')\n", (460, 496), False, 'import tqdm\n')] |
#!/usr/bin/env python3
# async_requests.py
"""Asynchronously get links embedded in multiple pages' HTML."""
import asyncio
import logging
import re
import sys
# from typing import IO # Use pathlib instead
import urllib.error
import urllib.parse
import aiofiles
import aiohttp
from aiohttp import ClientSession
import pathlib # To check path where script was launched
"""Init Logging"""
logging.basicConfig(
format="%(asctime)s %(levelname)s:%(name)s: %(message)s",
level=logging.DEBUG,
datefmt="%H:%M:%S",
stream=sys.stderr,
) # Basic configuration for logger
logger = logging.getLogger("async_requests") # Create new logger
logging.getLogger("chardet.charsetprober").disabled = True # ?
HREF_RE = re.compile(r'href="(.*?)"') # A regular expression to extract what we’re searching for: href tags within HTML
async def fetch_html(url: str, session: ClientSession, **kwargs) -> str:
    """
    GET `url` within `session` and return the decoded HTML body.

    Extra keyword arguments are forwarded to `session.request()`.
    No exception handling happens here: request failures, read failures
    and non-2xx statuses (via raise_for_status) all propagate to the caller.
    """
    response = await session.request(method="GET", url=url, **kwargs)
    response.raise_for_status()
    logger.info("Got response [%s] for URL: %s", response.status, url)
    return await response.text()
async def parse(url: str, session: ClientSession, **kwargs) -> set:
    """Find HREFs in the HTML of `url`."""
    found = set()  # Absolute URLs discovered on the page
    try:
        html = await fetch_html(url=url, session=session, **kwargs)
    except aiohttp.ClientError as e:
        # Known aiohttp failure mode: log details and return nothing.
        logger.error(
            "aiohttp exception for %s [%s]: %s",
            url,
            getattr(e, "status", None),
            getattr(e, "message", None),
        )
        return found
    except Exception as e:
        # May be raised from other libraries, such as chardet or yarl;
        # logger.exception records the full traceback.
        logger.exception(
            "Non-aiohttp exception occurred: %s", getattr(e, "__dict__", {})
        )
        return found

    # Success path: resolve every extracted href against the page URL.
    # This is blocking work, but fast relative to the network round trip.
    for raw_link in HREF_RE.findall(html):
        try:
            absolute = urllib.parse.urljoin(url, raw_link)
        except (urllib.error.URLError, ValueError):
            logger.exception("Error parsing URL: %s", raw_link)
        else:
            found.add(absolute)
    logger.info("Found %d links for %s", len(found), url)
    return found
async def write_one(file: pathlib.Path, url: str, **kwargs) -> None:
    """
    Parse one URL and append each (source, href) pair to `file`.

    Waits on parse() for the set of discovered links, then writes them
    asynchronously via aiofiles, one tab-separated pair per line.

    :param file: destination file for the results.
    :param url: page to extract HREFs from.
    :param kwargs: forwarded to parse() (and on to the session request).
    :return: None
    """
    links = await parse(url=url, **kwargs)
    if not links:  # Nothing found (or the request failed) -> nothing to write
        return None
    async with aiofiles.open(file, "a") as out:
        for link in links:
            await out.write(f"{url}\t{link}\n")
        logger.info("Wrote results for source URL: %s", url)
async def bulk_crawl_and_write(file: pathlib.Path, urls_set: set, **kwargs) -> None:
    """
    Main entry point of the coroutine chain.

    Opens one shared ClientSession and schedules a write_one() task per
    URL; all tasks run concurrently under asyncio.gather.

    Note: the default ClientSession adapter allows at most 100 open
    connections; pass a TCPConnector to ClientSession to change that
    (per-host limits are also available).

    :param file: file receiving the parsed HREFs.
    :param urls_set: URLs to crawl and parse.
    :param kwargs: forwarded down the coroutine chain.
    :return: None
    """
    async with ClientSession() as session:
        # One task per URL, all sharing the single session.
        tasks = [
            write_one(file=file, url=url, session=session, **kwargs)
            for url in urls_set
        ]
        try:
            await asyncio.gather(*tasks, return_exceptions=True)
        except RuntimeError as e:
            print(e)
if __name__ == "__main__":
import sys # To perform versions compatibility
assert sys.version_info >= (3, 7), "Script requires Python 3.7+."
here = pathlib.Path(__file__).parent
with open(here.joinpath("urls.txt")) as infile:
urls = set(map(str.strip, infile)) # Get URLs from file
out_path = here.joinpath("found_urls.txt") # Initialize the file where the results will be written
with open(out_path, "w") as outfile:
outfile.write("source_url\tparsed_url\n")
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # To deal with RuntimeErrors on windows
asyncio.run(bulk_crawl_and_write(file=out_path, urls_set=urls)) # Async run main job
| [
"logging.basicConfig",
"aiohttp.ClientSession",
"logging.getLogger",
"asyncio.WindowsSelectorEventLoopPolicy",
"re.compile",
"pathlib.Path",
"aiofiles.open",
"asyncio.gather"
] | [((393, 539), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s:%(name)s: %(message)s"""', 'level': 'logging.DEBUG', 'datefmt': '"""%H:%M:%S"""', 'stream': 'sys.stderr'}), "(format=\n '%(asctime)s %(levelname)s:%(name)s: %(message)s', level=logging.DEBUG,\n datefmt='%H:%M:%S', stream=sys.stderr)\n", (412, 539), False, 'import logging\n'), ((593, 628), 'logging.getLogger', 'logging.getLogger', (['"""async_requests"""'], {}), "('async_requests')\n", (610, 628), False, 'import logging\n'), ((725, 751), 're.compile', 're.compile', (['"""href="(.*?)\\""""'], {}), '(\'href="(.*?)"\')\n', (735, 751), False, 'import re\n'), ((650, 692), 'logging.getLogger', 'logging.getLogger', (['"""chardet.charsetprober"""'], {}), "('chardet.charsetprober')\n", (667, 692), False, 'import logging\n'), ((3889, 3913), 'aiofiles.open', 'aiofiles.open', (['file', '"""a"""'], {}), "(file, 'a')\n", (3902, 3913), False, 'import aiofiles\n'), ((4907, 4922), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (4920, 4922), False, 'from aiohttp import ClientSession\n'), ((5580, 5602), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (5592, 5602), False, 'import pathlib\n'), ((5959, 5999), 'asyncio.WindowsSelectorEventLoopPolicy', 'asyncio.WindowsSelectorEventLoopPolicy', ([], {}), '()\n', (5997, 5999), False, 'import asyncio\n'), ((5295, 5341), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {'return_exceptions': '(True)'}), '(*tasks, return_exceptions=True)\n', (5309, 5341), False, 'import asyncio\n')] |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from qiniuyun.backend import QiniuPush
from qiniuyun.models import ImageAtQiniu
from .indexView import ImgList
from DJangoHotel.models import RoomInfo
def roomInfo(request):
    """Render the room-information page with all rooms and the hotel logo image."""
    rooms = RoomInfo.objects.all()
    image_objects = ImageAtQiniu.objects.all()
    # Signed (private) download URL for every stored image.
    urls = [QiniuPush.private_download_url(obj.fullname) for obj in image_objects]
    imgs = ImgList()
    for url in urls:
        if 'hotel-logo' in url:
            imgs.logo = url  # last matching URL wins, as before
    return render(request,'roominfo.html',{'roomInfoList':rooms,'img':imgs})
| [
"django.shortcuts.render",
"qiniuyun.models.ImageAtQiniu.objects.all",
"qiniuyun.backend.QiniuPush.private_download_url",
"DJangoHotel.models.RoomInfo.objects.all"
] | [((246, 268), 'DJangoHotel.models.RoomInfo.objects.all', 'RoomInfo.objects.all', ([], {}), '()\n', (266, 268), False, 'from DJangoHotel.models import RoomInfo\n'), ((281, 307), 'qiniuyun.models.ImageAtQiniu.objects.all', 'ImageAtQiniu.objects.all', ([], {}), '()\n', (305, 307), False, 'from qiniuyun.models import ImageAtQiniu\n'), ((518, 588), 'django.shortcuts.render', 'render', (['request', '"""roominfo.html"""', "{'roomInfoList': rooms, 'img': imgs}"], {}), "(request, 'roominfo.html', {'roomInfoList': rooms, 'img': imgs})\n", (524, 588), False, 'from django.shortcuts import render\n'), ((321, 363), 'qiniuyun.backend.QiniuPush.private_download_url', 'QiniuPush.private_download_url', (['i.fullname'], {}), '(i.fullname)\n', (351, 363), False, 'from qiniuyun.backend import QiniuPush\n')] |
import pyb
import sensor
import image
import time
# Status LED: lit while a face is detected, off otherwise
led = pyb.LED(3)
# Configure camera: grayscale QVGA with raised contrast/gain ceiling
# (these sensor.* calls must run before capturing frames)
sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
# Get center x, y of camera image (+0.5 rounds to the nearest pixel)
WIDTH = sensor.width()
HEIGHT = sensor.height()
CENTER_X = int(WIDTH / 2 + 0.5)
CENTER_Y = int(HEIGHT / 2 + 0.5)
# Start clock (used only to report frames per second)
clock = time.clock()
# Create cascade for finding faces ("frontalface" cascade, 25 stages)
face_cascade = image.HaarCascade("frontalface", stages=25)
# Superloop
while(True):
    # Take timestamp (for calculating FPS)
    clock.tick()
    # Take photo
    img = sensor.snapshot()
    # Find faces in image
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
    # Find largest face in image; each r is a bounding box used as
    # (x, y, w, h) below
    largest_face_size = 0
    largest_face_bb = None
    for r in objects:
        # Track the bounding box with the largest area (w * h)
        face_size = r[2] * r[3]
        if (face_size > largest_face_size):
            largest_face_size = face_size
            largest_face_bb = r
        # Draw bounding boxes around all faces
        img.draw_rectangle(r)
    # Find line from center of face to center of frame
    if largest_face_bb is not None:
        # Turn on status LED
        led.on()
        # Print out the largest face info
        print("Face:", largest_face_bb)
        # Find x, y of center of largest face in image
        face_x = largest_face_bb[0] + int((largest_face_bb[2]) / 2 + 0.5)
        face_y = largest_face_bb[1] + int((largest_face_bb[3]) / 2 + 0.5)
        # Draw line from center of image to center of face
        img.draw_line(CENTER_X, CENTER_Y, face_x, face_y)
    # If there is no face, don't do anything
    else:
        # Turn off status LED
        led.off()
    # Print FPS
    print("FPS:", clock.fps())
| [
"sensor.set_contrast",
"sensor.set_gainceiling",
"sensor.set_pixformat",
"time.clock",
"sensor.set_framesize",
"image.HaarCascade",
"sensor.width",
"sensor.reset",
"sensor.height",
"sensor.snapshot",
"pyb.LED"
] | [((70, 80), 'pyb.LED', 'pyb.LED', (['(3)'], {}), '(3)\n', (77, 80), False, 'import pyb\n'), ((101, 115), 'sensor.reset', 'sensor.reset', ([], {}), '()\n', (113, 115), False, 'import sensor\n'), ((116, 138), 'sensor.set_contrast', 'sensor.set_contrast', (['(3)'], {}), '(3)\n', (135, 138), False, 'import sensor\n'), ((139, 165), 'sensor.set_gainceiling', 'sensor.set_gainceiling', (['(16)'], {}), '(16)\n', (161, 165), False, 'import sensor\n'), ((166, 199), 'sensor.set_framesize', 'sensor.set_framesize', (['sensor.QVGA'], {}), '(sensor.QVGA)\n', (186, 199), False, 'import sensor\n'), ((200, 238), 'sensor.set_pixformat', 'sensor.set_pixformat', (['sensor.GRAYSCALE'], {}), '(sensor.GRAYSCALE)\n', (220, 238), False, 'import sensor\n'), ((282, 296), 'sensor.width', 'sensor.width', ([], {}), '()\n', (294, 296), False, 'import sensor\n'), ((306, 321), 'sensor.height', 'sensor.height', ([], {}), '()\n', (319, 321), False, 'import sensor\n'), ((410, 422), 'time.clock', 'time.clock', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((474, 517), 'image.HaarCascade', 'image.HaarCascade', (['"""frontalface"""'], {'stages': '(25)'}), "('frontalface', stages=25)\n", (491, 517), False, 'import image\n'), ((633, 650), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (648, 650), False, 'import sensor\n')] |
from tortoise import fields, models
class User(models.Model):
    """Tortoise ORM model representing an application user account."""

    username = fields.CharField(max_length=100, unique=True)  # login name, unique
    password = fields.CharField(max_length=100)  # NOTE(review): presumably a hash, not plaintext -- confirm upstream
    email = fields.CharField(max_length=100, unique=True)  # unique contact address
    first_name = fields.CharField(max_length=100)
    last_name = fields.CharField(max_length=100, null=True)  # optional
    image = fields.CharField(max_length=100, null=True)  # optional avatar/image reference
    bio = fields.TextField(null=True)  # optional free-form text
    date_join = fields.DatetimeField(auto_now_add=True)  # set once, at row creation
    last_login = fields.DatetimeField(null=True)  # None until first login is recorded
    is_active = fields.BooleanField(default=False)  # accounts start inactive
    is_staff = fields.BooleanField(default=False)
    is_superuser = fields.BooleanField(default=False)
| [
"tortoise.fields.CharField",
"tortoise.fields.DatetimeField",
"tortoise.fields.BooleanField",
"tortoise.fields.TextField"
] | [((102, 147), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (118, 147), False, 'from tortoise import fields, models\n'), ((163, 195), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (179, 195), False, 'from tortoise import fields, models\n'), ((208, 253), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (224, 253), False, 'from tortoise import fields, models\n'), ((271, 303), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (287, 303), False, 'from tortoise import fields, models\n'), ((320, 363), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (336, 363), False, 'from tortoise import fields, models\n'), ((376, 419), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (392, 419), False, 'from tortoise import fields, models\n'), ((430, 457), 'tortoise.fields.TextField', 'fields.TextField', ([], {'null': '(True)'}), '(null=True)\n', (446, 457), False, 'from tortoise import fields, models\n'), ((474, 513), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (494, 513), False, 'from tortoise import fields, models\n'), ((531, 562), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'null': '(True)'}), '(null=True)\n', (551, 562), False, 'from tortoise import fields, models\n'), ((580, 614), 'tortoise.fields.BooleanField', 'fields.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (599, 614), False, 'from tortoise import fields, models\n'), ((630, 664), 'tortoise.fields.BooleanField', 'fields.BooleanField', ([], {'default': '(False)'}), 
'(default=False)\n', (649, 664), False, 'from tortoise import fields, models\n'), ((684, 718), 'tortoise.fields.BooleanField', 'fields.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (703, 718), False, 'from tortoise import fields, models\n')] |
import webbrowser
from ontology.exception.exception import SDKException
from click import (
argument,
pass_context
)
from .main import main
from punica.box.repo_box import Box
from punica.utils.output import echo_cli_exception
from punica.exception.punica_exception import PunicaException
@main.command('unbox')
@argument('box_name', nargs=1)
@pass_context
def unbox_cmd(ctx, box_name):
    """
    Download a Punica Box, a pre-built Punica project.
    """
    # Any Punica/SDK failure is reported via the shared CLI error printer.
    project_box = Box(ctx.obj['PROJECT_DIR'])
    try:
        project_box.unbox(box_name)
    except (PunicaException, SDKException) as error:
        echo_cli_exception(error)
@main.command('init')
@pass_context
def init_cmd(ctx):
    """
    Initialize new and empty Ontology project.
    """
    # Any Punica/SDK failure is reported via the shared CLI error printer.
    project_box = Box(ctx.obj['PROJECT_DIR'])
    try:
        project_box.init_box()
    except (PunicaException, SDKException) as error:
        echo_cli_exception(error)
@main.command('boxes')
@pass_context
def boxes_cmd(ctx):
    """
    List all available punica box.
    """
    project_box = Box(ctx.obj['PROJECT_DIR'])
    try:
        project_box.list_boxes()
    except (PunicaException, SDKException):
        # Local listing failed: fall back to the online box catalogue.
        webbrowser.open('https://punica.ont.io/boxes/')
| [
"click.argument",
"punica.utils.output.echo_cli_exception",
"punica.box.repo_box.Box",
"webbrowser.open"
] | [((326, 355), 'click.argument', 'argument', (['"""box_name"""'], {'nargs': '(1)'}), "('box_name', nargs=1)\n", (334, 355), False, 'from click import argument, pass_context\n'), ((481, 508), 'punica.box.repo_box.Box', 'Box', (["ctx.obj['PROJECT_DIR']"], {}), "(ctx.obj['PROJECT_DIR'])\n", (484, 508), False, 'from punica.box.repo_box import Box\n'), ((755, 782), 'punica.box.repo_box.Box', 'Box', (["ctx.obj['PROJECT_DIR']"], {}), "(ctx.obj['PROJECT_DIR'])\n", (758, 782), False, 'from punica.box.repo_box import Box\n'), ((1014, 1041), 'punica.box.repo_box.Box', 'Box', (["ctx.obj['PROJECT_DIR']"], {}), "(ctx.obj['PROJECT_DIR'])\n", (1017, 1041), False, 'from punica.box.repo_box import Box\n'), ((603, 624), 'punica.utils.output.echo_cli_exception', 'echo_cli_exception', (['e'], {}), '(e)\n', (621, 624), False, 'from punica.utils.output import echo_cli_exception\n'), ((872, 893), 'punica.utils.output.echo_cli_exception', 'echo_cli_exception', (['e'], {}), '(e)\n', (890, 893), False, 'from punica.utils.output import echo_cli_exception\n'), ((1128, 1175), 'webbrowser.open', 'webbrowser.open', (['"""https://punica.ont.io/boxes/"""'], {}), "('https://punica.ont.io/boxes/')\n", (1143, 1175), False, 'import webbrowser\n')] |
import sys
from datetime import date, timedelta
import requests
def date_gen(d1, d2):
    """Return the inclusive list of days from `d1` to `d2` as 'YYYYMMDD' strings.

    :param d1: first date (datetime.date), inclusive.
    :param d2: last date (datetime.date), inclusive; must not precede `d1`.
    :return: list of strings, one per calendar day in order.
    """
    # +1 because both endpoints are included.
    span = (d2 - d1).days
    return [(d1 + timedelta(days=offset)).strftime('%Y%m%d') for offset in range(span + 1)]
def download_by_dates(date_list):
    """Download the 14:00 MAWI pcap archive for every date in `date_list`.

    Each file is streamed into download/<date>1400.pcap.gz, printing a
    50-character progress bar when the server reports a Content-Length.
    """
    for date_to_download in date_list:
        url = "http://mawi.nezu.wide.ad.jp/mawi/samplepoint-F/2020/{d}1400.pcap.gz".format(d=date_to_download)
        file_name = str("download/{d}1400.pcap.gz".format(d=date_to_download))
        with open(file_name, "wb") as f:
            print("Downloading %s" % url)
            response = requests.get(url, stream=True)
            total_length = response.headers.get('content-length')
            if total_length is None:
                # No Content-Length header: write the body in one go.
                f.write(response.content)
                continue
            total_length = int(total_length)
            downloaded = 0
            for chunk in response.iter_content(chunk_size=4096):
                downloaded += len(chunk)
                f.write(chunk)
                # Redraw the in-place progress bar after each chunk.
                done = int(50 * downloaded / total_length)
                sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
                sys.stdout.flush()
'''
Example Usage Below:
'''
if __name__ == '__main__':
    # Build the list of capture dates, then fetch each day's archive.
    date_list = date_gen(d1=date(2020, 3, 1), d2=date(2020, 3, 6))
    date_list += date_gen(d1=date(2020, 5, 1), d2=date(2020, 5, 31))
    print(date_list)
    # BUG FIX: the function defined above is `download_by_dates`; the
    # original call to undefined `download(...)` raised NameError.
    download_by_dates(date_list)
| [
"requests.get",
"datetime.timedelta",
"datetime.date",
"sys.stdout.flush",
"sys.stdout.write"
] | [((625, 655), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (637, 655), False, 'import requests\n'), ((1321, 1337), 'datetime.date', 'date', (['(2020)', '(3)', '(1)'], {}), '(2020, 3, 1)\n', (1325, 1337), False, 'from datetime import date, timedelta\n'), ((1342, 1358), 'datetime.date', 'date', (['(2020)', '(3)', '(6)'], {}), '(2020, 3, 6)\n', (1346, 1358), False, 'from datetime import date, timedelta\n'), ((1389, 1405), 'datetime.date', 'date', (['(2020)', '(5)', '(1)'], {}), '(2020, 5, 1)\n', (1393, 1405), False, 'from datetime import date, timedelta\n'), ((1410, 1427), 'datetime.date', 'date', (['(2020)', '(5)', '(31)'], {}), '(2020, 5, 31)\n', (1414, 1427), False, 'from datetime import date, timedelta\n'), ((184, 201), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (193, 201), False, 'from datetime import date, timedelta\n'), ((1133, 1195), 'sys.stdout.write', 'sys.stdout.write', (["('\\r[%s%s]' % ('=' * done, ' ' * (50 - done)))"], {}), "('\\r[%s%s]' % ('=' * done, ' ' * (50 - done)))\n", (1149, 1195), False, 'import sys\n'), ((1216, 1234), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1232, 1234), False, 'import sys\n')] |
from heapq import heapify, heappush, heappop
from collections import defaultdict
import math
def shortest_path(M, start, goal):
    """A* search over map `M` from `start` to `goal`.

    `M` must expose `M.intersections` (node -> (x, y)) and `M.roads`
    (node -> iterable of neighbour nodes).  Edge weights are Euclidean
    distances, and the straight-line distance to `goal` is the
    (admissible) heuristic.

    :return: list of nodes on the cheapest path, inclusive of both
             endpoints, or None if `goal` is unreachable.
    """
    frontier = {start}
    explored = set()
    came_from = dict()
    f_costs = get_initial_f_costs(M, start, goal)  # min-heap of (f, node)
    g_costs = get_initial_g_costs(start)           # node -> best known g

    while f_costs:
        curr_node = heappop(f_costs)[1]
        # BUG FIX (lazy deletion): a node can sit in the heap several
        # times with outdated priorities; the original re-expanded such
        # stale entries.  Skip anything already finalised.
        if curr_node in explored:
            continue
        if curr_node == goal:
            path = get_path(came_from, start, goal)
            print(f"path found: start={start}, goal={goal} path={path}")
            return path
        frontier.discard(curr_node)
        explored.add(curr_node)

        for neighbour in M.roads[curr_node]:
            if neighbour in explored:
                continue
            frontier.add(neighbour)
            estimated_g_cost = g_costs[curr_node] + distance(M, curr_node, neighbour)
            # Relax the edge if this route to `neighbour` is the cheapest so far.
            if estimated_g_cost < g_costs[neighbour]:
                g_costs[neighbour] = estimated_g_cost
                f_cost = get_f_cost(estimated_g_cost, M, neighbour, goal)
                heappush(f_costs, (f_cost, neighbour))
                came_from[neighbour] = curr_node  # record the path
    return None  # heap exhausted without reaching goal


def get_h_cost(M, current, goal):
    """Heuristic: straight-line distance from `current` to `goal`."""
    return distance(M, current, goal)


def get_f_cost(g_cost, M, current, goal):
    """A* priority of `current`: f = g + h."""
    return g_cost + get_h_cost(M, current, goal)


def get_initial_f_costs(M, start, goal):
    """Build the priority queue seeded with the start node."""
    f_costs = []
    heapify(f_costs)
    f_cost_start_node = get_f_cost(0, M, start, goal)
    heappush(f_costs, (f_cost_start_node, start))
    return f_costs


def get_initial_g_costs(start):
    """g-cost table: 0 for `start`, +infinity for every other node."""
    g_costs = defaultdict(lambda: math.inf)
    g_costs[start] = 0
    return g_costs


def distance(M, node_1, node_2):
    """Computes the Euclidean L2 distance between two intersections."""
    x1, y1 = M.intersections[node_1]
    x2, y2 = M.intersections[node_2]
    # math.hypot is the numerically robust form of sqrt(dx^2 + dy^2).
    return math.hypot(x1 - x2, y1 - y2)


def get_path(came_from, start, goal):
    """Reconstructs the start->goal path by walking `came_from` backwards."""
    path = [goal]
    node = goal
    while node != start:
        node = came_from[node]
        path.append(node)
    path.reverse()
    return path
| [
"math.pow",
"heapq.heappop",
"collections.defaultdict",
"heapq.heappush",
"heapq.heapify"
] | [((1937, 1953), 'heapq.heapify', 'heapify', (['f_costs'], {}), '(f_costs)\n', (1944, 1953), False, 'from heapq import heapify, heappush, heappop\n'), ((2012, 2057), 'heapq.heappush', 'heappush', (['f_costs', '(f_cost_start_node, start)'], {}), '(f_costs, (f_cost_start_node, start))\n', (2020, 2057), False, 'from heapq import heapify, heappush, heappop\n'), ((2125, 2155), 'collections.defaultdict', 'defaultdict', (['(lambda : math.inf)'], {}), '(lambda : math.inf)\n', (2136, 2155), False, 'from collections import defaultdict\n'), ((363, 379), 'heapq.heappop', 'heappop', (['f_costs'], {}), '(f_costs)\n', (370, 379), False, 'from heapq import heapify, heappush, heappop\n'), ((2373, 2393), 'math.pow', 'math.pow', (['(x1 - x2)', '(2)'], {}), '(x1 - x2, 2)\n', (2381, 2393), False, 'import math\n'), ((2396, 2416), 'math.pow', 'math.pow', (['(y1 - y2)', '(2)'], {}), '(y1 - y2, 2)\n', (2404, 2416), False, 'import math\n'), ((1573, 1611), 'heapq.heappush', 'heappush', (['f_costs', '(f_cost, neighbour)'], {}), '(f_costs, (f_cost, neighbour))\n', (1581, 1611), False, 'from heapq import heapify, heappush, heappop\n')] |
# -*- coding: utf-8 -*-
import cherrypy
from jinja2 import Template
import mock
from tests.utils import BaseToolsTest
from lib.tool.allowed_methods import AllowedMethodsTool
from lib.tool.cpemail import EmailTool
from lib.tool.template import Jinja2Tool
class TestAllowedMethods(BaseToolsTest):
    """Exercise AllowedMethodsTool: only whitelisted HTTP verbs may pass."""

    _cp_config = {
        'tools.allowed.on': True,
        'tools.allowed.allowed_methods': ['GET']
    }

    def setUp(self):
        super(TestAllowedMethods, self).setUp()
        cherrypy.tools.allowed = AllowedMethodsTool()

    def test_allowed(self):
        # GET is in allowed_methods, so the request succeeds.
        req, resp = self.request('/')
        self.assertEqual(resp.output_status, '200 OK')

    def test_disallowed(self):
        # POST is not whitelisted and must be rejected with 405.
        req, resp = self.request('/', method='POST')
        self.assertEqual(resp.output_status, '405 Method Not Allowed')
class TestJinja2Tool(BaseToolsTest):
    # Tool config: render handler output through the 'test.html' template.
    _cp_config = {
        'tools.render.on': True,
        'tools.render.template': 'test.html'
    }

    def setUp(self):
        # Register the Jinja2 render tool on the cherrypy toolbox before each test.
        super(TestJinja2Tool, self).setUp()
        cherrypy.tools.render = Jinja2Tool()
| [
"lib.tool.allowed_methods.AllowedMethodsTool",
"lib.tool.template.Jinja2Tool"
] | [((509, 529), 'lib.tool.allowed_methods.AllowedMethodsTool', 'AllowedMethodsTool', ([], {}), '()\n', (527, 529), False, 'from lib.tool.allowed_methods import AllowedMethodsTool\n'), ((1072, 1084), 'lib.tool.template.Jinja2Tool', 'Jinja2Tool', ([], {}), '()\n', (1082, 1084), False, 'from lib.tool.template import Jinja2Tool\n')] |
import os
import logging
import stackprinter
from celery import Celery, Task
from celery.schedules import crontab
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE", "config.settings.local"
    )  # pragma: no cover

# Module-level Celery application shared by the whole project.
app = Celery("instanotifier")
class CeleryConfig(AppConfig):
    name = "instanotifier.taskapp"
    verbose_name = "Celery Config"

    def ready(self):
        """Wire Celery to Django settings, register tasks and the beat schedule."""
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object("django.conf:settings", namespace="CELERY")
        app.autodiscover_tasks(
            lambda: [cfg.name for cfg in apps.get_app_configs()], force=True
        )
        app.conf.beat_schedule = {
            "fetch_all_sources_daily": {
                "task": "instanotifier.feedsource.tasks.fetch_all_sources",
                # Runs twice a day, at 11:00 and 23:00.
                "schedule": crontab(minute=0, hour=[11, 23]),
            }
        }
class LogErrorsTask(Task):
    """Celery task base class that logs a formatted traceback on failure."""

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # Render a readable stack trace before delegating to the default handler.
        formatted = stackprinter.format(exc)
        logging.error(formatted)
        super().on_failure(exc, task_id, args, kwargs, einfo)
@app.task(bind=True)
def debug_task(self):
    # Smoke-test task: echoes its own Celery request context.
    print("Request: {0!r}".format(self.request))  # pragma: no cover
| [
"os.environ.setdefault",
"celery.Celery",
"django.apps.apps.get_app_configs",
"stackprinter.format",
"celery.schedules.crontab",
"logging.error"
] | [((407, 430), 'celery.Celery', 'Celery', (['"""instanotifier"""'], {}), "('instanotifier')\n", (413, 430), False, 'from celery import Celery, Task\n'), ((293, 365), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""config.settings.local"""'], {}), "('DJANGO_SETTINGS_MODULE', 'config.settings.local')\n", (314, 365), False, 'import os\n'), ((1044, 1076), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(0)', 'hour': '[11, 23]'}), '(minute=0, hour=[11, 23])\n', (1051, 1076), False, 'from celery.schedules import crontab\n'), ((1190, 1214), 'stackprinter.format', 'stackprinter.format', (['exc'], {}), '(exc)\n', (1209, 1214), False, 'import stackprinter\n'), ((1223, 1240), 'logging.error', 'logging.error', (['tb'], {}), '(tb)\n', (1236, 1240), False, 'import logging\n'), ((803, 825), 'django.apps.apps.get_app_configs', 'apps.get_app_configs', ([], {}), '()\n', (823, 825), False, 'from django.apps import apps, AppConfig\n')] |
from snovault import (
AuditFailure,
audit_checker,
)
@audit_checker('ReferenceEpigenome', frame=['related_datasets',
                                            'related_datasets.replicates',
                                            'related_datasets.replicates.library',
                                            'related_datasets.replicates.library.biosample',
                                            'related_datasets.replicates.library.biosample.donor',
                                            'related_datasets.replicates.library.biosample.treatments'])
def audit_reference_epigenome_donor_biosample(value, system):
    """Yield WARNING audit failures when a reference epigenome's datasets
    mix biosample treatments or biosample term names.
    """
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'related_datasets' not in value:
        return
    treatments_set = set()
    biosample_name_set = set()
    for assay in value['related_datasets']:
        if assay['status'] in ['deleted', 'replaced', 'revoked']:
            continue
        for rep in assay.get('replicates', []):
            if rep['status'] in ['deleted'] or \
               'library' not in rep or 'biosample' not in rep['library']:
                continue
            biosample_object = rep['library']['biosample']
            if 'biosample_term_name' in biosample_object:
                biosample_name_set.add(biosample_object['biosample_term_name'])
            # Missing or empty treatments list both count as 'untreated'.
            if not biosample_object.get('treatments'):
                treatments_set.add('untreated')
            else:
                treatments_to_add = [
                    'treated with ' + t['treatment_term_name']
                    for t in biosample_object['treatments']
                ]
                # Sort so the same treatment combination always hashes
                # to the same set entry regardless of input order.
                treatments_set.add(', '.join(sorted(treatments_to_add)))
    if len(treatments_set) > 1:
        # BUG FIX: corrected the typo 'tretments' -> 'treatments' in the
        # user-facing audit detail message.
        detail = 'Reference Epigenome {} '.format(value['@id']) + \
            ' has biosample associated with different treatments {}.'.format(treatments_set)
        yield AuditFailure('multiple biosample treatments in reference epigenome',
                           detail, level='WARNING')
    if len(biosample_name_set) > 1:
        detail = 'Reference Epigenome {} '.format(value['@id']) + \
            ' has multiple biosample term names {}.'.format(biosample_name_set)
        yield AuditFailure('multiple biosample term names in reference epigenome',
                           detail, level='WARNING')
    return
@audit_checker('ReferenceEpigenome', frame=['award',
                                            'related_datasets',
                                            'related_datasets.target'])
def audit_reference_epigenome_assay_types_requirments(value, system):
    """Check a Reference Epigenome against its project's required assay panel.

    Roadmap-funded epigenomes are checked against the NIH Roadmap minimal
    panel; all others against the IHEC minimal panel.  Yields one WARNING
    AuditFailure per missing required assay.
    """
    detail_prefix = 'Reference Epigenome {} '.format(value['@id'])
    if 'related_datasets' not in value:
        detail = detail_prefix + \
            'has no related datasets. It lacks all of the IHEC required ' + \
            'assays.'
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
        return
    roadmap_flag = False
    if 'award' in value and \
            value['award']['rfa'] == 'Roadmap':
        roadmap_flag = True
        # ChIP-seq entries are keyed by (assay_term_id, target label);
        # non-ChIP assays by assay_term_id alone.  0 = not yet seen.
        required_assays = {('OBI:0000716', 'Control'): 0,
                           ('OBI:0000716', 'H3K27me3'): 0,
                           ('OBI:0000716', 'H3K36me3'): 0,
                           ('OBI:0000716', 'H3K4me1'): 0,
                           ('OBI:0000716', 'H3K4me3'): 0,
                           ('OBI:0000716', 'H3K9me3'): 0,
                           'OBI:0001271': 0, # RNA-seq
                           'OBI:0001463': 0, # Arrays
                           'OBI:0000693': 0, # MeDIP
                           'OBI:0001861': 0, # MRE-seq
                           'OBI:0001863': 0, # MethylCap-seq
                           'OBI:0001862': 0} # RRBS
        project_detail = 'required according to standards of NIH ' + \
            'Roadmap Minimal Reference Epigenome'
    else:
        # NOTE(review): 'OBI:0001863' is commented MethylCap-seq above but
        # WGBS here — confirm which ontology term is actually intended.
        required_assays = {('OBI:0000716', 'Control'): 0,
                           ('OBI:0000716', 'H3K27me3'): 0,
                           ('OBI:0000716', 'H3K36me3'): 0,
                           ('OBI:0000716', 'H3K4me1'): 0,
                           ('OBI:0000716', 'H3K4me3'): 0,
                           ('OBI:0000716', 'H3K27ac'): 0,
                           ('OBI:0000716', 'H3K9me3'): 0,
                           'OBI:0001863': 0, # WGBS
                           'OBI:0001271': 0} # RNA-seq
        project_detail = 'required according to standards of Minimal IHEC Reference Epigenome.'
    # Mark every required assay that is present among the related datasets.
    for assay in value['related_datasets']:
        assay_id = assay['assay_term_id']
        if (assay_id == 'OBI:0000716'):
            if 'target' in assay:
                assay_taget = assay['target']['label']
                key = (assay_id, assay_taget)
                if key in required_assays:
                    required_assays[key] = 1
        elif assay_id in required_assays:
            required_assays[assay_id] = 1
    # One audit failure per missing ChIP-seq target.
    if required_assays[('OBI:0000716', 'Control')] == 0:
        detail = detail_prefix + \
            'is missing control ChIP-seq assay, ' + \
            project_detail
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    if required_assays[('OBI:0000716', 'H3K27me3')] == 0:
        detail = detail_prefix + \
            'is missing H3K27me3 ChIP-seq assay, ' + \
            project_detail
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    if required_assays[('OBI:0000716', 'H3K36me3')] == 0:
        detail = detail_prefix + \
            'is missing H3K36me3 ChIP-seq assay, ' + \
            project_detail
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    if required_assays[('OBI:0000716', 'H3K4me1')] == 0:
        detail = detail_prefix + \
            'is missing H3K4me1 ChIP-seq assay, ' + \
            project_detail
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    if required_assays[('OBI:0000716', 'H3K4me3')] == 0:
        detail = detail_prefix + \
            'is missing H3K4me3 ChIP-seq assay, ' + \
            project_detail
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    if required_assays[('OBI:0000716', 'H3K9me3')] == 0:
        detail = detail_prefix + \
            'is missing H3K9me3 ChIP-seq assay, ' + \
            project_detail
        yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    if roadmap_flag is True:
        # Roadmap accepts any one transcription assay and any one
        # methylation assay from the alternatives below.
        rna_assays = required_assays['OBI:0001271'] + \
            required_assays['OBI:0001463']
        methylation_assays = required_assays['OBI:0000693'] + \
            required_assays['OBI:0001861'] + \
            required_assays['OBI:0001863'] + \
            required_assays['OBI:0001862']
        if methylation_assays == 0:
            detail = detail_prefix + \
                'is missing MeDIP-seq, MRE-seq, RRBS, or MethylCap-seq assays. ' + \
                'At least one is ' + project_detail
            yield AuditFailure('partial reference epigenome', detail, level='WARNING')
        if rna_assays == 0:
            detail = detail_prefix + \
                'is missing RNA-seq or array based transcription assays. ' + \
                'At least one is ' + project_detail
            yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    else:
        # IHEC additionally requires H3K27ac, WGBS and RNA-seq explicitly.
        if required_assays[('OBI:0000716', 'H3K27ac')] == 0:
            detail = detail_prefix + \
                'is missing H3K27ac ChIP-seq assay, ' + \
                project_detail
            yield AuditFailure('partial reference epigenome', detail, level='WARNING')
        if required_assays['OBI:0001863'] == 0:
            detail = detail_prefix + \
                'is missing WGBS assay, ' + \
                project_detail
            yield AuditFailure('partial reference epigenome', detail, level='WARNING')
        if required_assays['OBI:0001271'] == 0:
            detail = detail_prefix + \
                'is missing RNA-seq assay, ' + \
                project_detail
            yield AuditFailure('partial reference epigenome', detail, level='WARNING')
    return
| [
"snovault.AuditFailure",
"snovault.audit_checker"
] | [((65, 378), 'snovault.audit_checker', 'audit_checker', (['"""ReferenceEpigenome"""'], {'frame': "['related_datasets', 'related_datasets.replicates',\n 'related_datasets.replicates.library',\n 'related_datasets.replicates.library.biosample',\n 'related_datasets.replicates.library.biosample.donor',\n 'related_datasets.replicates.library.biosample.treatments']"}), "('ReferenceEpigenome', frame=['related_datasets',\n 'related_datasets.replicates', 'related_datasets.replicates.library',\n 'related_datasets.replicates.library.biosample',\n 'related_datasets.replicates.library.biosample.donor',\n 'related_datasets.replicates.library.biosample.treatments'])\n", (78, 378), False, 'from snovault import AuditFailure, audit_checker\n'), ((2783, 2886), 'snovault.audit_checker', 'audit_checker', (['"""ReferenceEpigenome"""'], {'frame': "['award', 'related_datasets', 'related_datasets.target']"}), "('ReferenceEpigenome', frame=['award', 'related_datasets',\n 'related_datasets.target'])\n", (2796, 2886), False, 'from snovault import AuditFailure, audit_checker\n'), ((2323, 2420), 'snovault.AuditFailure', 'AuditFailure', (['"""multiple biosample treatments in reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('multiple biosample treatments in reference epigenome', detail,\n level='WARNING')\n", (2335, 2420), False, 'from snovault import AuditFailure, audit_checker\n'), ((2648, 2745), 'snovault.AuditFailure', 'AuditFailure', (['"""multiple biosample term names in reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('multiple biosample term names in reference epigenome', detail,\n level='WARNING')\n", (2660, 2745), False, 'from snovault import AuditFailure, audit_checker\n'), ((3297, 3365), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (3309, 3365), False, 'from snovault import AuditFailure, audit_checker\n'), 
((5578, 5646), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (5590, 5646), False, 'from snovault import AuditFailure, audit_checker\n'), ((5836, 5904), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (5848, 5904), False, 'from snovault import AuditFailure, audit_checker\n'), ((6094, 6162), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (6106, 6162), False, 'from snovault import AuditFailure, audit_checker\n'), ((6350, 6418), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (6362, 6418), False, 'from snovault import AuditFailure, audit_checker\n'), ((6606, 6674), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (6618, 6674), False, 'from snovault import AuditFailure, audit_checker\n'), ((6863, 6931), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (6875, 6931), False, 'from snovault import AuditFailure, audit_checker\n'), ((7494, 7562), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (7506, 7562), False, 'from snovault import AuditFailure, audit_checker\n'), ((7779, 7847), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference 
epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (7791, 7847), False, 'from snovault import AuditFailure, audit_checker\n'), ((8065, 8133), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (8077, 8133), False, 'from snovault import AuditFailure, audit_checker\n'), ((8316, 8384), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (8328, 8384), False, 'from snovault import AuditFailure, audit_checker\n'), ((8570, 8638), 'snovault.AuditFailure', 'AuditFailure', (['"""partial reference epigenome"""', 'detail'], {'level': '"""WARNING"""'}), "('partial reference epigenome', detail, level='WARNING')\n", (8582, 8638), False, 'from snovault import AuditFailure, audit_checker\n')] |
import openmc
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import ticker
import matplotx
import numpy as np
import scipy.ndimage as ndimage
def reshape_values_to_mesh_shape(tally, values):
    """Reshape flat tally *values* onto the tally's cylindrical mesh grid.

    Mesh dimensions are derived from the (r, phi, z) grid edges; a 2D mesh
    reports a size of 1 along the collapsed axis, which is dropped.  The
    array is reshaped with the axis order reversed.
    """
    mesh = tally.find_filter(filter_type=openmc.MeshFilter).mesh
    dims = [
        len(mesh.r_grid) - 1,
        len(mesh.phi_grid) - 1,
        len(mesh.z_grid) - 1,
    ]
    if 1 in dims:
        dims.remove(1)
    return values.reshape(dims[::-1])
def get_tally_extent(tally):
    """Return the (left, right, bottom, top) plot extent of a 2D mesh tally.

    Scans the tally's filters for a MeshFilter and, when the mesh is flat
    along exactly one axis, returns the extent spanning the two remaining
    axes.  Returns None for a fully 3D mesh.
    """
    for flt in tally.filters:
        if isinstance(flt, openmc.MeshFilter):
            mesh_filter = flt
    extent_x = (
        min(mesh_filter.mesh.r_grid),
        max(mesh_filter.mesh.r_grid),
    )
    extent_y = (
        min(mesh_filter.mesh.phi_grid),
        max(mesh_filter.mesh.phi_grid),
    )
    extent_z = (
        min(mesh_filter.mesh.z_grid),
        max(mesh_filter.mesh.z_grid),
    )
    shape = [
        len(mesh_filter.mesh.r_grid) - 1,
        len(mesh_filter.mesh.phi_grid) - 1,
        len(mesh_filter.mesh.z_grid) - 1,
    ]
    if 1 not in shape:
        return None
    print("2d mesh tally")
    index_of_1d = shape.index(1)
    print("index", index_of_1d)
    if index_of_1d == 0:
        left, right = extent_y
        bottom, top = extent_z
    elif index_of_1d == 1:
        left, right = extent_x
        bottom, top = extent_z
    else:
        left, right = extent_x
        bottom, top = extent_y
    return (left, right, bottom, top)
def interpolate_tally(tally, sigma=3.0):
    """Build a 2D linear interpolator over a smoothed, volume-normalised tally.

    Relies on the module-level ``source_strength`` global to scale the raw
    per-source-particle tally; ``sigma`` controls the Gaussian smoothing.
    """
    mesh = tally.find_filter(filter_type=openmc.MeshFilter).mesh
    data = tally.get_pandas_dataframe()
    mean = np.array(data["mean"])
    # convert tally to an absolute rate using the module-level source strength
    mean *= source_strength
    # normalise by mesh-cell volume to get a density
    volumes = mesh.calc_mesh_volumes().T.flatten()
    mean = mean/volumes
    mean = reshape_values_to_mesh_shape(tally, mean)
    # # Interpolate data
    # get centers of row and column
    centers_x = (mesh.r_grid[1:] + mesh.r_grid[:-1]) / 2
    centers_y = (mesh.z_grid[1:] + mesh.z_grid[:-1]) / 2
    # smooth before interpolating so the contours are not blocky
    mean = ndimage.gaussian_filter(mean, sigma=sigma, order=0)
    # too heavy for big arrays
    # https://stackoverflow.com/questions/63668864/scipy-interpolate-interp2d-do-i-really-have-too-many-data-points?rq=1
    # xx, yy = np.meshgrid(centers_x, centers_y)
    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # recent SciPy — confirm the pinned SciPy version or migrate to
    # RegularGridInterpolator.
    f = interpolate.interp2d(centers_x, centers_y, mean, kind='linear')
    return f
source_strength = 1e10/4 # n/s
statepoint_file = "statepoint.4.h5"
# loads up the statepoint file with simulation results
statepoint = openmc.StatePoint(filepath=statepoint_file)
# tritium production tally on the cylindrical mesh
t_prod_cyl = statepoint.get_tally(name="(n,Xt)_cylindrical")
mean = np.array(t_prod_cyl.get_pandas_dataframe()["mean"])
mesh = t_prod_cyl.find_filter(filter_type=openmc.MeshFilter).mesh
volumes = mesh.calc_mesh_volumes().T.flatten()
mean = mean/volumes*source_strength # convert tally
mean = reshape_values_to_mesh_shape(t_prod_cyl, mean)
# side-by-side comparison: raw tally vs interpolated + smoothed field
with plt.style.context(matplotx.styles.dufte):
    fig, axs = plt.subplots(1, 2, sharey=True, sharex=True, figsize=(6.4, 5.4))
    # plot real data
    plt.sca(axs[0])
    plt.gca().set_title("Real")
    matplotx.ylabel_top("Z [cm]")
    plt.xlabel("X [cm]")
    image_map = plt.imshow(mean, extent=get_tally_extent(t_prod_cyl), origin="lower", zorder=1, cmap='Purples', norm=LogNorm(vmin=1e3))
    plt.scatter(0.1, 66)
    # plot interpolated data
    plt.sca(axs[1])
    plt.gca().set_title("Interpolated + Smoothed", size=12)
    plt.xlabel("X [cm]")
    x_new = np.linspace(0, 50, 600)
    y_new = np.linspace(0, 110, 600)
    xx, yy = np.meshgrid(x_new, y_new)
    z = interpolate_tally(t_prod_cyl, sigma=3)(x_new, y_new)
    # NOTE(review): reshape returns a new array — this call discards its
    # result, so it is a no-op as written.
    z.reshape(y_new.size, x_new.size)
    levels = np.logspace(3, np.log10(mean.max()), 100)
    # cs is unused; kept for parity with the filled-contour call
    cs = plt.contourf(xx, yy, z, levels=levels, cmap='Purples', norm=LogNorm(vmin=1e3))
    levels2 = np.logspace(4, np.log10(mean.max()), 6, endpoint=False)
    plt.contour(xx, yy, z, levels=levels2, colors="white", alpha=0.3)
    plt.scatter(0.1, 66)
    # shared colorbar across both panels
    plt.colorbar(image_map, ax=axs.ravel().tolist(), label="T production rate (T/m3/s)")
    plt.gca().set_aspect('equal')
    # plt.tight_layout()
    plt.savefig('real_vs_interpolated.png')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.sca",
"matplotx.ylabel_top",
"numpy.array",
"openmc.StatePoint",
"matplotlib.pyplot.style.context",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.scatter",
"scipy.ndimage.g... | [((2780, 2823), 'openmc.StatePoint', 'openmc.StatePoint', ([], {'filepath': 'statepoint_file'}), '(filepath=statepoint_file)\n', (2797, 2823), False, 'import openmc\n'), ((1908, 1930), 'numpy.array', 'np.array', (["data['mean']"], {}), "(data['mean'])\n", (1916, 1930), True, 'import numpy as np\n'), ((2302, 2353), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['mean'], {'sigma': 'sigma', 'order': '(0)'}), '(mean, sigma=sigma, order=0)\n', (2325, 2353), True, 'import scipy.ndimage as ndimage\n'), ((2564, 2627), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['centers_x', 'centers_y', 'mean'], {'kind': '"""linear"""'}), "(centers_x, centers_y, mean, kind='linear')\n", (2584, 2627), False, 'from scipy import interpolate\n'), ((3173, 3213), 'matplotlib.pyplot.style.context', 'plt.style.context', (['matplotx.styles.dufte'], {}), '(matplotx.styles.dufte)\n', (3190, 3213), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3294), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'sharex': '(True)', 'figsize': '(6.4, 5.4)'}), '(1, 2, sharey=True, sharex=True, figsize=(6.4, 5.4))\n', (3242, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3321, 3336), 'matplotlib.pyplot.sca', 'plt.sca', (['axs[0]'], {}), '(axs[0])\n', (3328, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3402), 'matplotx.ylabel_top', 'matplotx.ylabel_top', (['"""Z [cm]"""'], {}), "('Z [cm]')\n", (3392, 3402), False, 'import matplotx\n'), ((3407, 3427), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X [cm]"""'], {}), "('X [cm]')\n", (3417, 3427), True, 'import matplotlib.pyplot as plt\n'), ((3568, 3588), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0.1)', '(66)'], {}), '(0.1, 66)\n', (3579, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3623, 3638), 'matplotlib.pyplot.sca', 'plt.sca', (['axs[1]'], {}), '(axs[1])\n', (3630, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3703, 3723), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X [cm]"""'], {}), "('X [cm]')\n", (3713, 3723), True, 'import matplotlib.pyplot as plt\n'), ((3736, 3759), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(600)'], {}), '(0, 50, 600)\n', (3747, 3759), True, 'import numpy as np\n'), ((3772, 3796), 'numpy.linspace', 'np.linspace', (['(0)', '(110)', '(600)'], {}), '(0, 110, 600)\n', (3783, 3796), True, 'import numpy as np\n'), ((3810, 3835), 'numpy.meshgrid', 'np.meshgrid', (['x_new', 'y_new'], {}), '(x_new, y_new)\n', (3821, 3835), True, 'import numpy as np\n'), ((4152, 4217), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'z'], {'levels': 'levels2', 'colors': '"""white"""', 'alpha': '(0.3)'}), "(xx, yy, z, levels=levels2, colors='white', alpha=0.3)\n", (4163, 4217), True, 'import matplotlib.pyplot as plt\n'), ((4222, 4242), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0.1)', '(66)'], {}), '(0.1, 66)\n', (4233, 4242), True, 'import matplotlib.pyplot as plt\n'), ((4396, 4435), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""real_vs_interpolated.png"""'], {}), "('real_vs_interpolated.png')\n", (4407, 4435), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3350), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3348, 3350), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3565), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': '(1000.0)'}), '(vmin=1000.0)\n', (3552, 3565), False, 'from matplotlib.colors import LogNorm\n'), ((3643, 3652), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3650, 3652), True, 'import matplotlib.pyplot as plt\n'), ((4059, 4079), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': '(1000.0)'}), '(vmin=1000.0)\n', (4066, 4079), False, 'from matplotlib.colors import LogNorm\n'), ((4336, 4345), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4343, 4345), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Provides base logging functions
:copyright: © 2018 by <NAME>
:license: MIT, see LICENSE for more details.
"""
from logging import getLevelName, INFO, WARN, ERROR, DEBUG
from multiprocessing import current_process
from time import strftime
from core.common import load_config
from core.constants import TIME_FORMAT
class Logger(object):
    """Lightweight stdout logger tagged with the current process name."""

    def __init__(self):
        """Build the output format string and read the debug flag from config."""
        self.format = "%s " + current_process().name + "-keeper[%s]: %s"
        self.is_debug = bool(load_config()["debug"])

    def info(self, message):
        """Print *message* at INFO level."""
        self._log(INFO, message)

    def warning(self, message):
        """Print *message* at WARN level."""
        self._log(WARN, message)

    def error(self, message):
        """Print *message* at ERROR level."""
        self._log(ERROR, message)

    def debug(self, message, *args):
        """Print *message* (with printf-style *args*) at DEBUG level."""
        self.log(DEBUG, message, *args)

    def log(self, level, message, *args):
        """Print a message at *level*; DEBUG output requires the debug flag."""
        if level == DEBUG and not self.is_debug:
            return
        self._log(level, message % args if args else message)

    def _log(self, level, message):
        """Write one timestamped, formatted line to stdout."""
        print(self.format % (strftime(TIME_FORMAT), getLevelName(level), message))
| [
"logging.getLevelName",
"time.strftime",
"multiprocessing.current_process",
"core.common.load_config"
] | [((623, 636), 'core.common.load_config', 'load_config', ([], {}), '()\n', (634, 636), False, 'from core.common import load_config\n'), ((551, 568), 'multiprocessing.current_process', 'current_process', ([], {}), '()\n', (566, 568), False, 'from multiprocessing import current_process\n'), ((1828, 1849), 'time.strftime', 'strftime', (['TIME_FORMAT'], {}), '(TIME_FORMAT)\n', (1836, 1849), False, 'from time import strftime\n'), ((1851, 1870), 'logging.getLevelName', 'getLevelName', (['level'], {}), '(level)\n', (1863, 1870), False, 'from logging import getLevelName, INFO, WARN, ERROR, DEBUG\n')] |
from interfaces.interface import Publisher, stock_list
from utils.notifier import NotificationUtils
from service.loader import LoadStock
from utils import cache_util, common_constants
class WatchTower(Publisher):
    """Checks watched stocks against their alert thresholds and notifies by mail."""
    # NOTE(review): both attributes are class-level, so they are shared by
    # every WatchTower instance — confirm that is intended.
    state = 0
    observser_list = list()
    def __init__(self, stock_val):
        # stock_val: iterable of stock records (dicts keyed by common_constants)
        self.stock_val = stock_val
    def attach_observer(self):
        """
        Implement Observer pattern in future when the observers increase
        :return:
        """
        self.observser_list.append(NotificationUtils.__name__)
    def detach_observer(self):
        pass
    def notify(self, user_email, messages):
        # Send the accumulated alert messages to the stock's owner.
        NotificationUtils(user_email=user_email, custom_message=messages).send_mail()
    def business_logic(self):
        """Evaluate each watched stock against target / stop-loss / margin
        thresholds, notify on any trigger, and cache the refreshed records.
        """
        new_stock_list = list()
        for stocks_ in self.stock_val:
            message = []
            stock = stocks_
            stock_nse_code = stock[common_constants.NSE_CODE]
            stock_exchange_name = stock[common_constants.STOCK_EXCHANGE]
            initial_price = stock[common_constants.PRICE_RECORD]
            percentage_change = stock[common_constants.PERCENT_MARGIN]
            stop_loss = stock[common_constants.STOP_LOSS]
            target = stock[common_constants.TARGET]
            user_email = stock[common_constants.USER_EMAIL]
            # Fetch the live quote for this stock.
            current_data = LoadStock(nse_code=stock_nse_code, stock_exchange_name=stock_exchange_name).get_all_data()
            if float(current_data[common_constants.CURRENT_PRICE]) >= float(target):
                self.state = 1
                text = "Hey! " + stock_nse_code + " has reached the target*** " + current_data[common_constants.CURRENT_PRICE]
                message.append(text)
            if float(current_data[common_constants.CURRENT_PRICE]) <= float(stop_loss):
                self.state = 1
                text = "Loosing! " + stock_nse_code + " has gone down: " + current_data[common_constants.CURRENT_PRICE]
                message.append(text)
            print(type(percentage_change))
            if float(current_data[common_constants.PRICE_PERCENTAGE_CHANGE]) >= float(percentage_change):
                self.state = 1
                text = "Hola! " + stock_nse_code + " has reached the set margin percent"
                message.append(text)
            # NOTE(review): self.state is never reset, so once any stock
            # triggers, every later stock in the loop also notifies (possibly
            # with an empty message list) — confirm whether this is intended.
            if self.state == 1:
                self.notify(user_email=user_email, messages=message)
            else:
                print(f"=== ALL GOOD NOTHING TO NOTIFY ===")
            stock[common_constants.CURRENT_DETAILS] = current_data
            new_stock_list.append(stock)
        # Persist the refreshed stock records for the next run.
        cache_util.create_cache_client().set(common_constants.CACHE_KEY, new_stock_list)
| [
"utils.cache_util.create_cache_client",
"service.loader.LoadStock",
"utils.notifier.NotificationUtils"
] | [((635, 700), 'utils.notifier.NotificationUtils', 'NotificationUtils', ([], {'user_email': 'user_email', 'custom_message': 'messages'}), '(user_email=user_email, custom_message=messages)\n', (652, 700), False, 'from utils.notifier import NotificationUtils\n'), ((2588, 2620), 'utils.cache_util.create_cache_client', 'cache_util.create_cache_client', ([], {}), '()\n', (2618, 2620), False, 'from utils import cache_util, common_constants\n'), ((1336, 1411), 'service.loader.LoadStock', 'LoadStock', ([], {'nse_code': 'stock_nse_code', 'stock_exchange_name': 'stock_exchange_name'}), '(nse_code=stock_nse_code, stock_exchange_name=stock_exchange_name)\n', (1345, 1411), False, 'from service.loader import LoadStock\n')] |
"""This module tests Exceptions functionality in stereomideval module"""
import pytest
import numpy as np
from stereomideval.dataset import Dataset
from stereomideval.exceptions import ImageSizeNotEqual, PathNotFound, InvalidSceneName
def test_catch_invalid_image_sizes():
    """Images with mismatched shapes must raise ImageSizeNotEqual."""
    left = np.zeros((5, 5))
    right = np.zeros((5, 6))
    with pytest.raises(ImageSizeNotEqual):
        ImageSizeNotEqual.validate(left, right)
def test_catch_path_not_found():
    """A missing filesystem path must raise PathNotFound."""
    missing_path = "stereomideval/not_a_path"
    with pytest.raises(PathNotFound):
        PathNotFound.validate(missing_path)
def test_catch_invalid_scene_name():
    """Test catching invalid scene name"""
    scene_name = "Invalid scene name"
    # An unknown scene must be rejected against the full scene list...
    with pytest.raises(InvalidSceneName):
        InvalidSceneName.validate_scene_list(scene_name, Dataset.get_scene_list())
    # ...and against the training-scene list as well.
    with pytest.raises(InvalidSceneName):
        InvalidSceneName.validate_scene_info_list(scene_name, Dataset.get_training_scene_list())
| [
"stereomideval.dataset.Dataset.get_training_scene_list",
"stereomideval.dataset.Dataset.get_scene_list",
"stereomideval.exceptions.ImageSizeNotEqual.validate",
"numpy.zeros",
"pytest.raises",
"stereomideval.exceptions.PathNotFound.validate"
] | [((333, 349), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (341, 349), True, 'import numpy as np\n'), ((364, 380), 'numpy.zeros', 'np.zeros', (['(5, 6)'], {}), '((5, 6))\n', (372, 380), True, 'import numpy as np\n'), ((390, 422), 'pytest.raises', 'pytest.raises', (['ImageSizeNotEqual'], {}), '(ImageSizeNotEqual)\n', (403, 422), False, 'import pytest\n'), ((432, 476), 'stereomideval.exceptions.ImageSizeNotEqual.validate', 'ImageSizeNotEqual.validate', (['image_a', 'image_b'], {}), '(image_a, image_b)\n', (458, 476), False, 'from stereomideval.exceptions import ImageSizeNotEqual, PathNotFound, InvalidSceneName\n'), ((598, 625), 'pytest.raises', 'pytest.raises', (['PathNotFound'], {}), '(PathNotFound)\n', (611, 625), False, 'import pytest\n'), ((635, 662), 'stereomideval.exceptions.PathNotFound.validate', 'PathNotFound.validate', (['path'], {}), '(path)\n', (656, 662), False, 'from stereomideval.exceptions import ImageSizeNotEqual, PathNotFound, InvalidSceneName\n'), ((792, 823), 'pytest.raises', 'pytest.raises', (['InvalidSceneName'], {}), '(InvalidSceneName)\n', (805, 823), False, 'import pytest\n'), ((917, 948), 'pytest.raises', 'pytest.raises', (['InvalidSceneName'], {}), '(InvalidSceneName)\n', (930, 948), False, 'import pytest\n'), ((882, 906), 'stereomideval.dataset.Dataset.get_scene_list', 'Dataset.get_scene_list', ([], {}), '()\n', (904, 906), False, 'from stereomideval.dataset import Dataset\n'), ((1012, 1045), 'stereomideval.dataset.Dataset.get_training_scene_list', 'Dataset.get_training_scene_list', ([], {}), '()\n', (1043, 1045), False, 'from stereomideval.dataset import Dataset\n')] |
# -*- coding: utf8 -*-
# ============LICENSE_START=======================================================
# org.onap.vvp/validation-scripts
# ===================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the "License");
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
#
import pytest
import re
from tests import cached_yaml as yaml
from .helpers import validates
from .utils.network_roles import property_uses_get_resource
# Internal-network resource ids must look like int_{network-role}_network.
RE_INTERNAL_NETWORK_RID = re.compile(  # match pattern
    r"int_(?P<network_role>.+)_network$"
)
# Heat resource types that represent a network.
NETWORK_RESOURCE_TYPES = ["OS::Neutron::Net", "OS::ContrailV2::VirtualNetwork"]
@validates("R-16968", "R-35666")
def test_network_resource_id_format(yaml_file):
"""
Make sure all network resource ids use the allowed naming
convention
"""
RE_INTERNAL_NETWORK_RID = re.compile( # match pattern
r"int_(?P<network_role>.+)_network$"
)
with open(yaml_file) as fh:
yml = yaml.load(fh)
# skip if resources are not defined
if "resources" not in yml:
pytest.skip("No resources specified in the heat template")
invalid_networks = []
for k, v in yml["resources"].items():
if not isinstance(v, dict):
continue
if "properties" not in v:
continue
if property_uses_get_resource(v, "network"):
continue
if v.get("type") not in NETWORK_RESOURCE_TYPES:
continue
match = RE_INTERNAL_NETWORK_RID.match(k)
if not match:
invalid_networks.append(k)
assert not set(invalid_networks), (
"Heat templates must only create internal networks "
"and follow format int_{{network-role}}_network"
"{}".format(", ".join(invalid_networks))
)
@validates("R-16241")
def test_network_has_subnet(yaml_file):
"""
if creating internal network, make sure there is a
corresponding subnet that references it
"""
with open(yaml_file) as fh:
yml = yaml.load(fh)
# skip if resources are not defined
if "resources" not in yml:
pytest.skip("No resources specified in the heat template")
networks = []
for k, v in yml["resources"].items():
if not isinstance(v, dict):
continue
if "properties" not in v:
continue
# need to check if contrail networks also require subnet
# and it is defined the same as neutron networks
# if v.get("type") not in NETWORK_RESOURCE_TYPES:
if v.get("type") not in ["OS::Neutron::Net"]:
continue
networks.append(k)
for k, v in yml["resources"].items():
if not isinstance(v, dict):
continue
if "properties" not in v:
continue
if v.get("type") != "OS::Neutron::Subnet":
continue
network_prop = v.get("properties", {}).get("network", {}).get("get_resource")
if not network_prop:
continue
x = 0
for network in networks:
if network == network_prop:
networks.pop(x)
break
x += 1
assert not networks, "Networks detected without subnet {}".format(networks)
| [
"pytest.skip",
"tests.cached_yaml.load",
"re.compile"
] | [((1890, 1937), 're.compile', 're.compile', (['"""int_(?P<network_role>.+)_network$"""'], {}), "('int_(?P<network_role>.+)_network$')\n", (1900, 1937), False, 'import re\n'), ((2248, 2295), 're.compile', 're.compile', (['"""int_(?P<network_role>.+)_network$"""'], {}), "('int_(?P<network_role>.+)_network$')\n", (2258, 2295), False, 'import re\n'), ((2375, 2388), 'tests.cached_yaml.load', 'yaml.load', (['fh'], {}), '(fh)\n', (2384, 2388), True, 'from tests import cached_yaml as yaml\n'), ((2469, 2527), 'pytest.skip', 'pytest.skip', (['"""No resources specified in the heat template"""'], {}), "('No resources specified in the heat template')\n", (2480, 2527), False, 'import pytest\n'), ((3410, 3423), 'tests.cached_yaml.load', 'yaml.load', (['fh'], {}), '(fh)\n', (3419, 3423), True, 'from tests import cached_yaml as yaml\n'), ((3504, 3562), 'pytest.skip', 'pytest.skip', (['"""No resources specified in the heat template"""'], {}), "('No resources specified in the heat template')\n", (3515, 3562), False, 'import pytest\n')] |
import json
from bson import ObjectId
from pymongo import ReturnDocument
from .exceptions import DBException
class DBActionsMixin:
    """CRUD helpers for a model persisted in a MongoDB collection.

    NOTE(review): the methods rely on ``self._collection``, which is not
    assigned here — presumably provided by the consuming class; confirm.
    """
    def __init__(self, model, db):
        # model class used to (de)serialize documents; db handle kept for consumers
        self._model_cls = model
        self._db = db
    def add(self, item):
        """Insert *item* and return the freshly stored model instance."""
        db_obj = self._collection.insert_one(item.prepare_for_db())
        model = self._model_cls(id=str(db_obj.inserted_id))
        # Re-query so the returned object reflects exactly what was stored.
        return self.query(**model.db_key()).pop()
    def query(self, **query_params):
        """Return all models matching *query_params*."""
        return [
            self._model_cls.from_db_object(db_obj) for db_obj in self._collection.find(query_params)
        ]
    def remove(self, todo_item):
        """Delete *todo_item*; raise DBException if it does not exist."""
        db_obj = self._collection.find_one_and_delete(todo_item.db_key())
        if db_obj:
            return self._model_cls.from_db_object(db_obj)
        raise DBException(f'Unable to remove. Object with id={todo_item.id} is absent.')
    def update(self, old_todo_item, new_todo_item):
        """Overwrite *old_todo_item*'s fields with *new_todo_item*'s non-empty
        fields and return the updated model; raise DBException if absent."""
        if not self.query(**old_todo_item.db_key()):
            raise DBException(f'Unable to update. Object with id={old_todo_item.id} is absent.')
        db_obj = self._collection.find_one_and_update(
            old_todo_item.db_key(),
            {'$set': new_todo_item.prepare_for_db(with_empty_fields=False)},
            return_document=ReturnDocument.AFTER
        )
        return self._model_cls.from_db_object(db_obj)
class ToDBModelMixin:
    """Serialization helpers for persisting a model to MongoDB."""

    def prepare_for_db(self, with_empty_fields=True):
        """Return this model's attributes as a DB-ready dict.

        The ``id`` attribute is dropped (Mongo manages ``_id``) and every
        remaining value is stringified.  When *with_empty_fields* is False,
        falsy values are omitted.

        Bug fixes vs. the original implementation:
        * ``vars(self)`` returns the live ``__dict__``; deleting ``id`` from
          it silently removed the attribute from the instance — we now work
          on a copy.
        * the old filter ``lambda x: True if with_empty_fields else
          lambda x: x`` always evaluated truthy (a lambda object is truthy),
          so empty fields were never actually filtered out.
        """
        obj = dict(vars(self))
        obj.pop('id', None)
        if with_empty_fields:
            return {key: str(value) for key, value in obj.items()}
        return {key: str(value) for key, value in obj.items() if value}

    def db_key(self):
        """Return the MongoDB lookup key for this object."""
        return {'_id': ObjectId(self.id)}
class ModelSerializeMixin:
    # NOTE(review): json.dumps(self) only works if the concrete model is
    # itself JSON-serializable (e.g. subclasses dict); for a plain object
    # it raises TypeError — presumably json.dumps(vars(self)) was meant.
    # Confirm against the concrete model classes before changing.
    def __str__(self):
        return json.dumps(self)
class ModelContentMixin:
    """Mixin reporting whether a model carries any meaningful content."""

    @property
    def is_empty(self):
        """True when every attribute value is falsy (or there are none)."""
        return not any(vars(self).values())
| [
"bson.ObjectId",
"json.dumps"
] | [((1785, 1801), 'json.dumps', 'json.dumps', (['self'], {}), '(self)\n', (1795, 1801), False, 'import json\n'), ((1699, 1716), 'bson.ObjectId', 'ObjectId', (['self.id'], {}), '(self.id)\n', (1707, 1716), False, 'from bson import ObjectId\n')] |
# -*- coding: utf-8 -*-
from random import Random
#from core.dataloader import DataLoader
from torch.utils.data import DataLoader
import numpy as np
from math import *
import logging
from scipy import stats
import numpy as np
from pyemd import emd
from collections import OrderedDict
import time
import pickle, random
from argParser import args
class Partition(object):
    """A view over ``data`` restricted to the positions listed in ``index``."""

    def __init__(self, data, index):
        self.data = data
        self.index = index

    def __len__(self):
        """Number of samples in this partition."""
        return len(self.index)

    def __getitem__(self, index):
        """Map a partition-local position to the underlying dataset item."""
        return self.data[self.index[index]]
class DataPartitioner(object):
    # len(sizes) is the number of workers
    # sequential 1-> random 2->zipf 3-> identical
    def __init__(self, data, numOfClass=0, seed=10, splitConfFile=None, isTest=False, dataMapFile=None):
        """Index a dataset by class label and prepare it for partitioning.

        Args:
            data: dataset object exposing ``targets`` (per-sample labels)
                and ``__len__``.
            numOfClass: lower bound on the reported number of labels.
            seed: seed for both the private ``Random`` and ``np.random``.
            splitConfFile: optional text file with one per-class sample
                count per row; overrides label discovery from ``data``.
            isTest: whether this partitioner serves the test split.
            dataMapFile: optional sample-to-client trace map; its presence
                enables trace-driven partitioning.
        """
        self.partitions = []
        self.rng = Random()
        self.rng.seed(seed)

        self.data = data
        self.labels = self.data.targets
        self.is_trace = False
        self.dataMapFile = None
        self.args = args
        self.isTest = isTest

        np.random.seed(seed)

        stime = time.time()

        self.targets = OrderedDict()
        self.indexToLabel = {}
        self.totalSamples = 0
        self.data_len = len(self.data)
        self.task = args.task
        # BUGFIX: compare the -1 sentinel label with ``==`` instead of ``is``
        # — identity comparison with a literal is unreliable for non-interned
        # ints/numpy scalars and raises a SyntaxWarning on Python 3.8+.
        self.skip_partition = True if self.data.targets[0] == -1 or args.skip_partition is True else False

        if self.skip_partition:
            logging.info("====Warning: skip_partition is True")
        elif splitConfFile is None:
            # categorize the samples by label
            for index, label in enumerate(self.labels):
                if label not in self.targets:
                    self.targets[label] = []
                self.targets[label].append(index)
                self.indexToLabel[index] = label
            # BUGFIX: count every sample once; the original accumulated
            # ``len(self.data)`` per sample, yielding n**2.
            self.totalSamples += len(self.data)
        else:
            # each row denotes the number of samples in this class
            with open(splitConfFile, 'r') as fin:
                labelSamples = [int(x.strip()) for x in fin.readlines()]

            # categorize the samples
            baseIndex = 0
            for label, _samples in enumerate(labelSamples):
                for k in range(_samples):
                    self.indexToLabel[baseIndex + k] = label
                self.targets[label] = [baseIndex + k for k in range(_samples)]

                self.totalSamples += _samples
                baseIndex += _samples

        if dataMapFile is not None:
            self.dataMapFile = dataMapFile
            self.is_trace = True

        self.numOfLabels = max(len(self.targets.keys()), numOfClass)
        self.workerDistance = []
        self.classPerWorker = None

        logging.info("====Initiating DataPartitioner takes {} s\n".format(time.time() - stime))
    def getTargets(self):
        """Return the label->indices map with each index list shuffled.

        NOTE(review): ``dict.copy()`` is shallow — the shuffle mutates the
        index lists shared with ``self.targets`` in place.
        """
        tempTarget = self.targets.copy()

        for key in tempTarget:
            self.rng.shuffle(tempTarget[key])

        return tempTarget
    def getNumOfLabels(self):
        """Return the number of distinct class labels tracked."""
        return self.numOfLabels
    def getDataLen(self):
        """Return the total number of samples in the underlying dataset."""
        return self.data_len
# Calculates JSD between pairs of distribution
def js_distance(self, x, y):
m = (x + y)/2
js = 0.5 * stats.entropy(x, m) + 0.5 * stats.entropy(y, m)
return js
# Caculates Jensen-Shannon Divergence for each worker
def get_JSD(self, dataDistr, tempClassPerWorker, sizes):
for worker in range(len(sizes)):
# tempDataSize = sum(tempClassPerWorker[worker])
# if tempDataSize == 0:
# continue
# tempDistr =np.array([c / float(tempDataSize) for c in tempClassPerWorker[worker]])
self.workerDistance.append(0)#self.js_distance(dataDistr, tempDistr))
# Generates a distance matrix for EMD
def generate_distance_matrix(self, size):
return np.logical_xor(1, np.identity(size)) * 1.0
# Caculates Earth Mover's Distance for each worker
def get_EMD(self, dataDistr, tempClassPerWorker, sizes):
dist_matrix = self.generate_distance_matrix_v2(len(dataDistr))
for worker in range(len(sizes)):
tempDataSize = sum(tempClassPerWorker[worker])
if tempDataSize == 0:
continue
tempDistr =np.array([c / float(tempDataSize) for c in tempClassPerWorker[worker]])
self.workerDistance.append(emd(dataDistr, tempDistr, dist_matrix))
    def loadFilterInfo(self):
        """Return sample indices owned by clients with fewer than
        ``args.filter_less`` samples (CV trace variant).

        Reads the pickled sample->client map from ``args.data_mapfile``.
        On any failure the cause is logged and whatever was collected so
        far (possibly an empty list) is returned.
        """
        # load data-to-client mapping
        indicesToRm = []

        try:
            dataToClient = OrderedDict()

            with open(self.args.data_mapfile, 'rb') as db:
                dataToClient = pickle.load(db)

            clientNumSamples = {}
            sampleIdToClient = []

            # data share the same index with labels
            for index, _sample in enumerate(self.data.data):
                # sample key is the part of the file name before '__'
                sample = _sample.split('__')[0]
                clientId = dataToClient[sample]

                if clientId not in clientNumSamples:
                    clientNumSamples[clientId] = 0
                clientNumSamples[clientId] += 1
                sampleIdToClient.append(clientId)

            # second pass: flag every sample whose owner is below threshold
            for index, clientId in enumerate(sampleIdToClient):
                if clientNumSamples[clientId] < self.args.filter_less:
                    indicesToRm.append(index)

        except Exception as e:
            logging.info("====Failed to generate indicesToRm, because of {}".format(e))
            #pass

        return indicesToRm
def loadFilterInfoNLP(self):
indices = []
base = 0
for idx, sample in enumerate(self.data.slice_index):
if sample < args.filter_less:
indices = indices + [base+i for i in range(sample)]
base += sample
return indices
def loadFilterInfoBase(self):
indices = []
try:
for client in self.data.client_mapping:
if len(self.data.client_mapping[client]) < args.filter_less or len(self.data.client_mapping[client]) > args.filter_more:
indices += self.data.client_mapping[client]
# remove the metadata
for idx in self.data.client_mapping[client]:
self.data[idx] = None
except Exception as e:
pass
return indices
    def partitionTraceCV(self, dataToClient):
        """Partition CV samples by their owning client from the trace map.

        ``dataToClient`` maps a sample key (the file name before '__') to a
        client id. Fills ``self.partitions`` (one shuffled index list per
        client) and ``self.classPerWorker`` (per-client label histogram),
        then records the per-worker distance bookkeeping.
        """
        clientToData = {}
        clientNumSamples = {}
        numOfLabels = self.numOfLabels

        # data share the same index with labels
        for index, sample in enumerate(self.data.data):
            sample = sample.split('__')[0]
            clientId = dataToClient[sample]
            labelId = self.labels[index]

            if clientId not in clientToData:
                clientToData[clientId] = []
                clientNumSamples[clientId] = [0] * numOfLabels
            clientToData[clientId].append(index)
            clientNumSamples[clientId][labelId] += 1

        numOfClients = len(clientToData.keys())
        self.classPerWorker = np.zeros([numOfClients, numOfLabels])

        # NOTE(review): assumes client ids are exactly 0..numOfClients-1;
        # a sparse id space would raise KeyError here — confirm trace format.
        for clientId in range(numOfClients):
            self.classPerWorker[clientId] = clientNumSamples[clientId]
            self.rng.shuffle(clientToData[clientId])
            self.partitions.append(clientToData[clientId])

        overallNumSamples = np.asarray(self.classPerWorker.sum(axis=0)).reshape(-1)
        totalNumOfSamples = self.classPerWorker.sum()

        self.get_JSD(overallNumSamples/float(totalNumOfSamples), self.classPerWorker, [0] * numOfClients)
    def partitionTraceSpeech(self, dataToClient):
        """Partition speech samples by their owning client from the trace map.

        Mirrors :meth:`partitionTraceCV` but keys ``dataToClient`` by the
        raw sample value.

        NOTE(review): the label count is hard-coded to 35 (presumably the
        Speech Commands keyword set) — confirm it matches the dataset.
        """
        clientToData = {}
        clientNumSamples = {}
        numOfLabels = 35

        # data share the same index with labels
        for index, sample in enumerate(self.data.data):
            clientId = dataToClient[sample]
            labelId = self.labels[index]

            if clientId not in clientToData:
                clientToData[clientId] = []
                clientNumSamples[clientId] = [0] * numOfLabels
            clientToData[clientId].append(index)
            clientNumSamples[clientId][labelId] += 1

        numOfClients = len(clientToData.keys())
        self.classPerWorker = np.zeros([numOfClients, numOfLabels])

        # NOTE(review): assumes client ids are exactly 0..numOfClients-1.
        for clientId in range(numOfClients):
            #logging.info(clientId)
            self.classPerWorker[clientId] = clientNumSamples[clientId]
            self.rng.shuffle(clientToData[clientId])
            self.partitions.append(clientToData[clientId])

        overallNumSamples = np.asarray(self.classPerWorker.sum(axis=0)).reshape(-1)
        totalNumOfSamples = self.classPerWorker.sum()

        self.get_JSD(overallNumSamples/float(totalNumOfSamples), self.classPerWorker, [0] * numOfClients)
def partitionTraceNLP(self):
clientToData = {}
clientNumSamples = {}
numOfLabels = 1
base = 0
numOfClients = 0
numOfLabels = self.args.num_class
for index, cId in enumerate(self.data.dict.keys()):
clientId = cId
labelId = self.data.targets[index]
if clientId not in clientToData:
clientToData[clientId] = []
clientNumSamples[clientId] = [0] * numOfLabels
clientToData[clientId].append(index)
numOfClients = len(self.clientToData)
    def partitionTraceBase(self):
        """Partition samples using the dataset's own client->indices mapping.

        Per-client label histograms are not tracked here; every client gets
        a flat ``[1] * num_class`` row so downstream bookkeeping keeps
        working.
        """
        clientToData = {}
        clientNumSamples = {}
        numOfLabels = self.args.num_class

        clientToData = self.data.client_mapping
        for clientId in clientToData:
            clientNumSamples[clientId] = [1] * numOfLabels

        numOfClients = len(clientToData)
        # one spare row; only rows [0, numOfClients) are filled below
        self.classPerWorker = np.zeros([numOfClients+1, numOfLabels])

        # NOTE(review): assumes client ids are exactly 0..numOfClients-1 —
        # a sparse id space would raise KeyError here.
        for clientId in range(numOfClients):
            self.classPerWorker[clientId] = clientNumSamples[clientId]
            self.rng.shuffle(clientToData[clientId])
            self.partitions.append(clientToData[clientId])

            # if len(clientToData[clientId]) < args.filter_less or len(clientToData[clientId]) > args.filter_more:
            #     # mask the raw data
            #     for idx in clientToData[clientId]:
            #         self.data[idx] = None

        overallNumSamples = np.asarray(self.classPerWorker.sum(axis=0)).reshape(-1)
        totalNumOfSamples = self.classPerWorker.sum()

        self.get_JSD(overallNumSamples/float(totalNumOfSamples), self.classPerWorker, [0] * numOfClients)
def partitionDataByDefault(self, sizes, sequential, ratioOfClassWorker, filter_class, _args):
if self.is_trace and not self.args.enforce_random:
# use the real trace, thus no need to partition
if self.task == 'speech' or self.task == 'cv':
dataToClient = OrderedDict()
with open(self.dataMapFile, 'rb') as db:
dataToClient = pickle.load(db)
if self.task == 'speech':
self.partitionTraceSpeech(dataToClient=dataToClient)
else:
self.partitionTraceCV(dataToClient=dataToClient)
else:
self.partitionTraceBase()
else:
self.partitionData(sizes=sizes, sequential=sequential,
ratioOfClassWorker=ratioOfClassWorker,
filter_class=filter_class, args=_args)
    def partitionData(self, sizes=None, sequential=0, ratioOfClassWorker=None, filter_class=0, args = None):
        """Synthetically split the dataset into ``len(sizes)`` partitions.

        ``sequential`` selects the strategy: 0 = uniform random split,
        1 = random per-class ratios, 2 = zipf-distributed ratios,
        3 = identical ratios, 4 = per-client counts read from a config
        file. ``args`` here is a plain dict of strategy parameters, not
        the global command-line namespace.
        """
        targets = self.getTargets()
        numOfLabels = self.getNumOfLabels()
        data_len = self.getDataLen()
        # cap on samples drawn per worker in the sequential==1/3 strategies
        usedSamples = 100000

        keyDir = {key:int(key) for i, key in enumerate(targets.keys())}
        keyLength = [0] * numOfLabels

        if not self.skip_partition:
            for key in keyDir.keys():
                keyLength[keyDir[key]] = len(targets[key])

        # classPerWorker -> Rows are workers and cols are classes
        tempClassPerWorker = np.zeros([len(sizes), numOfLabels])

        # random partition
        if sequential == 0:
            logging.info("========= Start of Random Partition =========\n")

            # may need to filter ...
            indicesToRm = set()
            indexes = None

            if self.args.filter_less != 0 and self.isTest is False:
                if self.task == 'cv':
                    indicesToRm = set(self.loadFilterInfo())
                else:
                    indicesToRm = set(self.loadFilterInfoBase())

                indexes = [x for x in range(0, data_len) if x not in indicesToRm]

                # we need to remove those with less than certain number of samples
                logging.info("====Try to remove clients w/ less than {} samples, and remove {} samples".format(self.args.filter_less, len(indicesToRm)))
            else:
                indexes = [x for x in range(data_len)]

            self.rng.shuffle(indexes)
            realDataLen = len(indexes)

            # carve off one contiguous slice per worker, proportional to ratio
            for ratio in sizes:
                part_len = int(ratio * realDataLen)
                self.partitions.append(indexes[0:part_len])
                indexes = indexes[part_len:]

            if not self.skip_partition:
                for id, partition in enumerate(self.partitions):
                    for index in partition:
                        tempClassPerWorker[id][self.indexToLabel[index]] += 1

        else:
            logging.info('========= Start of Class/Worker =========\n')

            if ratioOfClassWorker is None:
                # random distribution
                if sequential == 1:
                    ratioOfClassWorker = np.random.rand(len(sizes), numOfLabels)
                # zipf distribution
                elif sequential == 2:
                    ratioOfClassWorker = np.random.zipf(args['param'], [len(sizes), numOfLabels])
                    logging.info("==== Load Zipf Distribution ====\n {} \n".format(repr(ratioOfClassWorker)))
                    ratioOfClassWorker = ratioOfClassWorker.astype(np.float32)
                else:
                    ratioOfClassWorker = np.ones((len(sizes), numOfLabels)).astype(np.float32)

            if filter_class > 0:
                for w in range(len(sizes)):
                    # randomly filter classes by forcing zero samples
                    wrandom = self.rng.sample(range(numOfLabels), filter_class)
                    for wr in wrandom:
                        ratioOfClassWorker[w][wr] = 0.001

            # normalize the ratios
            if sequential == 1 or sequential == 3:
                # normalize each worker's row to sum to 1
                sumRatiosPerClass = np.sum(ratioOfClassWorker, axis=1)
                for worker in range(len(sizes)):
                    ratioOfClassWorker[worker, :] = ratioOfClassWorker[worker, :]/float(sumRatiosPerClass[worker])

                # split the classes
                for worker in range(len(sizes)):
                    self.partitions.append([])
                    # enumerate the ratio of classes it should take
                    for c in list(targets.keys()):
                        takeLength = min(floor(usedSamples * ratioOfClassWorker[worker][keyDir[c]]), keyLength[keyDir[c]])
                        self.rng.shuffle(targets[c])
                        self.partitions[-1] += targets[c][0:takeLength]
                        tempClassPerWorker[worker][keyDir[c]] += takeLength

                    self.rng.shuffle(self.partitions[-1])

            elif sequential == 2:
                # normalize each class's column to sum to 1 across workers
                sumRatiosPerClass = np.sum(ratioOfClassWorker, axis=0)
                for c in targets.keys():
                    ratioOfClassWorker[:, keyDir[c]] = ratioOfClassWorker[:, keyDir[c]]/float(sumRatiosPerClass[keyDir[c]])

                # split the classes
                for worker in range(len(sizes)):
                    self.partitions.append([])
                    # enumerate the ratio of classes it should take
                    for c in list(targets.keys()):
                        # NOTE(review): ``math.ceil`` is a NameError — the file
                        # only does ``from math import *`` (which binds ``ceil``,
                        # not ``math``); this branch cannot run as written.
                        takeLength = min(int(math.ceil(keyLength[keyDir[c]] * ratioOfClassWorker[worker][keyDir[c]])), len(targets[c]))
                        self.partitions[-1] += targets[c][0:takeLength]
                        tempClassPerWorker[worker][keyDir[c]] += takeLength
                        targets[c] = targets[c][takeLength:]

                    self.rng.shuffle(self.partitions[-1])
            elif sequential == 4:
                # load data from given config file
                clientGivenSamples = {}
                with open(args['clientSampleConf'], 'r') as fin:
                    for clientId, line in enumerate(fin.readlines()):
                        clientGivenSamples[clientId] = [int(x) for x in line.strip().split()]

                # split the data
                # NOTE(review): this branch compares an int against a list
                # (``> targets[c]``) and uses ``worker``, which is unbound
                # here — it would raise at runtime; confirm it is unused.
                for clientId in range(len(clientGivenSamples.keys())):
                    self.partitions.append([])
                    for c in list(targets.keys()):
                        takeLength = clientGivenSamples[clientId][c]
                        if clientGivenSamples[clientId][c] > targets[c]:
                            logging.info("========== Failed to allocate {} samples for class {} to client {}, actual quota is {}"\
                                .format(clientGivenSamples[clientId][c], c, clientId, targets[c]))
                            takeLength = targets[c]

                        self.partitions[-1] += targets[c][0:takeLength]
                        tempClassPerWorker[worker][keyDir[c]] += takeLength
                        targets[c] = targets[c][takeLength:]

                    self.rng.shuffle(self.partitions[-1])

        # concatenate ClassPerWorker
        if self.classPerWorker is None:
            self.classPerWorker = tempClassPerWorker
        else:
            self.classPerWorker = np.concatenate((self.classPerWorker, tempClassPerWorker), axis=0)

        # Calculates statistical distances
        totalDataSize = max(sum(keyLength), 1)
        # Overall data distribution
        dataDistr = np.array([key / float(totalDataSize) for key in keyLength])
        self.get_JSD(dataDistr, tempClassPerWorker, sizes)

        logging.info("Raw class per worker is : " + repr(tempClassPerWorker) + '\n')
        logging.info('========= End of Class/Worker =========\n')
def log_selection(self):
# totalLabels = [0 for i in range(len(self.classPerWorker[0]))]
# logging.info("====Total # of workers is :{}, w/ {} labels, {}, {}".format(len(self.classPerWorker), len(self.classPerWorker[0]), len(self.partitions), len(self.workerDistance)))
# for index, row in enumerate(self.classPerWorker):
# rowStr = ''
# numSamples = 0
# for i, label in enumerate(self.classPerWorker[index]):
# rowStr += '\t'+str(int(label))
# totalLabels[i] += label
# numSamples += label
# logging.info(str(index) + ':\t' + rowStr + '\n' + 'with sum:\t' + str(numSamples) + '\t' + repr(len(self.partitions[index]))+ '\nDistance: ' + str(self.workerDistance[index])+ '\n')
# logging.info("=====================================\n")
# logging.info("Total selected samples is: {}, with {}\n".format(str(sum(totalLabels)), repr(totalLabels)))
# logging.info("=====================================\n")
# remove unused variables
self.classPerWorker = None
self.numOfLabels = None
pass
def use(self, partition, istest, is_rank, fractional):
_partition = partition
resultIndex = []
if is_rank == -1:
resultIndex = self.partitions[_partition]
else:
for i in range(len(self.partitions)):
if i % self.args.total_worker == is_rank:
resultIndex += self.partitions[i]
exeuteLength = -1 if istest == False or fractional == False else int(len(resultIndex) * args.test_ratio)
resultIndex = resultIndex[:exeuteLength]
self.rng.shuffle(resultIndex)
#logging.info("====Data length for client {} is {}".format(partition, len(resultIndex)))
return Partition(self.data, resultIndex)
    def getDistance(self):
        """Return the list of per-worker distribution distances."""
        return self.workerDistance
def getSize(self):
# return the size of samples
return [len(partition) for partition in self.partitions]
def partition_dataset(partitioner, workers, partitionRatio=None, sequential=0, ratioOfClassWorker=None, filter_class=0, arg=None):
    """ Partitioning Data """
    # BUGFIX: the defaults for ``partitionRatio`` ([]) and ``arg`` (a dict)
    # were mutable default arguments; they are now None sentinels. Behavior
    # for callers passing explicit values (including []) is unchanged.
    if arg is None:
        arg = {'param': 1.95}

    workers_num = len(workers)
    # default: equal shares per worker unless an explicit ratio is given
    partition_sizes = [1.0 / workers_num for _ in range(workers_num)]

    if partitionRatio:
        partition_sizes = partitionRatio

    partitioner.partitionDataByDefault(sizes=partition_sizes, sequential=sequential, ratioOfClassWorker=ratioOfClassWorker, filter_class=filter_class, _args=arg)
def select_dataset(rank: int, partition: DataPartitioner, batch_size: int, isTest=False, is_rank=0, fractional=True, collate_fn=None):
    """Materialize the DataLoader serving ``rank``'s slice of the dataset."""
    subset = partition.use(rank - 1, isTest, is_rank - 1, fractional)

    loader_kwargs = {
        'batch_size': batch_size,
        'shuffle': True,
        'pin_memory': False,
        'num_workers': args.num_loaders,  # int(min(args.num_loaders, len(partition)/(batch_size+1)))
        'drop_last': not isTest,          # keep the ragged final batch only for test
        'timeout': 0 if isTest else 60,
    }
    if collate_fn is not None:
        loader_kwargs['collate_fn'] = collate_fn

    return DataLoader(subset, **loader_kwargs)
| [
"numpy.identity",
"pyemd.emd",
"collections.OrderedDict",
"scipy.stats.entropy",
"random.Random",
"pickle.load",
"logging.info",
"numpy.sum",
"numpy.zeros",
"numpy.random.seed",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"time.time"
] | [((20762, 20773), 'time.time', 'time.time', ([], {}), '()\n', (20771, 20773), False, 'import time\n'), ((941, 949), 'random.Random', 'Random', ([], {}), '()\n', (947, 949), False, 'from random import Random\n'), ((1168, 1188), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1182, 1188), True, 'import numpy as np\n'), ((1206, 1217), 'time.time', 'time.time', ([], {}), '()\n', (1215, 1217), False, 'import time\n'), ((1305, 1318), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1316, 1318), False, 'from collections import OrderedDict\n'), ((7272, 7309), 'numpy.zeros', 'np.zeros', (['[numOfClients, numOfLabels]'], {}), '([numOfClients, numOfLabels])\n', (7280, 7309), True, 'import numpy as np\n'), ((8443, 8480), 'numpy.zeros', 'np.zeros', (['[numOfClients, numOfLabels]'], {}), '([numOfClients, numOfLabels])\n', (8451, 8480), True, 'import numpy as np\n'), ((9925, 9966), 'numpy.zeros', 'np.zeros', (['[numOfClients + 1, numOfLabels]'], {}), '([numOfClients + 1, numOfLabels])\n', (9933, 9966), True, 'import numpy as np\n'), ((18439, 18496), 'logging.info', 'logging.info', (['"""========= End of Class/Worker =========\n"""'], {}), "('========= End of Class/Worker =========\\n')\n", (18451, 18496), False, 'import logging\n'), ((21618, 21761), 'torch.utils.data.DataLoader', 'DataLoader', (['partition'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': 'numOfThreads', 'drop_last': 'dropLast', 'timeout': 'timeOut'}), '(partition, batch_size=batch_size, shuffle=True, pin_memory=False,\n num_workers=numOfThreads, drop_last=dropLast, timeout=timeOut)\n', (21628, 21761), False, 'from torch.utils.data import DataLoader\n'), ((21820, 21990), 'torch.utils.data.DataLoader', 'DataLoader', (['partition'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': 'numOfThreads', 'drop_last': 'dropLast', 'timeout': 'timeOut', 'collate_fn': 'collate_fn'}), '(partition, 
batch_size=batch_size, shuffle=True, pin_memory=False,\n num_workers=numOfThreads, drop_last=dropLast, timeout=timeOut,\n collate_fn=collate_fn)\n', (21830, 21990), False, 'from torch.utils.data import DataLoader\n'), ((1601, 1652), 'logging.info', 'logging.info', (['"""====Warning: skip_partition is True"""'], {}), "('====Warning: skip_partition is True')\n", (1613, 1652), False, 'import logging\n'), ((4765, 4778), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4776, 4778), False, 'from collections import OrderedDict\n'), ((12312, 12375), 'logging.info', 'logging.info', (['"""========= Start of Random Partition =========\n"""'], {}), "('========= Start of Random Partition =========\\n')\n", (12324, 12375), False, 'import logging\n'), ((13641, 13700), 'logging.info', 'logging.info', (['"""========= Start of Class/Worker =========\n"""'], {}), "('========= Start of Class/Worker =========\\n')\n", (13653, 13700), False, 'import logging\n'), ((18013, 18078), 'numpy.concatenate', 'np.concatenate', (['(self.classPerWorker, tempClassPerWorker)'], {'axis': '(0)'}), '((self.classPerWorker, tempClassPerWorker), axis=0)\n', (18027, 18078), True, 'import numpy as np\n'), ((3432, 3451), 'scipy.stats.entropy', 'stats.entropy', (['x', 'm'], {}), '(x, m)\n', (3445, 3451), False, 'from scipy import stats\n'), ((3460, 3479), 'scipy.stats.entropy', 'stats.entropy', (['y', 'm'], {}), '(y, m)\n', (3473, 3479), False, 'from scipy import stats\n'), ((4084, 4101), 'numpy.identity', 'np.identity', (['size'], {}), '(size)\n', (4095, 4101), True, 'import numpy as np\n'), ((4590, 4628), 'pyemd.emd', 'emd', (['dataDistr', 'tempDistr', 'dist_matrix'], {}), '(dataDistr, tempDistr, dist_matrix)\n', (4593, 4628), False, 'from pyemd import emd\n'), ((4870, 4885), 'pickle.load', 'pickle.load', (['db'], {}), '(db)\n', (4881, 4885), False, 'import pickle, random\n'), ((10999, 11012), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11010, 11012), False, 'from collections 
import OrderedDict\n'), ((14826, 14860), 'numpy.sum', 'np.sum', (['ratioOfClassWorker'], {'axis': '(1)'}), '(ratioOfClassWorker, axis=1)\n', (14832, 14860), True, 'import numpy as np\n'), ((2992, 3003), 'time.time', 'time.time', ([], {}), '()\n', (3001, 3003), False, 'import time\n'), ((11106, 11121), 'pickle.load', 'pickle.load', (['db'], {}), '(db)\n', (11117, 11121), False, 'import pickle, random\n'), ((15730, 15764), 'numpy.sum', 'np.sum', (['ratioOfClassWorker'], {'axis': '(0)'}), '(ratioOfClassWorker, axis=0)\n', (15736, 15764), True, 'import numpy as np\n')] |
import os
import shutil
import Ni__eam__born_exp_rose as configuration
from collections import OrderedDict
def cleanup_simulation_directories():
    """Delete the per-structure LAMMPS simulation directories, if present."""
    simulation_dirs = (
        'Ni_fcc_vac.lmps_min_pos',
        'Ni_fcc.lmps_elastic',
        'Ni_fcc_100_unit.lmps_min_all',
        'Ni_fcc_111_s.lmps_min_pos',
        'Ni_fcc_isf.lmps_min_sf',
        'Ni_sc.lmps_min_all',
        'Ni_bcc.lmps_min_all',
        'Ni_fcc.lmps_min_all',
        'Ni_fcc_110_s.lmps_min_pos',
        'Ni_fcc_111_unit.lmps_min_all',
        'Ni_dia.lmps_min_all',
        'Ni_fcc_100_s.lmps_min_pos',
        'Ni_fcc_110_unit.lmps_min_all',
        'Ni_fcc_esf.lmps_min_sf',
        'Ni_hcp.lmps_min_all',
    )
    for directory in simulation_dirs:
        if os.path.isdir(directory):
            shutil.rmtree(directory)
#
symbols = ['Ni']

# THE LATTICE INFORMATION IS NORMALLY A QOI, BUT THE QOI'S ARE BURNED HERE
lattice_info = OrderedDict()
for s in symbols:
    lattice_info[s] = OrderedDict()
lattice_info['Ni']['lattice_type'] = 'fcc'
lattice_info['Ni']['cohesive_energy'] = -4.5
lattice_info['Ni']['bulk_modulus'] = 162 # in_GPa
lattice_info['Ni']['lattice_parameter'] = 3.52
a0 = lattice_info['Ni']['lattice_parameter']

# THIS IS COMPUTED INFORMATION AND IS ONLY TRUE FOR AN FCC LATTICE
lattice_type = lattice_info['Ni']['lattice_type']
if lattice_type == 'fcc':
    # NOTE(review): a0**3 is the volume of the conventional fcc cell
    # (4 atoms); a true per-atom volume would be a0**3/4 — confirm intent.
    V = a0**3
    lattice_info['Ni']['equilibrium_volume_per_atom'] = V
    # nearest-neighbor distance in fcc: a0 / sqrt(2)
    re = 1/(2**0.5)*a0
    lattice_info['Ni']['equilibrium_interatomic_distance'] = 1/(2**0.5)*a0

# Potential parameters seeded from the lattice information above.
# BUGFIX: the OrderedDict was created twice back to back; the duplicate
# assignment was dead code and has been removed.
potential_parameters = OrderedDict()
potential_parameters['p_NiNi_phi0'] = 1.0
potential_parameters['p_NiNi_gamma'] = 2.0
potential_parameters['p_NiNi_r0'] = lattice_info['Ni']['equilibrium_interatomic_distance']
potential_parameters['d_Ni_rho0'] = 1.0
potential_parameters['d_Ni_beta'] = 4.0
potential_parameters['d_Ni_r0'] = lattice_info['Ni']['equilibrium_interatomic_distance']
potential_parameters['e_Ni_ecoh'] = lattice_info['Ni']['cohesive_energy']
potential_parameters['e_Ni_latticetype'] = lattice_info['Ni']['lattice_type']
potential_parameters['e_Ni_B'] = lattice_info['Ni']['bulk_modulus']
potential_parameters['e_Ni_a0'] = lattice_info['Ni']['lattice_parameter']
if __name__ == "__main__":
    from pypospack.pyposmat.engines import PyposmatEngine
    from pypospack.pyposmat.data import PyposmatDataFile
    from pypospack.pyposmat.data import PyposmatConfigurationFile

    # write the pyposmat configuration that the engine reads back below
    filename_config = 'pypospack.config.in'
    configuration.write_configuration_file(filename=filename_config)
    engine = PyposmatEngine(
            filename_in = 'pypospack.config.in',
            filename_out = 'pypospack.config.out')
    engine.configure()

    _parameters = potential_parameters

    # single-shot evaluation of the hand-built parameter set above
    results = engine.evaluate_parameter_set(parameters=_parameters)

    print(results)
    print('cleaning up simulation directories...')
    cleanup_simulation_directories()
| [
"collections.OrderedDict",
"pypospack.pyposmat.engines.PyposmatEngine",
"Ni__eam__born_exp_rose.write_configuration_file",
"os.path.isdir",
"shutil.rmtree"
] | [((889, 902), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (900, 902), False, 'from collections import OrderedDict\n'), ((1537, 1550), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1548, 1550), False, 'from collections import OrderedDict\n'), ((1574, 1587), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1585, 1587), False, 'from collections import OrderedDict\n'), ((943, 956), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (954, 956), False, 'from collections import OrderedDict\n'), ((2485, 2549), 'Ni__eam__born_exp_rose.write_configuration_file', 'configuration.write_configuration_file', ([], {'filename': 'filename_config'}), '(filename=filename_config)\n', (2523, 2549), True, 'import Ni__eam__born_exp_rose as configuration\n'), ((2564, 2655), 'pypospack.pyposmat.engines.PyposmatEngine', 'PyposmatEngine', ([], {'filename_in': '"""pypospack.config.in"""', 'filename_out': '"""pypospack.config.out"""'}), "(filename_in='pypospack.config.in', filename_out=\n 'pypospack.config.out')\n", (2578, 2655), False, 'from pypospack.pyposmat.engines import PyposmatEngine\n'), ((731, 747), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (744, 747), False, 'import os\n'), ((761, 777), 'shutil.rmtree', 'shutil.rmtree', (['d'], {}), '(d)\n', (774, 777), False, 'import shutil\n')] |
from django.contrib import admin
from stade.core.models import Challenge
from .task import TaskInline
@admin.register(Challenge)
class ChallengeAdmin(admin.ModelAdmin):
    """Django admin page for Challenge, with its tasks editable inline."""

    inlines = [TaskInline]
| [
"django.contrib.admin.register"
] | [((107, 132), 'django.contrib.admin.register', 'admin.register', (['Challenge'], {}), '(Challenge)\n', (121, 132), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python3
"""problem_059.py
Problem 59: XOR decryption
Each character on a computer is assigned a unique code and the preferred
standard is ASCII (American Standard Code for Information Interchange). For
example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII,
then XOR each byte with a given value, taken from a secret key. The advantage
with the XOR function is that using the same encryption key on the cipher text,
restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text
message, and the key is made up of random bytes. The user would keep the
encrypted message and the encryption key in different locations, and without
both "halves", it is impossible to decrypt the message.
Unfortunately, this method is impractical for most users, so the modified
method is to use a password as a key. If the password is shorter than the
message, which is likely, the key is repeated cyclically throughout the
message. The balance for this method is using a sufficiently long password key
for security, but short enough to be memorable.
Your task has been made easy, as the encryption key consists of three lower
case characters. Using FILE_NAME, a file containing the encrypted ASCII codes,
and the knowledge that the plain text must contain common English words,
decrypt the message and find the sum of the ASCII values in the original text.
"""
__author__ = '<NAME>'
from typing import List, Optional
import common.alphabet as alpha
import common.fileio as fio
# PARAMETERS ##################################################################
FILE_NAME = '../input/059.txt' # default: '../input/059.txt'
# SOLUTION ####################################################################
def decrypt_message(message: str, key: str) -> str:
    """Returns the result of XOR decrypting message with the given key.

    The key is repeated cyclically over the message; applying the same key
    twice restores the original text.
    """
    key_length = len(key)
    return ''.join(
        chr(ord(char) ^ ord(key[position % key_length]))
        for position, char in enumerate(message)
    )
def solve() -> Optional[int]:
    """Brute-force the 3-letter lowercase key; return the sum of ASCII
    values of the decrypted text, or None if no key yields ' the '."""
    # sequence that should occur if the message is properly decrypted
    target_seq = ' the '

    # create the encrypted message from the file
    byte_vals = next(fio.ints_from_file(FILE_NAME, sep=','))
    encrypted = ''.join(map(chr, byte_vals))

    # try each possible key and look for target sequence in message
    for i in range(1, 27):
        for j in range(1, 27):
            for k in range(1, 27):
                key = ''.join([
                    alpha.letter_char_lower(i),
                    alpha.letter_char_lower(j),
                    alpha.letter_char_lower(k),
                ])
                # decrypt message with key and check for target sequence
                decrypted = decrypt_message(encrypted, key)
                if target_seq in decrypted:
                    return sum(map(ord, decrypted))

    # target sequence not found
    return None
if __name__ == '__main__':
    # Print the puzzle answer when run as a script.
    print(solve())
| [
"common.fileio.ints_from_file",
"common.alphabet.letter_char_lower"
] | [((2445, 2483), 'common.fileio.ints_from_file', 'fio.ints_from_file', (['FILE_NAME'], {'sep': '""","""'}), "(FILE_NAME, sep=',')\n", (2463, 2483), True, 'import common.fileio as fio\n'), ((2744, 2770), 'common.alphabet.letter_char_lower', 'alpha.letter_char_lower', (['i'], {}), '(i)\n', (2767, 2770), True, 'import common.alphabet as alpha\n'), ((2792, 2818), 'common.alphabet.letter_char_lower', 'alpha.letter_char_lower', (['j'], {}), '(j)\n', (2815, 2818), True, 'import common.alphabet as alpha\n'), ((2840, 2866), 'common.alphabet.letter_char_lower', 'alpha.letter_char_lower', (['k'], {}), '(k)\n', (2863, 2866), True, 'import common.alphabet as alpha\n')] |
import numpy as np
import glob
import geo
import time
import pdb
start_time = time.time()

dataDir='./data/'

# get CrIS files
cris_sdr_files = sorted(glob.glob(dataDir+'SCRIS*'))
cris_geo_files = sorted(glob.glob(dataDir+'GCRSO*'))

# get VIIRS files
viirs_sdr_files = sorted(glob.glob(dataDir+'SVM15*'))
viirs_geo_files = sorted(glob.glob(dataDir+'GMODO*'))

# read VIIRS data
viirs_lon, viirs_lat, viirs_satAzimuth, viirs_satRange, viirs_satZenith = geo.read_viirs_geo(viirs_geo_files)
viirs_bt, viirs_rad, viirs_sdrQa = geo.read_viirs_sdr(viirs_sdr_files)

# read CrIS data
cris_lon, cris_lat, cris_satAzimuth, cris_satRange, cris_satZenith = geo.read_cris_geo(cris_geo_files)
cris_realLW, cris_realMW, cris_realSW, cris_sdrQa, cris_geoQa, cris_dayFlag = geo.read_cris_sdr(cris_sdr_files , sdrFlag=True)

# compute CrIS Pos Vector in EFEC on the Earth Surface
# (altitude 0: position of each FOV projected onto the surface)
cris_pos= np.zeros(np.append(cris_lat.shape, 3))
cris_pos[:, :, :, 0], cris_pos[:, :, :, 1], cris_pos[:, :, :, 2] \
    = geo.LLA2ECEF(cris_lon, cris_lat, np.zeros_like(cris_lat))

# compute CrIS LOS Vector in ECEF
cris_east, cris_north, cris_up = geo.RAE2ENU(cris_satAzimuth, cris_satZenith, cris_satRange)

cris_los= np.zeros(np.append(cris_lat.shape, 3))
cris_los[:, :, :, 0], cris_los[:, :, :, 1], cris_los[:, :, :, 2] = \
    geo.ENU2ECEF(cris_east, cris_north, cris_up, cris_lon, cris_lat)

# compute viirs POS vector in ECEF
viirs_pos= np.zeros(np.append(viirs_lat.shape, 3))
viirs_pos[:, :, 0], viirs_pos[:, :, 1], viirs_pos[:, :, 2] = \
    geo.LLA2ECEF(viirs_lon, viirs_lat, np.zeros_like(viirs_lat))

# cris_los is pointing from pixel to satellite, we need to
# change from satellite to pixel
cris_los = -1.0*cris_los

# using Kd-tree to find the closted pixel of VIIRS for each CrIS FOV
# dy/dx index the matched VIIRS scan line / pixel for each CrIS FOV
dy, dx = geo.match_cris_viirs(cris_los, cris_pos, viirs_pos, viirs_sdrQa)
print("collocation are done in --- %s seconds --- for %d files " % (time.time() - start_time, len(cris_sdr_files)))

# collocation is done
##############################################################################
# showing the collocated images
#############################################################################
start_time = time.time()
# plotting imports are deferred until after the (expensive) collocation step
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.colors as colors
import matplotlib.cm as cmx

# cylindrical map covering the CrIS granule's lon/lat extent
m = Basemap(resolution='l', projection='cyl', \
            llcrnrlon=cris_lon.min(), llcrnrlat=cris_lat.min(),
            urcrnrlon=cris_lon.max(), urcrnrlat=cris_lat.max())
m.drawcoastlines()
m.drawcountries()
m.drawstates()

# meridians on bottom and left
parallels = np.arange(0.,81,10.)
m.drawparallels(parallels,labels=[False,True,True,False])
meridians = np.arange(10.,351.,20.)
m.drawmeridians(meridians,labels=[True,False,False,True])

# create color map (brightness temperature range 220-310 K)
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=220, vmax=310)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)

# show collocated pixels: scatter each matched VIIRS pixel colored by BT
for k, j, i in np.ndindex(cris_lat.shape):
    ix=dx[k,j,i]
    iy=dy[k,j,i]
    vcolorVal = np.squeeze(scalarMap.to_rgba(viirs_bt[iy, ix]))
    # project lon/lat to map coordinates via the Basemap instance
    vx, vy = m(viirs_lon[iy, ix], viirs_lat[iy, ix])
    cs1 = m.scatter(vx, vy, s=0.5, c=vcolorVal, edgecolor='none', cmap='jet', marker='.')

plt.savefig('myfig', dpi=600)
print("making plots is using --- %s seconds " % (time.time() - start_time))
"geo.read_viirs_sdr",
"matplotlib.pyplot.savefig",
"geo.read_cris_sdr",
"geo.match_cris_viirs",
"numpy.zeros_like",
"numpy.ndindex",
"numpy.append",
"matplotlib.cm.ScalarMappable",
"glob.glob",
"matplotlib.colors.Normalize",
"geo.RAE2ENU",
"matplotlib.pyplot.get_cmap",
"geo.ENU2ECEF",
"tim... | [((79, 90), 'time.time', 'time.time', ([], {}), '()\n', (88, 90), False, 'import time\n'), ((457, 492), 'geo.read_viirs_geo', 'geo.read_viirs_geo', (['viirs_geo_files'], {}), '(viirs_geo_files)\n', (475, 492), False, 'import geo\n'), ((528, 563), 'geo.read_viirs_sdr', 'geo.read_viirs_sdr', (['viirs_sdr_files'], {}), '(viirs_sdr_files)\n', (546, 563), False, 'import geo\n'), ((653, 686), 'geo.read_cris_geo', 'geo.read_cris_geo', (['cris_geo_files'], {}), '(cris_geo_files)\n', (670, 686), False, 'import geo\n'), ((766, 813), 'geo.read_cris_sdr', 'geo.read_cris_sdr', (['cris_sdr_files'], {'sdrFlag': '(True)'}), '(cris_sdr_files, sdrFlag=True)\n', (783, 813), False, 'import geo\n'), ((1121, 1180), 'geo.RAE2ENU', 'geo.RAE2ENU', (['cris_satAzimuth', 'cris_satZenith', 'cris_satRange'], {}), '(cris_satAzimuth, cris_satZenith, cris_satRange)\n', (1132, 1180), False, 'import geo\n'), ((1304, 1368), 'geo.ENU2ECEF', 'geo.ENU2ECEF', (['cris_east', 'cris_north', 'cris_up', 'cris_lon', 'cris_lat'], {}), '(cris_east, cris_north, cris_up, cris_lon, cris_lat)\n', (1316, 1368), False, 'import geo\n'), ((1783, 1847), 'geo.match_cris_viirs', 'geo.match_cris_viirs', (['cris_los', 'cris_pos', 'viirs_pos', 'viirs_sdrQa'], {}), '(cris_los, cris_pos, viirs_pos, viirs_sdrQa)\n', (1803, 1847), False, 'import geo\n'), ((2191, 2202), 'time.time', 'time.time', ([], {}), '()\n', (2200, 2202), False, 'import time\n'), ((2604, 2628), 'numpy.arange', 'np.arange', (['(0.0)', '(81)', '(10.0)'], {}), '(0.0, 81, 10.0)\n', (2613, 2628), True, 'import numpy as np\n'), ((2695, 2723), 'numpy.arange', 'np.arange', (['(10.0)', '(351.0)', '(20.0)'], {}), '(10.0, 351.0, 20.0)\n', (2704, 2723), True, 'import numpy as np\n'), ((2809, 2828), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (2821, 2828), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2875), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(220)', 'vmax': '(310)'}), '(vmin=220, 
vmax=310)\n', (2855, 2875), True, 'import matplotlib.colors as colors\n'), ((2888, 2928), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'jet'}), '(norm=cNorm, cmap=jet)\n', (2906, 2928), True, 'import matplotlib.cm as cmx\n'), ((2971, 2997), 'numpy.ndindex', 'np.ndindex', (['cris_lat.shape'], {}), '(cris_lat.shape)\n', (2981, 2997), True, 'import numpy as np\n'), ((3228, 3257), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""myfig"""'], {'dpi': '(600)'}), "('myfig', dpi=600)\n", (3239, 3257), True, 'import matplotlib.pyplot as plt\n'), ((153, 182), 'glob.glob', 'glob.glob', (["(dataDir + 'SCRIS*')"], {}), "(dataDir + 'SCRIS*')\n", (162, 182), False, 'import glob\n'), ((206, 235), 'glob.glob', 'glob.glob', (["(dataDir + 'GCRSO*')"], {}), "(dataDir + 'GCRSO*')\n", (215, 235), False, 'import glob\n'), ((280, 309), 'glob.glob', 'glob.glob', (["(dataDir + 'SVM15*')"], {}), "(dataDir + 'SVM15*')\n", (289, 309), False, 'import glob\n'), ((334, 363), 'glob.glob', 'glob.glob', (["(dataDir + 'GMODO*')"], {}), "(dataDir + 'GMODO*')\n", (343, 363), False, 'import glob\n'), ((891, 919), 'numpy.append', 'np.append', (['cris_lat.shape', '(3)'], {}), '(cris_lat.shape, 3)\n', (900, 919), True, 'import numpy as np\n'), ((1027, 1050), 'numpy.zeros_like', 'np.zeros_like', (['cris_lat'], {}), '(cris_lat)\n', (1040, 1050), True, 'import numpy as np\n'), ((1201, 1229), 'numpy.append', 'np.append', (['cris_lat.shape', '(3)'], {}), '(cris_lat.shape, 3)\n', (1210, 1229), True, 'import numpy as np\n'), ((1425, 1454), 'numpy.append', 'np.append', (['viirs_lat.shape', '(3)'], {}), '(viirs_lat.shape, 3)\n', (1434, 1454), True, 'import numpy as np\n'), ((1558, 1582), 'numpy.zeros_like', 'np.zeros_like', (['viirs_lat'], {}), '(viirs_lat)\n', (1571, 1582), True, 'import numpy as np\n'), ((3312, 3323), 'time.time', 'time.time', ([], {}), '()\n', (3321, 3323), False, 'import time\n'), ((1916, 1927), 'time.time', 'time.time', ([], {}), '()\n', (1925, 1927), 
False, 'import time\n')] |
"""Aggregate hosts data set."""
from censys.search import SearchClient
c = SearchClient()
# The aggregate method constructs a report using a query, an aggregation field, and the
# number of buckets to bin.
report = c.v2.hosts.aggregate(
"service.service_name: HTTP",
"services.port",
num_buckets=5,
)
print(report)
# {
# "total": 987342156,
# "total_omitted": 836949090,
# "potential_deviation": 3965103,
# "buckets": [
# {"key": "80", "count": 58727150},
# {"key": "443", "count": 46716751},
# {"key": "7547", "count": 19185117},
# {"key": "22", "count": 13276559},
# {"key": "30005", "count": 12487489},
# ],
# "query": "service.service_name: HTTP",
# "field": "services.port",
# }
# You can also specify whether to include virtual hosts in the report.
report = c.v2.hosts.aggregate(
"service.service_name: HTTP",
"services.port",
num_buckets=5,
virtual_hosts="INCLUDE",
)
print(report)
| [
"censys.search.SearchClient"
] | [((76, 90), 'censys.search.SearchClient', 'SearchClient', ([], {}), '()\n', (88, 90), False, 'from censys.search import SearchClient\n')] |
import aiohttp_jinja2
from aiohttp import web
from riego.db import get_db
from riego.web.security import raise_permission
router = web.RouteTableDef()
def setup_routes_events(app):
app.add_routes(router)
@router.get("/events", name='events')
@aiohttp_jinja2.template('events/index.html')
async def event_index(request):
await raise_permission(request, permission=None)
cursor = get_db().conn.cursor()
cursor.execute('''SELECT events.*,
valves.name,
valves.duration AS valves_duration
FROM events, valves
WHERE events.valve_id = valves.id
ORDER BY events.created_at DESC''')
items = cursor.fetchall()
get_db().conn.commit()
return {'items': items}
@router.get("/events/{item_id}/filter", name='events_item_filter')
@aiohttp_jinja2.template('events/index.html')
async def event_filter(request):
await raise_permission(request, permission=None)
item_id = request.match_info["item_id"]
cursor = get_db().conn.cursor()
cursor.execute('''SELECT events.*, valves.name
FROM events, valves
WHERE events.valve_id = valves.id
AND valves.id = ?
ORDER BY events.created_at DESC''', (item_id,))
items = cursor.fetchall()
get_db().conn.commit()
return {'items': items}
| [
"aiohttp_jinja2.template",
"riego.db.get_db",
"riego.web.security.raise_permission",
"aiohttp.web.RouteTableDef"
] | [((141, 160), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (158, 160), False, 'from aiohttp import web\n'), ((269, 313), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""events/index.html"""'], {}), "('events/index.html')\n", (292, 313), False, 'import aiohttp_jinja2\n'), ((863, 907), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""events/index.html"""'], {}), "('events/index.html')\n", (886, 907), False, 'import aiohttp_jinja2\n'), ((358, 400), 'riego.web.security.raise_permission', 'raise_permission', (['request'], {'permission': 'None'}), '(request, permission=None)\n', (374, 400), False, 'from riego.web.security import raise_permission\n'), ((953, 995), 'riego.web.security.raise_permission', 'raise_permission', (['request'], {'permission': 'None'}), '(request, permission=None)\n', (969, 995), False, 'from riego.web.security import raise_permission\n'), ((415, 423), 'riego.db.get_db', 'get_db', ([], {}), '()\n', (421, 423), False, 'from riego.db import get_db\n'), ((737, 745), 'riego.db.get_db', 'get_db', ([], {}), '()\n', (743, 745), False, 'from riego.db import get_db\n'), ((1055, 1063), 'riego.db.get_db', 'get_db', ([], {}), '()\n', (1061, 1063), False, 'from riego.db import get_db\n'), ((1354, 1362), 'riego.db.get_db', 'get_db', ([], {}), '()\n', (1360, 1362), False, 'from riego.db import get_db\n')] |
# coding: utf-8
from __future__ import division, print_function, absolute_import
import pkg_resources
__version__ = pkg_resources.get_distribution('pypcl').version
from pypcl.common import *
| [
"pkg_resources.get_distribution"
] | [((119, 158), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""pypcl"""'], {}), "('pypcl')\n", (149, 158), False, 'import pkg_resources\n')] |
import tkinter as tk
import tkinter.font as tkFont
class History(tk.Frame):
def __init__(self, master):
self.master=master
tk.Frame.__init__(self, master=master)
self.listOperations =[]
self.history = []
self.LEFT = 0
self.OP = 1
self.RIGHT = 2
self.EQUAL = 3
self.RES = 4
self.DEL = 5
self.createGrid()
def createGrid(self):
i = 0
while i < 6:
buttonList = list()
# create line
for j in range(0, 5):
operator = tk.Button(self, text="", state=tk.DISABLED, width=5, font=tkFont.Font(size=10))
operator.grid(row=i, column=j)
buttonList.append(operator)
operator = tk.Button(self, text="", state=tk.DISABLED, width=5, font=tkFont.Font(size=10),
fg="red", command=lambda c=i:self.deleteLine(c))
operator.grid(row=i, column=6)
buttonList.append(operator)
self.history.append(buttonList)
i+=1
def reset(self):
for line in self.history:
for i in range(0, 6):
line[i].config(text="", state=tk.DISABLED)
def unDo(self, row, column):
self.history[row][column].config(text="")
if (column == self.LEFT and row > 0) :
self.history[row-1][self.DEL].config(state=tk.NORMAL)
def addResult(self,right,result, row ):
self.history[row][self.RIGHT].config(text=right)
self.history[row][self.EQUAL].config(text="=")
self.history[row][self.RES].config(text=result)
self.history[row][self.DEL].config(text="x", state=tk.NORMAL)
def addLeftOperand(self, value, row):
self.history[row][self.LEFT].config(text=value)
if(row > 0):
self.history[row-1][self.DEL].config(state=tk.DISABLED)
def addOperator(self, operator, row):
self.history[row][self.OP].config(text=operator)
def deleteLine(self, row):
self.history[row][self.DEL].config(text="", state=tk.DISABLED)
for i in range(0, 5):
self.history[row][i].config(text="")
if(row > 0):
self.history[row-1][self.DEL].config(state=tk.NORMAL)
self.master.rollBack()
def disable(self):
for i in range(0,6):
self.history[i][self.DEL].config(state=tk.DISABLED)
| [
"tkinter.font.Font",
"tkinter.Frame.__init__"
] | [((144, 182), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self'], {'master': 'master'}), '(self, master=master)\n', (161, 182), True, 'import tkinter as tk\n'), ((831, 851), 'tkinter.font.Font', 'tkFont.Font', ([], {'size': '(10)'}), '(size=10)\n', (842, 851), True, 'import tkinter.font as tkFont\n'), ((637, 657), 'tkinter.font.Font', 'tkFont.Font', ([], {'size': '(10)'}), '(size=10)\n', (648, 657), True, 'import tkinter.font as tkFont\n')] |
"""
Contains the code necessary to extract a list of optimal compression values from a csv file containing
columns corresponding to {compression_type}_{level}, {variable}, {time}, and {DSSIM}
It would be best to open the csv file once, and get a list of all variables, levels, and timesteps
so I don't read the csv file more times than necessary. Seems like search_csv does most of what I need
already.
REQUIRES: daily/monthly_dssims.csv
"""
import csv
import re
import numpy as np
import os
import lcr_global_vars
import sys
import argparse
def search_csv(csvfilename: str, variable: str, timestep: int, compression:str):
"""
Searches csv file for an entry with the given variable in the first column
in the format .*_\d+_VARIABLE, and timestep given in the second column.
"""
match_rows = []
with open(csvfilename, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if len(row) == 0:
continue
if compression != "sz3":
m = re.search('(?P<compression>.*?)_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
time = row[1]
if(m is not None):
if (m.group("varname") == variable and str(timestep) == time and m.group("compression") == compression):
match_rows.append(row)
else:
m = re.compile(r'(?P<level>[510][^_]*)_(?P<varname>.*)').findall(row[0])
time = row[1]
if(len(m) > 0):
if (m[0][1] == variable and str(timestep) == time):
match_rows.append(row)
return match_rows
def optimal_level(csvfilename: str, variable: str, timestep: int, threshold: float, compression: str):
"""
Finds the optimal compression level in a csv file assuming the levels are in the first
column with the format .*_LEVEL_.* and the DSSIM/comparison values are in the third column.
"""
rows = search_csv(csvfilename, variable, timestep, compression)
if len(rows) == 0:
return 0
levels = []
# ensure unique variable/level/timeslice
rowids = []
for row in rows:
rowid = row[0] + row[1]
rowids.append(rowid)
rows = [rows[i] for i in np.unique(rowids, return_index=True)[1][::-1]]
# ensure list of levels is in descending order (i.e. least compressed first)
if compression not in ["sz1.4", "sz1ROn", "sz3"]:
for row in rows:
m = re.search('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
levels.append(int(m.group("level")))
sort_index = np.argsort(levels)
rows = [rows[i] for i in sort_index[::-1]]
levels = [levels[i] for i in sort_index[::-1]]
if compression in ["sz1.4", "sz1ROn", "sz3"]:
for row in rows:
m = re.search('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
levels.append(m.group("level"))
rows = rows[::-1]
levels = levels[::-1]
# compute optimal level based on dssim
i = 0
prev_lev = None
for row in rows:
dssim = float(row[2])
if dssim >= threshold:
prev_lev=levels[i]
i=i+1
continue
if dssim < threshold:
if prev_lev is not None:
best_lev = prev_lev
return best_lev
else:
return -1
return levels[len(levels)-1]
def optimal_level_multiple_comparison(csvfilename: str, variable: str, timestep: int,
dssim_threshold: float, ks_p_threshold: float,
spatial_err_threshold: float, max_spatial_err_threshold: float,
pcc_threshold: float, compression: str):
"""
Finds the optimal compression level in a csv file assuming the levels are in the first
column with the format .*_LEVEL_.* the DSSIM/comparison values are in the third column, fourth, ... columns.
"""
rows = search_csv(csvfilename, variable, timestep, compression)
if len(rows) == 0:
return 0
levels = []
# ensure unique variable/level/timeslice
rowids = []
for row in rows:
rowid = row[0] + row[1]
rowids.append(rowid)
rows = [rows[i] for i in np.unique(rowids, return_index=True)[1][::-1]]
# ensure list of levels is in descending order (i.e. least compressed first)
if compression not in ["sz1.4", "sz1ROn", "sz3"]:
for row in rows:
m = re.search('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
levels.append(int(m.group("level")))
sort_index = np.argsort(levels)
rows = [rows[i] for i in sort_index[::-1]]
levels = [levels[i] for i in sort_index[::-1]]
if compression in ["sz1.4", "sz1ROn", "sz3"]:
if compression == "sz3":
for row in rows:
m = re.compile(r'(?P<level>[510][^_]*)_(?P<varname>.*)').findall(row[0])
level = m[0][0]
levels.append(level)
rows = rows[::-1]
levels = levels[::-1]
else:
for row in rows:
m = re.search('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
levels.append(m.group("level"))
rows = rows[::-1]
levels = levels[::-1]
# compute optimal level based on dssim
i = 0
prev_lev = None
best_dssim_lev = -1
for row in rows:
dssim = float(row[2])
if dssim >= dssim_threshold:
prev_lev=levels[i]
i=i+1
continue
if dssim < dssim_threshold:
if prev_lev is not None:
best_dssim_lev = prev_lev
else:
best_dssim_lev = 100000
if best_dssim_lev == -1:
best_dssim_lev = prev_lev
i = 0
prev_lev = None
best_ks_p_lev = -1
for row in rows:
ks_p = float(row[3])
if ks_p >= ks_p_threshold:
prev_lev=levels[i]
i=i+1
continue
if ks_p < ks_p_threshold:
if prev_lev is not None:
best_ks_p_lev = prev_lev
else:
best_ks_p_lev = 100000
if best_ks_p_lev == -1:
best_ks_p_lev = prev_lev
i = 0
prev_lev = None
best_spatial_err_lev = -1
for row in rows:
spatial_err = 100-float(row[4])
if spatial_err >= spatial_err_threshold:
prev_lev = levels[i]
i = i + 1
continue
if spatial_err < spatial_err_threshold:
if prev_lev is not None:
best_spatial_err_lev = prev_lev
else:
best_spatial_err_lev = 100000
if best_spatial_err_lev == -1:
best_spatial_err_lev = prev_lev
i = 0
prev_lev = None
best_max_spatial_err_lev = -1
for row in rows:
max_spatial_err = 1-float(row[5])
if max_spatial_err >= max_spatial_err_threshold:
prev_lev = levels[i]
i = i + 1
continue
if max_spatial_err < max_spatial_err_threshold:
if prev_lev is not None:
best_max_spatial_err_lev = prev_lev
else:
best_max_spatial_err_lev = 100000
if best_max_spatial_err_lev == -1:
best_max_spatial_err_lev = prev_lev
i = 0
prev_lev = None
best_pcc_lev = -1
for row in rows:
pcc = float(row[6])
if pcc >= pcc_threshold:
prev_lev = levels[i]
i = i + 1
continue
if pcc < pcc_threshold:
if prev_lev is not None:
best_pcc_lev = prev_lev
else:
best_pcc_lev = 100000
if best_pcc_lev == -1:
best_pcc_lev = prev_lev
levs = [float(best_dssim_lev), float(best_ks_p_lev), float(best_spatial_err_lev), float(best_max_spatial_err_lev), float(best_pcc_lev)]
if compression == "sz3":
return levs, min(levs)
return levs, max(levs)
def optimal_level_max(csvfilename, variable, threshold, compression, freq, argv_var):
"""
Find the minimum of all the optimal compression levels for a specified variable
over all time slices.
"""
times = []
with open(csvfilename, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if compression != "sz3":
m = re.search('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
time = row[1]
if (m is not None):
if (m.group("varname") == variable):
times.append(time)
else:
m = re.compile(r'(?P<level>[510][^_]*)_(?P<varname>.*)').findall(row[0])
time = row[1]
if (len(m) > 0):
if (m[0][1] == variable):
times.append(time)
times = np.unique(times)
levs = []
for time in times:
#index, lev = optimal_level_multiple_comparison(f"../data/{freq}_dssims.csv", variable, time, threshold, 0.01, 100-10, 1-0.1, 0.9999, compression)
lev = optimal_level(f"../data/sz3/{argv_var}_calcs.csv", variable, time, threshold, compression)
levs.append(lev)
min_level = max(levs)
return min_level
def optimal_level_spread(csvfilename, variable, threshold, compression, freq, argv_var):
"""
Find the minimum of all the optimal compression levels for a specified variable
over all time slices.
"""
times = []
with open(csvfilename, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if len(row) == 0:
continue
if compression != "sz3":
m = re.search('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])
time = row[1]
if(m is not None):
if (m.group("varname") == variable):
times.append(time)
else:
m = re.compile(r'(?P<level>[510][^_]*)_(?P<varname>.*)').findall(row[0])
time = row[1]
if (len(m) > 0):
if (m[0][1] == variable):
times.append(time)
times = np.unique(times)
levs = []
all_levs = []
for time in times:
all_lev, lev = optimal_level_multiple_comparison(f"../data/sz3/{argv_var}_calcs.csv", variable, time, threshold, 0.05, 100-5, 1-0.05, 0.99999, compression)
#lev = optimal_level(f"/glade/scratch/apinard/sz3/{argv_var}_calcs.csv", variable, time, threshold, compression)
levs.append(lev)
all_levs.append(all_lev)
return all_levs, levs
def filesize(csvfilename, variable, level, compression):
with open(csvfilename, newline='') as csvfile:
reader = csv.reader(csvfile)
if compression == "sz3":
for row in reader:
if len(row) == 0:
return -1
if level == "orig" or level == 100000:
if row[0] == variable and row[1] == f"orig":
return row[2]
if row[0] == variable and row[1] == f"{compression}_ROn{level}":
return row[2]
else:
for row in reader:
if len(row) == 0:
return -1
if level == "orig" or level == 100000:
if row[0] == variable and row[1] == f"orig":
return row[2]
if row[0] == variable and row[1] == f"{compression}_{level}":
return row[2]
def create_daily_monthly_freq_hist():
for freq in ['daily', 'monthly']:
v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
for varname in v:
all_levs, level = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "bg", freq)
bg_levels=[2, 3, 4, 5, 6, 7]
hist = {}
for l in bg_levels:
hist[l] = level.count(l)
location = f"../data/test{freq}_bg_hist.csv"
file_exists = os.path.isfile(location)
with open(location, 'a', newline='') as csvfile:
fieldnames = [
'variable',
'frequency',
'2',
'3',
'4',
'5',
'6',
'7'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if not file_exists:
writer.writeheader()
writer.writerow(
{
'variable': varname,
'frequency': freq,
'2': hist[2],
'3': hist[3],
'4': hist[4],
'5': hist[5],
'6': hist[6],
'7': hist[7]
}
)
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--var", help="csv file to store output (if file exists, then data will append).",
type=str, default="./sample.csv")
args = parser.parse_args()
return args
def main_zfp(argv):
# Get command line stuff and store in a dictionary
args = parseArguments()
argv_var = args.var
print(f"current_var: {argv_var}")
for freq in ['daily']:
#v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
#for argv_var in v:
location = f"../data/2real_zfp_bg_sz_comp_slices.csv"
#location = f"../data/monthly_zfp_bg_sz_comp_slices.csv"
file_exists = os.path.isfile(location)
with open(location, 'a', newline='') as csvfile:
fieldnames = [
'variable',
'frequency',
'timestep',
#'bg_level',
#'bg_size',
#'bg_ratio',
#'zfp_level',
#'zfp_size',
#'zfp_ratio',
'sz_level',
'sz_size',
'sz_ratio',
#"all_bg_levs",
#"all_zfp_levs",
'all_sz_levs'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if not file_exists:
writer.writeheader()
# for varname in argv_var:
#print(f"current_var: {argv_var}")
#all_bg_levs, levelbg = optimal_level_spread(f"../data/daily_dssims.csv", argv_var, 0.9995, "bg", freq, argv_var)
#all_bg_levs, levelbg = optimal_level_spread(f"/glade/scratch/apinard/{argv_var}_calcs.csv", argv_var, 0.9995, "bg", freq, argv_var)
#print(f"level bg: {levelbg}")
#all_zfp_levs,
#levelzfp = optimal_level_spread(f"../data/monthly_dssims.csv", argv_var, 0.9995, "zfp5_p", freq, argv_var)
#levelsz = optimal_level_spread(f"../data/monthly_dssims.csv", argv_var, 0.9995, "sz3", freq, argv_var)
#all_zfp_levs, levelzfp = optimal_level_spread(f"/glade/scratch/apinard/{argv_var}_calcs.csv", argv_var, 0.9995, "zfp_p", freq, argv_var)
all_sz_levs, levelsz = optimal_level_spread(f"../data/sz3/{argv_var}_calcs.csv", argv_var, 0.9995, "sz3", freq, argv_var)
location = f"../data/2real_zfp_bg_sz_comp_slices.csv"
#location = f"../data/monthly_zfp_bg_sz_comp_slices.csv"
file_exists = os.path.isfile(location)
with open(location, 'a', newline='') as csvfile:
fieldnames = [
'variable',
'frequency',
'timestep',
#'bg_level',
#'bg_size',
#'bg_ratio',
#'zfp_level',
#'zfp_size',
#'zfp_ratio',
'sz_level',
'sz_size',
'sz_ratio',
#"all_bg_levs",
#"all_zfp_levs",
'all_sz_levs'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
sizecsv = f"../data/{freq}_filesizes.csv"
for i in range(0, 730):
print(f"{i}")
#fzfp = filesize(sizecsv, argv_var, levelzfp[i], "zfp5_p")
#fbg = filesize(sizecsv, argv_var, levelbg[i], "bg")
fsz = filesize(sizecsv, argv_var, levelsz[i], "sz3")
if fsz is not None:
sizesz = float(fsz)
#sizebg = float(fbg)
ratiosz = float(filesize(sizecsv, argv_var, "orig", "zfp5_p")) / float(fsz)
#ratiobg = float(filesize(sizecsv, argv_var, "orig", "bg")) / float(fbg)
writer.writerow(
{
'variable': argv_var,
'frequency': freq,
'timestep': i,
#'bg_level': levelbg[i],
#'bg_size': sizebg,
#'bg_ratio': ratiobg,
# 'zfp_level': levelzfp[i],
# 'zfp_size': sizezfp,
# 'zfp_ratio': ratiozfp,
#"all_bg_levs": all_bg_levs[i],
#"all_zfp_levs": all_zfp_levs[i],
'sz_level': levelsz[i],
'sz_size': sizesz,
'sz_ratio': ratiosz,
'all_sz_levs': all_sz_levs[i]
}
)
if __name__ == "__main__":
main_zfp(sys.argv[1:])
# if __name__ == "__main__":
# #daily_sizecsv = "../data/daily_filesizes.csv"
# # varname = "TS"
# # sz_level = optimal_level_max(f"../data/daily_dssims.csv", "TS", 0.9995, "sz1.4", "daily")
# # f = filesize(daily_sizecsv, varname, sz_level, "sz1.4")
# monthly_sizecsv = "../data/monthly_filesizes.csv"
# #daily_sizecsv = "../data/daily_filesizes.csv"
# for num in [0.95, 0.995, 0.9995]:
# for freq in ['monthly']:
# v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
# for varname in v:
# level = optimal_level_max(f"../data/{freq}_dssims.csv", varname, num, "bg", freq, varname)
# f = filesize(monthly_sizecsv, varname, level, "bg")
# if f is not None:
# size = float(f)
# ratio = float(filesize(monthly_sizecsv, varname, "orig", "bg"))/float(f)
# else:
# size = float(filesize(monthly_sizecsv, varname, level, "bg"))
# ratio = float(filesize(monthly_sizecsv, varname, "orig", "bg")) / float(filesize(monthly_sizecsv, varname, level, "bg"))
#
# zfp_level = optimal_level_max(f"../data/{freq}_dssims.csv", varname, num, "zfp5_p", freq, varname)
# if freq == "daily":
# f = filesize(daily_sizecsv, varname, zfp_level, "zfp5")
# elif freq == "monthly":
# f = filesize(monthly_sizecsv, varname, zfp_level, "zfp5")
# if f is not None:
# zfp_size = float(f)
# zfp_ratio = float(filesize(monthly_sizecsv, varname, "orig", "zfp5")) / float(f)
# else:
# zfp_size = float(filesize(monthly_sizecsv, varname, zfp_level, "zfp5"))
# zfp_ratio = float(filesize(monthly_sizecsv, varname, "orig", "zfp5")) / float(
# filesize(monthly_sizecsv, varname, zfp_level, "zfp5"))
#
# # sz_level = optimal_level_max(f"../data/test_set/{freq}_dssims.csv", varname, 0.9995, "sz1.4", freq)
# # f = filesize(daily_sizecsv, varname, sz_level, "sz1.4")
# # if f is not None:
# # sz_size = float(f)
# # sz_ratio = float(filesize(daily_sizecsv, varname, "orig", "sz1.4")) / float(f)
# # else:
# # sz_size = float(filesize(monthly_sizecsv, varname, sz_level, "sz1.4"))
# # sz_ratio = float(filesize(monthly_sizecsv, varname, "orig", "sz1.4")) / float(
# # filesize(monthly_sizecsv, varname, sz_level, "sz1.4"))
#
# location = f"../data/{freq}_zfp_bg_sz_comparison_test_{num}.csv"
# file_exists = os.path.isfile(location)
# with open(location, 'a', newline='') as csvfile:
# fieldnames = [
# 'variable',
# 'bg_level',
# 'bg_size',
# 'bg_ratio',
# 'zfp_level',
# 'zfp_size',
# 'zfp_ratio',
# #'sz_level',
# #'sz_size',
# #'sz_ratio'
# ]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#
# if not file_exists:
# writer.writeheader()
# writer.writerow(
# {
# 'variable': varname,
# 'bg_level': level,
# 'bg_size': size,
# 'bg_ratio' : ratio,
# 'zfp_level': zfp_level,
# 'zfp_size': zfp_size,
# 'zfp_ratio': zfp_ratio,
# #'sz_level': sz_level,
# #'sz_size': sz_size,
# #'sz_ratio': sz_ratio
# }
# )
# if __name__ == "__main__":
#
# for freq in ['daily', 'monthly']:
# v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
# for varname in v:
# level = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "sz1.4", freq)
# location = f"../data/{freq}_sz14_optimal_slices.csv"
# file_exists = os.path.isfile(location)
# with open(location, 'a', newline='') as csvfile:
# fieldnames = [
# 'variable',
# 'frequency',
# '0',
# '1',
# '2',
# '3',
# '4',
# '5',
# '6',
# '7',
# '8',
# '9',
# '10',
# '11',
# '12',
# '13',
# '14',
# '15',
# '16',
# '17',
# '18',
# '19',
# '20',
# '21',
# '22',
# '23',
# '24',
# '25',
# '26',
# '27',
# '28',
# '29',
# '30',
# '31',
# '32',
# '33',
# '34',
# '35',
# '36',
# '37',
# '38',
# '39',
# '40',
# '41',
# '42',
# '43',
# '44',
# '45',
# '46',
# '47',
# '48',
# '49',
# '50',
# '51',
# '52',
# '53',
# '54',
# '55',
# '56',
# '57',
# '58',
# '59'
# ]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#
# if not file_exists:
# writer.writeheader()
# writer.writerow(
# {
# 'variable': varname,
# 'frequency': freq,
# '0': level[0],
# '1': level[1],
# '2': level[2],
# '3': level[3],
# '4': level[4],
# '5': level[5],
# '6': level[6],
# '7': level[7],
# '8': level[8],
# '9': level[9],
# '10': level[10],
# '11': level[11],
# '12': level[12],
# '13': level[13],
# '14': level[14],
# '15': level[15],
# '16': level[16],
# '17': level[17],
# '18': level[18],
# '19': level[19],
# '20': level[20],
# '21': level[21],
# '22': level[22],
# '23': level[23],
# '24': level[24],
# '25': level[25],
# '26': level[26],
# '27': level[27],
# '28': level[28],
# '29': level[29],
# '30': level[30],
# '31': level[31],
# '32': level[32],
# '33': level[33],
# '34': level[34],
# '35': level[35],
# '36': level[36],
# '37': level[37],
# '38': level[38],
# '39': level[39],
# '40': level[40],
# '41': level[41],
# '42': level[42],
# '43': level[43],
# '44': level[44],
# '45': level[45],
# '46': level[46],
# '47': level[47],
# '48': level[48],
# '49': level[49],
# '50': level[50],
# '51': level[51],
# '52': level[52],
# '53': level[53],
# '54': level[54],
# '55': level[55],
# '56': level[56],
# '57': level[57],
# '58': level[58],
# '59': level[59],
# }
# )
# for freq in ['daily', 'monthly']:
# v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
# for varname in v:
# level = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "zfp_p", freq)
# bg_levels=[8, 10, 12, 14, 16, 18, 20, 22, 24]
# hist = {}
# for l in bg_levels:
# hist[l] = level.count(l)
# location = f"../data/{freq}_zfp_hist.csv"
# file_exists = os.path.isfile(location)
# with open(location, 'a', newline='') as csvfile:
# fieldnames = [
# 'variable',
# 'frequency',
# '8',
# '10',
# '12',
# '14',
# '16',
# '18',
# '20',
# '22',
# '24'
# ]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#
# if not file_exists:
# writer.writeheader()
# writer.writerow(
# {
# 'variable': varname,
# 'frequency': freq,
# '8': hist[8],
# '10': hist[10],
# '12': hist[12],
# '14': hist[14],
# '16': hist[16],
# '18': hist[18],
# '20': hist[20],
# '22': hist[22],
# '24': hist[24]
# }
# )
#
# for freq in ['daily', 'monthly']:
# v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
# for varname in v:
# level = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "sz1.4", freq)
# bg_levels=["1", "05", "01", "005", "001", "0005", "0001", "00005", "00001", "000005", "000001"]
# hist = {}
# for l in bg_levels:
# hist[l] = level.count(l)
# location = f"../data/{freq}_sz14_hist.csv"
# file_exists = os.path.isfile(location)
# with open(location, 'a', newline='') as csvfile:
# fieldnames = [
# 'variable',
# 'freq',
# '1',
# '05',
# '01',
# '005',
# '001',
# '0005',
# '0001',
# '00005',
# '00001',
# '000005',
# '000001'
# ]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#
# if not file_exists:
# writer.writeheader()
# writer.writerow(
# {
# 'variable': varname,
# 'freq': freq,
# '1': hist["1"],
# '05': hist["05"],
# '01': hist["01"],
# '005': hist["005"],
# '001': hist["001"],
# '0005': hist["0005"],
# '0001': hist["0001"],
# '00005': hist["00005"],
# '00001': hist["00001"],
# '000005': hist["000005"],
# '000001': hist["000001"]
# }
# )
# for freq in ['monthly', 'daily']:
# v = lcr_global_vars.varlist(f"../data/{freq}_dssims.csv")
#
# location = f"../data/{freq}_zfp_bg_sz_comp_slices.csv"
# file_exists = os.path.isfile(location)
# with open(location, 'a', newline='') as csvfile:
# fieldnames = [
# 'variable',
# 'frequency',
# 'timestep',
# 'bg_level',
# 'bg_size',
# 'bg_ratio',
# 'zfp_level',
# 'zfp_size',
# 'zfp_ratio',
# 'sz_level',
# 'sz_size',
# 'sz_ratio',
# 'sz1413_level',
# 'sz1413_size',
# 'zfp5_level',
# 'zfp5_size',
# 'zfp5_ratio'
# ]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# if not file_exists:
# writer.writeheader()
#
# for varname in v:
# levelsz = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "sz1.4", freq)
# levelsz1413 = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "sz1ROn", freq)
# levelbg = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "bg", freq)
# levelzfp = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "zfp_p", freq)
# levelzfp5 = optimal_level_spread(f"../data/{freq}_dssims.csv", varname, 0.9995, "zfp5_p", freq)
# location = f"../data/{freq}_zfp_bg_sz_comp_slices.csv"
# file_exists = os.path.isfile(location)
# with open(location, 'a', newline='') as csvfile:
# fieldnames = [
# 'variable',
# 'frequency',
# 'timestep',
# 'bg_level',
# 'bg_size',
# 'bg_ratio',
# 'zfp_level',
# 'zfp_size',
# 'zfp_ratio',
# 'sz_level',
# 'sz_size',
# 'sz_ratio',
# 'sz1413_level',
# 'sz1413_size',
# 'sz1413_ratio',
# 'zfp5_level',
# 'zfp5_size',
# 'zfp5_ratio'
# ]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# sizecsv = f"../data/{freq}_filesizes.csv"
#
# for i in range(0, 60):
# fsz = filesize(sizecsv, varname, levelsz[i], "sz1.4")
# fsz1413 = filesize(sizecsv, varname, levelsz1413[i], "sz1ROn")
# fzfp = filesize(sizecsv, varname, levelzfp[i], "zfp_p")
# fbg = filesize(sizecsv, varname, levelbg[i], "bg")
# fzfp5 = filesize(sizecsv, varname, levelzfp5[i], "zfp5")
# if fsz is not None:
# sizesz = float(fsz)
# sizesz1413 = float(fsz1413)
# sizezfp = float(fzfp)
# sizebg = float(fbg)
# sizezfp5 = float(fzfp5)
# ratiosz = float(filesize(sizecsv, varname, "orig", "sz1.4")) / float(fsz)
# ratiosz1413 = float(filesize(sizecsv, varname, "orig", "sz1ROn")) / float(fsz1413)
# ratiozfp = float(filesize(sizecsv, varname, "orig", "zfp_p")) / float(fzfp)
# ratiobg = float(filesize(sizecsv, varname, "orig", "bg")) / float(fbg)
# ratiozfp5 = float(filesize(sizecsv, varname, "orig", "zfp5")) / float(fzfp5)
# writer.writerow(
# {
# 'variable': varname,
# 'frequency': freq,
# 'timestep': i,
# 'bg_level': levelbg[i],
# 'bg_size': sizebg,
# 'bg_ratio': ratiobg,
# 'zfp_level': levelzfp[i],
# 'zfp_size': sizezfp,
# 'zfp_ratio': ratiozfp,
# 'sz_level': levelsz[i],
# 'sz_size': sizesz,
# 'sz_ratio': ratiosz,
# 'sz1413_level': levelsz1413[i],
# 'sz1413_size': sizesz1413,
# 'sz1413_ratio': ratiosz1413,
# 'zfp5_level': levelzfp5[i],
# 'zfp5_size': sizezfp5,
# 'zfp5_ratio': ratiozfp5,
# }
# ) | [
"csv.DictWriter",
"numpy.unique",
"argparse.ArgumentParser",
"re.compile",
"os.path.isfile",
"numpy.argsort",
"lcr_global_vars.varlist",
"csv.reader",
"re.search"
] | [((8948, 8964), 'numpy.unique', 'np.unique', (['times'], {}), '(times)\n', (8957, 8964), True, 'import numpy as np\n'), ((10290, 10306), 'numpy.unique', 'np.unique', (['times'], {}), '(times)\n', (10299, 10306), True, 'import numpy as np\n'), ((13106, 13131), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13129, 13131), False, 'import argparse\n'), ((887, 906), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (897, 906), False, 'import csv\n'), ((8347, 8366), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (8357, 8366), False, 'import csv\n'), ((9634, 9653), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (9644, 9653), False, 'import csv\n'), ((10862, 10881), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (10872, 10881), False, 'import csv\n'), ((11751, 11804), 'lcr_global_vars.varlist', 'lcr_global_vars.varlist', (['f"""../data/{freq}_dssims.csv"""'], {}), "(f'../data/{freq}_dssims.csv')\n", (11774, 11804), False, 'import lcr_global_vars\n'), ((13790, 13814), 'os.path.isfile', 'os.path.isfile', (['location'], {}), '(location)\n', (13804, 13814), False, 'import os\n'), ((15548, 15572), 'os.path.isfile', 'os.path.isfile', (['location'], {}), '(location)\n', (15562, 15572), False, 'import os\n'), ((2501, 2560), 're.search', 're.search', (['""".*?_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (2510, 2560), False, 'import re\n'), ((2635, 2653), 'numpy.argsort', 'np.argsort', (['levels'], {}), '(levels)\n', (2645, 2653), True, 'import numpy as np\n'), ((2851, 2910), 're.search', 're.search', (['""".*?_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (2860, 2910), False, 'import re\n'), ((4544, 4603), 're.search', 're.search', (['""".*?_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (4553, 4603), 
False, 'import re\n'), ((4678, 4696), 'numpy.argsort', 'np.argsort', (['levels'], {}), '(levels)\n', (4688, 4696), True, 'import numpy as np\n'), ((12160, 12184), 'os.path.isfile', 'os.path.isfile', (['location'], {}), '(location)\n', (12174, 12184), False, 'import os\n'), ((14372, 14418), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (14386, 14418), False, 'import csv\n'), ((16130, 16176), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (16144, 16176), False, 'import csv\n'), ((1047, 1123), 're.search', 're.search', (['"""(?P<compression>.*?)_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('(?P<compression>.*?)_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (1056, 1123), False, 'import re\n'), ((5200, 5259), 're.search', 're.search', (['""".*?_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (5209, 5259), False, 'import re\n'), ((8451, 8510), 're.search', 're.search', (['""".*?_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (8460, 8510), False, 'import re\n'), ((9793, 9852), 're.search', 're.search', (['""".*?_(?P<level>[0-9]+?)_(?P<varname>.*)"""', 'row[0]'], {}), "('.*?_(?P<level>[0-9]+?)_(?P<varname>.*)', row[0])\n", (9802, 9852), False, 'import re\n'), ((12534, 12580), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (12548, 12580), False, 'import csv\n'), ((2276, 2312), 'numpy.unique', 'np.unique', (['rowids'], {'return_index': '(True)'}), '(rowids, return_index=True)\n', (2285, 2312), True, 'import numpy as np\n'), ((4319, 4355), 'numpy.unique', 'np.unique', (['rowids'], {'return_index': '(True)'}), '(rowids, return_index=True)\n', (4328, 4355), True, 'import numpy as np\n'), ((1399, 1450), 're.compile', 
're.compile', (['"""(?P<level>[510][^_]*)_(?P<varname>.*)"""'], {}), "('(?P<level>[510][^_]*)_(?P<varname>.*)')\n", (1409, 1450), False, 'import re\n'), ((4935, 4986), 're.compile', 're.compile', (['"""(?P<level>[510][^_]*)_(?P<varname>.*)"""'], {}), "('(?P<level>[510][^_]*)_(?P<varname>.*)')\n", (4945, 4986), False, 'import re\n'), ((8715, 8766), 're.compile', 're.compile', (['"""(?P<level>[510][^_]*)_(?P<varname>.*)"""'], {}), "('(?P<level>[510][^_]*)_(?P<varname>.*)')\n", (8725, 8766), False, 'import re\n'), ((10056, 10107), 're.compile', 're.compile', (['"""(?P<level>[510][^_]*)_(?P<varname>.*)"""'], {}), "('(?P<level>[510][^_]*)_(?P<varname>.*)')\n", (10066, 10107), False, 'import re\n')] |
# -*- coding: utf-8 -*-
"""
===============================================================================
Horsager et al. (2009): Predicting temporal sensitivity
===============================================================================
This example shows how to use the
:py:class:`~pulse2percept.models.Horsager2009Model`.
The model introduced in [Horsager2009]_ assumes that electrical stimulation
leads to percepts that quickly increase in brightness (over the time course
of ~100ms) and then slowly fade away (over the time course of seconds).
The model was fit to perceptual sensitivity data for a number of different
pulse trains, which are available in the :py:mod:`~pulse2percept.datasets`
subpackage.
The dataset can be loaded as follows:
"""
# sphinx_gallery_thumbnail_number = 3
from pulse2percept.datasets import load_horsager2009
data = load_horsager2009()
data.shape
###############################################################################
# Single-pulse thresholds
# -----------------------
#
# Loading the data
# ^^^^^^^^^^^^^^^^
#
# The data includes a number of thresholds measured on single-pulse stimuli.
# We can load a subset of these data; for example, for subject S05 and
# Electrode C3:
single_pulse = load_horsager2009(subjects='S05', electrodes='C3',
stim_types='single_pulse')
single_pulse
###############################################################################
# Creating the stimulus
# ^^^^^^^^^^^^^^^^^^^^^
#
# To recreate Fig. 3 in the paper, where the model fit to single-pulse stimuli
# is shown, we first need to recreate the stimulus used in the figure.
#
# For example, we can create a stimulus from a single biphasic pulse
# (0.075 ms phase duration) with amplitude 180 uA, lasting 200 ms in total:
import numpy as np
from pulse2percept.stimuli import BiphasicPulse
phase_dur = 0.075
stim_dur = 200
pulse = BiphasicPulse(180, phase_dur, interphase_dur=phase_dur,
stim_dur=stim_dur, cathodic_first=True)
pulse.plot(time=np.linspace(0, 10, num=10000))
###############################################################################
# Simulating the model response
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The model's response to this stimulus can be visualized as follows:
from pulse2percept.models import Horsager2009Temporal
model = Horsager2009Temporal()
model.build()
percept = model.predict_percept(pulse, t_percept=np.arange(stim_dur))
max_bright = percept.data.max()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(pulse.time, -20 + 10 * pulse.data[0, :] / pulse.data.max(),
linewidth=3, label='pulse')
ax.plot(percept.time, percept.data[0, 0, :], linewidth=3, label='percept')
ax.plot([0, stim_dur], [max_bright, max_bright], 'k--', label='max brightness')
ax.plot([0, stim_dur], [0, 0], 'k')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Predicted brightness (a.u.)')
ax.set_xlim(0, stim_dur)
fig.legend(loc='center right')
fig.tight_layout()
###############################################################################
# Finding the threshold current
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Finally, we need to find the "threshold current" to ultimately reproduce
# Fig. 3.
# In the real world, the threshold current is defined as the stimulus amplitude
# needed to elicit a detectable phosphene (e.g.) 50% of the time.
# This threshold current typically differs for every stimulus, stimulated
# electrode, and patient.
#
# In the model, there is no notion of "seeing something 50% of the time".
# Instead, the model was assumed to reach threshold if the model response
# exceeded some constant :math:`\\theta` over time.
#
# The process of finding the stimulus amplitude needed to achieve model output
# :math:`\\theta` can be automated with the help of the
# :py:meth:`~pulse2percept.models.Horsager2009Temporal.find_threshold` method.
#
# We will run this method on every data point from the ones selected above:
amp_th = []
for _, row in single_pulse.iterrows():
# Set up a biphasic pulse with amplitude 1uA - the amplitude will be
# up-and-down regulated by find_threshold until the output matches
# theta:
stim = BiphasicPulse(1, row['pulse_dur'],
interphase_dur=row['interphase_dur'],
stim_dur=row['stim_dur'],
cathodic_first=True)
# Find the current that gives model output theta. Search amplitudes in the
# range [0, 300] uA. Stop the search once the candidate amplitudes are
# within 1 uA, or the model output is within 0.1 of theta:
amp_th.append(model.find_threshold(stim, row['theta'],
amp_range=(0, 300), amp_tol=1,
bright_tol=0.1))
plt.semilogx(single_pulse.pulse_dur, single_pulse.stim_amp, 's', label='data')
plt.semilogx(single_pulse.pulse_dur, amp_th, 'k-', linewidth=3, label='model')
plt.xticks([0.1, 1, 4])
plt.xlabel('pulse duration (ms)')
plt.ylabel('threshold current (uA)')
plt.legend()
plt.title('Fig. 3B: S05 (C3)')
###############################################################################
# Fixed-duration pulse train thresholds
# -------------------------------------
#
# The same procedure can be repeated for
# :py:class:`~pulse2percept.stimuli.BiphasicPulseTrain` stimuli to reproduce
# Fig. 4.
from pulse2percept.stimuli import BiphasicPulseTrain
# Load the data:
fixed_dur = data[(data.stim_type == 'fixed_duration') &
(data.subject == 'S05') &
(data.electrode == 'C3') &
(data.pulse_dur == 0.075)]
# Find the threshold:
amp_th = []
for _, row in fixed_dur.iterrows():
stim = BiphasicPulseTrain(row['stim_freq'], 1, row['pulse_dur'],
interphase_dur=row['interphase_dur'],
stim_dur=row['stim_dur'], cathodic_first=True)
amp_th.append(model.find_threshold(stim, row['theta'],
amp_range=(0, 300), amp_tol=1,
bright_tol=0.1))
plt.semilogx(fixed_dur.stim_freq, fixed_dur.stim_amp, 's', label='data')
plt.semilogx(fixed_dur.stim_freq, amp_th, 'k-', linewidth=3, label='model')
plt.xticks([5, 15, 75, 225])
plt.xlabel('frequency (Hz)')
plt.ylabel('threshold current (uA)')
plt.legend()
plt.title('Fig. 4B: S05 (C3), 0.075 ms pulse width')
###############################################################################
# Other stimuli
# -------------
#
# Bursting pulse triplets
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# "Bursting pulse triplets" as shown in Fig. 7 are readily supported via the
# :py:class:`~pulse2percept.stimuli.BiphasicTripletTrain` class.
#
# Variable-duration pulse trains
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# A "variable-duration" pulse train is essentially
# :py:class:`~pulse2percept.stimuli.BiphasicPulseTrain` cut to the length of
# N pulses.
#
# For example, the following recreates a pulse train used in Fig. 5B:
from pulse2percept.stimuli import BiphasicPulseTrain
n_pulses = 2
freq = 3
amp = 180
phase_dur = 0.075
pt = BiphasicPulseTrain(freq, amp, phase_dur, interphase_dur=phase_dur,
n_pulses=n_pulses, cathodic_first=True,
stim_dur=np.maximum(np.ceil(n_pulses * 1000.0 / freq),
200))
pt.plot()
###############################################################################
# Latent addition
# ---------------
#
# "Latent addition" stimuli only show up in the supplementary materials
# (see Fig. S2.2).
#
# They are pseudo-monophasic pulse pairs, where the anodic phases were
# presented 20 ms after the end of the second cathodic pulse.
#
# The initial cathodic pulse always has a fixed amplitude of 50% of the single
# pulse threshold:
from pulse2percept.stimuli import MonophasicPulse
# Phase duration:
phase_dur = 0.075
# Single-pulse threshold determines this current:
amp_th = 20
# Cathodic phase of the standard pulse::
cath_standard = MonophasicPulse(-0.5 * amp_th, phase_dur)
###############################################################################
# The delay between the start of the conditioning pulse and the start of the
# test pulse was varied systematically (between 0.15 and 12 ms).
# The amplitude of the second pulse was varied to determine thresholds.
# Delay was varied between 0.15 and 12 ms:
delay_dur = 12
# Vary this current to determine threshold:
amp_test = 45
# Cathodic phase of the test pulse (delivered after a delay):
cath_test = MonophasicPulse(-amp_test, phase_dur, delay_dur=delay_dur)
###############################################################################
# The anodic phase were always presented 20 ms after the second cathodic phase:
anod_standard = MonophasicPulse(0.5 * amp_th, phase_dur, delay_dur=20)
anod_test = MonophasicPulse(amp_test, phase_dur, delay_dur=delay_dur)
###############################################################################
# The last step is to concatenate all the pulses into a single stimulus:
from pulse2percept.stimuli import Stimulus
data = []
time = []
time_tracker = 0
for pulse in (cath_standard, cath_test, anod_standard, anod_test):
data.append(pulse.data)
time.append(pulse.time + time_tracker)
time_tracker += pulse.time[-1]
latent_add = Stimulus(np.concatenate(data, axis=1), time=np.concatenate(time))
latent_add.plot()
| [
"pulse2percept.stimuli.BiphasicPulseTrain",
"pulse2percept.stimuli.MonophasicPulse",
"numpy.ceil",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.semilogx",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"pulse2percept.models.Horsager2009Temporal",
"pulse2percept.stimuli.B... | [((862, 881), 'pulse2percept.datasets.load_horsager2009', 'load_horsager2009', ([], {}), '()\n', (879, 881), False, 'from pulse2percept.datasets import load_horsager2009\n'), ((1248, 1325), 'pulse2percept.datasets.load_horsager2009', 'load_horsager2009', ([], {'subjects': '"""S05"""', 'electrodes': '"""C3"""', 'stim_types': '"""single_pulse"""'}), "(subjects='S05', electrodes='C3', stim_types='single_pulse')\n", (1265, 1325), False, 'from pulse2percept.datasets import load_horsager2009\n'), ((1909, 2008), 'pulse2percept.stimuli.BiphasicPulse', 'BiphasicPulse', (['(180)', 'phase_dur'], {'interphase_dur': 'phase_dur', 'stim_dur': 'stim_dur', 'cathodic_first': '(True)'}), '(180, phase_dur, interphase_dur=phase_dur, stim_dur=stim_dur,\n cathodic_first=True)\n', (1922, 2008), False, 'from pulse2percept.stimuli import BiphasicPulse\n'), ((2354, 2376), 'pulse2percept.models.Horsager2009Temporal', 'Horsager2009Temporal', ([], {}), '()\n', (2374, 2376), False, 'from pulse2percept.models import Horsager2009Temporal\n'), ((2538, 2567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (2550, 2567), True, 'import matplotlib.pyplot as plt\n'), ((4814, 4892), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['single_pulse.pulse_dur', 'single_pulse.stim_amp', '"""s"""'], {'label': '"""data"""'}), "(single_pulse.pulse_dur, single_pulse.stim_amp, 's', label='data')\n", (4826, 4892), True, 'import matplotlib.pyplot as plt\n'), ((4893, 4971), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['single_pulse.pulse_dur', 'amp_th', '"""k-"""'], {'linewidth': '(3)', 'label': '"""model"""'}), "(single_pulse.pulse_dur, amp_th, 'k-', linewidth=3, label='model')\n", (4905, 4971), True, 'import matplotlib.pyplot as plt\n'), ((4972, 4995), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0.1, 1, 4]'], {}), '([0.1, 1, 4])\n', (4982, 4995), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5029), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""pulse duration (ms)"""'], {}), "('pulse duration (ms)')\n", (5006, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5066), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""threshold current (uA)"""'], {}), "('threshold current (uA)')\n", (5040, 5066), True, 'import matplotlib.pyplot as plt\n'), ((5067, 5079), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5077, 5079), True, 'import matplotlib.pyplot as plt\n'), ((5080, 5110), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 3B: S05 (C3)"""'], {}), "('Fig. 3B: S05 (C3)')\n", (5089, 5110), True, 'import matplotlib.pyplot as plt\n'), ((6132, 6204), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['fixed_dur.stim_freq', 'fixed_dur.stim_amp', '"""s"""'], {'label': '"""data"""'}), "(fixed_dur.stim_freq, fixed_dur.stim_amp, 's', label='data')\n", (6144, 6204), True, 'import matplotlib.pyplot as plt\n'), ((6205, 6280), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['fixed_dur.stim_freq', 'amp_th', '"""k-"""'], {'linewidth': '(3)', 'label': '"""model"""'}), "(fixed_dur.stim_freq, amp_th, 'k-', linewidth=3, label='model')\n", (6217, 6280), True, 'import matplotlib.pyplot as plt\n'), ((6281, 6309), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[5, 15, 75, 225]'], {}), '([5, 15, 75, 225])\n', (6291, 6309), True, 'import matplotlib.pyplot as plt\n'), ((6310, 6338), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency (Hz)"""'], {}), "('frequency (Hz)')\n", (6320, 6338), True, 'import matplotlib.pyplot as plt\n'), ((6339, 6375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""threshold current (uA)"""'], {}), "('threshold current (uA)')\n", (6349, 6375), True, 'import matplotlib.pyplot as plt\n'), ((6376, 6388), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6386, 6388), True, 'import matplotlib.pyplot as plt\n'), ((6389, 6441), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig. 4B: S05 (C3), 0.075 ms pulse width"""'], {}), "('Fig. 
4B: S05 (C3), 0.075 ms pulse width')\n", (6398, 6441), True, 'import matplotlib.pyplot as plt\n'), ((8070, 8111), 'pulse2percept.stimuli.MonophasicPulse', 'MonophasicPulse', (['(-0.5 * amp_th)', 'phase_dur'], {}), '(-0.5 * amp_th, phase_dur)\n', (8085, 8111), False, 'from pulse2percept.stimuli import MonophasicPulse\n'), ((8600, 8658), 'pulse2percept.stimuli.MonophasicPulse', 'MonophasicPulse', (['(-amp_test)', 'phase_dur'], {'delay_dur': 'delay_dur'}), '(-amp_test, phase_dur, delay_dur=delay_dur)\n', (8615, 8658), False, 'from pulse2percept.stimuli import MonophasicPulse\n'), ((8837, 8891), 'pulse2percept.stimuli.MonophasicPulse', 'MonophasicPulse', (['(0.5 * amp_th)', 'phase_dur'], {'delay_dur': '(20)'}), '(0.5 * amp_th, phase_dur, delay_dur=20)\n', (8852, 8891), False, 'from pulse2percept.stimuli import MonophasicPulse\n'), ((8905, 8962), 'pulse2percept.stimuli.MonophasicPulse', 'MonophasicPulse', (['amp_test', 'phase_dur'], {'delay_dur': 'delay_dur'}), '(amp_test, phase_dur, delay_dur=delay_dur)\n', (8920, 8962), False, 'from pulse2percept.stimuli import MonophasicPulse\n'), ((4216, 4339), 'pulse2percept.stimuli.BiphasicPulse', 'BiphasicPulse', (['(1)', "row['pulse_dur']"], {'interphase_dur': "row['interphase_dur']", 'stim_dur': "row['stim_dur']", 'cathodic_first': '(True)'}), "(1, row['pulse_dur'], interphase_dur=row['interphase_dur'],\n stim_dur=row['stim_dur'], cathodic_first=True)\n", (4229, 4339), False, 'from pulse2percept.stimuli import BiphasicPulse\n'), ((5743, 5890), 'pulse2percept.stimuli.BiphasicPulseTrain', 'BiphasicPulseTrain', (["row['stim_freq']", '(1)', "row['pulse_dur']"], {'interphase_dur': "row['interphase_dur']", 'stim_dur': "row['stim_dur']", 'cathodic_first': '(True)'}), "(row['stim_freq'], 1, row['pulse_dur'], interphase_dur=\n row['interphase_dur'], stim_dur=row['stim_dur'], cathodic_first=True)\n", (5761, 5890), False, 'from pulse2percept.stimuli import BiphasicPulseTrain\n'), ((9395, 9423), 'numpy.concatenate', 'np.concatenate', 
(['data'], {'axis': '(1)'}), '(data, axis=1)\n', (9409, 9423), True, 'import numpy as np\n'), ((2043, 2072), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(10000)'}), '(0, 10, num=10000)\n', (2054, 2072), True, 'import numpy as np\n'), ((2441, 2460), 'numpy.arange', 'np.arange', (['stim_dur'], {}), '(stim_dur)\n', (2450, 2460), True, 'import numpy as np\n'), ((9430, 9450), 'numpy.concatenate', 'np.concatenate', (['time'], {}), '(time)\n', (9444, 9450), True, 'import numpy as np\n'), ((7321, 7354), 'numpy.ceil', 'np.ceil', (['(n_pulses * 1000.0 / freq)'], {}), '(n_pulses * 1000.0 / freq)\n', (7328, 7354), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import mysql.connector
from mysql.connector import Error
from mysql.connector import errorcode
from os import environ
import json
DB_HOST = environ.get('DB_HOST')
DB_NAME = environ.get('DB_NAME')
DB_USER = environ.get('DB_USER')
DB_PASSWORD = environ.get('DB_PASSWORD')
if DB_PASSWORD is not None:
print('###################################')
print('These are the environment variables: DB_HOST='+DB_HOST+', DB_NAME='+DB_NAME+', DB_USER='+DB_USER+', DB_PASSWORD='+DB_PASSWORD)
print('###################################')
else:
print('###################################')
print('No environment variable appeared!')
print('###################################')
def add_people_count(request_data):
#print('Add people count called!')
#print('Request' + str(request_data))
insert_query = """INSERT INTO PeopleCount (value, collector_id, timestamp) VALUES (%s, %s, %s)"""
val = (request_data['value'],request_data['collector_id'],str(request_data['timestamp']))
run_insert_query(insert_query, val, 'PeopleCount')
return run_insert_query(insert_query, val, 'PeopleCount')
def add_people_recognized(request_data):
name_ids = []
print('Request data (json): '+str(request_data['value']))
for name in request_data['value']:
#print(name)
name_ids.append(add_people(name))
recog_id = add_recognized(request_data)
res = []
for name_id in name_ids:
insert_query = """INSERT IGNORE INTO PeopleRecognized (id_recognized, id_people) VALUES (%s, %s)"""
val = (recog_id, name_id)
res.append(run_insert_query(insert_query, val, 'PeopleRecognized'))
return json.dumps(res)
def add_people(name):
insert_query = """INSERT IGNORE INTO People (name) VALUES (%s)"""
val = (name,)
return run_insert_query(insert_query, val, 'People')[1] #returns id
def add_recognized(request_data):
insert_query = """INSERT INTO Recognized (collector_id, timestamp) VALUES (%s, %s)"""
val = (request_data['collector_id'],str(request_data['timestamp']))
return run_insert_query(insert_query, val, 'Recognized')[1] #returns id
def get_database_connection():
return mysql.connector.connect(host=DB_HOST, database=DB_NAME, user=DB_USER, password=DB_PASSWORD)
def run_insert_query(query, values, table_name):
connection = get_database_connection()
res = ''
id = None
try:
cursor = connection.cursor()
cursor.execute(query, values)
connection.commit()
id = cursor.lastrowid
if id is not None:
res += 'Record with id('+str(id)+') inserted successfully into '+table_name+' table'
else:
res += str(cursor.rowcount) + ' Record inserted successfully into '+table_name+' table'
#print(res)
cursor.close()
except mysql.connector.Error as error:
res += "Failed to insert record into table {}".format(error)
print(res)
finally:
if connection.is_connected():
connection.close()
return (res,id)
request_data_pc = dict()
request_data_pr = dict()
for i in range(10000):
request_data_pc['value'] = i
request_data_pc['collector_id'] = 'iot_dev_id_'+str(i)
request_data_pc['timestamp'] = i
add_people_count(request_data_pc)
request_data_pr['value'] = ['andrey','eduardo','fabio']
request_data_pr['collector_id'] = 'iot_dev_id_'+str(i)
request_data_pr['timestamp'] = i
add_people_recognized(request_data_pr)
print("Entry "+str(i)+" inserted!")
| [
"json.dumps",
"os.environ.get"
] | [((160, 182), 'os.environ.get', 'environ.get', (['"""DB_HOST"""'], {}), "('DB_HOST')\n", (171, 182), False, 'from os import environ\n'), ((193, 215), 'os.environ.get', 'environ.get', (['"""DB_NAME"""'], {}), "('DB_NAME')\n", (204, 215), False, 'from os import environ\n'), ((226, 248), 'os.environ.get', 'environ.get', (['"""DB_USER"""'], {}), "('DB_USER')\n", (237, 248), False, 'from os import environ\n'), ((263, 289), 'os.environ.get', 'environ.get', (['"""DB_PASSWORD"""'], {}), "('DB_PASSWORD')\n", (274, 289), False, 'from os import environ\n'), ((1708, 1723), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (1718, 1723), False, 'import json\n')] |
from __future__ import print_function
import argparse
import glob
import io
import os
import subprocess as sp
import sys
from collections import defaultdict
from itertools import chain
import networkx as nx
import requests
import yaml
from conda_build import api
# ---------------------------------------------------------------------------------------------------------------------------------
## Global Variables
# ---------------------------------------------------------------------------------------------------------------------------------
REPODATA_URL = "https://conda.anaconda.org/{channel}/{subdir}/repodata.json"
REPODATA_LABELED_URL = "https://conda.anaconda.org/{channel}/label/{label}/{subdir}/repodata.json"
REPODATA_DEFAULTS_URL = "https://repo.anaconda.com/pkgs/main/{subdir}/repodata.json"
# ---------------------------------------------------------------------------------------------------------------------------------
## Argument Parser
# ---------------------------------------------------------------------------------------------------------------------------------
def arguments():
p = argparse.ArgumentParser(
description="Identify and build all ggd recipes that are not currently in any ggd conda channel"
)
req = p.add_argument_group("Required Arguments")
opt = p.add_argument_group("Optional Arguments")
req.add_argument(
"--recipe-dir",
metavar="Base Recipe Directory",
required=True,
help="(Required) The base recipe directory to start walking through to identify any ggd recipes. For example, the main 'recipes' folder in the ggd-recipes repo",
)
req.add_argument(
"--config-file",
metavar="Configuration File",
required=True,
help="(Required) Path to the configureation yaml file. This file should contain relevant information such as specific channels",
)
opt.add_argument(
"--packages",
metavar="Specific Packages to build",
nargs="*",
help="(Optional) A single or space seperated list of packages to build. Only these packages will be checked and potentially built",
)
opt.add_argument(
"--blacklist",
metavar="Black listed recipes",
help="(Optional) A file with recipes that are blacklisted. That is, recipes to skip build for. This file should contain a single recipe name per line. # comment lines will be ignored.",
)
opt.add_argument(
"--debug",
action="store_true",
help="(Optional) Whther or not to print the debug output from conad build to the screen.",
)
opt.add_argument(
"--force-build",
action="store_true",
help="(Optional) Whether or not to force all recipes being checked to be built or not. (Default = False).",
)
return p.parse_args()
# ---------------------------------------------------------------------------------------------------------------------------------
## Functions/methods
# ---------------------------------------------------------------------------------------------------------------------------------
def parse_backlist_file(file_path):
"""
parse_backlist_file
===================
Method to parse a file that is provided that represents recipes that should be blacklisted.
Any lines that start with a '#' or any empty lines will be skipped. This method will get all
other lines, and treat any recipes that match the recipes being checked as a blacklisted recipe.
Parameters:
-----------
1) file_path: (str) The file path to the blacklist file
Returns:
++++++++
1) A generator of blacklisted items
"""
assert os.path.isfile(
file_path
), ":ggd:build recipes: !!ERROR!! This blacklist file provided is not a file: '{}' please provide a correct file or remove the --backlist flag".foramt(
file_path
)
try:
with io.open(file_path, "rt", encoding="utf-8") as blist:
for line in blist:
if line[0] != "#" and line.strip():
yield os.path.basename(str(line.strip()))
except IOError as e:
print(
":ggd:build recipes: !!ERROR!! A problem occured trying to read the blacklist file. Fix the error and try again"
)
print(str(e))
sys.exit(1)
def get_all_recipes(base_recipe_folder, packages="*", exclude_recipes=None):
    """
    get_all_recipes
    ===============
    Walk a base recipe directory and yield every directory containing a
    "meta.yaml" file (i.e. every ggd recipe), optionally filtered to specific
    package names and/or excluding blacklisted recipes.

    Parameters:
    -----------
    1) base_recipe_folder: (str) The directory path to the base recipe folder to search for recipes in
    2) packages: (str or iterable) A single package name, a collection of package names,
       or "*" for all packages (Default = "*")
    3) exclude_recipes: (set) Recipe names to skip (blacklist). (Default = None)

    Yields:
    +++++++
    1) (str) The directory path of each matching recipe
    """
    ## Avoid a mutable default argument
    if exclude_recipes is None:
        exclude_recipes = set()
    ## Only filter when the caller asked for specific packages
    filter_packages = packages != "*"
    exclude = bool(exclude_recipes)
    ## A single package name given as a string becomes a one-element set.
    ## BUG FIX: set(packages) on a string splits the name into characters,
    ## which silently broke filtering by a single package name.
    if isinstance(packages, str):
        packages = {packages}
    print(
        ":ggd:build recipes: Getting recipe(s) from '{recipe_dir}'. Recipes filtered by: '{p}'".format(
            recipe_dir=base_recipe_folder, p=", ".join(list(packages))
        )
    )
    ## Walk each top-level entry in the base folder
    for recipe_dir in glob.glob(os.path.join(base_recipe_folder, "*")):
        ## Identify all recipes with a meta.yaml file
        for dir_path, dir_names, file_names in os.walk(recipe_dir):
            ## A directory with a meta.yaml file is a recipe
            if "meta.yaml" in file_names:
                ## Exclude any blacklisted recipes
                if exclude and os.path.basename(dir_path) in exclude_recipes:
                    continue
                ## Apply the package filter (if any)
                if filter_packages:
                    if os.path.basename(dir_path) in packages:
                        yield dir_path
                else:
                    yield dir_path
def load_config(config_file):
    """
    load_config
    ===========
    Read the yaml build-configuration file and return the channels it declares.

    Parameters:
    ----------
    1) config_file: (str) The file path to the config file for building recipes

    Returns:
    ++++++++
    1) The "channels" entry of the config file
    """
    try:
        contents = yaml.safe_load(io.open(config_file, "rt", encoding="utf-8"))
    except IOError as e:
        print(
            ":ggd:build recipes: !!ERROR!! A problem occured trying to read the config file. Fix the error and try again"
        )
        print(str(e))
        sys.exit(1)
    return contents["channels"]
def build_recipes(
    recipe_list, check_channels, force=False, debug=False, n_workers=1, worker_offset=0
):
    """
    build_recipes
    =============
    Controller method used to prepare, check, build, and process each recipe in the recipe list. It
    builds a DAG with nodes as recipes/dependencies and edges connecting recipes to dependencies,
    removes any cyclic nodes that depend on each other, identifies new or updated recipes that need
    to be built, and builds them.

    Parameters:
    -----------
    1) recipe_list: (str) A list of recipes to check (The directory path of the recipe)
    2) check_channels: (list) A list of channels to check against
    3) force: (bool) Whether or not to force the recipe to be built even if the same version and build exists in a channel being checked against (Default = False)
    4) debug: (bool) Whether or not to run 'conda build' in the debug phase. (Default = False)
    5) n_workers: (int) The number of workers to use to create subdags. (Default = 1)
    6) worker_offset: (int) The number to use to offset the n_workers used for subdag creation. (Default = 0)

    Return:
    +++++++
    1) True if all recipes are checked and there are no errors. False otherwise
    """
    if not recipe_list:
        print(":ggd:build recipes: Nothing to be done")
        return True
    ## create a dag
    dag, name2recipe, recipe2name = build_recipe_dag(recipe_list)
    if not dag:
        print(":ggd:build recipes: Nothing to be done")
        return True
    ## Remove cyclic dependencies in the build job
    ### If current build jobs depend on each other, can't build them
    skip_dependent = defaultdict(list)
    dag = remove_dag_cycles(dag, name2recipe, skip_dependent)
    ## Create subdag workers
    subdag = get_subdags(dag, n_workers, worker_offset)
    if not subdag:
        print(":ggd:build recipes: Nothing to be done")
        return True
    print(
        ":ggd:build recipes: {} recipes to build and test: \n{}".format(
            len(subdag), "\n".join(subdag.nodes())
        )
    )
    ## Filter recipes
    # Topological order ensures dependencies are built before their dependents
    filtered_recipes = [
        (recipe, recipe2name[recipe])
        for package in nx.topological_sort(subdag)
        for recipe in name2recipe[package]
    ]
    ## Get the Repodata for each channel
    repodata_by_channel, actualname_to_idname = get_repodata(check_channels)
    ## Remove defaults channel for now
    # NOTE(review): list.remove mutates the caller's check_channels list
    if "defaults" in check_channels:
        check_channels.remove("defaults")
    ## Check each recipe
    built_recipes = []
    skipped_recipes = []
    failed_recipes = []
    for recipe, name in filtered_recipes:
        ## Check if the recipe is being skipped
        if name in skip_dependent:
            print(
                (
                    ":ggd:build recipes: SKIPPING BUILD: skipping {} because it depends on {} "
                    " which failed build"
                ).format(recipe, skip_dependent[name])
            )
            skipped_recipes.append(recipe)
            continue
        print(":ggd:build recipes: Determining expected packages for {}".format(recipe))
        ## Check a recipe to see if it is any other channel repodata and if it is if it's version/build is greater then what is in the repo data
        predicted_path = check_recipe_for_build(
            recipe,
            check_channels,
            repodata_by_channel,
            actualname_to_idname,
            force=force,
        )
        ## if no predicted path, skip building this recipe
        if not predicted_path:
            print(
                ":ggd:build recipes: Nothing to be done for recipe '{}'".format(recipe)
            )
            continue
        ## Build the recipe
        success = conda_build_recipe(recipe, check_channels, predicted_path, debug)
        ## Check for a successful recipe build
        if success:
            built_recipes.append(recipe)
            print(
                ":ggd:build recipes: Package recipe located at {}".format(
                    ",".join(predicted_path)
                )
            )
        else:
            failed_recipes.append(recipe)
            # Everything downstream of a failed recipe must be skipped later
            for pkg in nx.algorithms.descendants(subdag, name):
                skip_dependent[pkg].append(recipe)
    ## Check for failed recipes
    if failed_recipes:
        print(
            (
                ":ggd:build recipes: BUILD SUMMARY: of {} recipes, "
                "{} failed and {} were skipped. "
            ).format(len(filtered_recipes), len(failed_recipes), len(skipped_recipes))
        )
        if built_recipes:
            print(
                (
                    ":ggd:build recipes: BUILD SUMMARY: Although "
                    "the build process failed, there were {} recipes "
                    "built successfully."
                ).format(len(built_recipes))
            )
        for frecipe in failed_recipes:
            print(":ggd:build recipes: BUILD SUMMARY: FAILED recipe {}".format(frecipe))
        ## Purge the builds
        sp.check_call(["conda", "build", "purge"], stderr=sys.stderr, stdout=sys.stdout)
        return False
    ## IF not failed recipes, prompt for a successful build
    print(
        ":ggd:build recipes: BUILD SUMMARY: SUCCESSFULLY BUILT {} of {} recipes".format(
            len(built_recipes), len(filtered_recipes)
        )
    )
    return True
def conda_build_recipe(recipe, channels, predicted_path, debug=False):
    """
    conda_build_recipe
    ==================
    Build a single recipe with `conda build` and confirm that the expected
    tarball file(s) exist afterwards.

    Parameters:
    -----------
    1) recipe: (str) The directory path to the recipe to build
    2) channels: (list) A list of conda channels
    3) predicted_path: (str) The file path(s) to the predicted tarball once the recipe is built
    4) debug: (bool) Whether or not to run `conda build` in debug mode. (Default = False)

    Return:
    +++++++
    1) True if the recipe is successfully built, False otherwise
    """
    print(":ggd:build recipe: BUILD STARTED for {}".format(recipe))
    ## Base conda-build arguments
    build_args = ["--override-channels", "--no-anaconda-upload"]
    ## Channel arguments (the local channel is always included)
    for chan in channels + ["local"]:
        build_args.extend(["-c", chan])
    ## Pull exclusive/variant config files from the conda build config
    config = load_conda_build_config()
    for cfg_path in config.exclusive_config_files or []:
        if cfg_path:
            build_args.extend(["-e", cfg_path])
    for cfg_path in config.variant_config_files or []:
        if cfg_path:
            build_args.extend(["-m", cfg_path])
    ## Assemble the full command
    meta_path = os.path.join(recipe, "meta.yaml")
    cmd = ["conda", "build"]
    if debug:
        cmd.append("--debug")
    cmd += build_args + [meta_path]
    ## Run conda build
    try:
        sp.check_call(cmd, stderr=sys.stderr, stdout=sys.stdout)
    except Exception as e:
        print(":ggd:build recipes: Build failed for {}".format(recipe))
        print(str(e))
        return False
    ## Make sure each predicted tarball now exists as a file
    for ppath in predicted_path:
        if not os.path.isfile(ppath):
            print(
                ":ggd:build recipes: !!ERROR!! The predicted tarfile does not exists after building the recipe. The build failed"
            )
            return False
    print(":ggd:build recipes: BUILD SUCCESS: Successfully built {}".format(recipe))
    return True
def get_repodata(check_channels):
    """
    get_repodata
    ============
    Method to get the conda repodata for a list of conda channels.

    Parameters:
    -----------
    1) check_channels: (list) A list of channels to check and get repodata for

    Returns:
    ++++++++
    1) A dictionary with keys as channels and values as the repodata for that channel starting at the "packages" key.
    2) A dict mapping channel -> package name -> set of package tarball ids in that channel's repodata.
    """
    print(":ggd:build recipes: Loading repodata for each channel from the config file")
    ## Load the repodata for each channel
    repodata_by_channel = dict()
    name2tar = defaultdict(lambda: defaultdict(set))
    ## Check each channel
    for channel in check_channels:
        ## No repodata for default (local) channel
        if channel == "defaults":
            continue
        ## NOTE: Hardset to noarch right now. This might need to change in the future
        repodata_url = REPODATA_URL.format(channel=channel, subdir="noarch")
        ## Get the repodata from the anaconda url
        try:
            repodata_json = requests.get(repodata_url).json()
        except ValueError as e:
            # ValueError covers a response body that is not valid JSON
            print(
                ":ggd:build recipes: !!ERROR!! A problem occured loading the repodata for the conda channel: '{}'".format(
                    channel
                )
            )
            print(str(e))
            sys.exit(1)
        ## Add to dict
        repodata_by_channel[channel] = repodata_json["packages"]
        ## Index package tarball ids by their package name for quick lookups
        for tar, pkg in repodata_json["packages"].items():
            name = pkg["name"]
            name2tar[channel][name].add(tar)
    return (repodata_by_channel, name2tar)
def load_conda_build_config(platform=None, trim_skip=True):
    """
    load_conda_build_config
    =======================
    Load conda build config while considering global pinnings from conda-forge.

    Parameters:
    -----------
    1) platform: (str) The platform to use. Example: noarch, linux-64, etc. (Default = None)
    2) trim_skip: (bool) What to set conda build config trim skip to. (Default = True)

    Return:
    ++++++
    1) The conda build config object
    """
    config = api.Config(no_download_source=True, set_build_id=False)
    ## Hardset to the bioconda_utils-conda_build_config.yaml file in the .circleci dir
    ### Will need to change this later
    # NOTE(review): this relies on the current working directory being the
    # "ggd-recipes" repo root -- confirm callers always run from there
    if os.path.basename(os.getcwd()) == "ggd-recipes":
        config.exclusive_config_files = [
            os.path.join(
                os.getcwd(), "bioconda_utils-conda_build_config.yaml"
            )
        ]
    else:
        config.exclusive_config_files = []
    # Every referenced config file must actually exist
    for cfg in chain(config.exclusive_config_files, config.variant_config_files or []):
        assert os.path.exists(cfg), "error: {0} does not exist".format(cfg)
    if platform:
        config.platform = platform
    config.trim_skip = trim_skip
    return config
def load_all_meta(recipe, config=None, finalize=True):
    """
    load_all_meta
    =============
    Render the recipe's meta.yaml for each environment and return the metadata.

    Parameters
    ----------
    1) recipe: (str) The directory path to the recipe
    2) config: (str) The config file. (Default = None)
    3) finalize: (bool) If True, do a full conda-build render, determining exact
       package builds of build/host dependencies. That involves costly dependency
       resolution via conda and downloads of those packages (to inspect possible
       run_exports). For fast-running tasks like linting, set to False.

    Returns:
    ++++++++
    1) A list of metadata for each matching recipe
    """
    rendered = api.render(
        recipe, config=config, finalize=finalize, bypass_env_check=not finalize,
    )
    return [meta for meta, _, _ in rendered]
def load_platform_metas(recipe, finalize=True):
    """
    load_platform_metas
    ===================
    Load conda build config metadata based on the current platform.

    Parameters:
    -----------
    1) recipe: (str) The directory path to the recipe
    2) finalize: (bool) Passed through to load_all_meta(). (Default = True)

    Return:
    +++++++
    1) The current system platform
    2) The platform-specific metadata from load_all_meta()
    """
    # $OSTYPE (set by the shell) takes precedence over sys.platform
    os_type = os.environ.get("OSTYPE", sys.platform)
    if os_type.startswith("darwin"):
        os_type = "osx"
    elif os_type == "linux-gnu":
        os_type = "linux"
    cfg = load_conda_build_config(platform=os_type)
    return (os_type, load_all_meta(recipe, config=cfg, finalize=finalize))
def check_if_recipe_skippable(recipe, channels, repodata_dict, actualname_to_idname):
    """
    check_if_recipe_skippable
    =========================
    Method used to check if a recipe should be skipped or not.

    Skip criteria include:
        - The newest version/build of the recipe in a channel's repodata is
          greater than or equal to the query recipe's version/build.

    Non-Skip criteria include:
        - The query recipe has a newer version (or same version with a newer
          build) than anything in the repodata
        - The recipe is not present in any channel

    Parameters:
    -----------
    1) recipe: (str) The directory path to the query recipe
    2) channels: (list) A list of channels to check against
    3) repodata_dict: (dict) A dictionary of repodata by channel (From get_repodata() method)
    4) actualname_to_idname: (dict) Dict of recipe names as keys as id names in the repodata_dict as keys. (From get_repodata() method)

    Returns:
    ++++++++
    - Return True if recipe building is skippable
    - Return False if recipe building cannot be skipped
    """
    platform, metas = load_platform_metas(recipe, finalize=False)
    # The recipe likely defined skip: True
    if not metas:
        return True
    ## Get each packages name, version, and build number
    packages = set(
        (meta.name(), float(meta.version()), float(meta.build_number() or 0))
        for meta in metas
    )
    for name, version, build_num in packages:
        present = False
        for c in channels:
            ## Check for the recipe in one of the channel's repodata
            if name in actualname_to_idname[c].keys():
                ## Find the newest/highest versioned and build package
                present = True
                cur_version = -1.0
                cur_build = -1.0
                for pkg_tar in actualname_to_idname[c][name]:
                    repo_version = float(repodata_dict[c][pkg_tar]["version"])
                    repo_build_number = float(repodata_dict[c][pkg_tar]["build_number"])
                    ## If the version is greater than the previous best, reset values with this package
                    if repo_version > cur_version:
                        cur_version = repo_version
                        cur_build = repo_build_number
                    ## If the version is the same but the build number is greater, keep the higher build.
                    ## BUG FIX: must compare the repodata package's version (repo_version),
                    ## not the query recipe's version, when tracking the repodata best.
                    elif repo_version == cur_version and repo_build_number > cur_build:
                        cur_build = repo_build_number
                ## Check if the query package is newer than what is reported in the repodata or not
                ## If the query package's version is greater than the best in the repodata, update recipe
                if cur_version < version:
                    return False
                ## If the query package has the same version but a greater build number than the best in the repodata, update recipe
                elif cur_version == version and cur_build < build_num:
                    return False
        ## If package not already in the repodata
        if not present:
            return False
    print(
        ":ggd:build recipes: FILTER: not building recipe {} because the version and/or build number match what is already in the channel and not forced".format(
            recipe
        )
    )
    return True
def check_recipe_for_build(
    recipe, check_channels, repodata_by_channel, actualname_to_idname, force=False
):
    """
    check_recipe_for_build
    ======================
    Decide whether a recipe needs to be built and, if so, predict the tarball
    path(s) the build will produce.

    Parameters:
    -----------
    1) recipe: (str) The directory path for the recipe in question
    2) check_channels: (list) A list of channels to check against
    3) repodata_by_channel: (dict) A dictionary of repodata by channel (From get_repodata() method)
    4) actualname_to_idname: (dict) Dict of recipe names as keys as id names in the repodata_dict as keys. (From get_repodata() method)
    5) force: (bool) Force a build even if the recipe could be skipped. (Default = False)

    Return:
    +++++++
    - An empty list if the recipe should be skipped
    - A list of predicted tarball file paths for the built recipe otherwise
    """
    ## Unless forced, skip recipes whose version/build already exist in a channel.
    ## NB: skipping early here means possible divergent builds are not detected.
    skippable = not force and check_if_recipe_skippable(
        recipe, check_channels, repodata_by_channel, actualname_to_idname
    )
    if skippable:
        return []
    ## Fully render the recipe; a recipe that defines skip: True renders empty
    _, metas = load_platform_metas(recipe, finalize=True)
    if not metas:
        return []
    ## Predict the output tarball path(s) for every rendered meta
    return list(
        chain.from_iterable(api.get_output_file_paths(m) for m in metas)
    )
def remove_dag_cycles(dag, name2recipes, skip_dependent):
    """
    remove_dag_cycles
    =================
    Method to remove cycles in the dag. Cycles happen when multiple recipes as nodes depend on each other.

    Parameters:
    -----------
    1) dag: (networkx.DiGraph() object) The dag created from build_recipe_dag()
    2) name2recipes: (dict) A dictionary where keys are recipe names and values are sets of recipe paths
    3) skip_dependent: (dict) A dictionary with recipes that should be skipped. (To be filled by this method)

    Returns:
    ++++++++
    1) an updated dag with cyclic nodes removed
    """
    nodes_in_cycles = set()
    ## Collect every node that participates in any dependency cycle
    for cycle in list(nx.simple_cycles(dag)):
        print(
            ":ggd:build recipes: !!BUILD ERROR!! dependency cycle found for: {}".format(
                cycle
            )
        )
        nodes_in_cycles.update(cycle)
    for name in sorted(nodes_in_cycles):
        fail_recipes = sorted(name2recipes[name])
        ## BUG FIX: was "Failed recipes: %s" which never interpolated the list
        print(
            (
                ":ggd:build recipes: !!BUILD ERROR!! cannot build recipes for {} since "
                "it cyclically depends on other packages in the current build job. "
                "Failed recipes: {}"
            ).format(name, fail_recipes)
        )
        ## Everything downstream of a cyclic node must be skipped as well
        for node in nx.algorithms.descendants(dag, name):
            if node not in nodes_in_cycles:
                # BUG FIX: was `cycle_fail_recipes`, an undefined name (NameError)
                skip_dependent[node].extend(fail_recipes)
    ## Keep only the acyclic part of the graph
    return dag.subgraph(name for name in dag if name not in nodes_in_cycles)
def get_subdags(dag, n_workers, worker_offset):
    """
    get_subdags
    ===========
    Method to create subdags from the main dag based on the number of workers available.
    Each worker gets a round-robin share of the connected components (by root node).

    Parameters:
    -----------
    1) dag: (networkx.DiGraph() object) The recipe dag
    2) n_workers: (int) The number of workers
    3) worker_offset: (int) The worker offset (which share this worker takes)

    Returns:
    ++++++++
    1) the subdags
    """
    if n_workers > 1 and worker_offset >= n_workers:
        raise ValueError(
            "n-workers is less than the worker-offset given! "
            "Either decrease --n-workers or decrease --worker-offset!"
        )
    # Get connected subdags and sort by nodes
    if n_workers > 1:
        # NOTE(review): dag.in_degree().items() matches the networkx 1.x API
        # (dict return); newer networkx versions return a degree view without
        # .items() -- confirm the pinned networkx version
        root_nodes = sorted([k for k, v in dag.in_degree().items() if v == 0])
        nodes = set()
        found = set()
        for idx, root_node in enumerate(root_nodes):
            # Flatten the nested list
            children = itertools.chain(*nx.dfs_successors(dag, root_node).values())
            # This is the only obvious way of ensuring that all nodes are included
            # in exactly 1 subgraph
            found.add(root_node)
            # Round-robin: this worker keeps every n_workers-th root (by index)
            if idx % n_workers == worker_offset:
                nodes.add(root_node)
                for child in children:
                    if child not in found:
                        nodes.add(child)
                        found.add(child)
            else:
                for child in children:
                    found.add(child)
        subdags = dag.subgraph(list(nodes))
        print(
            ":ggd:build recipes: Building and testing sub-DAGs {} in each group of {}, which is {} packages".format(
                worker_offset, n_workers, len(subdags.nodes())
            )
        )
    else:
        subdags = dag
    return subdags
def build_recipe_dag(recipe_list, restricted=True):
    """
    build_recipe_dag
    ================
    Method to build the DAG for recipes. Nodes represent the recipes and their dependencies, while edges connect the recipe nodes
    to their dependencies. (build or host deps)

    Parameters:
    -----------
    1) recipe_list: (list) A list of recipes to build the DAG for
    2) restricted: (bool) Whether or not to restrict the final list of recipes to recipes only (True) or to include their deps as well (False)

    Returns:
    ++++++++
    1) The DAG
    2) name2recipe_dict: (dict) A dictionary with names of recipes as keys, and sets of recipe paths as values
    3) recipe2name_dict: (dict) A dictionary with recipe path as keys and names as values
    """
    print(":ggd:build recipes: Generating recipe DAG")
    name2recipe_dict = defaultdict(set)
    recipe2name_dict = defaultdict(str)
    ## Create a dag
    dag = nx.DiGraph()
    ## For each recipe, update the dag and update the name2recipe_dict
    for recipe in recipe_list:
        recipe_path = os.path.join(recipe, "meta.yaml")
        recipe_meta = yaml.safe_load(io.open(recipe_path, "rt", encoding="utf-8"))
        ## get a dictionary to match recipe name to recipe dir
        recipe_name = recipe_meta["package"]["name"]
        name2recipe_dict[recipe_name].update([recipe])
        ## create another dict for recipe to name
        recipe2name_dict[recipe] = recipe_name
        ## Add name as a node to the graph
        dag.add_node(recipe_name)
    ## Check deps
    for recipe in recipe_list:
        recipe_path = os.path.join(recipe, "meta.yaml")
        recipe_meta = yaml.safe_load(io.open(recipe_path, "rt", encoding="utf-8"))
        ## Get deps
        # NOTE(review): assumes every meta.yaml has a "requirements" key;
        # a recipe without one would raise KeyError here -- confirm
        if (
            "build" in recipe_meta["requirements"]
            and recipe_meta["requirements"]["build"]
        ):
            ## If the build reqs are in the current recipe list or the restricted is set to False, add the dep
            build_reqs = [
                x
                for x in recipe_meta["requirements"]["build"]
                if x in name2recipe_dict or not restricted
            ]
        else:
            build_reqs = []
        if "run" in recipe_meta["requirements"] and recipe_meta["requirements"]["run"]:
            run_reqs = [
                x
                for x in recipe_meta["requirements"]["run"]
                if x in name2recipe_dict or not restricted
            ]
        else:
            run_reqs = []
        if (
            "host" in recipe_meta["requirements"]
            and recipe_meta["requirements"]["host"]
        ):
            host_reqs = [
                x
                for x in recipe_meta["requirements"]["host"]
                if x in name2recipe_dict or not restricted
            ]
        else:
            host_reqs = []
        ## Add deps as edges to node
        # NOTE(review): only build and host requirements become edges; run_reqs
        # is computed but never used -- confirm that is intentional
        dag.add_edges_from((dep, recipe_name) for dep in set(build_reqs + host_reqs))
    return (dag, name2recipe_dict, recipe2name_dict)
# ---------------------------------------------------------------------------------------------------------------------------------
## Main
# ---------------------------------------------------------------------------------------------------------------------------------
def main():
    """
    main
    ====
    Entry point: parse arguments, gather the ggd recipes (minus any blacklisted
    ones), load the channel configuration, and build every recipe that needs it.
    """
    args = arguments()
    ## Load the blacklist, if one was given
    blacklist_recipes = set()
    if args.blacklist:
        blacklist_recipes = set(parse_backlist_file(args.blacklist))
        print(
            ":ggd:build recipes: The following recipes are being blacklisted: {}".format(
                ", ".join(list(blacklist_recipes))
            )
        )
    ## Collect the ggd recipes to consider
    print(":ggd:build recipes: Gathering ggd recipes")
    wanted_packages = args.packages if args.packages else "*"
    recipes = list(
        get_all_recipes(args.recipe_dir, wanted_packages, blacklist_recipes)
    )
    print(":ggd:build recipes: Considering {} ggd recipes".format(len(recipes)))
    ## Load the configuration file
    print(":ggd:build recipes: loading config file")
    channels = load_config(args.config_file)
    print(
        ":ggd:build recipes: channels from config file: {}".format(", ".join(channels))
    )
    ## Build the recipes
    build_recipes(recipes, channels, debug=args.debug)


if __name__ == "__main__":
    sys.exit(main() or 0)
| [
"itertools.chain",
"conda_build.api.Config",
"io.open",
"networkx.dfs_successors",
"sys.exit",
"os.walk",
"os.path.exists",
"argparse.ArgumentParser",
"conda_build.api.get_output_file_paths",
"networkx.DiGraph",
"networkx.simple_cycles",
"subprocess.check_call",
"conda_build.api.render",
"... | [((1125, 1256), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Identify and build all ggd recipes that are not currently in any ggd conda channel"""'}), "(description=\n 'Identify and build all ggd recipes that are not currently in any ggd conda channel'\n )\n", (1148, 1256), False, 'import argparse\n'), ((3708, 3733), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (3722, 3733), False, 'import os\n'), ((8803, 8820), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8814, 8820), False, 'from collections import defaultdict\n'), ((13813, 13846), 'os.path.join', 'os.path.join', (['recipe', '"""meta.yaml"""'], {}), "(recipe, 'meta.yaml')\n", (13825, 13846), False, 'import os\n'), ((16837, 16892), 'conda_build.api.Config', 'api.Config', ([], {'no_download_source': '(True)', 'set_build_id': '(False)'}), '(no_download_source=True, set_build_id=False)\n', (16847, 16892), False, 'from conda_build import api\n'), ((17306, 17377), 'itertools.chain', 'chain', (['config.exclusive_config_files', '(config.variant_config_files or [])'], {}), '(config.exclusive_config_files, config.variant_config_files or [])\n', (17311, 17377), False, 'from itertools import chain\n'), ((18998, 19036), 'os.environ.get', 'os.environ.get', (['"""OSTYPE"""', 'sys.platform'], {}), "('OSTYPE', sys.platform)\n", (19012, 19036), False, 'import os\n'), ((28619, 28635), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (28630, 28635), False, 'from collections import defaultdict\n'), ((28659, 28675), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (28670, 28675), False, 'from collections import defaultdict\n'), ((28707, 28719), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (28717, 28719), True, 'import networkx as nx\n'), ((5666, 5703), 'os.path.join', 'os.path.join', (['base_recipe_folder', '"""*"""'], {}), "(base_recipe_folder, '*')\n", (5678, 5703), False, 'import os\n'), 
((5808, 5827), 'os.walk', 'os.walk', (['recipe_dir'], {}), '(recipe_dir)\n', (5815, 5827), False, 'import os\n'), ((12177, 12262), 'subprocess.check_call', 'sp.check_call', (["['conda', 'build', 'purge']"], {'stderr': 'sys.stderr', 'stdout': 'sys.stdout'}), "(['conda', 'build', 'purge'], stderr=sys.stderr, stdout=sys.stdout\n )\n", (12190, 12262), True, 'import subprocess as sp\n'), ((14036, 14092), 'subprocess.check_call', 'sp.check_call', (['cmd'], {'stderr': 'sys.stderr', 'stdout': 'sys.stdout'}), '(cmd, stderr=sys.stderr, stdout=sys.stdout)\n', (14049, 14092), True, 'import subprocess as sp\n'), ((17394, 17413), 'os.path.exists', 'os.path.exists', (['cfg'], {}), '(cfg)\n', (17408, 17413), False, 'import os\n'), ((25099, 25120), 'networkx.simple_cycles', 'nx.simple_cycles', (['dag'], {}), '(dag)\n', (25115, 25120), True, 'import networkx as nx\n'), ((25716, 25752), 'networkx.algorithms.descendants', 'nx.algorithms.descendants', (['dag', 'name'], {}), '(dag, name)\n', (25741, 25752), True, 'import networkx as nx\n'), ((28845, 28878), 'os.path.join', 'os.path.join', (['recipe', '"""meta.yaml"""'], {}), "(recipe, 'meta.yaml')\n", (28857, 28878), False, 'import os\n'), ((29382, 29415), 'os.path.join', 'os.path.join', (['recipe', '"""meta.yaml"""'], {}), "(recipe, 'meta.yaml')\n", (29394, 29415), False, 'import os\n'), ((3945, 3987), 'io.open', 'io.open', (['file_path', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(file_path, 'rt', encoding='utf-8')\n", (3952, 3987), False, 'import io\n'), ((4348, 4359), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4356, 4359), False, 'import sys\n'), ((6795, 6839), 'io.open', 'io.open', (['config_file', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(config_file, 'rt', encoding='utf-8')\n", (6802, 6839), False, 'import io\n'), ((7045, 7056), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7053, 7056), False, 'import sys\n'), ((9326, 9353), 'networkx.topological_sort', 'nx.topological_sort', (['subdag'], {}), '(subdag)\n', (9345, 
9353), True, 'import networkx as nx\n'), ((11315, 11354), 'networkx.algorithms.descendants', 'nx.algorithms.descendants', (['subdag', 'name'], {}), '(subdag, name)\n', (11340, 11354), True, 'import networkx as nx\n'), ((15305, 15321), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (15316, 15321), False, 'from collections import defaultdict\n'), ((17044, 17055), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17053, 17055), False, 'import os\n'), ((18381, 18473), 'conda_build.api.render', 'api.render', (['recipe'], {'config': 'config', 'finalize': 'finalize', 'bypass_env_check': 'bypass_env_check'}), '(recipe, config=config, finalize=finalize, bypass_env_check=\n bypass_env_check)\n', (18391, 18473), False, 'from conda_build import api\n'), ((28916, 28960), 'io.open', 'io.open', (['recipe_path', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(recipe_path, 'rt', encoding='utf-8')\n", (28923, 28960), False, 'import io\n'), ((29453, 29497), 'io.open', 'io.open', (['recipe_path', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(recipe_path, 'rt', encoding='utf-8')\n", (29460, 29497), False, 'import io\n'), ((14337, 14358), 'os.path.exists', 'os.path.exists', (['ppath'], {}), '(ppath)\n', (14351, 14358), False, 'import os\n'), ((14371, 14392), 'os.path.isfile', 'os.path.isfile', (['ppath'], {}), '(ppath)\n', (14385, 14392), False, 'import os\n'), ((16054, 16065), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16062, 16065), False, 'import sys\n'), ((17159, 17170), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17168, 17170), False, 'import os\n'), ((24284, 24315), 'conda_build.api.get_output_file_paths', 'api.get_output_file_paths', (['meta'], {}), '(meta)\n', (24309, 24315), False, 'from conda_build import api\n'), ((15748, 15774), 'requests.get', 'requests.get', (['repodata_url'], {}), '(repodata_url)\n', (15760, 15774), False, 'import requests\n'), ((6017, 6043), 'os.path.basename', 'os.path.basename', (['dir_path'], {}), '(dir_path)\n', (6033, 6043), 
False, 'import os\n'), ((6197, 6223), 'os.path.basename', 'os.path.basename', (['dir_path'], {}), '(dir_path)\n', (6213, 6223), False, 'import os\n'), ((26901, 26934), 'networkx.dfs_successors', 'nx.dfs_successors', (['dag', 'root_node'], {}), '(dag, root_node)\n', (26918, 26934), True, 'import networkx as nx\n')] |
from django.test import TestCase
from corehq import toggles
from corehq.motech.dhis2.tasks import send_datasets_for_all_domains
class TestSendDatasetsForAllDomains(TestCase):
    """Tests for the send_datasets_for_all_domains() task."""
    # A domain name that is guaranteed not to exist in the test database
    domain_name = 'does-not-exist'
    def setUp(self):
        # Enable the DHIS2 integration feature flag for the (nonexistent) domain
        toggles.DHIS2_INTEGRATION.set(
            self.domain_name,
            enabled=True,
            namespace=toggles.NAMESPACE_DOMAIN
        )
    def tearDown(self):
        # Disable the flag again so other tests are unaffected
        toggles.DHIS2_INTEGRATION.set(
            self.domain_name,
            enabled=False,
            namespace=toggles.NAMESPACE_DOMAIN
        )
    def test_check_domain_exists(self):
        """
        send_datasets_for_all_domains() should not raise an AttributeError
        if a domain does not exist
        """
        send_datasets_for_all_domains()
| [
"corehq.toggles.DHIS2_INTEGRATION.set",
"corehq.motech.dhis2.tasks.send_datasets_for_all_domains"
] | [((244, 346), 'corehq.toggles.DHIS2_INTEGRATION.set', 'toggles.DHIS2_INTEGRATION.set', (['self.domain_name'], {'enabled': '(True)', 'namespace': 'toggles.NAMESPACE_DOMAIN'}), '(self.domain_name, enabled=True, namespace=\n toggles.NAMESPACE_DOMAIN)\n', (273, 346), False, 'from corehq import toggles\n'), ((421, 524), 'corehq.toggles.DHIS2_INTEGRATION.set', 'toggles.DHIS2_INTEGRATION.set', (['self.domain_name'], {'enabled': '(False)', 'namespace': 'toggles.NAMESPACE_DOMAIN'}), '(self.domain_name, enabled=False, namespace=\n toggles.NAMESPACE_DOMAIN)\n', (450, 524), False, 'from corehq import toggles\n'), ((749, 780), 'corehq.motech.dhis2.tasks.send_datasets_for_all_domains', 'send_datasets_for_all_domains', ([], {}), '()\n', (778, 780), False, 'from corehq.motech.dhis2.tasks import send_datasets_for_all_domains\n')] |
'''
@Author: fxm
@Date: Dec 27, 2020.
@Title: UI class.
'''
import sys
sys.path.append('..')
import pygame
import logging
from pygame.locals import *
import pygame.gfxdraw
from collections import namedtuple
from Framework.Net import dotdict
from Othello.Board import Board
log = logging.getLogger(__name__)
'''
参数设置
'''
# Board/layout parameters (comments translated to English)
SIZE = 30  # Spacing between adjacent grid points on the board
Line_Points = 9  # Number of grid points per row/column
Outer_Width = 20  # Margin outside the board
Border_Width = 4  # Width of the border line
Inside_Width = 4  # Gap between the border and the actual playing grid
Border_Length = SIZE * (Line_Points - 1) + Inside_Width * 2 + Border_Width  # Length of the border line
Start_X = Outer_Width + int(Border_Width / 2) + Inside_Width  # X coordinate of the grid origin
Start_Y = Outer_Width + int(Border_Width / 2) + Inside_Width  # Y coordinate of the grid origin
SCREEN_HEIGHT = SIZE * (Line_Points - 1) + Outer_Width \
                * 2 + Border_Width + Inside_Width * 2  # Height of the game window
SCREEN_WIDTH = SCREEN_HEIGHT + 200  # Width of the game window (board plus info panel)
Stone_Radius = SIZE // 2  # Stone radius
Checkerboard_Color = (0xE3, 0x92, 0x65)  # Board background color
BLACK_COLOR = (0, 0, 0)  # Black
WHITE_COLOR = (255, 255, 255)  # White
RED_COLOR = (245, 222, 179)  # Pale yellow (despite the name)
BLUE_COLOR = (30, 30, 200)  # Blue
RIGHT_INFO_POS_X = SCREEN_HEIGHT + Stone_Radius * 2 + 10  # X position of the right-hand info panel
'''
UI类,游戏界面设置对象
'''
class UI():
    '''
    初始化
    参数设置:
        game:游戏对象
        screen_h:游戏屏幕高
        screen_w:游戏屏幕宽
    '''
    def __init__(self, game):
        """Store the game object and the fixed window dimensions."""
        self.game = game
        self.screen_h = SCREEN_HEIGHT
        self.screen_w = SCREEN_WIDTH
    '''输出一段文字信息'''
    def printText(self, screen, font, x, y, text, fcolor=(255, 255, 255)):
        """Render *text* with *font* at pixel position (x, y) on *screen*."""
        imgText = font.render(text, True, fcolor)
        screen.blit(imgText, (x, y))
    '''画棋盘'''
    def drawCheckerboard(self, screen):
        """Draw the empty board: background, border frame, and grid lines."""
        # fill the board background colour
        screen.fill(Checkerboard_Color)
        # draw the border frame around the grid
        pygame.draw.rect(screen, BLACK_COLOR, (Outer_Width, Outer_Width, \
            Border_Length, Border_Length), Border_Width)
        # draw the grid lines
        for i in range(Line_Points): # vertical lines
            pygame.draw.line(screen, BLACK_COLOR, (Start_Y, Start_Y + SIZE * i), \
                (Start_Y + SIZE * (Line_Points - 1), Start_Y + SIZE * i), 1)
        for j in range(Line_Points): # horizontal lines
            pygame.draw.line(screen, BLACK_COLOR, (Start_X + SIZE * j, Start_X), \
                (Start_X + SIZE * j, Start_X + SIZE * (Line_Points - 1)), 1)
    '''画棋子'''
    def drawChessman(self, screen, point, stone_color):
        """Draw an anti-aliased stone at board cell *point* = (col, row)."""
        pygame.gfxdraw.aacircle(screen, Start_X + SIZE * point[0] + SIZE // 2, \
            Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius, stone_color)
        pygame.gfxdraw.filled_circle(screen, Start_X + SIZE * point[0] + SIZE // 2, \
            Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius, stone_color)
    '''画一个单独的不在棋盘内的棋子'''
    def drawChessmanPos(self, screen, pos, stone_color):
        """Draw a stone at an absolute pixel position (used in the side panel)."""
        pygame.gfxdraw.aacircle(screen, pos[0], pos[1], Stone_Radius, stone_color)
        pygame.gfxdraw.filled_circle(screen, pos[0], pos[1], Stone_Radius, stone_color)
    '''画提示可以走的棋子位置'''
    def drawMoves(self, screen, point, stone_color):
        """Draw a small hint dot marking a legal move at cell *point*."""
        pygame.gfxdraw.aacircle(screen, Start_X + SIZE * point[0] + SIZE // 2, \
            Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius // 3, stone_color)
        pygame.gfxdraw.filled_circle(screen, Start_X + SIZE * point[0] + SIZE // 2, \
            Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius // 3, stone_color)
    '''画右侧信息显示'''
    def drawRightInfo(self, screen, font, moves, curplayer, is_human_first):
        """Draw the right-hand panel: player labels and whose turn it is."""
        self.drawChessmanPos(screen, (SCREEN_HEIGHT + Stone_Radius, Start_X + Stone_Radius), BLACK_COLOR)
        self.drawChessmanPos(screen, (SCREEN_HEIGHT + Stone_Radius, Start_X + Stone_Radius * 4), WHITE_COLOR)
        if is_human_first == True:
            self.printText(screen, font, RIGHT_INFO_POS_X, Start_X + 3, '玩家', BLUE_COLOR)
            self.printText(screen, font, RIGHT_INFO_POS_X, Start_X + Stone_Radius * 3 + 3, '电脑', BLUE_COLOR)
        else:
            self.printText(screen, font, RIGHT_INFO_POS_X, Start_X + 3, '电脑', BLUE_COLOR)
            self.printText(screen, font, RIGHT_INFO_POS_X, Start_X + Stone_Radius * 3 + 3, '玩家', BLUE_COLOR)
        if curplayer == -1:
            self.printText(screen, font, SCREEN_HEIGHT, SCREEN_HEIGHT//2, f'当前出棋:黑棋', BLUE_COLOR)
        else:
            self.printText(screen, font, SCREEN_HEIGHT, SCREEN_HEIGHT//2, f'当前出棋:白棋', BLUE_COLOR)
    '''根据鼠标点击位置,返回游戏区坐标'''
    def getClickpoint(self, click_pos):
        """Map a mouse pixel position to a board cell (x, y), or None if off-board."""
        pos_x = click_pos[0] - Start_X
        pos_y = click_pos[1] - Start_Y
        # click left of / above the playable area -> None
        if pos_x < -Inside_Width or pos_y < -Inside_Width:
            return None
        x = int(pos_x / SIZE)
        y = int(pos_y / SIZE)
        # click beyond the far edge of the board -> None
        if x >= Line_Points or y >= Line_Points:
            return None
        return (x, y)
    '''画出棋盘的所有信息'''
    def drawAll(self, screen, board, end, moves, curplayer, is_human_first):
        """Redraw the whole frame: board, stones, move hints, panel, result text."""
        # draw the board itself
        font1 = pygame.font.SysFont('SimHei', 72)
        font2 = pygame.font.SysFont('SimHei', 24)
        fwidth, fheight = font1.size('黑方获胜')
        self.drawCheckerboard(screen)
        self.drawRightInfo(screen, font2, moves, curplayer, is_human_first)
        # draw the stones already on the board (1 = white, -1 = black)
        for i in range(len(board)):
            for j in range(len(board[0])):
                if board[i][j] == 1:
                    self.drawChessman(screen, (j,i), WHITE_COLOR)
                elif board[i][j] == -1:
                    self.drawChessman(screen, (j,i), BLACK_COLOR)
        # show legal-move hints only when it is the human's turn
        if (is_human_first == True and curplayer == -1) or \
            (is_human_first == False and curplayer == 1):
            for move in moves:
                x = move[0]
                y = move[1]
                self.drawMoves(screen, (y,x), RED_COLOR)
        # end != -2 means the game is over: 1 white wins, -1 black wins, 0 draw
        if end != -2:
            if end == 1:
                self.printText(screen, font1, (SCREEN_WIDTH - fwidth)//2, (SCREEN_HEIGHT - fheight)//2, '白子获胜', RED_COLOR)
            elif end == -1:
                self.printText(screen, font1, (SCREEN_WIDTH - fwidth)//2, (SCREEN_HEIGHT - fheight)//2, '黑子获胜', RED_COLOR)
            elif end == 0:
                self.printText(screen, font1, (SCREEN_WIDTH - fwidth)//2, (SCREEN_HEIGHT - fheight)//2, '平局', RED_COLOR)
        pygame.display.flip()
    '''人类走棋'''
    def humanplay(self, board):
        """Block until the human clicks a legal cell; return the action index."""
        valid = self.game.getValid(board, 1)
        while True:
            for event in pygame.event.get():
                if event.type == MOUSEBUTTONDOWN: # mouse pressed -> attempt to place a stone
                    pressed_array = pygame.mouse.get_pressed()
                    if pressed_array[0]:
                        mouse_pos = pygame.mouse.get_pos()
                        # NOTE(review): getClickpoint can return None for clicks
                        # outside the board; point[0] below would then raise
                        # TypeError — consider guarding before unpacking.
                        point = self.getClickpoint(mouse_pos)
                        y,x = point[0], point[1]
                        if ((0 <= x) and (x < self.game.size) and (0 <= y) and (y < self.game.size)) or \
                            ((x == self.game.size) and (y == 0)):
                            # action index = row * size + col; size**2 is the pass action
                            a = self.game.size * x + y if x != -1 else self.game.size ** 2
                            if valid[a]:
                                return a
                            else:continue
                        else:continue
    '''游戏仿真主过程'''
    def display(self, screen, ai, is_human_first):
        """Main game loop: alternate human and AI moves until the game ends."""
        if is_human_first:
            players = ['human', None, 'ai']
        else:
            players = ['ai', None, 'human']
        curplayer = -1
        board = self.game.initBoard()
        while True:
            b = Board(self.game.size)
            b.matrix = board
            moves = b.getLegalMoves(curplayer)
            end = self.game.getGameEnded(board, curplayer)
            self.drawAll(screen, board, curplayer * end, moves, curplayer, is_human_first)
            if end != -2:
                continue
            # if the current player has no legal move, the opponent moves again
            if self.game.getNoAction(board, curplayer) == True:
                # update state and swap the player to move (pass action)
                board, curplayer = self.game.getNextState(board, curplayer, self.game.size **2)
                continue
            if players[curplayer+1] == 'human':
                action = self.humanplay(self.game.getCanonicalForm(board, curplayer))
            elif players[curplayer+1] == 'ai':
                action = ai(self.game.getCanonicalForm(board, curplayer))
            valids = self.game.getValid(self.game.getCanonicalForm(board, curplayer), 1)
            # log an error if the chosen action is not in the legal-move list
            if valids[action] == 0:
                log.error(f'Action {action} is not valid!')
                log.debug(f'valids = {valids}')
            assert valids[action] > 0
            # update state and swap the player to move
            board, curplayer = self.game.getNextState(board, curplayer, action) | [
"logging.getLogger",
"pygame.mouse.get_pressed",
"pygame.draw.line",
"pygame.gfxdraw.aacircle",
"pygame.event.get",
"pygame.display.flip",
"Othello.Board.Board",
"pygame.mouse.get_pos",
"pygame.draw.rect",
"pygame.gfxdraw.filled_circle",
"sys.path.append",
"pygame.font.SysFont"
] | [((83, 104), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (98, 104), False, 'import sys\n'), ((292, 319), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (309, 319), False, 'import logging\n'), ((2467, 2580), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'BLACK_COLOR', '(Outer_Width, Outer_Width, Border_Length, Border_Length)', 'Border_Width'], {}), '(screen, BLACK_COLOR, (Outer_Width, Outer_Width,\n Border_Length, Border_Length), Border_Width)\n', (2483, 2580), False, 'import pygame\n'), ((3087, 3228), 'pygame.gfxdraw.aacircle', 'pygame.gfxdraw.aacircle', (['screen', '(Start_X + SIZE * point[0] + SIZE // 2)', '(Start_Y + SIZE * point[1] + SIZE // 2)', 'Stone_Radius', 'stone_color'], {}), '(screen, Start_X + SIZE * point[0] + SIZE // 2, \n Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius, stone_color)\n', (3110, 3228), False, 'import pygame\n'), ((3246, 3391), 'pygame.gfxdraw.filled_circle', 'pygame.gfxdraw.filled_circle', (['screen', '(Start_X + SIZE * point[0] + SIZE // 2)', '(Start_Y + SIZE * point[1] + SIZE // 2)', 'Stone_Radius', 'stone_color'], {}), '(screen, Start_X + SIZE * point[0] + SIZE // 2,\n Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius, stone_color)\n', (3274, 3391), False, 'import pygame\n'), ((3493, 3567), 'pygame.gfxdraw.aacircle', 'pygame.gfxdraw.aacircle', (['screen', 'pos[0]', 'pos[1]', 'Stone_Radius', 'stone_color'], {}), '(screen, pos[0], pos[1], Stone_Radius, stone_color)\n', (3516, 3567), False, 'import pygame\n'), ((3576, 3655), 'pygame.gfxdraw.filled_circle', 'pygame.gfxdraw.filled_circle', (['screen', 'pos[0]', 'pos[1]', 'Stone_Radius', 'stone_color'], {}), '(screen, pos[0], pos[1], Stone_Radius, stone_color)\n', (3604, 3655), False, 'import pygame\n'), ((3744, 3890), 'pygame.gfxdraw.aacircle', 'pygame.gfxdraw.aacircle', (['screen', '(Start_X + SIZE * point[0] + SIZE // 2)', '(Start_Y + SIZE * point[1] + SIZE // 2)', '(Stone_Radius // 3)', 'stone_color'], {}), 
'(screen, Start_X + SIZE * point[0] + SIZE // 2, \n Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius // 3, stone_color)\n', (3767, 3890), False, 'import pygame\n'), ((3908, 4058), 'pygame.gfxdraw.filled_circle', 'pygame.gfxdraw.filled_circle', (['screen', '(Start_X + SIZE * point[0] + SIZE // 2)', '(Start_Y + SIZE * point[1] + SIZE // 2)', '(Stone_Radius // 3)', 'stone_color'], {}), '(screen, Start_X + SIZE * point[0] + SIZE // 2,\n Start_Y + SIZE * point[1] + SIZE // 2, Stone_Radius // 3, stone_color)\n', (3936, 4058), False, 'import pygame\n'), ((5686, 5719), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""SimHei"""', '(72)'], {}), "('SimHei', 72)\n", (5705, 5719), False, 'import pygame\n'), ((5736, 5769), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""SimHei"""', '(24)'], {}), "('SimHei', 24)\n", (5755, 5769), False, 'import pygame\n'), ((6988, 7009), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (7007, 7009), False, 'import pygame\n'), ((2659, 2793), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLACK_COLOR', '(Start_Y, Start_Y + SIZE * i)', '(Start_Y + SIZE * (Line_Points - 1), Start_Y + SIZE * i)', '(1)'], {}), '(screen, BLACK_COLOR, (Start_Y, Start_Y + SIZE * i), (\n Start_Y + SIZE * (Line_Points - 1), Start_Y + SIZE * i), 1)\n', (2675, 2793), False, 'import pygame\n'), ((2860, 2994), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLACK_COLOR', '(Start_X + SIZE * j, Start_X)', '(Start_X + SIZE * j, Start_X + SIZE * (Line_Points - 1))', '(1)'], {}), '(screen, BLACK_COLOR, (Start_X + SIZE * j, Start_X), (\n Start_X + SIZE * j, Start_X + SIZE * (Line_Points - 1)), 1)\n', (2876, 2994), False, 'import pygame\n'), ((7151, 7169), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7167, 7169), False, 'import pygame\n'), ((8255, 8276), 'Othello.Board.Board', 'Board', (['self.game.size'], {}), '(self.game.size)\n', (8260, 8276), False, 'from Othello.Board import Board\n'), ((7286, 7312), 'pygame.mouse.get_pressed', 
'pygame.mouse.get_pressed', ([], {}), '()\n', (7310, 7312), False, 'import pygame\n'), ((7390, 7412), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7410, 7412), False, 'import pygame\n')] |
# -*- coding: UTF-8 -*-
# main.py
# Root file for EmotionDetection program.
# Prints out command line menu and handles user choices
from __future__ import print_function
from EmotionDetection import WordMap
from EmotionDetection import EvaluateText
from EmotionDetection import GUI
try:
    # Python 2: alias raw_input so input() returns a plain string.
    input = raw_input
except NameError:
    # Python 3: built-in input() already behaves this way.
    pass
import sys
try:
    # Python 2 only: site.py removes setdefaultencoding, so sys must be
    # reloaded before forcing the UTF-8 default encoding.
    reload(sys)
    sys.setdefaultencoding('utf8')
except NameError:
    # Python 3: reload() is not a builtin and setdefaultencoding is gone;
    # the previous unguarded calls crashed the module at import time.
    # UTF-8 is already the default encoding, so nothing to do.
    pass
def printMenu():
    """Print the EmotionDetection main menu (options 1-6) to stdout."""
    print("°º¤ø,¸¸,ø¤º°`°º¤ø,¸,ø¤°º¤ø,¸¸,", "EmotionDetection", ",¸¸,ø¤º°`°º¤ø,¸,ø¤°º¤ø,¸¸,ø¤º°\n")
    menu_options = (
        "1. Training",
        "2. Testing",
        "3. Evaluate Text",
        "4. GUI Evaluation",
        "5. Information",
        "6. Exit\n",
    )
    for option in menu_options:
        print(option)
    print(78 * "-", "\n")
def main():
    """Run the interactive menu loop until the user chooses option 6 (Exit).

    Any non-empty invalid choice simply redisplays the menu.
    """
    choice = True
    while choice:
        printMenu()
        choice = input("Select option [1-6]: ")
        # Bug fix: the original bare `print` was a no-op expression (the file
        # imports print_function), so the intended blank line never appeared.
        print()
        if choice == "1":
            train()
        elif choice == "2":
            test()
        elif choice == "3":
            evaluate()
        elif choice == "4":
            gui()
        elif choice == "5":
            printInfo()
        elif choice == "6":
            print("Exiting....\n")
            choice = False
        else:
            print("Invalid choice.")
            choice = True
# Training: builds a map of words and emotion values from an annotated corpus.
def train():
    """Prompt for corpus/value files and (re)build the WordMap.

    A missing file is reported and control returns to the menu.
    """
    do_reset = input("Reset training data? [y/n]: ").lower() in ["yes", "y", "1"]
    text_name = input("Text file: ")
    value_name = input("Value file: ")
    print("")
    try:
        print("Loading input values into WordMap...\n")
        with open("./data/" + text_name, 'r') as text_fh, \
                open("./data/" + value_name, 'r') as value_fh:
            WordMap.buildWordMap(do_reset, text_fh, value_fh)
    except IOError:
        print("File not found. Returning to main menu...\n")
def test():
    """Prompt for a pre-labelled corpus and run an accuracy evaluation."""
    text_name = input("Text file: ")
    value_name = input("Value file: ")
    print("")
    print ("values file is " , "./data/" + value_name)
    try:
        print("\nRunning text evaluation...\n")
        with open("./data/" + text_name, 'r') as text_fh:
            print ("text found")
            with open("./data/" + value_name, 'r') as value_fh:
                print ("values found")
                EvaluateText.evaluate(text_fh, value_fh)
    except IOError:
        print("File not found. Returning to main menu...\n")
def evaluate():
    """Prompt for a text file and evaluate it without gold labels."""
    text_name = input("Text file: ")
    print("")
    try:
        print("Running text evaluation...\n")
        with open("./data/" + text_name, 'r') as text_fh:
            EvaluateText.evaluate(text_fh)
    except IOError:
        print("File not found. Returning to main menu...\n")
def gui():
    """Launch the GUI evaluator window."""
    # The return value was previously bound to an unused local; drop it.
    GUI.Evaluator()
def printInfo():
    """Print the program description and per-option usage notes, then wait
    for the user to press Enter before returning to the menu."""
    print("\n°`°º¤ø,¸¸,ø¤º°`°º¤ø,¸,ø¤°º¤ø,¸¸,", "INFORMATION", ",¸¸,ø¤º°`°º¤ø,¸,ø¤°º¤ø,¸¸,ø¤º°º¤ø\n")
    print("EmotionDetection v1, sentiment analysis system operating off a multinomial")
    print("Naive Bayes classififer. There are 13 possible labels that text can be")
    print("labelled as, the emotions are :empty, sadness, enthusiasm, neutral, worry,")
    print("surprise, love, fun, hate, happiness, boredom, relief and anger.\n")
    print("1. Training - Generates a WordMap using a text file and emotion value file.")
    print("   A word map is required for both testing and evaluation.\n")
    print("2. Testing - Run the system and test its accuracy by supplying correct ")
    print("   emotion values. Also produces reports and confusion plot\n")
    print("3. Evaluate Text - Run the system without given values. Used to evaluate input ")
    print("   file that has not been pre-labelled.")
    print(78 * "-", "\n")
    input("Press enter to return to menu...\n")
# Bug fix: guard the entry point so importing this module does not start
# the interactive menu loop.
if __name__ == '__main__':
    main()
| [
"EmotionDetection.EvaluateText.evaluate",
"EmotionDetection.GUI.Evaluator",
"sys.setdefaultencoding",
"EmotionDetection.WordMap.buildWordMap"
] | [((363, 393), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (385, 393), False, 'import sys\n'), ((2684, 2699), 'EmotionDetection.GUI.Evaluator', 'GUI.Evaluator', ([], {}), '()\n', (2697, 2699), False, 'from EmotionDetection import GUI\n'), ((2544, 2575), 'EmotionDetection.EvaluateText.evaluate', 'EvaluateText.evaluate', (['textFile'], {}), '(textFile)\n', (2565, 2575), False, 'from EmotionDetection import EvaluateText\n'), ((1695, 1743), 'EmotionDetection.WordMap.buildWordMap', 'WordMap.buildWordMap', (['reset', 'textFile', 'valueFile'], {}), '(reset, textFile, valueFile)\n', (1715, 1743), False, 'from EmotionDetection import WordMap\n'), ((2233, 2275), 'EmotionDetection.EvaluateText.evaluate', 'EvaluateText.evaluate', (['textFile', 'valueFile'], {}), '(textFile, valueFile)\n', (2254, 2275), False, 'from EmotionDetection import EvaluateText\n')] |
import os
def print_tables(tables, width=None, spaces=3, index_width=3):
    """Print several string tables side by side within *width* columns.

    Args:
        tables: list of tables; each table is a list of row strings.
            The tables are laid out widest-first (the caller's list is
            no longer mutated).
        width: total line width. Defaults to the terminal width at call
            time, or 80 when stdout is not attached to a terminal.
        spaces: blank columns inserted before each table.
        index_width: width of the per-row index field; <= 0 disables indices.
    """
    if width is None:
        # Bug fix: the old signature evaluated os.get_terminal_size() as a
        # default argument, which runs at import time and raises OSError
        # whenever stdout is not a TTY (pipes, CI, IDE consoles).
        try:
            width = os.get_terminal_size().columns
        except OSError:
            width = 80
    space = ' ' * spaces
    lines = []
    if index_width <= 0:
        index_width = -1
    # Widest tables first; sort a copy so the caller's list is untouched.
    # default=0 also makes an empty table safe (the old max([]) raised).
    ordered = sorted(tables,
                     key=lambda t: max(len(t), max((len(r) for r in t), default=0)),
                     reverse=True)
    for table in ordered:
        max_length = max((len(x) for x in table), default=0)
        table_len = len(table)
        cursor = 0
        max_width = width
        _count = 0
        for i, line in enumerate(lines):  # search for a free block of rows
            line_len = len(line) + max_length + spaces + index_width + 1
            if line_len > width or line_len != max_width:
                _count = 1
                max_width = line_len
                cursor = i
            else:
                _count += 1
            if _count >= table_len:
                if not cursor or _count > table_len:
                    break
        else:
            cursor = len(lines)
        if cursor:  # pad the line above the table with blanks
            line = ' ' * max_length
            try:
                lines[cursor] += line
            except IndexError:
                lines.append(line)
            cursor += 1
        for i, row in enumerate(table):
            if index_width <= 0:
                line = f'{space}{row:{max_length}}'
            else:
                line = f'{space}{i: {index_width}}:{row:{max_length}}'
            try:
                lines[cursor + i] += line
            except IndexError:
                lines.append(line)
    print()
    for row in lines:
        print(row)
    print()
if __name__ == '__main__':
    # Demo: three tables of differing widths printed side by side.
    first = [
        '123123123',
        '66669999',
        '0xffffff',
        'VVVVVVVV',
        'QWEQWEASDASD',
    ]
    second = [
        'OOOOOOOOOO',
        'AAAAAAAAAA',
        'AHAHAHAHAHA',
    ]
    third = [
        'XXXXXXXXXXXXXXXXXxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
        'WWWWWWWWW',
        '0000000000000000',
        '3.1415926',
        '0.618033988749895',
        '0.8660254037844386',
        '1.322875644432295',
    ]
    print_tables([first, second, third])
| [
"os.get_terminal_size"
] | [((43, 65), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (63, 65), False, 'import os\n')] |
import datetime as dt
from uploader.utils import NULL
from typing import List
# Characters that must be escaped when a value is embedded in a SQL string
# literal (a single quote is doubled, per the SQL standard).
__ESCAPE_SYMBOLS_MAPPING = {"'": r"''"}
def __value_empty(value) -> bool:
    """Return True when *value* should be rendered as SQL NULL: the NULL
    token itself, None, any falsy value, or a whitespace-only string."""
    if value == NULL or value is None:
        return True
    if not value:
        return True
    return isinstance(value, str) and value.isspace()
def __escaped_symbols() -> dict:
    """Return the str.translate table for SQL escaping, building it lazily
    on first use and caching it as a function attribute."""
    table = getattr(__escaped_symbols, 'translation', None)
    if table is None:
        table = str.maketrans(__ESCAPE_SYMBOLS_MAPPING)
        __escaped_symbols.translation = table
    return table
def time_delta_to_str(value: dt.datetime):
    """Format *value* as an elapsed-time string 'H:MM:SS' measured from the
    Excel time origin (1899-12-31 00:00:00); hours may exceed 24."""
    origin = dt.datetime(1899, 12, 31, 0, 0, 0)
    delta = value - origin
    total_seconds = delta.days * 86400 + delta.seconds
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{}:{:02d}:{:02d}'.format(hours, minutes, seconds)
def convert_datetime_to_str(value, to_format: str, formater_type: str) -> str:
    """Render a date/time-like value as a string via strftime *to_format*.

    ``formater_type`` selects a recipe from ``FORMATERS`` ('date', 'time',
    'timestamp') or the special 'timedelta' (Excel-style elapsed time).
    Empty-ish values are rendered as the SQL NULL token.  String inputs are
    parsed with each accepted format in turn before formatting.
    """
    if __value_empty(value):
        return NULL
    if formater_type == 'timedelta':
        return time_delta_to_str(value)
    if type(value) == str:
        formater = FORMATERS[formater_type]
        # NOTE(review): there is no break after a successful parse — the
        # remaining formats are retried on the already-converted value and
        # fail silently via the bare except.  If no format matches a string
        # input at all, value.strftime below raises AttributeError.
        for format in formater['formats']:
            try:
                dt_value = dt.datetime.strptime(value, format)
            except:
                continue
            value = formater['converter'](dt_value)
    return value.strftime(to_format)
# def convert_datetime_to_str(value, dt_format: str) -> str:
# if type(value) == str:
# return value
# else:
# return value.strftime(dt_format)
def null_or_format_str(value, str_format: str):
    """Format *value* into *str_format* with SQL quote escaping applied,
    or return the NULL token for empty-ish values."""
    if __value_empty(value):
        return NULL
    escaped = str(value).translate(__escaped_symbols())
    return str_format.format(escaped)
def py_type_to_pg_type(py_type):
    """Return the PostgreSQL column-type name registered for *py_type*."""
    entry = PYTHON_TYPES_TO_PG_SQL_TYPES[py_type]
    return entry['type']
def py_value_to_pg_value(value_type, value) -> str:
    """Render *value* as a PostgreSQL SQL literal for the given type spec.

    ``value_type`` is either a Python type, or a column-spec dict whose
    optional ``mapping['type']`` (a PG type name) overrides the declared
    Python type.
    """
    if type(value_type) is dict:
        has_pg_override = ('mapping' in value_type
                           and 'type' in value_type['mapping']
                           and value_type['mapping']['type'] is not None)
        if has_pg_override:
            current_type = pg_type_to_py(value_type['mapping']['type'], value_type['type'])
        else:
            current_type = value_type['type']
    else:
        current_type = value_type
    converter = PYTHON_TYPES_TO_PG_SQL_TYPES[current_type]['converter']
    return converter(value)
# def datetime_to_null_or_str_format(value, dt_format, str_format):
# result = convert_datetime_to_str(value, dt_format)
# result = null_or_format_str(result, str_format)
# return result
# Five-character prefix of a PostgreSQL type name -> Python type.
# Keys are truncated prefixes, so 'numer' matches 'numeric', 'times' matches
# 'timestamp', 'varch' matches 'varchar', etc. (see pg_type_to_py).
PG_TYPE_TO_PYTHON_TYPE = {
    'numer': int,
    'integ': int,
    'real': float,
    'times': dt.datetime,
    'time': dt.time,
    'date': dt.date,
    'inter': dt.timedelta,
    'varch': str,
    'text': str
}
def pg_type_to_py(pg_type: str, default_type: type) -> type:
    """Resolve a PostgreSQL type name to the matching Python type.

    Matching is done on the lowercase five-character prefix; unknown names
    fall back to *default_type*.
    """
    prefix = pg_type.lower()[0:5]
    return PG_TYPE_TO_PYTHON_TYPE.get(prefix, default_type)
# Parsing/conversion recipes keyed by target kind.
# 'formats' lists the accepted strptime input formats for string values;
# 'converter' maps the parsed datetime to the final value (date, time, or
# the datetime itself for 'timestamp').
FORMATERS = {
    'date': {
        'formats': [
            '%d.%m.%Y',
            '%Y-%m-%d'
        ],
        'converter': lambda value: value.date()
    },
    'time': {
        'formats': [
            '%H:%M:%S',
        ],
        'converter': lambda value: value.time()
    },
    'timestamp': {
        'formats': [
            '%d.%m.%Y %H:%M:%S',
            '%Y-%m-%d %H:%M:%S'
        ],
        'converter': lambda value: value
    }
}
# Python type -> PostgreSQL column type name ('type') and a converter that
# renders a value of that type as a SQL literal ('converter').  Empty-ish
# values become NULL; date/time values are first normalized via
# convert_datetime_to_str and then wrapped in the matching PG cast.
PYTHON_TYPES_TO_PG_SQL_TYPES = {
    int: {
        'type': 'numeric',
        'converter': lambda value: null_or_format_str(value, '{}')
    },
    float: {
        'type': 'real',
        'converter': lambda value: null_or_format_str(value, '{}')
    },
    str: {
        'type': 'varchar',
        'converter': lambda value: null_or_format_str(value, "'{}'")
    },
    dt.time: {
        'type': 'time',
        'converter': lambda value: null_or_format_str(convert_datetime_to_str(value,
                                                                              '%H:%M:%S',
                                                                              'time'),
                                                      "'{}'")
    },
    dt.datetime: {
        'type': 'timestamp',
        'converter': lambda value: null_or_format_str(convert_datetime_to_str(value,
                                                                              '%d.%m.%Y %H:%M:%S',
                                                                              'timestamp'),
                                                      "to_timestamp('{}', 'dd.mm.yyyy hh24:mi:ss')")
    },
    dt.date: {
        'type': 'date',
        'converter': lambda value: null_or_format_str(convert_datetime_to_str(value,
                                                                              '%d.%m.%Y',
                                                                              'date'),
                                                      "to_date('{}', 'dd.mm.yyyy')")
    },
    dt.timedelta: {
        'type': 'interval',
        'converter': lambda value: null_or_format_str(convert_datetime_to_str(value,
                                                                              '%d.%m.%Y %H:%M:%S',
                                                                              'timedelta'),
                                                      "'{}'")
    }
}
| [
"datetime.datetime",
"datetime.datetime.strptime"
] | [((536, 570), 'datetime.datetime', 'dt.datetime', (['(1899)', '(12)', '(31)', '(0)', '(0)', '(0)'], {}), '(1899, 12, 31, 0, 0, 0)\n', (547, 570), True, 'import datetime as dt\n'), ((1196, 1231), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['value', 'format'], {}), '(value, format)\n', (1216, 1231), True, 'import datetime as dt\n')] |
# Interactive driver: reads an origin coordinate and a time value from stdin,
# then prints the reachable destination bus stops via destination_search.
import destination_search
'''
test용 값
gpsX = 127.0816985 # 동경 - 경도
gpsY = 37.5642135 # 북위 - 위도
time = 20
1. 좌표, 시간 입력 -> 주변 버스 정류소 ID 반환
2. 정류소 ID 입력 -> 버스 노선 획득
3. 획득한 버스 노선에서 출발지, 목적지 시간 계산하여 도착 버스 정류장 반환
'''
# Prompt for longitude (gps_x) and latitude (gps_y); both are kept as the
# raw strings returned by input().
print("gps_x : ", end='')
gps_x = input()
print("gps_y : ", end='')
gps_y = input()
# Time budget; int() raises ValueError on non-numeric input.
# NOTE(review): units presumably minutes (sample value 20) — verify against
# destination_search.
print("time : ", end='')
time = int(input())
print('gps X : ', gps_x, 'gps Y : ', gps_y, 'time : ', time)
# Build the search object and print the reachable stops.
result = destination_search.DestinationStation(gps_x, gps_y, time)
print('gps_x, gps_y, 도착 시간, 도착 정류장')
result.show_result()
| [
"destination_search.DestinationStation"
] | [((429, 486), 'destination_search.DestinationStation', 'destination_search.DestinationStation', (['gps_x', 'gps_y', 'time'], {}), '(gps_x, gps_y, time)\n', (466, 486), False, 'import destination_search\n')] |
from torch.utils.data import Dataset
import cv2
class SegmentationDataSet(Dataset):
    """Torch dataset yielding (RGB image, grayscale mask) pairs via OpenCV.

    An optional transform is applied to both the image and its mask.
    """

    def __init__(self, image_paths, mask_paths, transform):
        self.image_paths = image_paths
        self.mask_paths = mask_paths
        self.transforms = transform

    def __len__(self):
        """Number of samples (one per image path)."""
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Load sample *idx*: the image as RGB, the mask as grayscale."""
        img_file = self.image_paths[idx]
        # OpenCV loads BGR; convert to RGB for downstream models.
        bgr = cv2.imread(img_file)
        image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(self.mask_paths[idx], 0)  # flag 0 -> grayscale
        if self.transforms is not None:
            image = self.transforms(image)
            mask = self.transforms(mask)
        return image, mask
| [
"cv2.imread",
"cv2.cvtColor"
] | [((600, 622), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (610, 622), False, 'import cv2\n'), ((639, 677), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (651, 677), False, 'import cv2\n'), ((700, 735), 'cv2.imread', 'cv2.imread', (['self.mask_paths[idx]', '(0)'], {}), '(self.mask_paths[idx], 0)\n', (710, 735), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools.misc import format_date
class AccrualAccountingWizard(models.TransientModel):
    """Wizard that recognizes a percentage of selected revenue/expense
    journal items in another period by creating two mirrored accrual
    adjusting entries per source move and reconciling them on the
    accrual account."""
    _name = 'account.accrual.accounting.wizard'
    _description = 'Create accrual entry.'

    date = fields.Date(required=True)  # accounting date of the adjusting entry
    company_id = fields.Many2one('res.company', required=True)
    # Internal group of the selected lines' account ('income' or 'expense').
    account_type = fields.Selection([('income', 'Revenue'), ('expense', 'Expense')])
    active_move_line_ids = fields.Many2many('account.move.line')  # the user-selected journal items
    journal_id = fields.Many2one('account.journal', required=True, readonly=False,
        domain="[('company_id', '=', company_id), ('type', '=', 'general')]",
        related="company_id.accrual_default_journal_id")
    expense_accrual_account = fields.Many2one('account.account', readonly=False,
        domain="[('company_id', '=', company_id), ('internal_type', 'not in', ('receivable', 'payable')), ('internal_group', '=', 'liability'), ('reconcile', '=', True)]",
        related="company_id.expense_accrual_account_id")
    revenue_accrual_account = fields.Many2one('account.account', readonly=False,
        domain="[('company_id', '=', company_id), ('internal_type', 'not in', ('receivable', 'payable')), ('internal_group', '=', 'asset'), ('reconcile', '=', True)]",
        related="company_id.revenue_accrual_account_id")
    percentage = fields.Float("Percentage", default=100.0)  # share recognized now; must be in (0, 100]
    total_amount = fields.Monetary(compute="_compute_total_amount", currency_field='company_currency_id')
    company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id')

    @api.constrains('percentage')
    def _constraint_percentage(self):
        """Reject percentages outside the half-open range (0, 100]."""
        for record in self:
            if not (0.0 < record.percentage <= 100.0):
                raise UserError(_("Percentage must be between 0 and 100"))

    @api.depends('percentage', 'active_move_line_ids')
    def _compute_total_amount(self):
        """Total company-currency amount that will be moved: percentage of
        the selected lines' debit + credit."""
        for record in self:
            record.total_amount = sum(record.active_move_line_ids.mapped(lambda l: record.percentage * (l.debit + l.credit) / 100))

    @api.model
    def default_get(self, fields):
        """Seed the wizard from the journal items in the context.

        Raises UserError unless the selection is posted, unreconciled,
        homogeneous in account type and company, and of an allowed
        revenue/expense account type.
        """
        if self.env.context.get('active_model') != 'account.move.line' or not self.env.context.get('active_ids'):
            raise UserError(_('This can only be used on journal items'))
        rec = super(AccrualAccountingWizard, self).default_get(fields)
        active_move_line_ids = self.env['account.move.line'].browse(self.env.context['active_ids'])
        rec['active_move_line_ids'] = active_move_line_ids.ids
        if any(move.state != 'posted' for move in active_move_line_ids.mapped('move_id')):
            raise UserError(_('You can only change the period for posted journal items.'))
        if any(move_line.reconciled for move_line in active_move_line_ids):
            raise UserError(_('You can only change the period for items that are not yet reconciled.'))
        if any(line.account_id.user_type_id != active_move_line_ids[0].account_id.user_type_id for line in active_move_line_ids):
            raise UserError(_('All accounts on the lines must be from the same type.'))
        if any(line.company_id != active_move_line_ids[0].company_id for line in active_move_line_ids):
            raise UserError(_('All lines must be from the same company.'))
        rec['company_id'] = active_move_line_ids[0].company_id.id
        account_types_allowed = self.env.ref('account.data_account_type_expenses') + self.env.ref('account.data_account_type_revenue') + self.env.ref('account.data_account_type_other_income')
        if active_move_line_ids[0].account_id.user_type_id not in account_types_allowed:
            raise UserError(_('You can only change the period for items in these types of accounts: ') + ", ".join(account_types_allowed.mapped('name')))
        rec['account_type'] = active_move_line_ids[0].account_id.user_type_id.internal_group
        return rec

    def amend_entries(self):
        """Create, post, and reconcile the accrual adjusting entries, then
        return an act_window action showing the generated moves."""
        # set the accrual account on the selected journal items
        accrual_account = self.revenue_accrual_account if self.account_type == 'income' else self.expense_accrual_account

        # Generate journal entries.
        move_data = {}
        for aml in self.active_move_line_ids:
            ref1 = _('Accrual Adjusting Entry (%s%% recognized) for invoice: %s') % (self.percentage, aml.move_id.name)
            ref2 = _('Accrual Adjusting Entry (%s%% recognized) for invoice: %s') % (100 - self.percentage, aml.move_id.name)
            # Per source move: a pair of move vals (recognized part at the
            # wizard date, remainder at the original date) plus three chatter
            # messages (one per new move, one for the source move).
            move_data.setdefault(aml.move_id, (
                [
                    # Values to create moves.
                    {
                        'date': self.date,
                        'ref': ref1,
                        'journal_id': self.journal_id.id,
                        'line_ids': [],
                    },
                    {
                        'date': aml.move_id.date,
                        'ref': ref2,
                        'journal_id': self.journal_id.id,
                        'line_ids': [],
                    },
                ], [
                    # Messages to log on the chatter.
                    (_('Accrual Adjusting Entry ({percent}% recognized) for invoice:') + ' <a href=# data-oe-model=account.move data-oe-id={id}>{name}</a>').format(
                        percent=self.percentage,
                        id=aml.move_id.id,
                        name=aml.move_id.name,
                    ),
                    (_('Accrual Adjusting Entry ({percent}% recognized) for invoice:') + ' <a href=# data-oe-model=account.move data-oe-id={id}>{name}</a>').format(
                        percent=100 - self.percentage,
                        id=aml.move_id.id,
                        name=aml.move_id.name,
                    ),
                    (_('Accrual Adjusting Entries ({percent}%% recognized) have been created for this invoice on {date}') + ' <a href=# data-oe-model=account.move data-oe-id=%(first_id)d>%(first_name)s</a> and <a href=# data-oe-model=account.move data-oe-id=%(second_id)d>%(second_name)s</a>').format(
                        percent=self.percentage,
                        date=format_date(self.env, self.date),
                    ),
                ]
            ))

            # Split the line's amounts by the chosen percentage, rounded in
            # the company currency (and line currency when set).
            reported_debit = aml.company_id.currency_id.round((self.percentage / 100) * aml.debit)
            reported_credit = aml.company_id.currency_id.round((self.percentage / 100) * aml.credit)
            if aml.currency_id:
                reported_amount_currency = aml.currency_id.round((self.percentage / 100) * aml.amount_currency)
            else:
                reported_amount_currency = 0.0

            # First move: recognized part on the original account, balanced
            # by the accrual account.
            move_data[aml.move_id][0][0]['line_ids'] += [
                (0, 0, {
                    'name': aml.name,
                    'debit': reported_debit,
                    'credit': reported_credit,
                    'amount_currency': reported_amount_currency,
                    'currency_id': aml.currency_id.id,
                    'account_id': aml.account_id.id,
                    'partner_id': aml.partner_id.id,
                }),
                (0, 0, {
                    'name': ref1,
                    'debit': reported_credit,
                    'credit': reported_debit,
                    'amount_currency': -reported_amount_currency,
                    'currency_id': aml.currency_id.id,
                    'account_id': accrual_account.id,
                    'partner_id': aml.partner_id.id,
                }),
            ]
            # Second move: the mirror image at the original move date.
            move_data[aml.move_id][0][1]['line_ids'] += [
                (0, 0, {
                    'name': aml.name,
                    'debit': reported_credit,
                    'credit': reported_debit,
                    'amount_currency': -reported_amount_currency,
                    'currency_id': aml.currency_id.id,
                    'account_id': aml.account_id.id,
                    'partner_id': aml.partner_id.id,
                }),
                (0, 0, {
                    'name': ref2,
                    'debit': reported_debit,
                    'credit': reported_credit,
                    'amount_currency': reported_amount_currency,
                    'currency_id': aml.currency_id.id,
                    'account_id': accrual_account.id,
                    'partner_id': aml.partner_id.id,
                }),
            ]

        move_vals = []
        log_messages = []
        for v in move_data.values():
            move_vals += v[0]
            log_messages += v[1]
        created_moves = self.env['account.move'].create(move_vals)
        created_moves.post()

        # Reconcile.
        index = 0
        for move in self.active_move_line_ids.mapped('move_id'):
            accrual_moves = created_moves[index:index + 2]

            to_reconcile = accrual_moves.mapped('line_ids').filtered(lambda line: line.account_id == accrual_account)
            to_reconcile.reconcile()
            # NOTE(review): log_messages holds 3 entries per source move but
            # is indexed with index//2 + k (k in 0..2); for more than one
            # source move the offsets overlap — verify the intended indexing.
            move.message_post(body=log_messages[index//2 + 2] % {
                'first_id': accrual_moves[0].id,
                'first_name': accrual_moves[0].name,
                'second_id': accrual_moves[1].id,
                'second_name': accrual_moves[1].name,
            })
            accrual_moves[0].message_post(body=log_messages[index//2 + 0])
            accrual_moves[1].message_post(body=log_messages[index//2 + 1])
            index += 2

        # open the generated entries
        action = {
            'name': _('Generated Entries'),
            'domain': [('id', 'in', created_moves.ids)],
            'res_model': 'account.move',
            'view_mode': 'tree,form',
            'type': 'ir.actions.act_window',
            'views': [(self.env.ref('account.view_move_tree').id, 'tree'), (False, 'form')],
        }
        if len(created_moves) == 1:
            action.update({'view_mode': 'form', 'res_id': created_moves.id})
        return action
| [
"odoo._",
"odoo.fields.Monetary",
"odoo.api.constrains",
"odoo.fields.Date",
"odoo.fields.Float",
"odoo.fields.Many2one",
"odoo.api.depends",
"odoo.tools.misc.format_date",
"odoo.fields.Selection",
"odoo.fields.Many2many"
] | [((301, 327), 'odoo.fields.Date', 'fields.Date', ([], {'required': '(True)'}), '(required=True)\n', (312, 327), False, 'from odoo import api, fields, models, _\n'), ((345, 390), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.company"""'], {'required': '(True)'}), "('res.company', required=True)\n", (360, 390), False, 'from odoo import api, fields, models, _\n'), ((410, 475), 'odoo.fields.Selection', 'fields.Selection', (["[('income', 'Revenue'), ('expense', 'Expense')]"], {}), "([('income', 'Revenue'), ('expense', 'Expense')])\n", (426, 475), False, 'from odoo import api, fields, models, _\n'), ((503, 540), 'odoo.fields.Many2many', 'fields.Many2many', (['"""account.move.line"""'], {}), "('account.move.line')\n", (519, 540), False, 'from odoo import api, fields, models, _\n'), ((558, 752), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.journal"""'], {'required': '(True)', 'readonly': '(False)', 'domain': '"""[(\'company_id\', \'=\', company_id), (\'type\', \'=\', \'general\')]"""', 'related': '"""company_id.accrual_default_journal_id"""'}), '(\'account.journal\', required=True, readonly=False, domain=\n "[(\'company_id\', \'=\', company_id), (\'type\', \'=\', \'general\')]", related=\n \'company_id.accrual_default_journal_id\')\n', (573, 752), False, 'from odoo import api, fields, models, _\n'), ((789, 1062), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.account"""'], {'readonly': '(False)', 'domain': '"""[(\'company_id\', \'=\', company_id), (\'internal_type\', \'not in\', (\'receivable\', \'payable\')), (\'internal_group\', \'=\', \'liability\'), (\'reconcile\', \'=\', True)]"""', 'related': '"""company_id.expense_accrual_account_id"""'}), '(\'account.account\', readonly=False, domain=\n "[(\'company_id\', \'=\', company_id), (\'internal_type\', \'not in\', (\'receivable\', \'payable\')), (\'internal_group\', \'=\', \'liability\'), (\'reconcile\', \'=\', True)]"\n , related=\'company_id.expense_accrual_account_id\')\n', (804, 1062), 
False, 'from odoo import api, fields, models, _\n'), ((1099, 1368), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.account"""'], {'readonly': '(False)', 'domain': '"""[(\'company_id\', \'=\', company_id), (\'internal_type\', \'not in\', (\'receivable\', \'payable\')), (\'internal_group\', \'=\', \'asset\'), (\'reconcile\', \'=\', True)]"""', 'related': '"""company_id.revenue_accrual_account_id"""'}), '(\'account.account\', readonly=False, domain=\n "[(\'company_id\', \'=\', company_id), (\'internal_type\', \'not in\', (\'receivable\', \'payable\')), (\'internal_group\', \'=\', \'asset\'), (\'reconcile\', \'=\', True)]"\n , related=\'company_id.revenue_accrual_account_id\')\n', (1114, 1368), False, 'from odoo import api, fields, models, _\n'), ((1392, 1433), 'odoo.fields.Float', 'fields.Float', (['"""Percentage"""'], {'default': '(100.0)'}), "('Percentage', default=100.0)\n", (1404, 1433), False, 'from odoo import api, fields, models, _\n'), ((1453, 1544), 'odoo.fields.Monetary', 'fields.Monetary', ([], {'compute': '"""_compute_total_amount"""', 'currency_field': '"""company_currency_id"""'}), "(compute='_compute_total_amount', currency_field=\n 'company_currency_id')\n", (1468, 1544), False, 'from odoo import api, fields, models, _\n'), ((1566, 1631), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.currency"""'], {'related': '"""company_id.currency_id"""'}), "('res.currency', related='company_id.currency_id')\n", (1581, 1631), False, 'from odoo import api, fields, models, _\n'), ((1638, 1666), 'odoo.api.constrains', 'api.constrains', (['"""percentage"""'], {}), "('percentage')\n", (1652, 1666), False, 'from odoo import api, fields, models, _\n'), ((1869, 1918), 'odoo.api.depends', 'api.depends', (['"""percentage"""', '"""active_move_line_ids"""'], {}), "('percentage', 'active_move_line_ids')\n", (1880, 1918), False, 'from odoo import api, fields, models, _\n'), ((9559, 9581), 'odoo._', '_', (['"""Generated Entries"""'], {}), "('Generated 
Entries')\n", (9560, 9581), False, 'from odoo import api, fields, models, _\n'), ((2309, 2352), 'odoo._', '_', (['"""This can only be used on journal items"""'], {}), "('This can only be used on journal items')\n", (2310, 2352), False, 'from odoo import api, fields, models, _\n'), ((2708, 2769), 'odoo._', '_', (['"""You can only change the period for posted journal items."""'], {}), "('You can only change the period for posted journal items.')\n", (2709, 2769), False, 'from odoo import api, fields, models, _\n'), ((2875, 2949), 'odoo._', '_', (['"""You can only change the period for items that are not yet reconciled."""'], {}), "('You can only change the period for items that are not yet reconciled.')\n", (2876, 2949), False, 'from odoo import api, fields, models, _\n'), ((3109, 3167), 'odoo._', '_', (['"""All accounts on the lines must be from the same type."""'], {}), "('All accounts on the lines must be from the same type.')\n", (3110, 3167), False, 'from odoo import api, fields, models, _\n'), ((3301, 3346), 'odoo._', '_', (['"""All lines must be from the same company."""'], {}), "('All lines must be from the same company.')\n", (3302, 3346), False, 'from odoo import api, fields, models, _\n'), ((4302, 4364), 'odoo._', '_', (['"""Accrual Adjusting Entry (%s%% recognized) for invoice: %s"""'], {}), "('Accrual Adjusting Entry (%s%% recognized) for invoice: %s')\n", (4303, 4364), False, 'from odoo import api, fields, models, _\n'), ((4422, 4484), 'odoo._', '_', (['"""Accrual Adjusting Entry (%s%% recognized) for invoice: %s"""'], {}), "('Accrual Adjusting Entry (%s%% recognized) for invoice: %s')\n", (4423, 4484), False, 'from odoo import api, fields, models, _\n'), ((1820, 1861), 'odoo._', '_', (['"""Percentage must be between 0 and 100"""'], {}), "('Percentage must be between 0 and 100')\n", (1821, 1861), False, 'from odoo import api, fields, models, _\n'), ((3723, 3797), 'odoo._', '_', (['"""You can only change the period for items in these types of accounts: 
"""'], {}), "('You can only change the period for items in these types of accounts: ')\n", (3724, 3797), False, 'from odoo import api, fields, models, _\n'), ((6209, 6241), 'odoo.tools.misc.format_date', 'format_date', (['self.env', 'self.date'], {}), '(self.env, self.date)\n', (6220, 6241), False, 'from odoo.tools.misc import format_date\n'), ((5190, 5255), 'odoo._', '_', (['"""Accrual Adjusting Entry ({percent}% recognized) for invoice:"""'], {}), "('Accrual Adjusting Entry ({percent}% recognized) for invoice:')\n", (5191, 5255), False, 'from odoo import api, fields, models, _\n'), ((5517, 5582), 'odoo._', '_', (['"""Accrual Adjusting Entry ({percent}% recognized) for invoice:"""'], {}), "('Accrual Adjusting Entry ({percent}% recognized) for invoice:')\n", (5518, 5582), False, 'from odoo import api, fields, models, _\n'), ((5850, 5955), 'odoo._', '_', (['"""Accrual Adjusting Entries ({percent}%% recognized) have been created for this invoice on {date}"""'], {}), "('Accrual Adjusting Entries ({percent}%% recognized) have been created for this invoice on {date}'\n )\n", (5851, 5955), False, 'from odoo import api, fields, models, _\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 11 21:22:59 2018
@author: pami4
"""
#CUDA_VISIBLE_DEVICES=0 python
from pycocotools.coco import COCO
import coco
import numpy as np
from matplotlib import pyplot as plt
import visualize
import custom_utils
config = coco.CocoConfig()
config.GPU_COUNT = 1
import CustomDataset
data_train=CustomDataset.CocoDataset()
data_train.load_coco("..","train", year=2014)
data_train.prepare()
#import CustomDataGenerator
#data_gen=CustomDataGenerator.data_generator(data_train, config, batch_size=2, shuffle=False, augment=False)
from CustomDataGenerator import CustomDatasetIterator_MaskRCNN
data_gen = CustomDatasetIterator_MaskRCNN(data_train, config, mode="val", shuffle=False,
batch_size=2, augment=True)
#plt.imshow((images[0]+config.MEAN_PIXEL).astype(np.uint8))
import model as modellib
model=modellib.MaskRCNN(mode="training", config=config, model_dir="logs")
model.load_weights("logs/coco20180327T1023/mask_rcnn_coco_0050.h5", by_name=True, skip_mismatch=True)
#model.load_weights("/home/pami4/.keras/models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5", by_name=True)
inputs, outputs= next(data_gen)
outs=model.keras_model.predict(inputs)
#out_kp_vs = outs[18]
#np.where(out_kp_vs)
#
#
#out_kp_masks=outs[19]
images = inputs[0]
rois = outs[8]
img_idx=0
visualize.draw_boxes((images[img_idx]+config.MEAN_PIXEL).astype(np.uint8), boxes=rois[img_idx][:10]*np.array([1024,1024,1024,1024]))
plt.show()
#layer=model.keras_model.get_layer(name='kp_mask_bilinear_up')
#layer.get_weights()[0].shape
kp_masks=outs[-3]
kp_vs = outs[-4]
target_masks = outs[-1]
target_class_ids=outs[-2]
pred_kp_masks=outs[10]
pred_masks = outs[6]
#target_class_ids.shape
img_idx=0
index=1
visualize.draw_boxes((images[img_idx]+config.MEAN_PIXEL).astype(np.uint8), boxes=rois[img_idx][index:index+1]*np.array([1024,1024,1024,1024]))
plt.show()
custom_utils.showKPs((images[img_idx]+config.MEAN_PIXEL).astype(np.uint8), rois[img_idx][index]*np.array([1024,1024,1024,1024]),kp_vs[img_idx][index], kp_masks[img_idx][index], target_masks[img_idx][index])
plt.imshow(np.sum(kp_masks[1][index], axis=2))
plt.show()
#custom_utils.showKPs((images[1]+config.MEAN_PIXEL).astype(np.uint8), rois[1][index]*np.array([1024,1024,1024,1024]),kp_vs[1][index], kp_masks[1][index])
#pred_kp_masks=outs[10]
#pred_masks = outs[6]
#custom_utils.showKPs((images[1]+config.MEAN_PIXEL).astype(np.uint8), rois[1][index]*np.array([1024,1024,1024,1024]),kp_vs[1][index], pred_kp_masks[1][index])
custom_utils.showKPs((images[img_idx]+config.MEAN_PIXEL).astype(np.uint8), rois[img_idx][index]*np.array([1024,1024,1024,1024]),kp_vs[img_idx][index], pred_kp_masks[img_idx][index], pred_masks[img_idx][index][:,:,1])
from imp import reload
| [
"CustomDataGenerator.CustomDatasetIterator_MaskRCNN",
"CustomDataset.CocoDataset",
"numpy.sum",
"numpy.array",
"model.MaskRCNN",
"coco.CocoConfig",
"matplotlib.pyplot.show"
] | [((289, 306), 'coco.CocoConfig', 'coco.CocoConfig', ([], {}), '()\n', (304, 306), False, 'import coco\n'), ((361, 388), 'CustomDataset.CocoDataset', 'CustomDataset.CocoDataset', ([], {}), '()\n', (386, 388), False, 'import CustomDataset\n'), ((668, 778), 'CustomDataGenerator.CustomDatasetIterator_MaskRCNN', 'CustomDatasetIterator_MaskRCNN', (['data_train', 'config'], {'mode': '"""val"""', 'shuffle': '(False)', 'batch_size': '(2)', 'augment': '(True)'}), "(data_train, config, mode='val', shuffle=\n False, batch_size=2, augment=True)\n", (698, 778), False, 'from CustomDataGenerator import CustomDatasetIterator_MaskRCNN\n'), ((908, 975), 'model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': '"""logs"""'}), "(mode='training', config=config, model_dir='logs')\n", (925, 975), True, 'import model as modellib\n'), ((1516, 1526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1524, 1526), True, 'from matplotlib import pyplot as plt\n'), ((1939, 1949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1947, 1949), True, 'from matplotlib import pyplot as plt\n'), ((2206, 2216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2214, 2216), True, 'from matplotlib import pyplot as plt\n'), ((2170, 2204), 'numpy.sum', 'np.sum', (['kp_masks[1][index]'], {'axis': '(2)'}), '(kp_masks[1][index], axis=2)\n', (2176, 2204), True, 'import numpy as np\n'), ((2047, 2081), 'numpy.array', 'np.array', (['[1024, 1024, 1024, 1024]'], {}), '([1024, 1024, 1024, 1024])\n', (2055, 2081), True, 'import numpy as np\n'), ((2674, 2708), 'numpy.array', 'np.array', (['[1024, 1024, 1024, 1024]'], {}), '([1024, 1024, 1024, 1024])\n', (2682, 2708), True, 'import numpy as np\n'), ((1483, 1517), 'numpy.array', 'np.array', (['[1024, 1024, 1024, 1024]'], {}), '([1024, 1024, 1024, 1024])\n', (1491, 1517), True, 'import numpy as np\n'), ((1906, 1940), 'numpy.array', 'np.array', (['[1024, 1024, 1024, 1024]'], {}), '([1024, 1024, 1024, 
1024])\n', (1914, 1940), True, 'import numpy as np\n')] |
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("x_mlps")
except PackageNotFoundError:
# package is not installed
__version__ = None
| [
"importlib.metadata.version"
] | [((85, 102), 'importlib.metadata.version', 'version', (['"""x_mlps"""'], {}), "('x_mlps')\n", (92, 102), False, 'from importlib.metadata import PackageNotFoundError, version\n')] |
from django.db.models.signals import post_save
from django.dispatch import receiver
from company.models import Company
from company.tasks import deploy_new_company
@receiver(post_save, sender=Company)
def company_created(sender, instance, created, **kwargs):
if created:
deploy_new_company.delay(instance.id)
| [
"django.dispatch.receiver",
"company.tasks.deploy_new_company.delay"
] | [((168, 203), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Company'}), '(post_save, sender=Company)\n', (176, 203), False, 'from django.dispatch import receiver\n'), ((286, 323), 'company.tasks.deploy_new_company.delay', 'deploy_new_company.delay', (['instance.id'], {}), '(instance.id)\n', (310, 323), False, 'from company.tasks import deploy_new_company\n')] |
# -*- coding:utf-8 -*-
from sc import SupercellSWF
import os
def findAllFile(basePath, endsWith):
for root, ds, fs in os.walk(basePath):
for f in fs:
if not f.startswith('.') and f.endswith(endsWith):
fullname = os.path.join(root, f)
yield fullname
| [
"os.path.join",
"os.walk"
] | [((123, 140), 'os.walk', 'os.walk', (['basePath'], {}), '(basePath)\n', (130, 140), False, 'import os\n'), ((253, 274), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (265, 274), False, 'import os\n')] |
#####################################################
# #
# Source file of the MadFlow plugin #
# Use only with consent of its authors. #
# #
# authors: S.Carrazza, J.Cruz-Martinez, #
# M.Rossi, M.Zaro #
# #
# #
#####################################################
import os
import sys
root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0]
sys.path.insert(0, root_path)
from . import PyOut_exporter
##import Resummation.resummation_exporters as resummation_exporters
# Three types of functionality are allowed in a plugin
# 1. new output mode
# 2. new cluster support
# 3. new interface
# 1. Define new output mode
# example: new_output = {'myformat': MYCLASS}
# madgraph will then allow the command "output myformat PATH"
# MYCLASS should inherated of the class madgraph.iolibs.export_v4.VirtualExporter
new_output = {'pyout': PyOut_exporter.PyOutExporter}
# 2. Define new way to handle the cluster.
# example new_cluster = {'mycluster': MYCLUSTERCLASS}
# allow "set cluster_type mycluster" in madgraph
# MYCLUSTERCLASS should inherated from madgraph.various.cluster.Cluster
new_cluster = {}
# 3. Define a new interface (allow to add/modify MG5 command)
# This can be activated via ./bin/mg5_aMC --mode=PLUGINNAME
## Put None if no dedicated command are required
new_interface = None
########################## CONTROL VARIABLE ####################################
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = (0,1,0)
minimal_mg5amcnlo_version = (2,5,0)
maximal_mg5amcnlo_version = (1000,1000,1000)
latest_validated_version = (2,5,0)
| [
"os.path.realpath",
"sys.path.insert"
] | [((638, 667), 'sys.path.insert', 'sys.path.insert', (['(0)', 'root_path'], {}), '(0, root_path)\n', (653, 667), False, 'import sys\n'), ((604, 630), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (620, 630), False, 'import os\n')] |
import abc
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, cast
from loguru import logger
from rich import print
from dubdub import (
Binary,
Grouping,
Literal,
Node,
Token,
TokenType,
Unary,
Visitor,
dataclass,
)
from dubdub.callables import *
from dubdub.dubcall import DubCallable
from dubdub.env import Environment
from dubdub.operations import *
from dubdub.parser import Parser
from dubdub.scanner import Scanner
CWD_DIR = Path.cwd()
@dataclass
class __Interpreter(Visitor):
environment: Optional[Environment] = None
globals: Environment = Environment()
locals: Dict[Expr, int] = field(default_factory=lambda: {})
def resolve(self, expr: Expr, depth: int):
self.locals[expr] = depth
return
def look_up_variable(self, name: Token, expr: Expr):
distance = self.locals.get(expr, None)
if distance is not None:
return self.environment.get_at(distance, name.lexeme)
return self.globals.get(name)
def __post_init__(self):
self.globals.define("clock", Clock())
self.environment = self.globals
def is_truthy(self, value: Any) -> bool:
if value is None:
return False
if isinstance(value, bool):
return value
return True
def is_equal(self, first: Any, second: Any) -> bool:
return first == second
class __Interpreter(__Interpreter):
def visit_binary(self, expr: "Binary"):
left = self.evaluate(expr.left)
right = self.evaluate(expr.right)
token_type = expr.token.token_type
if TokenType.GREATER:
return float(left) > float(right)
elif TokenType.GREATER_EQUAL:
return float(left) >= float(right)
elif TokenType.LESS:
return float(left) < float(right)
elif TokenType.LESS_EQUAL:
return float(left) <= float(right)
elif TokenType.BANG_EQUAL:
return self.is_equal(left, right)
elif token_type == TokenType.MINUS:
return float(left) - float(right)
elif token_type == TokenType.SLASH:
return float(left) / float(right)
elif token_type == TokenType.STAR:
return float(left) * float(right)
elif token_type == TokenType.PLUS:
if isinstance(left, str) and isinstance(right, str):
return str(left) + str(right)
elif isinstance(left, (int, float)) and isinstance(right, (int, float)):
return float(left) + float(right)
raise TypeError("One of the two evaluated expressions is off.")
def visit_unary(self, node: "Unary"):
right = self.evaluate(node.right)
exp_types = {
TokenType.MINUS: lambda x: (-1 * x),
TokenType.BANG: lambda x: (not self.is_truthy(x)),
}
resp_fn = exp_types.get(node.token.token_type, None)
if resp_fn is not None:
return resp_fn(right)
raise Exception("Shouldn't have touched this here.")
"""
Normal expressions
"""
def visit_grouping(self, expr: "Grouping") -> str:
return self.evaluate(expr.expression)
def visit_literal(self, node: "Literal") -> Any:
return node.value
def visit_token(self, node: "Token"):
logger.debug("Visiting a token")
def visit_expression_stmt(self, stmt: ExpressionStmt):
return self.evaluate(stmt.expression)
def visit_print(self, print_stmt: Print):
value = self.evaluate(print_stmt.expression)
print(str(value))
def evaluate(self, node: "Node") -> Any:
return self.visit(node)
class Interpreter(__Interpreter):
"""Statement managment"""
def visit_var_stmt(self, stmt: "Var"):
value = None
if stmt.initializer is not None:
value = self.evaluate(stmt.initializer)
self.environment.define(stmt.name, value)
def visit_variable_expr(self, expr: Variable) -> Any:
return self.environment.get(expr.name)
def visit_assign_expr(self, expr: Assign):
value = self.evaluate(expr.value)
distance = self.locals.get(expr, None)
if distance:
self.environment.assign_at(distance, expr.name, value)
else:
self.globals.assign(expr.name, value)
return value
def visit_block_stmt(self, stmt: Block) -> None:
self.execute_block(stmt.stmts, Environment(enclosing=self.environment))
def visit_if_stmt(self, stmt: If):
if self.is_truthy(self.evaluate(stmt.condition)):
self.evaluate(stmt.then_branch)
elif stmt.else_branch is not None:
self.evaluate(stmt.else_branch)
def visit_logical_expr(self, expr: Binary):
"""
The concept is a bit complex. Though we're saying the following rules:
1. if the statement is an OR statement, and the first is true, the conditions are met. Skip the next check.
2. If the statement is and AND statement, and the first condition is true.
1. Then you must check the second condition because both must be true.
2. Otherwise, if the first condition is false, skip it. All of it is false then.
Args:
expr (Binary): This the comparison statement overall.
Returns:
bool: Hopefully returns a boolean type.
"""
left = self.evaluate(expr.left)
if expr.token.token_type == TokenType.OR:
if self.is_truthy(left):
return left
else:
if not self.is_truthy(left):
return left
return self.evaluate(expr.right)
def visit_while_stmt(self, stmt: Stmt):
while self.is_truthy(self.evaluate(stmt.condition)):
self.execute(stmt.body)
def visit_call_expr(self, expr: Call):
callee = self.evaluate(expr.callee)
args = []
for arg in expr.arguments:
args.append(self.evaluate(arg))
if not isinstance(expr.paren, DubCallable):
raise RuntimeError("Can only call functions and classes")
func = cast(DubCallable, callee)
size = func.arity()
arg_count = len(args)
if arg_count != size:
raise RuntimeError(f"expected {size} arguments but got {arg_count}")
return func.call(self, *args)
def visit_function_stmt(self, stmt: Function):
function: DubFunction = DubFunction(stmt, self.environment)
self.environment.define(stmt.name.lexeme, function)
def visit_return_stmt(self, stmt: Stmt):
value = None
if stmt.value is not None:
value = self.evaluate(stmt.value)
raise ReturnErr(value)
#
def execute_block(self, stmts: List[Stmt], environment: Environment):
previous = self.env
try:
self.env = environment
for stmt in stmts:
self.evaluate(stmt)
except Exception as e:
raise e
finally:
self.env = previous
def intepret(self, stmts: List[Stmt]):
try:
for stmt in stmts:
self.evaluate(stmt)
except Exception as e:
raise e
def main():
# nested_scopes = "var hello = 1234.456;"
# Note: The scanner has a index bug. Will need to solve it at some point.
# nested_scopes = " 10 * 12 + ( 1 + 1 ) "
nested_scopes = "( 100 == 100 )"
scanner = Scanner(source=nested_scopes)
tokens: List[Token] = scanner.scan_tokens()
print(tokens)
parser = Parser(tokens=tokens)
parsed_stmts = parser.parse()
interp = Interpreter()
print(parsed_stmts)
# resp = interp.intepret(parsed_stmts)
# print(resp)
if __name__ == "__main__":
main()
| [
"loguru.logger.debug",
"dubdub.parser.Parser",
"pathlib.Path.cwd",
"rich.print",
"dubdub.scanner.Scanner",
"dubdub.env.Environment",
"typing.cast"
] | [((533, 543), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (541, 543), False, 'from pathlib import Path\n'), ((660, 673), 'dubdub.env.Environment', 'Environment', ([], {}), '()\n', (671, 673), False, 'from dubdub.env import Environment\n'), ((7550, 7579), 'dubdub.scanner.Scanner', 'Scanner', ([], {'source': 'nested_scopes'}), '(source=nested_scopes)\n', (7557, 7579), False, 'from dubdub.scanner import Scanner\n'), ((7632, 7645), 'rich.print', 'print', (['tokens'], {}), '(tokens)\n', (7637, 7645), False, 'from rich import print\n'), ((7660, 7681), 'dubdub.parser.Parser', 'Parser', ([], {'tokens': 'tokens'}), '(tokens=tokens)\n', (7666, 7681), False, 'from dubdub.parser import Parser\n'), ((7748, 7767), 'rich.print', 'print', (['parsed_stmts'], {}), '(parsed_stmts)\n', (7753, 7767), False, 'from rich import print\n'), ((3398, 3430), 'loguru.logger.debug', 'logger.debug', (['"""Visiting a token"""'], {}), "('Visiting a token')\n", (3410, 3430), False, 'from loguru import logger\n'), ((6220, 6245), 'typing.cast', 'cast', (['DubCallable', 'callee'], {}), '(DubCallable, callee)\n', (6224, 6245), False, 'from typing import Any, Dict, List, Optional, cast\n'), ((4526, 4565), 'dubdub.env.Environment', 'Environment', ([], {'enclosing': 'self.environment'}), '(enclosing=self.environment)\n', (4537, 4565), False, 'from dubdub.env import Environment\n')] |
from pdpyras import APISession
from requests.sessions import session
from kafka import KafkaConsumer
import json
import sys
print("Starting the alerts listener")
# Kafka configurations
kafka_broker = 'kafka:9092'
alerts_topic = "high-temp-alerts-u5-1631586083-13100517045407420899"
# PageDuty configurations
api_token = 'u+CtEy_6s91Pp93RM7sQ'
service_id = "PU35FY7"
from_email = "<EMAIL>"
# Initialize the PageDuty session
# session = APISession(api_token)
# session.default_from = "<EMAIL>"
consumer = KafkaConsumer(alerts_topic,
bootstrap_servers=[kafka_broker],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='my-group',
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
# consumer.subscribe(pattern=alerts_topic_pattern)
for message in consumer:
try:
event = message.value
if event["after"] is None:
continue
else:
row = event["after"]["row"]
# print("sensor-uuid: %s average: %s"%(row["sensor_uuid"],str(row["avg"])))
sensor_uuid = row["sensor_uuid"]
avg_temperature = row["avg"]
# Trigger a PD incident
incident_title = "High temperature observed in the data center"
incident_description = "The temperature sensor %s observed an average temperature of %s during the past minute." % (sensor_uuid,str(avg_temperature))
payload = {
"type": "incident",
"service": {"id": "", "type": "service_reference"},
"body": {"type": "incident_body", "details": ""}
}
# Manipulate the payload
payload["title"] = incident_title
payload["service"]["id"] = service_id
payload["body"]["details"] = incident_description
# pd_incident = session.rpost("incidents", json=payload)
# print(pd_incident)
print("Incident triggered")
print(json.dumps(payload))
except:
print("An error occured while triggering the incident.", sys.exc_info()[0])
| [
"sys.exc_info",
"json.dumps"
] | [((1833, 1852), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1843, 1852), False, 'import json\n'), ((1925, 1939), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1937, 1939), False, 'import sys\n')] |
from decimal import Decimal
from src import profitable_music_genre
EXPECTED_RESULT = [
(Decimal('826.65'), 'Rock'),
(Decimal('382.14'), 'Latin'),
(Decimal('261.36'), 'Metal'),
(Decimal('241.56'), 'Alternative & Punk'),
(Decimal('93.53'), 'TV Shows'),
(Decimal('79.20'), 'Jazz'),
(Decimal('60.39'), 'Blues'),
(Decimal('57.71'), 'Drama'),
(Decimal('40.59'), 'R&B/Soul'),
(Decimal('40.59'), 'Classical'),
(Decimal('39.80'), 'Sci Fi & Fantasy'),
(Decimal('29.70'), 'Reggae'),
(Decimal('27.72'), 'Pop'),
(Decimal('19.80'), 'Soundtrack'),
(Decimal('17.91'), 'Comedy'),
(Decimal('16.83'), 'Hip Hop/Rap'),
(Decimal('14.85'), 'Bossa Nova'),
(Decimal('13.86'), 'Alternative'),
(Decimal('12.87'), 'World'),
(Decimal('11.94'), 'Science Fiction'),
(Decimal('11.88'), 'Heavy Metal'),
(Decimal('11.88'), 'Electronica/Dance'),
(Decimal('9.90'), 'Easy Listening'),
(Decimal('5.94'), 'Rock And Roll'),
]
def test_most_profitable_music_genre() -> None:
tested_result = [tuple(row) for row in profitable_music_genre.most_profitable_music_genre()]
assert tested_result == EXPECTED_RESULT
| [
"src.profitable_music_genre.most_profitable_music_genre",
"decimal.Decimal"
] | [((94, 111), 'decimal.Decimal', 'Decimal', (['"""826.65"""'], {}), "('826.65')\n", (101, 111), False, 'from decimal import Decimal\n'), ((127, 144), 'decimal.Decimal', 'Decimal', (['"""382.14"""'], {}), "('382.14')\n", (134, 144), False, 'from decimal import Decimal\n'), ((161, 178), 'decimal.Decimal', 'Decimal', (['"""261.36"""'], {}), "('261.36')\n", (168, 178), False, 'from decimal import Decimal\n'), ((195, 212), 'decimal.Decimal', 'Decimal', (['"""241.56"""'], {}), "('241.56')\n", (202, 212), False, 'from decimal import Decimal\n'), ((242, 258), 'decimal.Decimal', 'Decimal', (['"""93.53"""'], {}), "('93.53')\n", (249, 258), False, 'from decimal import Decimal\n'), ((278, 294), 'decimal.Decimal', 'Decimal', (['"""79.20"""'], {}), "('79.20')\n", (285, 294), False, 'from decimal import Decimal\n'), ((310, 326), 'decimal.Decimal', 'Decimal', (['"""60.39"""'], {}), "('60.39')\n", (317, 326), False, 'from decimal import Decimal\n'), ((343, 359), 'decimal.Decimal', 'Decimal', (['"""57.71"""'], {}), "('57.71')\n", (350, 359), False, 'from decimal import Decimal\n'), ((376, 392), 'decimal.Decimal', 'Decimal', (['"""40.59"""'], {}), "('40.59')\n", (383, 392), False, 'from decimal import Decimal\n'), ((412, 428), 'decimal.Decimal', 'Decimal', (['"""40.59"""'], {}), "('40.59')\n", (419, 428), False, 'from decimal import Decimal\n'), ((449, 465), 'decimal.Decimal', 'Decimal', (['"""39.80"""'], {}), "('39.80')\n", (456, 465), False, 'from decimal import Decimal\n'), ((493, 509), 'decimal.Decimal', 'Decimal', (['"""29.70"""'], {}), "('29.70')\n", (500, 509), False, 'from decimal import Decimal\n'), ((527, 543), 'decimal.Decimal', 'Decimal', (['"""27.72"""'], {}), "('27.72')\n", (534, 543), False, 'from decimal import Decimal\n'), ((558, 574), 'decimal.Decimal', 'Decimal', (['"""19.80"""'], {}), "('19.80')\n", (565, 574), False, 'from decimal import Decimal\n'), ((596, 612), 'decimal.Decimal', 'Decimal', (['"""17.91"""'], {}), "('17.91')\n", (603, 612), False, 'from 
decimal import Decimal\n'), ((630, 646), 'decimal.Decimal', 'Decimal', (['"""16.83"""'], {}), "('16.83')\n", (637, 646), False, 'from decimal import Decimal\n'), ((669, 685), 'decimal.Decimal', 'Decimal', (['"""14.85"""'], {}), "('14.85')\n", (676, 685), False, 'from decimal import Decimal\n'), ((707, 723), 'decimal.Decimal', 'Decimal', (['"""13.86"""'], {}), "('13.86')\n", (714, 723), False, 'from decimal import Decimal\n'), ((746, 762), 'decimal.Decimal', 'Decimal', (['"""12.87"""'], {}), "('12.87')\n", (753, 762), False, 'from decimal import Decimal\n'), ((779, 795), 'decimal.Decimal', 'Decimal', (['"""11.94"""'], {}), "('11.94')\n", (786, 795), False, 'from decimal import Decimal\n'), ((822, 838), 'decimal.Decimal', 'Decimal', (['"""11.88"""'], {}), "('11.88')\n", (829, 838), False, 'from decimal import Decimal\n'), ((861, 877), 'decimal.Decimal', 'Decimal', (['"""11.88"""'], {}), "('11.88')\n", (868, 877), False, 'from decimal import Decimal\n'), ((906, 921), 'decimal.Decimal', 'Decimal', (['"""9.90"""'], {}), "('9.90')\n", (913, 921), False, 'from decimal import Decimal\n'), ((947, 962), 'decimal.Decimal', 'Decimal', (['"""5.94"""'], {}), "('5.94')\n", (954, 962), False, 'from decimal import Decimal\n'), ((1077, 1129), 'src.profitable_music_genre.most_profitable_music_genre', 'profitable_music_genre.most_profitable_music_genre', ([], {}), '()\n', (1127, 1129), False, 'from src import profitable_music_genre\n')] |
import multiprocessing as mp
import os
import queue
import signal
import torch
class BaseCallback:
def __init__(self, verbose: int = 0):
self.model = None
self.verbose = verbose
def init_callback(self, model):
self.model = model
def on_training_start(self):
pass
def on_step(self, sim_step, learn_step, **kwargs):
# If False, break off Training
return True
def on_training_end(self):
pass
def on_evaluation_end(self, sim_step, learn_step, **kwargs):
return True
def update_curriculum(self, curriculum):
pass
def on_episode_done(self, episode_results):
pass
class EpisodeResults:
def __init__(self):
self.reward = 0
self.done = False
self.n_steps = 0
self.infos = []
def update(self, step_results):
self.reward += step_results.reward
self.done = step_results.done
self.n_steps = step_results.n_steps
self.infos.append(step_results.info)
class WorkerCommunication:
def __init__(self):
self._model_output_queue = mp.Queue()
self._observation_queue = mp.Queue()
self._curriculum_queue = mp.Queue()
self._event_reset = mp.Event()
self._event_quit = mp.Event()
self.pid = None
def is_interrupted(self):
return self.is_reset() or self.is_quit()
def is_reset(self):
return self._event_reset.is_set()
def done_reset(self):
self._event_reset.clear()
def put_reset(self):
os.kill(self.pid, signal.SIGUSR1)
self._event_reset.set()
def get_curriculum(self, block=False):
curriculum = None
try:
while True:
curriculum = self._curriculum_queue.get(block=block)
except queue.Empty:
pass
return curriculum
def put_update_curriculum(self, curriculum):
self._curriculum_queue.put(curriculum)
self.put_reset()
def put_observation(self, observation):
self._observation_queue.put(observation)
def get_observation(self, block=True):
return self._observation_queue.get(block=block)
def _convert_to_numpy(self, container):
if isinstance(container, list):
return [self._convert_to_numpy(c) for c in container]
elif isinstance(container, dict):
return {k: self._convert_to_numpy(v) for k, v in container.items()}
elif isinstance(container, torch.Tensor):
return container.detach().cpu().numpy()
return container
def put_model_output(self, output):
self._model_output_queue.put(self._convert_to_numpy(output))
def get_model_output(self, block=True):
return self._model_output_queue.get(block=block)
def put_quit(self):
return self._event_quit.set()
def is_quit(self):
return self._event_quit.set()
class StepResult:
def __init__(self, observation, is_reset, action_chosen, reward, done, info, n_steps):
self.observation = observation
self.is_reset = is_reset
self.action_chosen = action_chosen
self.reward = reward
self.done = done
self.info = info
self.n_steps = n_steps
class EmptyLogger:
def add_scalar(self, name, value, step, steps_added=1):
pass
def add_mean_scalar(self, name, value, step, save_every):
pass
| [
"multiprocessing.Queue",
"os.kill",
"multiprocessing.Event"
] | [((1120, 1130), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (1128, 1130), True, 'import multiprocessing as mp\n'), ((1165, 1175), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (1173, 1175), True, 'import multiprocessing as mp\n'), ((1209, 1219), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (1217, 1219), True, 'import multiprocessing as mp\n'), ((1248, 1258), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (1256, 1258), True, 'import multiprocessing as mp\n'), ((1286, 1296), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (1294, 1296), True, 'import multiprocessing as mp\n'), ((1563, 1596), 'os.kill', 'os.kill', (['self.pid', 'signal.SIGUSR1'], {}), '(self.pid, signal.SIGUSR1)\n', (1570, 1596), False, 'import os\n')] |
import os
import subprocess
import tempfile
import time
import wave
import simpleaudio
def extract(file):
    """Extract the audio track of *file* into a mono 48 kbit/s WAV via ffmpeg.

    The WAV is written under a "ptmv" directory inside the system temp dir,
    named after the current UNIX timestamp; its path is returned.

    Bug fixes vs. the original:
    - the ffmpeg command is passed as an argument list instead of
      ``str.split()``, so input paths containing spaces survive intact;
    - the output path is joined with ``os.path.join``; previously the
      directory name and timestamp were concatenated without a separator,
      so the freshly created "ptmv" directory was never actually used.
    """
    ptmv_tempdir = os.path.join(tempfile.gettempdir(), "ptmv")
    if not os.path.exists(ptmv_tempdir):
        os.makedirs(ptmv_tempdir)
    snd_file = os.path.join(ptmv_tempdir, str(int(time.time())) + ".wav")
    command = ["ffmpeg", "-i", file, "-b:a", "48k", "-ac", "1", snd_file]
    subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return snd_file
def play(file, start_time):
    """Start playback of *file* at *start_time* seconds.

    Returns the simpleaudio play handle, or None when the file is missing.
    """
    if not os.path.exists(file):
        return None
    audio = wave.open(file)
    # Seek to the requested offset before handing the stream to simpleaudio.
    audio.setpos(int(audio.getframerate() * start_time))
    return simpleaudio.WaveObject.from_wave_read(audio).play()
def stop(play_obj): play_obj.stop() | [
"os.path.exists",
"wave.open",
"simpleaudio.WaveObject.from_wave_read",
"os.makedirs",
"tempfile.gettempdir",
"time.time"
] | [((538, 553), 'wave.open', 'wave.open', (['file'], {}), '(file)\n', (547, 553), False, 'import wave\n'), ((136, 157), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (155, 157), False, 'import tempfile\n'), ((175, 203), 'os.path.exists', 'os.path.exists', (['ptmv_tempdir'], {}), '(ptmv_tempdir)\n', (189, 203), False, 'import os\n'), ((205, 230), 'os.makedirs', 'os.makedirs', (['ptmv_tempdir'], {}), '(ptmv_tempdir)\n', (216, 230), False, 'import os\n'), ((497, 517), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (511, 517), False, 'import os\n'), ((647, 694), 'simpleaudio.WaveObject.from_wave_read', 'simpleaudio.WaveObject.from_wave_read', (['wave_raw'], {}), '(wave_raw)\n', (684, 694), False, 'import simpleaudio\n'), ((266, 277), 'time.time', 'time.time', ([], {}), '()\n', (275, 277), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 11:53:32 2021
@author: HP
"""
PATH = "Forest.png"

from PIL import Image

# Slice an 8x6 sheet of 32x32 tiles into individual sprite files.  Each tile
# is copied pixel-by-pixel onto a fresh transparent RGBA canvas and written
# out as sprite_<index>.png.
# NOTE(review): assumes the source image is at least 256x192 pixels; no size
# check is performed.
sprites = [Image.new("RGBA", (32, 32), (0, 0, 0, 0)) for _ in range(8 * 6)]
index = 0
with Image.open(PATH) as sheet:
    for tile_x in range(0, 256, 32):
        for tile_y in range(0, 192, 32):
            for px in range(tile_x, tile_x + 32):
                for py in range(tile_y, tile_y + 32):
                    colour = sheet.getpixel((px, py))
                    sprites[index].putpixel((px - tile_x, py - tile_y), colour)
            sprites[index].save("sprite_{}.png".format(index))
            index += 1
| [
"PIL.Image.new",
"PIL.Image.open"
] | [((134, 175), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(32, 32)', '(0, 0, 0, 0)'], {}), "('RGBA', (32, 32), (0, 0, 0, 0))\n", (143, 175), False, 'from PIL import Image\n'), ((211, 227), 'PIL.Image.open', 'Image.open', (['PATH'], {}), '(PATH)\n', (221, 227), False, 'from PIL import Image\n')] |
from minibench import Benchmark
import time
class PauseBenchmark(Benchmark):
    """Benchmark that just sleeps — handy for sanity-checking timing output."""

    # Repeat each benchmarked method this many times.
    times = 10

    def bench_one_hundredth(self):
        """Sleep for 10 ms."""
        time.sleep(0.01)

    def bench_one_tenth(self):
        """Sleep for 100 ms."""
        time.sleep(0.1)
| [
"time.sleep"
] | [((139, 155), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (149, 155), False, 'import time\n'), ((195, 210), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (205, 210), False, 'import time\n')] |
# Check http://piwigo.com/
from urllib.parse import urlparse
from os import path
from plugins.cloudimport.cloud_platform import File, Folder
from plugins.cloudimport.extensions.cloud_library import CloudLibrary
class Platform(CloudLibrary):
    """Adapter for Piwigo photo galleries (http://piwigo.com/).

    Knows how to parse Piwigo gallery URLs and how to talk to the Piwigo
    web-service API (ws.php) to enumerate albums and the images they hold.
    """

    def __init__(self):
        super().__init__('Piwigo', 'http://{server_url}/index.php?/category/{category_id}')

    # Cloud Platform
    def platform_file_processing(self, files):
        """Normalize GCP list file names before further processing.

        Piwigo has the concept of physical albums, which expose the actual
        folders in the file system.  When the File Uploader plugin is used for
        GCP files, files may have been renamed to store multiple GCP files —
        so any file whose name contains 'gcp_list' and has the '.txt'
        extension is renamed back to the canonical 'gcp_list.txt'.
        """
        return [self._map_gcp_file_if_necessary(file) for file in files]

    def get_server_and_folder_id_from_url(self, url):
        """Split a gallery URL into ``(server base URL, category id)``.

        Expects URLs shaped like
        ``http://host/some/path/index.php?/category/<id>`` and raises
        ``Exception('Wrong URL format')`` otherwise.
        """
        parse_result = urlparse(url)
        paths = parse_result.query.split('/')
        if not 'category' in paths or paths.index('category') >= len(paths) - 1:
            raise Exception('Wrong URL format')
        else:
            category_id = paths[paths.index('category') + 1]
        # Use a dedicated name here: the original local `path` shadowed the
        # `os.path` module imported at the top of this file.
        url_path = parse_result.path
        if not 'index.php' in url_path:
            raise Exception('Wrong URL format')
        url_path = url_path[0:url_path.index('index.php')]
        server = parse_result.scheme + '://' + parse_result.netloc + '/' + url_path
        return server, category_id

    def build_folder_api_url(self, server_url, folder_id):
        """Return the ws.php URL describing a single (non-recursive) album."""
        return '{server_url}/ws.php?format=json&method=pwg.categories.getList&cat_id={folder_id}&recursive=false'.format(server_url = server_url, folder_id = folder_id)

    def parse_payload_into_folder(self, payload):
        """Build a Folder from a pwg.categories.getList response payload."""
        result = payload['result']['categories'][0]
        return Folder(result['name'], result['url'], result['nb_images'])

    def build_list_files_in_folder_api_url(self, server_url, folder_id):
        """Return the ws.php URL listing the images of one album."""
        # ToDo: add pagination
        return '{server_url}/ws.php?format=json&method=pwg.categories.getImages&cat_id={folder_id}&recursive=false&per_page=500'.format(server_url = server_url, folder_id = folder_id)

    def parse_payload_into_files(self, payload):
        """Build File objects from a pwg.categories.getImages response payload."""
        result = payload['result']
        return [File(image['file'], image['element_url']) for image in result['images']]

    def _map_gcp_file_if_necessary(self, file):
        """Rename any '*gcp_list*.txt' file to the canonical 'gcp_list.txt'."""
        _, file_extension = path.splitext(file.name)
        if file_extension.lower() == ".txt" and 'gcp_list' in file.name:
            return File('gcp_list.txt', file.url, file.other)
        return file

    # Cloud Library
    def build_folder_list_api_url(self, server_url):
        """Return the ws.php URL yielding the full album tree in one call."""
        return '{}/ws.php?format=json&method=pwg.categories.getList&recursive=true&tree_output=true'.format(server_url)

    def parse_payload_into_folders(self, payload):
        """Flatten the album tree of a recursive getList response into Folders."""
        categories = payload['result']
        return self._flatten_list([self._build_category(cat) for cat in categories])

    def _build_category(self, category):
        """Recursively turn one category node into a flat list of Folders.

        Sub-album names are prefixed with their parent's name
        ('Parent > Child') and albums without images are skipped.
        """
        name = category['name']
        images = category['nb_images']
        url = category['url']
        subcategories = self._flatten_list([self._build_category(subcat) for subcat in category['sub_categories']]) if category['nb_categories'] > 0 else []
        for subcategory in subcategories:
            subcategory.name = name + ' > ' + subcategory.name
        folder = [Folder(name, url, images)] if images > 0 else []
        return folder + subcategories

    def _flatten_list(self, list_of_lists):
        """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
        return [item for sublist in list_of_lists for item in sublist]
| [
"urllib.parse.urlparse",
"plugins.cloudimport.cloud_platform.Folder",
"os.path.splitext",
"plugins.cloudimport.cloud_platform.File",
"os.path.index"
] | [((994, 1007), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1002, 1007), False, 'from urllib.parse import urlparse\n'), ((1926, 1984), 'plugins.cloudimport.cloud_platform.Folder', 'Folder', (["result['name']", "result['url']", "result['nb_images']"], {}), "(result['name'], result['url'], result['nb_images'])\n", (1932, 1984), False, 'from plugins.cloudimport.cloud_platform import File, Folder\n'), ((2533, 2557), 'os.path.splitext', 'path.splitext', (['file.name'], {}), '(file.name)\n', (2546, 2557), False, 'from os import path\n'), ((2379, 2420), 'plugins.cloudimport.cloud_platform.File', 'File', (["image['file']", "image['element_url']"], {}), "(image['file'], image['element_url'])\n", (2383, 2420), False, 'from plugins.cloudimport.cloud_platform import File, Folder\n'), ((2650, 2692), 'plugins.cloudimport.cloud_platform.File', 'File', (['"""gcp_list.txt"""', 'file.url', 'file.other'], {}), "('gcp_list.txt', file.url, file.other)\n", (2654, 2692), False, 'from plugins.cloudimport.cloud_platform import File, Folder\n'), ((3512, 3537), 'plugins.cloudimport.cloud_platform.Folder', 'Folder', (['name', 'url', 'images'], {}), '(name, url, images)\n', (3518, 3537), False, 'from plugins.cloudimport.cloud_platform import File, Folder\n'), ((1427, 1450), 'os.path.index', 'path.index', (['"""index.php"""'], {}), "('index.php')\n", (1437, 1450), False, 'from os import path\n')] |
"""
<Reinforcement Learning and Control>(Year 2020)
by <NAME>
@ Intelligent Driving Lab, Tsinghua University
OCP example for lane keeping problem in a circle road
[Method]
Model predictive control
"""
from casadi import *
from config import DynamicsConfig
import math
from dynamics import VehicleDynamics
import matplotlib.pyplot as plt
class Solver(DynamicsConfig):
    """
    NLP solver for nonlinear model predictive control with Casadi.
    """
    def __init__(self):
        # Silence IPOPT console output; passed to every nlpsol() call below.
        self._sol_dic = {'ipopt.print_level': 0, 'ipopt.sb': 'yes', 'print_time': 0}
        # The Fiala tire model uses a 6-dim state and a 2-dim input (steer +
        # longitudinal accel); the other branches use a 5-dim state and a
        # steer-only input.  Steering is bounded to +/- pi/9 rad in all cases.
        if self.tire_model == 'Fiala':
            self.X_init = [0.0, 0.0, self.psi_init, 0.0, self.u, 0.0]
            self.zero = [0., 0., 0., 0., 0., 0.]
            self.U_LOWER = [- math.pi / 9, -10]
            self.U_UPPER = [math.pi / 9, 10]
        else:
            self.X_init = [0.0, 0.0, 0.1, 0.0, 0.0]
            self.zero = [0., 0., 0., 0., 0.]
            self.U_LOWER = [- math.pi / 9]
            self.U_UPPER = [math.pi / 9]
        self.x_last = 0
        # NOTE(review): this instance attribute shadows the `dynamics` method
        # defined just below, making that method unreachable on instances —
        # confirm whether the method is intentionally dead code.
        self.dynamics = VehicleDynamics()
        super(Solver, self).__init__()
    def dynamics(self,x, u):
        """One forward-Euler step of a Pacejka-tire bicycle model.

        NOTE(review): shadowed on instances by `self.dynamics` set in
        __init__ (see above).  Also, the trailing commas on the x1[0]..x1[3]
        assignments wrap each value in a one-element tuple — confirm before
        reusing this method.
        """
        x1 = [0.0, 0.0, 0.0, 0.0, 0.0]
        x1[0] = x[0] + self.Ts * (self.u * sin(x[2]) + x[1] * cos(x[2])),
        x1[1] = x[1] + self.Ts * (-self.D * self.F_z1 * sin(
            self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u))) * cos(u[0])
                        - self.D * self.F_z2 * sin(
            self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u))) / self.m - self.u * x[3]),
        x1[2] = x[2] + self.Ts * (x[3]),
        x1[3] = x[3] + self.Ts * (self.a * (-self.D * self.F_z1 * sin(
            self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u)))) * cos(u[0])
                        - self.b * (-self.D * self.F_z2 * sin(
            self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u)))) / self.I_zz),
        x1[4] = x[4] + self.Ts * (self.u * cos(x[2]) - x[1] * sin(x[2]))
        return x1
    def openLoopMpcSolver(self, x_init, predict_steps):
        """
        Solver of nonlinear MPC
        Parameters
        ----------
        x_init: list
            input state for MPC.
        predict_steps: int
            steps of predict horizon.
        Returns
        ----------
        state: np.array shape: [predict_steps+1, state_dimension]
            state trajectory of MPC in the whole predict horizon.
        control: np.array shape: [predict_steps, control_dimension]
            control signal of MPC in the whole predict horizon.
        """
        # Symbolic state and control variables for building the dynamics.
        x = SX.sym('x', self.DYNAMICS_DIM)
        u = SX.sym('u', self.ACTION_DIM)
        # discrete dynamic model
        # Forward-Euler discretization of the Pacejka-tire bicycle model:
        # states are [y, v_y, psi, r, x] (lateral pos, lateral speed,
        # heading, yaw rate, longitudinal pos).
        self.f = vertcat(
            x[0] + self.Ts * (self.u * sin(x[2]) + x[1] * cos(x[2])),
            x[1] + self.Ts * ((-self.D * self.F_z1 * sin(
                self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u))) * cos(u[0])
                               - self.D * self.F_z2 * sin(
                        self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u)))) / self.m - self.u * x[3]),
            x[2] + self.Ts * (x[3]),
            x[3] + self.Ts * ((self.a * (-self.D * self.F_z1 * sin(
                self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u)))) * cos(u[0])
                               - self.b * (-self.D * self.F_z2 * sin(
                        self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u))))) / self.I_zz),
            x[4] + self.Ts * (self.u * cos(x[2]) - x[1] * sin(x[2]))
        )
        # Create solver instance
        self.F = Function("F", [x, u], [self.f])
        # Create empty NLP
        w = []
        lbw = []
        ubw = []
        lbg = []
        ubg = []
        G = []
        J = 0
        # Initial conditions
        # The initial state is pinned by giving it equal lower/upper bounds.
        Xk = MX.sym('X0', self.DYNAMICS_DIM)
        w += [Xk]
        lbw += x_init
        ubw += x_init
        for k in range(1, predict_steps + 1):
            # Local control
            Uname = 'U' + str(k - 1)
            Uk = MX.sym(Uname, self.ACTION_DIM)
            w += [Uk]
            lbw += self.U_LOWER
            ubw += self.U_UPPER
            Fk = self.F(Xk, Uk)
            Xname = 'X' + str(k)
            Xk = MX.sym(Xname, self.DYNAMICS_DIM)
            # Dynamic Constriants
            # Multiple-shooting: enforce F(X_{k-1}, U_{k-1}) == X_k exactly.
            G += [Fk - Xk]
            lbg += self.zero
            ubg += self.zero
            w += [Xk]
            if self.tire_model == 'Fiala':
                lbw += [-inf, -20, -pi, -20, 50, -inf]
                ubw += [inf, 20, pi, 20, 0, inf]
            else:
                lbw += [-inf, -20, -pi, -20, -inf]
                ubw += [inf, 20, pi, 20, inf]
            # Stage cost: track the sinusoidal reference path a*sin(k*x) in
            # lateral position and heading, with a small steering penalty.
            F_cost = Function('F_cost', [x, u], [0.1 * (x[0] - self.a_curve * sin(self.k_curve * x[4])) ** 2
                                                 + 0.1 * (x[2] - arctan(
                self.a_curve * self.k_curve * cos(self.k_curve * x[4]))) ** 2
                                                 + 0.001 * u[0] ** 2])
            J += F_cost(w[k * 2], w[k * 2 - 1])
        # Create NLP solver
        nlp = dict(f=J, g=vertcat(*G), x=vertcat(*w))
        S = nlpsol('S', 'ipopt', nlp, self._sol_dic)
        # Solve NLP
        r = S(lbx=lbw, ubx=ubw, x0=0, lbg=lbg, ubg=ubg)
        # print(r['x'])
        state_all = np.array(r['x'])
        state = np.zeros([predict_steps, self.DYNAMICS_DIM])
        control = np.zeros([predict_steps, self.ACTION_DIM])
        nt = self.DYNAMICS_DIM + self.ACTION_DIM  # total variable per step
        # save trajectories
        for i in range(predict_steps):
            state[i] = state_all[nt * i: nt * i + nt - 1].reshape(-1)
            control[i] = state_all[nt * i + nt - 1]
        return state, control
    def mpcSolver(self, x_init, predict_steps):
        """
        Solver of nonlinear MPC
        Parameters
        ----------
        x_init: list
            input state for MPC.
        predict_steps: int
            steps of predict horizon.
        Returns
        ----------
        state: np.array shape: [predict_steps+1, state_dimension]
            state trajectory of MPC in the whole predict horizon.
        control: np.array shape: [predict_steps, control_dimension]
            control signal of MPC in the whole predict horizon.
        """
        # Fiala adds longitudinal speed/position to the state and a
        # longitudinal-acceleration input; the other models are steer-only.
        tire_model = self.tire_model
        if tire_model == 'Fiala':
            DYNAMICS_DIM = 6
            ACTION_DIM = 2
        else:
            DYNAMICS_DIM = 5
            ACTION_DIM = 1
        x = SX.sym('x', DYNAMICS_DIM)
        u = SX.sym('u', ACTION_DIM)
        # Create solver instance
        if self.tire_model == 'Fiala':
            # Forward-Euler discretization of the Fiala-tire bicycle model.
            self.f_d = vertcat(
                x[0] + self.Ts * (x[4] * sin(x[2]) + x[1] * cos(x[2])),
                # y : lateral position
                x[1] + self.Ts * (((- self.k1 * tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4])) * (
                            pow(self.k1 * tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4])), 2) / (
                                27 * pow(self.D * self.F_z1, 2)) - self.k1 * fabs(
                        tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4]))) / (
                                           3 * self.D * self.F_z1) + 1)) * cos(u[0]) - self.k2 * tan(
                    arctan((x[1] - self.b * x[3]) / x[4])) * (
                                           pow(self.k2 * tan(arctan((x[1] - self.b * x[3]) / x[4])), 2) / (
                                               27 * pow(self.D * self.F_z2, 2)) - self.k2 * fabs(tan(
                                       arctan((x[1] - self.b * x[3]) / x[4]))) / (
                                                       3 * self.D * self.F_z2) + 1)) / self.m - x[4] * x[3]),
                # v_y : lateral speed
                x[2] + self.Ts * (x[3]),
                # psi : heading angle
                x[3] + self.Ts * ((self.a * (- self.k1 * tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4])) * (
                            pow(self.k1 * tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4])), 2) / (
                                27 * pow(self.D * self.F_z1, 2)) - self.k1 * fabs(
                        tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4]))) / (
                                           3 * self.D * self.F_z1) + 1)) * cos(u[0]) - self.b * (
                                               - self.k2 * tan(arctan((x[1] - self.b * x[3]) / x[4])) * (
                                                   pow(self.k2 * tan(arctan((x[1] - self.b * x[3]) / x[4])), 2) / (
                                                       27 * pow(self.D * self.F_z2, 2)) - self.k2 * fabs(tan(
                                               arctan((x[1] - self.b * x[3]) / x[4]))) / (
                                                               3 * self.D * self.F_z2) + 1))) / self.I_zz),
                # r : yaw rate
                x[4] + self.Ts * (
                            u[1] + x[2] * x[4] - (- self.k1 * tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4])) * (
                                pow(self.k1 * tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4])), 2) / (
                                    27 * pow(self.D * self.F_z1, 2)) - self.k1 * fabs(
                            tan(- u[0] + arctan((x[1] + self.a * x[3]) / x[4]))) / (
                                               3 * self.D * self.F_z1) + 1)) * sin(u[0]) / self.m),
                # v_x : longitudinal speed
                x[5] + self.Ts * (x[4])
                # x : longitudinal position
            )
            self.F = Function("F", [x, u], [self.f_d])
        elif self.tire_model == 'Pacejka':
            # discrete dynamic model
            self.f = vertcat(
                x[0] + self.Ts * (self.u * sin(x[2]) + x[1] * cos(x[2])),
                x[1] + self.Ts * ((-self.D * self.F_z1 * sin(
                    self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u))) * cos(u[0])
                                   - self.D * self.F_z2 * sin(
                            self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u)))) / self.m - self.u * x[3]),
                x[2] + self.Ts * (x[3]),
                x[3] + self.Ts * ((self.a * (-self.D * self.F_z1 * sin(
                    self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u)))) * cos(u[0])
                                   - self.b * (-self.D * self.F_z2 * sin(
                            self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u))))) / self.I_zz),
                x[4] + self.Ts * (self.u * cos(x[2]) - x[1] * sin(x[2]))
            )
            # todo:retreve
            self.F = Function("F", [x, u], [self.f])
        elif self.tire_model == 'Linear':
            # linear model
            # NOTE(review): the yaw-rate row below still uses the Pacejka
            # sine terms rather than the linear-tire expression — confirm.
            self.f = vertcat(
                x[0] + self.Ts * (self.u * sin(x[2]) + x[1] * cos(x[2])),
                x[1] + self.Ts * ((-self.k1*(-u[0] + (x[1] + self.a * x[3]) / self.u) * cos(u[0]) +
                                   -self.k2*((x[1] - self.b * x[3]) / self.u)) / self.m - self.u * x[3]),
                x[2] + self.Ts * (x[3]),
                x[3] + self.Ts * (self.a * (-self.D * self.F_z1 * sin(
                    self.C * arctan(self.B * (-u[0] + (x[1] + self.a * x[3]) / self.u)))) * cos(u[0])
                                   - self.b * (-self.D * self.F_z2 * sin(
                            self.C * arctan(self.B * ((x[1] - self.b * x[3]) / self.u)))) / self.I_zz),
                x[4] + self.Ts * (self.u * cos(x[2]) - x[1] * sin(x[2]))
            )
            self.F = Function("F", [x, u], [self.f])
        # Create empty NLP
        w = []
        lbw = []
        ubw = []
        lbg = []
        ubg = []
        G = []
        J = 0
        # Initial conditions
        # The initial state is pinned by equal lower/upper bounds.
        Xk = MX.sym('X0', DYNAMICS_DIM)
        w += [Xk]
        lbw += x_init
        ubw += x_init
        for k in range(1, predict_steps + 1):
            # Local control
            Uname = 'U' + str(k - 1)
            Uk = MX.sym(Uname, ACTION_DIM)
            w += [Uk]
            lbw += self.U_LOWER
            ubw += self.U_UPPER
            # Gk = self.G_f(Xk,Uk)
            Fk = self.F(Xk, Uk)
            Xname = 'X' + str(k)
            Xk = MX.sym(Xname, DYNAMICS_DIM)
            # Dynamic Constriants
            # Multiple-shooting: enforce F(X_{k-1}, U_{k-1}) == X_k exactly.
            G += [Fk - Xk]
            lbg += self.zero
            ubg += self.zero
            w += [Xk]
            if self.tire_model == 'Fiala':
                lbw += [-inf, -20, -pi, -20, 0, -inf]
                ubw += [inf, 20, pi, 20, 50, inf]
            else:
                lbw += [-inf, -20, -pi, -20, -inf]
                ubw += [inf, 20, pi, 20, inf]
            # Cost function
            # Stage cost: lane keeping (y -> 0), speed tracking (Fiala only),
            # plus control-effort penalties.
            if tire_model == 'Fiala':
                F_cost = Function('F_cost', [x, u], [6 * (x[0]) ** 2
                                                     + 0.2 * (x[4] - self.u) ** 2
                                                     + 80 * u[0] ** 2
                                                     + 0.3 * u[1] ** 2])
            else:
                F_cost = Function('F_cost', [x, u], [1 * (x[0]) ** 2
                                                     + 1 * (x[2]) ** 2
                                                     + 1 * u[0] ** 2])
            J += F_cost(w[k * 2], w[k * 2 - 1])
            # J += F_cost(w[k * 3 - 1], w[k * 3 - 2])
        # Create NLP solver
        nlp = dict(f=J, g=vertcat(*G), x=vertcat(*w))
        S = nlpsol('S', 'ipopt', nlp, self._sol_dic)
        # Solve NLP
        r = S(lbx=lbw, ubx=ubw, x0=0, lbg=lbg, ubg=ubg)
        # print(r['x'])
        state_all = np.array(r['x'])
        state = np.zeros([predict_steps, DYNAMICS_DIM])
        control = np.zeros([predict_steps, ACTION_DIM])
        nt = DYNAMICS_DIM + ACTION_DIM  # total variable per step
        # save trajectories
        for i in range(predict_steps):
            state[i] = state_all[nt * i: nt * i + DYNAMICS_DIM].reshape(-1)
            control[i] = state_all[nt * i + DYNAMICS_DIM: nt * (i + 1)].reshape(-1)
        return state, control
| [
"dynamics.VehicleDynamics"
] | [((1087, 1104), 'dynamics.VehicleDynamics', 'VehicleDynamics', ([], {}), '()\n', (1102, 1104), False, 'from dynamics import VehicleDynamics\n')] |
#!/usr/bin/env python
"""Create benchmark for k nearest neighbor on unit sphere in R^k."""
# Scroll down to line 90 to "Adjust this" to add your experiment
import Queue as queue
import heapq
import logging
import os.path
import random
import sys
import time

import h5py
import numpy as np
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def create_point(n):
    """Create a random point on the unit sphere in R^n."""
    raw = [random.uniform(-1, 1) for _ in range(n)]
    vec = np.array(raw)
    return vec / np.linalg.norm(vec)
def create_points(n, number):
    """Create *number* random points on the unit sphere in R^n."""
    points = []
    for _ in range(number):
        points.append(create_point(n))
    return points
def get_dist(a, b):
    """Return the Euclidean distance between points *a* and *b*."""
    difference = a - b
    return np.linalg.norm(difference)
def run_q_at_a_time(candidates, queries, k, n, algorithm):
    """
    Run every single query in queries.

    Parameters
    ----------
    candidates : object
        Datastructure which contains the nearest neighbor candidates.
    queries : list
        List of points
    k : int
        How many points should be found
    n : int
        Dimension of each point / query
    algorithm : callable
        Called as ``algorithm(candidates, query, k, n)`` and expected to
        return a (k, n) array of neighbors.

    Returns
    -------
    np.array of shape (len(queries), k, n) with the neighbors of each query.
    """
    assert k >= 1
    assert n >= 1
    solution = np.zeros((len(queries), k, n))
    for i, query in enumerate(queries):
        # Bug fix: pass the `candidates` argument through instead of the
        # module-level global `D`, which silently ignored the parameter.
        solution[i] = algorithm(candidates, query, k, n)
    return solution
def brute_force_search(candidates, query, k, n):
    """Find the k nearest neighbors of *query* by exhaustive search.

    Returns a (k, n) array whose rows are the k candidates closest to
    *query* in Euclidean distance, ordered nearest first.

    Uses ``heapq.nsmallest`` with a distance key instead of the original
    PriorityQueue-plus-``time.time()`` tiebreaker workaround; ``nsmallest``
    is stable, so equidistant candidates keep their input order just as the
    insertion-time tiebreaker did.
    """
    nearest = heapq.nsmallest(k, candidates, key=lambda c: np.linalg.norm(c - query))
    solution = np.zeros((k, n))
    for row, candidate in enumerate(nearest):
        solution[row] = candidate
    return solution
def build_datastructure(candidates):
    """Placeholder index builder: returns *candidates* unchanged.

    Swap in something sophisticated here to speed up k-nn queries.
    """
    return candidates
# parameters
# NOTE(review): this script appears to target Python 2 ("import Queue"
# above, and T / query_batch_size used directly as a range() bound); on
# Python 3 those range() calls would need integer division (//) — confirm.
k = 5 # get k closest points
n = 128 # dimensionality of each point / query
m = 10**5 # candidates for closest points
T = 10**2 # number of queries
query_batch_size = 10**1 # should divide T
assert T % query_batch_size == 0
# paths
query_file = "queries.hdf5"
candidates_file = "candidates.hdf5"
###############################################################################
# Adjust this
# gets the candidates as argument and should return a datastructure D
create_datastructure_algorithm = build_datastructure
# Gets D, query, k, n as arguments
search_algorithm = brute_force_search
###############################################################################
# Create query and candidate files if not exist or load them otherwise
if not os.path.isfile(candidates_file):
    logging.info("Start creating %i candidates." % m)
    candidates = create_points(n, m)
    with h5py.File(candidates_file, 'w') as f:
        dset = f.create_dataset('candidates',
                                data=np.array(candidates),
                                # maxshape=(None, n),
                                dtype='float32')
else:
    with h5py.File(candidates_file, 'r') as f:
        candidates = np.array(f.get('candidates'))
if not os.path.isfile(query_file):
    logging.info("Start creating %i queries." % T)
    # Queries are generated and written batch-by-batch, growing the
    # resizable dataset as each batch is appended.
    with h5py.File(query_file, 'w') as f:
        dset = f.create_dataset('queries',
                                shape=(query_batch_size, n),
                                maxshape=(None, n),
                                dtype='float32',
                                chunks=(query_batch_size, n))
        for i in range(T / query_batch_size):
            logging.info("\tQuery batch%i of %i." %
                         (i + 1, T / query_batch_size))
            queries = np.array(create_points(n, query_batch_size))
            if i > 0:
                dset.resize((dset.shape[0] + query_batch_size, n))
            dset[-query_batch_size:dset.shape[0], :] = queries
# Evaluate
logging.info("Start evaluation.")
total_time = 0
D = create_datastructure_algorithm(candidates)
with h5py.File(query_file, 'r') as f:
    queries = f.get('queries')
    for i in range(T / query_batch_size):
        logging.info("\tQuery batch %i of %i." % (i + 1, T / query_batch_size))
        q = queries[i * query_batch_size:(i + 1) * query_batch_size]
        # Only the search itself is timed; file reads happen outside t0..t1.
        t0 = time.time()
        solution = run_q_at_a_time(D, q, k, n, search_algorithm) # TODO
        # Store the solution and compare against brute force to check if
        # it is correct
        t1 = time.time()
        total_time += t1 - t0
logging.info("Needed %i seconds in total." % (total_time))
logging.info("k={k}, n={n}, m={m}, T={T}: {time:.2f}s per query."
             .format(k=k,
                     n=n,
                     m=m,
                     T=T,
                     time=float(total_time) / T))
| [
"logging.basicConfig",
"random.uniform",
"Queue.PriorityQueue",
"time.time",
"h5py.File",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm",
"logging.info"
] | [((279, 391), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.DEBUG, stream=sys.stdout)\n", (298, 391), False, 'import logging\n'), ((4079, 4112), 'logging.info', 'logging.info', (['"""Start evaluation."""'], {}), "('Start evaluation.')\n", (4091, 4112), False, 'import logging\n'), ((4686, 4742), 'logging.info', 'logging.info', (["('Needed %i seconds in total.' % total_time)"], {}), "('Needed %i seconds in total.' % total_time)\n", (4698, 4742), False, 'import logging\n'), ((844, 865), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (858, 865), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.zeros', 'np.zeros', (['(k, n)'], {}), '((k, n))\n', (1579, 1587), True, 'import numpy as np\n'), ((1598, 1619), 'Queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (1617, 1619), True, 'import Queue as queue\n'), ((2851, 2900), 'logging.info', 'logging.info', (["('Start creating %i candidates.' % m)"], {}), "('Start creating %i candidates.' % m)\n", (2863, 2900), False, 'import logging\n'), ((3337, 3383), 'logging.info', 'logging.info', (["('Start creating %i queries.' % T)"], {}), "('Start creating %i queries.' 
% T)\n", (3349, 3383), False, 'import logging\n'), ((4181, 4207), 'h5py.File', 'h5py.File', (['query_file', '"""r"""'], {}), "(query_file, 'r')\n", (4190, 4207), False, 'import h5py\n'), ((584, 601), 'numpy.linalg.norm', 'np.linalg.norm', (['p'], {}), '(p)\n', (598, 601), True, 'import numpy as np\n'), ((2947, 2978), 'h5py.File', 'h5py.File', (['candidates_file', '"""w"""'], {}), "(candidates_file, 'w')\n", (2956, 2978), False, 'import h5py\n'), ((3208, 3239), 'h5py.File', 'h5py.File', (['candidates_file', '"""r"""'], {}), "(candidates_file, 'r')\n", (3217, 3239), False, 'import h5py\n'), ((3393, 3419), 'h5py.File', 'h5py.File', (['query_file', '"""w"""'], {}), "(query_file, 'w')\n", (3402, 3419), False, 'import h5py\n'), ((4295, 4366), 'logging.info', 'logging.info', (["('\\tQuery batch %i of %i.' % (i + 1, T / query_batch_size))"], {}), "('\\tQuery batch %i of %i.' % (i + 1, T / query_batch_size))\n", (4307, 4366), False, 'import logging\n'), ((4449, 4460), 'time.time', 'time.time', ([], {}), '()\n', (4458, 4460), False, 'import time\n'), ((4644, 4655), 'time.time', 'time.time', ([], {}), '()\n', (4653, 4655), False, 'import time\n'), ((527, 548), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (541, 548), False, 'import random\n'), ((3751, 3821), 'logging.info', 'logging.info', (["('\\tQuery batch%i of %i.' % (i + 1, T / query_batch_size))"], {}), "('\\tQuery batch%i of %i.' % (i + 1, T / query_batch_size))\n", (3763, 3821), False, 'import logging\n'), ((1790, 1801), 'time.time', 'time.time', ([], {}), '()\n', (1799, 1801), False, 'import time\n'), ((3068, 3088), 'numpy.array', 'np.array', (['candidates'], {}), '(candidates)\n', (3076, 3088), True, 'import numpy as np\n')] |
from setuptools import setup
# Packaging metadata for the pyrosettacolabsetup helper package.
setup(
    name='pyrosettacolabsetup',
    version='0.5',
    description='Mounts Google Drive for PyRosetta use in Google Colaboratory',
    url='https://github.com/kathyle9/pyrosettacolabsetup',
    author='kathyle9',
    author_email='<EMAIL>',
    license='MIT',
    packages=['pyrosettacolabsetup'],
    zip_safe=False,
)
| [
"setuptools.setup"
] | [((30, 335), 'setuptools.setup', 'setup', ([], {'name': '"""pyrosettacolabsetup"""', 'version': '"""0.5"""', 'description': '"""Mounts Google Drive for PyRosetta use in Google Colaboratory"""', 'url': '"""https://github.com/kathyle9/pyrosettacolabsetup"""', 'author': '"""kathyle9"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['pyrosettacolabsetup']", 'zip_safe': '(False)'}), "(name='pyrosettacolabsetup', version='0.5', description=\n 'Mounts Google Drive for PyRosetta use in Google Colaboratory', url=\n 'https://github.com/kathyle9/pyrosettacolabsetup', author='kathyle9',\n author_email='<EMAIL>', license='MIT', packages=['pyrosettacolabsetup'],\n zip_safe=False)\n", (35, 335), False, 'from setuptools import setup\n')] |
"""The pyccl package contains all of the submodules that are implemented in
individual files in CCL.
"""
# flake8: noqa
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Resolve the installed package version from setuptools metadata.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    pass
# Sets the environment variable for default config path if it does not
# exist yet
from os import environ, path
if environ.get("CCL_PARAM_FILE") is None:
    # Default to the ini file shipped alongside this package.
    environ["CCL_PARAM_FILE"] = (
        path.dirname(path.abspath(__file__)) + '/ccl_params.ini')
if environ.get("CLASS_PARAM_DIR") is None:
    # CLASS parameter files also live next to this module by default.
    environ["CLASS_PARAM_DIR"] = path.dirname(path.abspath(__file__))
from . import ccllib as lib
from . import core, constants, background, power, massfunction, halomodel
# Core data structures
from .core import Cosmology
# Background cosmology functions and growth functions
from .background import growth_factor, growth_factor_unnorm, \
growth_rate, comoving_radial_distance, comoving_angular_distance, \
h_over_h0, luminosity_distance, distance_modulus, scale_factor_of_chi, \
omega_x, rho_x
# Power spectrum calculations and sigma8
from .power import linear_matter_power, nonlin_matter_power, sigmaR, \
sigmaV, sigma8
# Halo mass function
from .massfunction import massfunc, massfunc_m2r, sigmaM, halo_bias
# Cl's and tracers
from .cls import angular_cl, NumberCountsTracer, WeakLensingTracer, CMBLensingTracer
from .lsst_specs import bias_clustering, sigmaz_clustering, \
sigmaz_sources, dNdz_tomog, PhotoZFunction, PhotoZGaussian
# Useful constants and unit conversions
from .constants import CLIGHT_HMPC, MPC_TO_METER, PC_TO_METER, \
GNEWT, RHO_CRITICAL, SOLAR_MASS
from .correlation import correlation, correlation_3d
# Properties of haloes
from .halomodel import halomodel_matter_power, halo_concentration
# Specific to massive neutrinos
from .neutrinos import Omeganuh2, nu_masses
# Expose function to toggle debug mode
from .pyutils import debug_mode
from .errors import CCLError
| [
"os.path.abspath",
"os.environ.get",
"pkg_resources.get_distribution"
] | [((428, 457), 'os.environ.get', 'environ.get', (['"""CCL_PARAM_FILE"""'], {}), "('CCL_PARAM_FILE')\n", (439, 457), False, 'from os import environ, path\n'), ((570, 600), 'os.environ.get', 'environ.get', (['"""CLASS_PARAM_DIR"""'], {}), "('CLASS_PARAM_DIR')\n", (581, 600), False, 'from os import environ, path\n'), ((208, 234), 'pkg_resources.get_distribution', 'get_distribution', (['__name__'], {}), '(__name__)\n', (224, 234), False, 'from pkg_resources import get_distribution, DistributionNotFound\n'), ((656, 678), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (668, 678), False, 'from os import environ, path\n'), ((522, 544), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (534, 544), False, 'from os import environ, path\n')] |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TaskRun(object):
"""
The information about a task run.
"""
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "NOT_STARTED"
STATUS_NOT_STARTED = "NOT_STARTED"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "QUEUED"
STATUS_QUEUED = "QUEUED"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "RUNNING"
STATUS_RUNNING = "RUNNING"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "TERMINATING"
STATUS_TERMINATING = "TERMINATING"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "TERMINATED"
STATUS_TERMINATED = "TERMINATED"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "SUCCESS"
STATUS_SUCCESS = "SUCCESS"
#: A constant which can be used with the status property of a TaskRun.
#: This constant has a value of "ERROR"
STATUS_ERROR = "ERROR"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "SECONDS"
EXPECTED_DURATION_UNIT_SECONDS = "SECONDS"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "MINUTES"
EXPECTED_DURATION_UNIT_MINUTES = "MINUTES"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "HOURS"
EXPECTED_DURATION_UNIT_HOURS = "HOURS"
#: A constant which can be used with the expected_duration_unit property of a TaskRun.
#: This constant has a value of "DAYS"
EXPECTED_DURATION_UNIT_DAYS = "DAYS"
#: A constant which can be used with the auth_mode property of a TaskRun.
#: This constant has a value of "OBO"
AUTH_MODE_OBO = "OBO"
#: A constant which can be used with the auth_mode property of a TaskRun.
#: This constant has a value of "RESOURCE_PRINCIPAL"
AUTH_MODE_RESOURCE_PRINCIPAL = "RESOURCE_PRINCIPAL"
#: A constant which can be used with the auth_mode property of a TaskRun.
#: This constant has a value of "USER_CERTIFICATE"
AUTH_MODE_USER_CERTIFICATE = "USER_CERTIFICATE"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "INTEGRATION_TASK"
TASK_TYPE_INTEGRATION_TASK = "INTEGRATION_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "DATA_LOADER_TASK"
TASK_TYPE_DATA_LOADER_TASK = "DATA_LOADER_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "PIPELINE_TASK"
TASK_TYPE_PIPELINE_TASK = "PIPELINE_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "SQL_TASK"
TASK_TYPE_SQL_TASK = "SQL_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "OCI_DATAFLOW_TASK"
TASK_TYPE_OCI_DATAFLOW_TASK = "OCI_DATAFLOW_TASK"
#: A constant which can be used with the task_type property of a TaskRun.
#: This constant has a value of "REST_TASK"
TASK_TYPE_REST_TASK = "REST_TASK"
def __init__(self, **kwargs):
"""
Initializes a new TaskRun object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this TaskRun.
:type key: str
:param model_type:
The value to assign to the model_type property of this TaskRun.
:type model_type: str
:param model_version:
The value to assign to the model_version property of this TaskRun.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this TaskRun.
:type parent_ref: oci.data_integration.models.ParentReference
:param name:
The value to assign to the name property of this TaskRun.
:type name: str
:param description:
The value to assign to the description property of this TaskRun.
:type description: str
:param object_version:
The value to assign to the object_version property of this TaskRun.
:type object_version: int
:param config_provider:
The value to assign to the config_provider property of this TaskRun.
:type config_provider: oci.data_integration.models.ConfigProvider
:param status:
The value to assign to the status property of this TaskRun.
Allowed values for this property are: "NOT_STARTED", "QUEUED", "RUNNING", "TERMINATING", "TERMINATED", "SUCCESS", "ERROR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param start_time_millis:
The value to assign to the start_time_millis property of this TaskRun.
:type start_time_millis: int
:param end_time_millis:
The value to assign to the end_time_millis property of this TaskRun.
:type end_time_millis: int
:param last_updated:
The value to assign to the last_updated property of this TaskRun.
:type last_updated: int
:param records_written:
The value to assign to the records_written property of this TaskRun.
:type records_written: int
:param bytes_processed:
The value to assign to the bytes_processed property of this TaskRun.
:type bytes_processed: int
:param error_message:
The value to assign to the error_message property of this TaskRun.
:type error_message: str
:param expected_duration:
The value to assign to the expected_duration property of this TaskRun.
:type expected_duration: float
:param expected_duration_unit:
The value to assign to the expected_duration_unit property of this TaskRun.
Allowed values for this property are: "SECONDS", "MINUTES", "HOURS", "DAYS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type expected_duration_unit: str
:param task_key:
The value to assign to the task_key property of this TaskRun.
:type task_key: str
:param external_id:
The value to assign to the external_id property of this TaskRun.
:type external_id: str
:param retry_attempt:
The value to assign to the retry_attempt property of this TaskRun.
:type retry_attempt: int
:param task_schedule:
The value to assign to the task_schedule property of this TaskRun.
:type task_schedule: oci.data_integration.models.TaskSchedule
:param metrics:
The value to assign to the metrics property of this TaskRun.
:type metrics: dict(str, float)
:param outputs:
The value to assign to the outputs property of this TaskRun.
:type outputs: dict(str, ParameterValue)
:param execution_errors:
The value to assign to the execution_errors property of this TaskRun.
:type execution_errors: list[str]
:param termination_errors:
The value to assign to the termination_errors property of this TaskRun.
:type termination_errors: list[str]
:param auth_mode:
The value to assign to the auth_mode property of this TaskRun.
Allowed values for this property are: "OBO", "RESOURCE_PRINCIPAL", "USER_CERTIFICATE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type auth_mode: str
:param opc_request_id:
The value to assign to the opc_request_id property of this TaskRun.
:type opc_request_id: str
:param object_status:
The value to assign to the object_status property of this TaskRun.
:type object_status: int
:param task_type:
The value to assign to the task_type property of this TaskRun.
Allowed values for this property are: "INTEGRATION_TASK", "DATA_LOADER_TASK", "PIPELINE_TASK", "SQL_TASK", "OCI_DATAFLOW_TASK", "REST_TASK", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type task_type: str
:param identifier:
The value to assign to the identifier property of this TaskRun.
:type identifier: str
:param metadata:
The value to assign to the metadata property of this TaskRun.
:type metadata: oci.data_integration.models.ObjectMetadata
:param key_map:
The value to assign to the key_map property of this TaskRun.
:type key_map: dict(str, str)
"""
self.swagger_types = {
'key': 'str',
'model_type': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'name': 'str',
'description': 'str',
'object_version': 'int',
'config_provider': 'ConfigProvider',
'status': 'str',
'start_time_millis': 'int',
'end_time_millis': 'int',
'last_updated': 'int',
'records_written': 'int',
'bytes_processed': 'int',
'error_message': 'str',
'expected_duration': 'float',
'expected_duration_unit': 'str',
'task_key': 'str',
'external_id': 'str',
'retry_attempt': 'int',
'task_schedule': 'TaskSchedule',
'metrics': 'dict(str, float)',
'outputs': 'dict(str, ParameterValue)',
'execution_errors': 'list[str]',
'termination_errors': 'list[str]',
'auth_mode': 'str',
'opc_request_id': 'str',
'object_status': 'int',
'task_type': 'str',
'identifier': 'str',
'metadata': 'ObjectMetadata',
'key_map': 'dict(str, str)'
}
self.attribute_map = {
'key': 'key',
'model_type': 'modelType',
'model_version': 'modelVersion',
'parent_ref': 'parentRef',
'name': 'name',
'description': 'description',
'object_version': 'objectVersion',
'config_provider': 'configProvider',
'status': 'status',
'start_time_millis': 'startTimeMillis',
'end_time_millis': 'endTimeMillis',
'last_updated': 'lastUpdated',
'records_written': 'recordsWritten',
'bytes_processed': 'bytesProcessed',
'error_message': 'errorMessage',
'expected_duration': 'expectedDuration',
'expected_duration_unit': 'expectedDurationUnit',
'task_key': 'taskKey',
'external_id': 'externalId',
'retry_attempt': 'retryAttempt',
'task_schedule': 'taskSchedule',
'metrics': 'metrics',
'outputs': 'outputs',
'execution_errors': 'executionErrors',
'termination_errors': 'terminationErrors',
'auth_mode': 'authMode',
'opc_request_id': 'opcRequestId',
'object_status': 'objectStatus',
'task_type': 'taskType',
'identifier': 'identifier',
'metadata': 'metadata',
'key_map': 'keyMap'
}
self._key = None
self._model_type = None
self._model_version = None
self._parent_ref = None
self._name = None
self._description = None
self._object_version = None
self._config_provider = None
self._status = None
self._start_time_millis = None
self._end_time_millis = None
self._last_updated = None
self._records_written = None
self._bytes_processed = None
self._error_message = None
self._expected_duration = None
self._expected_duration_unit = None
self._task_key = None
self._external_id = None
self._retry_attempt = None
self._task_schedule = None
self._metrics = None
self._outputs = None
self._execution_errors = None
self._termination_errors = None
self._auth_mode = None
self._opc_request_id = None
self._object_status = None
self._task_type = None
self._identifier = None
self._metadata = None
self._key_map = None
@property
def key(self):
"""
Gets the key of this TaskRun.
The key of the object.
:return: The key of this TaskRun.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this TaskRun.
The key of the object.
:param key: The key of this TaskRun.
:type: str
"""
self._key = key
@property
def model_type(self):
"""
Gets the model_type of this TaskRun.
The type of the object.
:return: The model_type of this TaskRun.
:rtype: str
"""
return self._model_type
@model_type.setter
def model_type(self, model_type):
"""
Sets the model_type of this TaskRun.
The type of the object.
:param model_type: The model_type of this TaskRun.
:type: str
"""
self._model_type = model_type
@property
def model_version(self):
"""
Gets the model_version of this TaskRun.
The model version of an object.
:return: The model_version of this TaskRun.
:rtype: str
"""
return self._model_version
@model_version.setter
def model_version(self, model_version):
"""
Sets the model_version of this TaskRun.
The model version of an object.
:param model_version: The model_version of this TaskRun.
:type: str
"""
self._model_version = model_version
@property
def parent_ref(self):
"""
Gets the parent_ref of this TaskRun.
:return: The parent_ref of this TaskRun.
:rtype: oci.data_integration.models.ParentReference
"""
return self._parent_ref
@parent_ref.setter
def parent_ref(self, parent_ref):
"""
Sets the parent_ref of this TaskRun.
:param parent_ref: The parent_ref of this TaskRun.
:type: oci.data_integration.models.ParentReference
"""
self._parent_ref = parent_ref
@property
def name(self):
"""
Gets the name of this TaskRun.
Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
:return: The name of this TaskRun.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TaskRun.
Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
:param name: The name of this TaskRun.
:type: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this TaskRun.
Detailed description for the object.
:return: The description of this TaskRun.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this TaskRun.
Detailed description for the object.
:param description: The description of this TaskRun.
:type: str
"""
self._description = description
@property
def object_version(self):
"""
Gets the object_version of this TaskRun.
The version of the object that is used to track changes in the object instance.
:return: The object_version of this TaskRun.
:rtype: int
"""
return self._object_version
@object_version.setter
def object_version(self, object_version):
"""
Sets the object_version of this TaskRun.
The version of the object that is used to track changes in the object instance.
:param object_version: The object_version of this TaskRun.
:type: int
"""
self._object_version = object_version
@property
def config_provider(self):
"""
Gets the config_provider of this TaskRun.
:return: The config_provider of this TaskRun.
:rtype: oci.data_integration.models.ConfigProvider
"""
return self._config_provider
@config_provider.setter
def config_provider(self, config_provider):
"""
Sets the config_provider of this TaskRun.
:param config_provider: The config_provider of this TaskRun.
:type: oci.data_integration.models.ConfigProvider
"""
self._config_provider = config_provider
@property
def status(self):
"""
Gets the status of this TaskRun.
The status of the task run.
Allowed values for this property are: "NOT_STARTED", "QUEUED", "RUNNING", "TERMINATING", "TERMINATED", "SUCCESS", "ERROR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The status of this TaskRun.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this TaskRun.
The status of the task run.
:param status: The status of this TaskRun.
:type: str
"""
allowed_values = ["NOT_STARTED", "QUEUED", "RUNNING", "TERMINATING", "TERMINATED", "SUCCESS", "ERROR"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def start_time_millis(self):
"""
Gets the start_time_millis of this TaskRun.
The start time.
:return: The start_time_millis of this TaskRun.
:rtype: int
"""
return self._start_time_millis
@start_time_millis.setter
def start_time_millis(self, start_time_millis):
"""
Sets the start_time_millis of this TaskRun.
The start time.
:param start_time_millis: The start_time_millis of this TaskRun.
:type: int
"""
self._start_time_millis = start_time_millis
@property
def end_time_millis(self):
"""
Gets the end_time_millis of this TaskRun.
The end time.
:return: The end_time_millis of this TaskRun.
:rtype: int
"""
return self._end_time_millis
@end_time_millis.setter
def end_time_millis(self, end_time_millis):
"""
Sets the end_time_millis of this TaskRun.
The end time.
:param end_time_millis: The end_time_millis of this TaskRun.
:type: int
"""
self._end_time_millis = end_time_millis
@property
def last_updated(self):
"""
Gets the last_updated of this TaskRun.
The date and time the object was last updated.
:return: The last_updated of this TaskRun.
:rtype: int
"""
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
"""
Sets the last_updated of this TaskRun.
The date and time the object was last updated.
:param last_updated: The last_updated of this TaskRun.
:type: int
"""
self._last_updated = last_updated
@property
def records_written(self):
"""
Gets the records_written of this TaskRun.
The number of records processed in the task run.
:return: The records_written of this TaskRun.
:rtype: int
"""
return self._records_written
@records_written.setter
def records_written(self, records_written):
"""
Sets the records_written of this TaskRun.
The number of records processed in the task run.
:param records_written: The records_written of this TaskRun.
:type: int
"""
self._records_written = records_written
@property
def bytes_processed(self):
"""
Gets the bytes_processed of this TaskRun.
The number of bytes processed in the task run.
:return: The bytes_processed of this TaskRun.
:rtype: int
"""
return self._bytes_processed
@bytes_processed.setter
def bytes_processed(self, bytes_processed):
"""
Sets the bytes_processed of this TaskRun.
The number of bytes processed in the task run.
:param bytes_processed: The bytes_processed of this TaskRun.
:type: int
"""
self._bytes_processed = bytes_processed
@property
def error_message(self):
"""
Gets the error_message of this TaskRun.
Contains an error message if status is `ERROR`.
:return: The error_message of this TaskRun.
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""
Sets the error_message of this TaskRun.
Contains an error message if status is `ERROR`.
:param error_message: The error_message of this TaskRun.
:type: str
"""
self._error_message = error_message
@property
def expected_duration(self):
"""
Gets the expected_duration of this TaskRun.
The expected duration for the task run.
:return: The expected_duration of this TaskRun.
:rtype: float
"""
return self._expected_duration
@expected_duration.setter
def expected_duration(self, expected_duration):
"""
Sets the expected_duration of this TaskRun.
The expected duration for the task run.
:param expected_duration: The expected_duration of this TaskRun.
:type: float
"""
self._expected_duration = expected_duration
@property
def expected_duration_unit(self):
"""
Gets the expected_duration_unit of this TaskRun.
The expected duration unit of measure.
Allowed values for this property are: "SECONDS", "MINUTES", "HOURS", "DAYS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The expected_duration_unit of this TaskRun.
:rtype: str
"""
return self._expected_duration_unit
@expected_duration_unit.setter
def expected_duration_unit(self, expected_duration_unit):
"""
Sets the expected_duration_unit of this TaskRun.
The expected duration unit of measure.
:param expected_duration_unit: The expected_duration_unit of this TaskRun.
:type: str
"""
allowed_values = ["SECONDS", "MINUTES", "HOURS", "DAYS"]
if not value_allowed_none_or_none_sentinel(expected_duration_unit, allowed_values):
expected_duration_unit = 'UNKNOWN_ENUM_VALUE'
self._expected_duration_unit = expected_duration_unit
@property
def task_key(self):
"""
Gets the task_key of this TaskRun.
Task Key of the task for which TaskRun is being created. If not specified, the AggregatorKey in RegistryMetadata will be assumed to be the TaskKey
:return: The task_key of this TaskRun.
:rtype: str
"""
return self._task_key
@task_key.setter
def task_key(self, task_key):
"""
Sets the task_key of this TaskRun.
Task Key of the task for which TaskRun is being created. If not specified, the AggregatorKey in RegistryMetadata will be assumed to be the TaskKey
:param task_key: The task_key of this TaskRun.
:type: str
"""
self._task_key = task_key
@property
def external_id(self):
"""
Gets the external_id of this TaskRun.
The external identifier for the task run.
:return: The external_id of this TaskRun.
:rtype: str
"""
return self._external_id
@external_id.setter
def external_id(self, external_id):
"""
Sets the external_id of this TaskRun.
The external identifier for the task run.
:param external_id: The external_id of this TaskRun.
:type: str
"""
self._external_id = external_id
@property
def retry_attempt(self):
"""
Gets the retry_attempt of this TaskRun.
Holds the particular attempt number.
:return: The retry_attempt of this TaskRun.
:rtype: int
"""
return self._retry_attempt
@retry_attempt.setter
def retry_attempt(self, retry_attempt):
"""
Sets the retry_attempt of this TaskRun.
Holds the particular attempt number.
:param retry_attempt: The retry_attempt of this TaskRun.
:type: int
"""
self._retry_attempt = retry_attempt
@property
def task_schedule(self):
"""
Gets the task_schedule of this TaskRun.
:return: The task_schedule of this TaskRun.
:rtype: oci.data_integration.models.TaskSchedule
"""
return self._task_schedule
@task_schedule.setter
def task_schedule(self, task_schedule):
"""
Sets the task_schedule of this TaskRun.
:param task_schedule: The task_schedule of this TaskRun.
:type: oci.data_integration.models.TaskSchedule
"""
self._task_schedule = task_schedule
@property
def metrics(self):
"""
Gets the metrics of this TaskRun.
A map of metrics for the run.
:return: The metrics of this TaskRun.
:rtype: dict(str, float)
"""
return self._metrics
@metrics.setter
def metrics(self, metrics):
"""
Sets the metrics of this TaskRun.
A map of metrics for the run.
:param metrics: The metrics of this TaskRun.
:type: dict(str, float)
"""
self._metrics = metrics
@property
def outputs(self):
"""
Gets the outputs of this TaskRun.
A map of the outputs of the run.
:return: The outputs of this TaskRun.
:rtype: dict(str, ParameterValue)
"""
return self._outputs
@outputs.setter
def outputs(self, outputs):
"""
Sets the outputs of this TaskRun.
A map of the outputs of the run.
:param outputs: The outputs of this TaskRun.
:type: dict(str, ParameterValue)
"""
self._outputs = outputs
@property
def execution_errors(self):
"""
Gets the execution_errors of this TaskRun.
An array of execution errors from the run.
:return: The execution_errors of this TaskRun.
:rtype: list[str]
"""
return self._execution_errors
@execution_errors.setter
def execution_errors(self, execution_errors):
"""
Sets the execution_errors of this TaskRun.
An array of execution errors from the run.
:param execution_errors: The execution_errors of this TaskRun.
:type: list[str]
"""
self._execution_errors = execution_errors
@property
def termination_errors(self):
"""
Gets the termination_errors of this TaskRun.
An array of termination errors from the run.
:return: The termination_errors of this TaskRun.
:rtype: list[str]
"""
return self._termination_errors
@termination_errors.setter
def termination_errors(self, termination_errors):
"""
Sets the termination_errors of this TaskRun.
An array of termination errors from the run.
:param termination_errors: The termination_errors of this TaskRun.
:type: list[str]
"""
self._termination_errors = termination_errors
@property
def auth_mode(self):
"""
Gets the auth_mode of this TaskRun.
The autorization mode for when the task was executed.
Allowed values for this property are: "OBO", "RESOURCE_PRINCIPAL", "USER_CERTIFICATE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The auth_mode of this TaskRun.
:rtype: str
"""
return self._auth_mode
@auth_mode.setter
def auth_mode(self, auth_mode):
"""
Sets the auth_mode of this TaskRun.
The autorization mode for when the task was executed.
:param auth_mode: The auth_mode of this TaskRun.
:type: str
"""
allowed_values = ["OBO", "RESOURCE_PRINCIPAL", "USER_CERTIFICATE"]
if not value_allowed_none_or_none_sentinel(auth_mode, allowed_values):
auth_mode = 'UNKNOWN_ENUM_VALUE'
self._auth_mode = auth_mode
@property
def opc_request_id(self):
"""
Gets the opc_request_id of this TaskRun.
The OPC request ID of execution of the task run.
:return: The opc_request_id of this TaskRun.
:rtype: str
"""
return self._opc_request_id
@opc_request_id.setter
def opc_request_id(self, opc_request_id):
"""
Sets the opc_request_id of this TaskRun.
The OPC request ID of execution of the task run.
:param opc_request_id: The opc_request_id of this TaskRun.
:type: str
"""
self._opc_request_id = opc_request_id
@property
def object_status(self):
"""
Gets the object_status of this TaskRun.
The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
:return: The object_status of this TaskRun.
:rtype: int
"""
return self._object_status
@object_status.setter
def object_status(self, object_status):
"""
Sets the object_status of this TaskRun.
The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
:param object_status: The object_status of this TaskRun.
:type: int
"""
self._object_status = object_status
@property
def task_type(self):
"""
Gets the task_type of this TaskRun.
The type of task run.
Allowed values for this property are: "INTEGRATION_TASK", "DATA_LOADER_TASK", "PIPELINE_TASK", "SQL_TASK", "OCI_DATAFLOW_TASK", "REST_TASK", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The task_type of this TaskRun.
:rtype: str
"""
return self._task_type
@task_type.setter
def task_type(self, task_type):
"""
Sets the task_type of this TaskRun.
The type of task run.
:param task_type: The task_type of this TaskRun.
:type: str
"""
allowed_values = ["INTEGRATION_TASK", "DATA_LOADER_TASK", "PIPELINE_TASK", "SQL_TASK", "OCI_DATAFLOW_TASK", "REST_TASK"]
if not value_allowed_none_or_none_sentinel(task_type, allowed_values):
task_type = 'UNKNOWN_ENUM_VALUE'
self._task_type = task_type
@property
def identifier(self):
"""
Gets the identifier of this TaskRun.
Value can only contain upper case letters, underscore and numbers. It should begin with upper case letter or underscore. The value can be modified.
:return: The identifier of this TaskRun.
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""
Sets the identifier of this TaskRun.
Value can only contain upper case letters, underscore and numbers. It should begin with upper case letter or underscore. The value can be modified.
:param identifier: The identifier of this TaskRun.
:type: str
"""
self._identifier = identifier
@property
def metadata(self):
"""
Gets the metadata of this TaskRun.
:return: The metadata of this TaskRun.
:rtype: oci.data_integration.models.ObjectMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this TaskRun.
:param metadata: The metadata of this TaskRun.
:type: oci.data_integration.models.ObjectMetadata
"""
self._metadata = metadata
@property
def key_map(self):
"""
Gets the key_map of this TaskRun.
A key map. If provided, key is replaced with generated key. This structure provides mapping between user provided key and generated key.
:return: The key_map of this TaskRun.
:rtype: dict(str, str)
"""
return self._key_map
@key_map.setter
def key_map(self, key_map):
"""
Sets the key_map of this TaskRun.
A key map. If provided, key is replaced with generated key. This structure provides mapping between user provided key and generated key.
:param key_map: The key_map of this TaskRun.
:type: dict(str, str)
"""
self._key_map = key_map
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [
"oci.util.formatted_flat_dict",
"oci.util.value_allowed_none_or_none_sentinel"
] | [((35018, 35043), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (35037, 35043), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((19160, 19219), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['status', 'allowed_values'], {}), '(status, allowed_values)\n', (19195, 19219), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((24493, 24568), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['expected_duration_unit', 'allowed_values'], {}), '(expected_duration_unit, allowed_values)\n', (24528, 24568), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((30414, 30476), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['auth_mode', 'allowed_values'], {}), '(auth_mode, allowed_values)\n', (30449, 30476), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((32807, 32869), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['task_type', 'allowed_values'], {}), '(task_type, allowed_values)\n', (32842, 32869), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
#!/usr/bin/env python
# pylint: disable=disallowed-name
# pylint: disable=missing-class-docstring,
# pylint: disable=missing-function-docstring,
# pylint: disable=no-self-use
import unittest
from paramobject import ParametrizedObject, parameter, Parameter
class TestParametrizedObject(unittest.TestCase):
def test_basic_values(self):
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=42)
bar = Parameter()
@parameter(stored=True, default=123)
def baz(self):
return self.params_storage['baz']
@parameter(stored=True)
def qux(self):
return self.params_storage['qux']
@parameter
def quux(self):
return 11 * self.bar
# missing mandatory parameter value must raise ValueError
self.assertRaises(ValueError, ClassUnderTest)
self.assertRaises(ValueError, ClassUnderTest, bar=1)
self.assertRaises(ValueError, ClassUnderTest, qux=2)
# check correct default values
obj = ClassUnderTest(bar=1, qux=2)
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.baz, 123)
# check correct values
obj = ClassUnderTest(foo=1, bar=2, baz=3, qux=4)
self.assertEqual(obj.foo, 1)
self.assertEqual(obj.bar, 2)
self.assertEqual(obj.baz, 3)
self.assertEqual(obj.qux, 4)
self.assertEqual(obj.quux, 22)
def test_wither(self):
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=42)
bar = Parameter(default=77)
@bar.wither
def with_bar(self, bar, as_string=False):
if as_string:
bar = str(bar)
return self.with_params(bar=bar)
# check values
obj = ClassUnderTest()
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 77)
# check default wither
obj = ClassUnderTest()
self.assertEqual(obj.with_foo(10).foo, 10)
# check custom wither
obj = ClassUnderTest()
self.assertEqual(obj.with_bar(10).bar, 10)
self.assertEqual(obj.with_bar(10, as_string=True).bar, '10')
def test_caster(self):
class ClassUnderTest(ParametrizedObject):
radius = Parameter(default=10)
@parameter
def diameter(self):
return 2 * self.radius
@radius.caster
def cast_radius(self, default, **kwargs):
radius = kwargs.get('radius', default)
if 'diameter' in kwargs:
radius = kwargs['diameter'] / 2
return radius
# check default
obj = ClassUnderTest()
self.assertEqual(obj.radius, 10)
self.assertEqual(obj.diameter, 20)
# check init with radius
obj = ClassUnderTest(radius=100)
self.assertEqual(obj.radius, 100)
self.assertEqual(obj.diameter, 200)
# check init with diameter
obj = ClassUnderTest(diameter=100)
self.assertEqual(obj.radius, 50)
self.assertEqual(obj.diameter, 100)
# check wither
obj = ClassUnderTest()
self.assertEqual(obj.with_radius(100).diameter, 200)
def test_subclassing(self):
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=42)
class SubClassUnderTest(ClassUnderTest):
bar = Parameter(default=77)
# check values
obj = SubClassUnderTest()
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 77)
def test_nested_parametrized_object(self):
class ContainedClassUnderTest(ParametrizedObject):
bar = Parameter(default=42)
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=ContainedClassUnderTest())
# check value
obj = ClassUnderTest()
self.assertEqual(obj.foo.bar, 42)
# check wither, contained class must return an instance of the
# containing class
obj = ClassUnderTest()
self.assertIsInstance(obj.foo.with_bar(10), ClassUnderTest)
self.assertEqual(obj.foo.with_bar(10).foo.bar, 10)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"paramobject.Parameter",
"paramobject.parameter"
] | [((4314, 4329), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4327, 4329), False, 'import unittest\n'), ((413, 434), 'paramobject.Parameter', 'Parameter', ([], {'default': '(42)'}), '(default=42)\n', (422, 434), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((453, 464), 'paramobject.Parameter', 'Parameter', ([], {}), '()\n', (462, 464), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((479, 514), 'paramobject.parameter', 'parameter', ([], {'stored': '(True)', 'default': '(123)'}), '(stored=True, default=123)\n', (488, 514), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((606, 628), 'paramobject.parameter', 'parameter', ([], {'stored': '(True)'}), '(stored=True)\n', (615, 628), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((1572, 1593), 'paramobject.Parameter', 'Parameter', ([], {'default': '(42)'}), '(default=42)\n', (1581, 1593), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((1612, 1633), 'paramobject.Parameter', 'Parameter', ([], {'default': '(77)'}), '(default=77)\n', (1621, 1633), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((2357, 2378), 'paramobject.Parameter', 'Parameter', ([], {'default': '(10)'}), '(default=10)\n', (2366, 2378), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((3420, 3441), 'paramobject.Parameter', 'Parameter', ([], {'default': '(42)'}), '(default=42)\n', (3429, 3441), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((3510, 3531), 'paramobject.Parameter', 'Parameter', ([], {'default': '(77)'}), '(default=77)\n', (3519, 3531), False, 'from paramobject import ParametrizedObject, parameter, Parameter\n'), ((3792, 3813), 'paramobject.Parameter', 'Parameter', ([], {'default': '(42)'}), '(default=42)\n', (3801, 3813), False, 'from paramobject import ParametrizedObject, parameter, 
Parameter\n')] |
"""
Copyright 2017-2020 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import logging.config
from configparser import ConfigParser
from os import makedirs
from os.path import dirname, expandvars, isfile, join, realpath
from von_anchor.frill import do_wait, inis2dict
from app.cache import MEM_CACHE
def init_logging() -> None:
"""
Initialize logging configuration.
"""
dir_log = join(dirname(realpath(__file__)), 'log')
makedirs(dir_log, exist_ok=True)
path_log = join(dir_log, 'von_tails.log')
logging.basicConfig(
filename=path_log,
level=logging.INFO,
format='%(asctime)-15s | %(levelname)-8s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger('asyncio').setLevel(logging.ERROR)
logging.getLogger('aiocache').setLevel(logging.ERROR)
logging.getLogger('indy').setLevel(logging.CRITICAL)
logging.getLogger('von_anchor').setLevel(logging.INFO)
logging.getLogger('von_tails').setLevel(logging.INFO)
def set_config() -> dict:
"""
Read configuration file content into memory cache.
:return: configuration dict
"""
ini_path = join(dirname(realpath(__file__)), 'config', 'config.ini')
do_wait(MEM_CACHE.set('config.ini', ini_path))
do_wait(MEM_CACHE.delete('config'))
do_wait(MEM_CACHE.set('config', inis2dict(ini_path)))
return do_wait(MEM_CACHE.get('config'))
| [
"logging.basicConfig",
"logging.getLogger",
"os.makedirs",
"os.path.join",
"von_anchor.frill.inis2dict",
"os.path.realpath",
"app.cache.MEM_CACHE.delete",
"app.cache.MEM_CACHE.get",
"app.cache.MEM_CACHE.set"
] | [((1022, 1054), 'os.makedirs', 'makedirs', (['dir_log'], {'exist_ok': '(True)'}), '(dir_log, exist_ok=True)\n', (1030, 1054), False, 'from os import makedirs\n'), ((1070, 1100), 'os.path.join', 'join', (['dir_log', '"""von_tails.log"""'], {}), "(dir_log, 'von_tails.log')\n", (1074, 1100), False, 'from os.path import dirname, expandvars, isfile, join, realpath\n'), ((1106, 1260), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'path_log', 'level': 'logging.INFO', 'format': '"""%(asctime)-15s | %(levelname)-8s | %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(filename=path_log, level=logging.INFO, format=\n '%(asctime)-15s | %(levelname)-8s | %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (1125, 1260), False, 'import logging\n'), ((1791, 1828), 'app.cache.MEM_CACHE.set', 'MEM_CACHE.set', (['"""config.ini"""', 'ini_path'], {}), "('config.ini', ini_path)\n", (1804, 1828), False, 'from app.cache import MEM_CACHE\n'), ((1843, 1869), 'app.cache.MEM_CACHE.delete', 'MEM_CACHE.delete', (['"""config"""'], {}), "('config')\n", (1859, 1869), False, 'from app.cache import MEM_CACHE\n'), ((1949, 1972), 'app.cache.MEM_CACHE.get', 'MEM_CACHE.get', (['"""config"""'], {}), "('config')\n", (1962, 1972), False, 'from app.cache import MEM_CACHE\n'), ((990, 1008), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (998, 1008), False, 'from os.path import dirname, expandvars, isfile, join, realpath\n'), ((1288, 1316), 'logging.getLogger', 'logging.getLogger', (['"""asyncio"""'], {}), "('asyncio')\n", (1305, 1316), False, 'import logging\n'), ((1345, 1374), 'logging.getLogger', 'logging.getLogger', (['"""aiocache"""'], {}), "('aiocache')\n", (1362, 1374), False, 'import logging\n'), ((1403, 1428), 'logging.getLogger', 'logging.getLogger', (['"""indy"""'], {}), "('indy')\n", (1420, 1428), False, 'import logging\n'), ((1460, 1491), 'logging.getLogger', 'logging.getLogger', (['"""von_anchor"""'], {}), "('von_anchor')\n", (1477, 1491), False, 
'import logging\n'), ((1519, 1549), 'logging.getLogger', 'logging.getLogger', (['"""von_tails"""'], {}), "('von_tails')\n", (1536, 1549), False, 'import logging\n'), ((1734, 1752), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (1742, 1752), False, 'from os.path import dirname, expandvars, isfile, join, realpath\n'), ((1907, 1926), 'von_anchor.frill.inis2dict', 'inis2dict', (['ini_path'], {}), '(ini_path)\n', (1916, 1926), False, 'from von_anchor.frill import do_wait, inis2dict\n')] |
from django.contrib import admin
from .models import Comment
class CommentAdmin(admin.ModelAdmin):
list_display = ("article", "to_comment", "status", "owner", "content")
admin.site.register(Comment, CommentAdmin)
| [
"django.contrib.admin.site.register"
] | [((178, 220), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment', 'CommentAdmin'], {}), '(Comment, CommentAdmin)\n', (197, 220), False, 'from django.contrib import admin\n')] |
from flask import Flask
import os
from application.model.entity.aula import Aula
from application.model.entity.disciplina import Disciplina
app = Flask(__name__, static_folder=os.path.abspath("application/view/static"), template_folder=os.path.abspath("application/view/templates"))
aula1 = Aula(1, "Aula 1", "Introdução ao Linux", "Sistemas Operacionais")
aula2 = Aula(2, "Aula 1", "Introdução ao grid layout e flexbox", "Interface com Usuário")
aula3 = Aula(3, "Aula 2", "Introdução ao Windows", "Sistemas Operacionais")
aula4 = Aula(4, "Aula 2", "Práticas com CSS 3", "Interface com Usuário")
todas_aulas = [aula1, aula2, aula3, aula4]
disciplina1 = Disciplina(1, "Sistemas Operacionais", "Felipe Melo", 4, [aula1, aula3])
disciplina2 = Disciplina(2, "Interface com Usuário", "Tássio Auad", 4, [aula2, aula4])
todas_disciplinas = [disciplina1, disciplina2]
from application.controller import home_controller
from application.controller import disciplina_controller | [
"application.model.entity.aula.Aula",
"application.model.entity.disciplina.Disciplina",
"os.path.abspath"
] | [((304, 369), 'application.model.entity.aula.Aula', 'Aula', (['(1)', '"""Aula 1"""', '"""Introdução ao Linux"""', '"""Sistemas Operacionais"""'], {}), "(1, 'Aula 1', 'Introdução ao Linux', 'Sistemas Operacionais')\n", (308, 369), False, 'from application.model.entity.aula import Aula\n'), ((379, 464), 'application.model.entity.aula.Aula', 'Aula', (['(2)', '"""Aula 1"""', '"""Introdução ao grid layout e flexbox"""', '"""Interface com Usuário"""'], {}), "(2, 'Aula 1', 'Introdução ao grid layout e flexbox',\n 'Interface com Usuário')\n", (383, 464), False, 'from application.model.entity.aula import Aula\n'), ((470, 537), 'application.model.entity.aula.Aula', 'Aula', (['(3)', '"""Aula 2"""', '"""Introdução ao Windows"""', '"""Sistemas Operacionais"""'], {}), "(3, 'Aula 2', 'Introdução ao Windows', 'Sistemas Operacionais')\n", (474, 537), False, 'from application.model.entity.aula import Aula\n'), ((547, 611), 'application.model.entity.aula.Aula', 'Aula', (['(4)', '"""Aula 2"""', '"""Práticas com CSS 3"""', '"""Interface com Usuário"""'], {}), "(4, 'Aula 2', 'Práticas com CSS 3', 'Interface com Usuário')\n", (551, 611), False, 'from application.model.entity.aula import Aula\n'), ((673, 745), 'application.model.entity.disciplina.Disciplina', 'Disciplina', (['(1)', '"""Sistemas Operacionais"""', '"""Felipe Melo"""', '(4)', '[aula1, aula3]'], {}), "(1, 'Sistemas Operacionais', 'Felipe Melo', 4, [aula1, aula3])\n", (683, 745), False, 'from application.model.entity.disciplina import Disciplina\n'), ((761, 833), 'application.model.entity.disciplina.Disciplina', 'Disciplina', (['(2)', '"""Interface com Usuário"""', '"""Tássio Auad"""', '(4)', '[aula2, aula4]'], {}), "(2, 'Interface com Usuário', 'Tássio Auad', 4, [aula2, aula4])\n", (771, 833), False, 'from application.model.entity.disciplina import Disciplina\n'), ((184, 226), 'os.path.abspath', 'os.path.abspath', (['"""application/view/static"""'], {}), "('application/view/static')\n", (199, 226), False, 'import os\n'), 
((244, 289), 'os.path.abspath', 'os.path.abspath', (['"""application/view/templates"""'], {}), "('application/view/templates')\n", (259, 289), False, 'import os\n')] |
from typing import Callable
from putput.presets import displaCy
from putput.presets import iob2
from putput.presets import luis
from putput.presets import stochastic
def get_preset(preset: str) -> Callable:
"""A factory that gets a 'preset' Callable.
Args:
preset: the preset's name.
Returns:
The return value of calling a preset's 'preset'
function without arguments.
Examples:
>>> from pathlib import Path
>>> from putput.pipeline import Pipeline
>>> pattern_def_path = Path(__file__).parent.parent.parent / 'tests' / 'doc' / 'example_pattern_definition.yml'
>>> dynamic_token_patterns_map = {'ITEM': ('fries',)}
>>> p = Pipeline.from_preset('IOB2',
... pattern_def_path,
... dynamic_token_patterns_map=dynamic_token_patterns_map)
>>> generator = p.flow(disable_progress_bar=True)
>>> for utterance, tokens, groups in generator:
... print(utterance)
... print(tokens)
... print(groups)
... break
can she get fries can she get fries and fries
('B-ADD I-ADD I-ADD', 'B-ITEM', 'B-ADD I-ADD I-ADD', 'B-ITEM', 'B-CONJUNCTION', 'B-ITEM')
('B-ADD_ITEM I-ADD_ITEM I-ADD_ITEM I-ADD_ITEM', 'B-ADD_ITEM I-ADD_ITEM I-ADD_ITEM I-ADD_ITEM',
'B-None', 'B-None')
"""
supported_presets = ('IOB2', 'DISPLACY', 'LUIS', 'STOCHASTIC')
if preset == 'IOB2':
return iob2.preset()
if preset == 'DISPLACY':
return displaCy.preset()
if preset == 'LUIS':
return luis.preset()
if preset == 'STOCHASTIC': # pragma: no cover
return stochastic.preset()
raise ValueError('Unrecoginzed preset. Please choose from the supported presets: {}'.format(supported_presets))
| [
"putput.presets.displaCy.preset",
"putput.presets.luis.preset",
"putput.presets.stochastic.preset",
"putput.presets.iob2.preset"
] | [((1513, 1526), 'putput.presets.iob2.preset', 'iob2.preset', ([], {}), '()\n', (1524, 1526), False, 'from putput.presets import iob2\n'), ((1571, 1588), 'putput.presets.displaCy.preset', 'displaCy.preset', ([], {}), '()\n', (1586, 1588), False, 'from putput.presets import displaCy\n'), ((1629, 1642), 'putput.presets.luis.preset', 'luis.preset', ([], {}), '()\n', (1640, 1642), False, 'from putput.presets import luis\n'), ((1708, 1727), 'putput.presets.stochastic.preset', 'stochastic.preset', ([], {}), '()\n', (1725, 1727), False, 'from putput.presets import stochastic\n')] |
from hqca.core import *
import numpy as np
from hqca.tools import *
class SingleQubitHamiltonian(Hamiltonian):
def __init__(self,sq=True,
**kw
):
self._order = 1
self._model = 'sq'
self._qubOp = ''
self.No_tot = 1
self.Ne_tot = 1
self.real = True
self.imag = True
self._en_c = 0
if sq:
self._set_operator(**kw)
else:
self._set_bloch_sphere(**kw)
def _set_operator(self,p=0,h=0,c=0,a=0):
op = Operator()
for i,s in zip([c,a,p,h],['+','-','p','h']):
temp = QubitOperator(i,indices=[0],sqOp=s)
temp.generateOperators(Nq=1,real=True,imag=True)
op+= temp.formOperator()
self._qubOp = op
print('Hamiltonian operators: ')
print(op)
print('--- --- --- --- ---')
self._matrix_from_op()
def _matrix_from_op(self):
mat = np.zeros((2,2),dtype=np.complex_)
for i in self._qubOp.op:
cir = Circ(1)
if i.p=='X':
cir.x(0)
elif i.p=='Y':
cir.y(0)
elif i.p=='Z':
cir.z(0)
mat+=i.c*cir.m
self.ef = np.min(np.linalg.eigvalsh(mat))
self._matrix = np.array([mat])
@property
def qubOp(self):
return self._qubOp
@qubOp.setter
def qubOp(self,a):
self._qubOp = a
@property
def matrix(self):
return self._matrix
@matrix.setter
def matrix(self,a):
self._matrix = a
@property
def order(self):
return self._order
@order.setter
def order(self,a):
self._order = a
@property
def model(self):
return self._model
@model.setter
def model(self,mod):
self._model = mod
| [
"numpy.array",
"numpy.zeros",
"numpy.linalg.eigvalsh"
] | [((953, 988), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex_'}), '((2, 2), dtype=np.complex_)\n', (961, 988), True, 'import numpy as np\n'), ((1300, 1315), 'numpy.array', 'np.array', (['[mat]'], {}), '([mat])\n', (1308, 1315), True, 'import numpy as np\n'), ((1252, 1275), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', (['mat'], {}), '(mat)\n', (1270, 1275), True, 'import numpy as np\n')] |
# Author: <NAME>, <EMAIL>
# Dec 02, 2020
# Copyright 2020 <NAME>
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.spatial import distance as dist
import scipy.io
import pickle
## TAPTC Dataset
group_list = [1,2]
instance_list = [0, 1, 2]
ratio_deadline_list = [1, 2, 3, 4]
robotSize_list = [2, 3, 5, 7]
for G in group_list:
for D in ratio_deadline_list:
for I in instance_list:
for R in robotSize_list:
agent_name = "a"+str(R)+"i0"+str(I)
data_name = "r"+str(G)+str(D)+agent_name
dir_name = "group"+str(G)
file_name = dir_name+"/"+data_name+".txt"
tasks = pd.read_csv(file_name, sep=" ", header=None, skiprows=1)
tasks.columns = ["id", "x", "y", "w", "T"]
file_name = "agent/"+agent_name+".txt"
robots = pd.read_csv(file_name, sep=" ", header=None, skiprows=1)
robots.columns = ["id", "x", "y", "c"]
print(robots)
exit() | [
"pandas.read_csv"
] | [((705, 761), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '""" """', 'header': 'None', 'skiprows': '(1)'}), "(file_name, sep=' ', header=None, skiprows=1)\n", (716, 761), True, 'import pandas as pd\n'), ((901, 957), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '""" """', 'header': 'None', 'skiprows': '(1)'}), "(file_name, sep=' ', header=None, skiprows=1)\n", (912, 957), True, 'import pandas as pd\n')] |
#!/usr/bin/python2
import os
from plumbum import local, FG
from plumbum.cmd import git
# the commits already tested
HISTORY = '/home/cmr/benches/data'
BUILDDIR = '/mnt/rustb'
BENCH_OVERRIDE = '/home/cmr/benches/bench-override.txt'
def run(hash):
local['benchit.py'][hash] & FG
for hash in open(BENCH_OVERRIDE).read().split('\n'):
if len(hash) == 40:
local['benchit.py'][hash] & FG
for d in os.listdir(BUILDDIR):
if not os.path.exists(os.path.join(HISTORY, d)):
run(d)
| [
"os.listdir",
"os.path.join"
] | [((413, 433), 'os.listdir', 'os.listdir', (['BUILDDIR'], {}), '(BUILDDIR)\n', (423, 433), False, 'import os\n'), ((461, 485), 'os.path.join', 'os.path.join', (['HISTORY', 'd'], {}), '(HISTORY, d)\n', (473, 485), False, 'import os\n')] |
import RPi.GPIO as GPIO
import time
import datetime
from ReadWriteConfig import *
import Adafruit_ADS1x15
now = datetime.datetime.now()
print("Starting CheckWater.py", str(now))
adc = Adafruit_ADS1x15.ADS1015() # Pick Sensors
GAIN = 0 #import gain for adc reading
SS_COUNT = 0
with open("PlantMgr.xml", "r") as f:
content = f.read()
y = BeautifulSoup(content, features = "lxml")
GAIN = int(y.find("gain").text)
SS_COUNT = int(y.find("ss_count").text)
mSS_Array = np.full(5, SoilSensors(0,0.0, 0.0,0, 0.0))
SS_ARRAY = ReadWriteConfig(0, mSS_Array, SS_COUNT)
#print("SS_ARRAY Value: ", SS_ARRAY)
PWM0Pin = 18
PWM1Pin = 12
LED0Pin = 14
# pwm0 setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(PWM0Pin, GPIO.OUT)
GPIO.output(PWM0Pin, GPIO.LOW)
pwm0 = GPIO.PWM(PWM0Pin, 30000)
# pwm1 setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(PWM1Pin, GPIO.OUT)
GPIO.output(PWM1Pin, GPIO.LOW)
pwm1 = GPIO.PWM(PWM1Pin, 30000)
pwm_array = [pwm0, pwm1]
GPIO.setup(LED0Pin, GPIO.OUT)
GPIO.output(LED0Pin, GPIO.HIGH)
for item in SS_ARRAY:
print("Sensor Index: ", item.index)
print("Sensor #", item.index," config setpoint:", item.setpoint)
print("Sensor #", item.index," config Cal A value:", round(item.a,2))
print("Sensor #", item.index," config Cal B value:", round(item.b,2))
SensorVal = 0
for _ in range(5):
SensorVal += adc.read_adc(item.index, gain = GAIN)
time.sleep(0.1)
SensorVal = SensorVal / 5
print("Sensor #", item.index," sensor reading:", SensorVal)
a = item.a
b = item.b
setpoint = item.setpoint
print("Sensor #", item.index," recalibrated (0-100):", round(SensorVal * a + b,2), "/", item.setpoint)
# run at low power to indicate running
pwm_array[item.index].start(10)
time.sleep(0.2)
pwm_array[item.index].stop(0)
overflowCounter = 0
while((SensorVal * a + b) > setpoint):
if overflowCounter > 40:
print("Error: Reached overflow counter.")
break
# check for wire open on sensor
if SensorVal < 1200:
# Run motors
pwm_array[item.index].start(100)
time.sleep(2)
pwm_array[item.index].stop(0)
overflowCounter += 1
SensorVal = 0
# Read new sensor values
for _ in range(5):
SensorVal += adc.read_adc(item.index, gain = GAIN)
time.sleep(0.1)
SensorVal = SensorVal / 5
print("Counter:", overflowCounter, "Sensor #", item.index," recalibrated (0-100):", SensorVal * a + b)
else:
print("Error: Check sensor connection.")
break
pwm_array[item.index].stop(0)
# hold LED on after completion
time.sleep(10)
GPIO.output(LED0Pin, GPIO.LOW)
GPIO.cleanup()
print("Ending CheckWater.py", str(now))
| [
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"time.sleep",
"RPi.GPIO.PWM",
"datetime.datetime.now",
"Adafruit_ADS1x15.ADS1015",
"RPi.GPIO.setmode"
] | [((113, 136), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (134, 136), False, 'import datetime\n'), ((187, 213), 'Adafruit_ADS1x15.ADS1015', 'Adafruit_ADS1x15.ADS1015', ([], {}), '()\n', (211, 213), False, 'import Adafruit_ADS1x15\n'), ((671, 693), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (683, 693), True, 'import RPi.GPIO as GPIO\n'), ((694, 723), 'RPi.GPIO.setup', 'GPIO.setup', (['PWM0Pin', 'GPIO.OUT'], {}), '(PWM0Pin, GPIO.OUT)\n', (704, 723), True, 'import RPi.GPIO as GPIO\n'), ((724, 754), 'RPi.GPIO.output', 'GPIO.output', (['PWM0Pin', 'GPIO.LOW'], {}), '(PWM0Pin, GPIO.LOW)\n', (735, 754), True, 'import RPi.GPIO as GPIO\n'), ((762, 786), 'RPi.GPIO.PWM', 'GPIO.PWM', (['PWM0Pin', '(30000)'], {}), '(PWM0Pin, 30000)\n', (770, 786), True, 'import RPi.GPIO as GPIO\n'), ((801, 823), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (813, 823), True, 'import RPi.GPIO as GPIO\n'), ((824, 853), 'RPi.GPIO.setup', 'GPIO.setup', (['PWM1Pin', 'GPIO.OUT'], {}), '(PWM1Pin, GPIO.OUT)\n', (834, 853), True, 'import RPi.GPIO as GPIO\n'), ((854, 884), 'RPi.GPIO.output', 'GPIO.output', (['PWM1Pin', 'GPIO.LOW'], {}), '(PWM1Pin, GPIO.LOW)\n', (865, 884), True, 'import RPi.GPIO as GPIO\n'), ((892, 916), 'RPi.GPIO.PWM', 'GPIO.PWM', (['PWM1Pin', '(30000)'], {}), '(PWM1Pin, 30000)\n', (900, 916), True, 'import RPi.GPIO as GPIO\n'), ((944, 973), 'RPi.GPIO.setup', 'GPIO.setup', (['LED0Pin', 'GPIO.OUT'], {}), '(LED0Pin, GPIO.OUT)\n', (954, 973), True, 'import RPi.GPIO as GPIO\n'), ((974, 1005), 'RPi.GPIO.output', 'GPIO.output', (['LED0Pin', 'GPIO.HIGH'], {}), '(LED0Pin, GPIO.HIGH)\n', (985, 1005), True, 'import RPi.GPIO as GPIO\n'), ((2478, 2492), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2488, 2492), False, 'import time\n'), ((2493, 2523), 'RPi.GPIO.output', 'GPIO.output', (['LED0Pin', 'GPIO.LOW'], {}), '(LED0Pin, GPIO.LOW)\n', (2504, 2523), True, 'import RPi.GPIO as GPIO\n'), ((2524, 2538), 
'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (2536, 2538), True, 'import RPi.GPIO as GPIO\n'), ((1699, 1714), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1709, 1714), False, 'import time\n'), ((1365, 1380), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1375, 1380), False, 'import time\n'), ((2001, 2014), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2011, 2014), False, 'import time\n'), ((2200, 2215), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2210, 2215), False, 'import time\n')] |
import discord
import ctftime
import os
import random
from discord.ext import commands, tasks
from datetime import datetime
# Token generated from https://discord.com/developers/applications
# Keep this private, if exposed generate new one
TOKEN = ''
# Bot channel ID was grabbed from Settings > Appearance > Developer Mode (On). Afterwards, right click on desired channel to copy ID
BOT_CHANNEL = 0
bot = commands.Bot(command_prefix = '!')
# When bot is ready
@bot.event
async def on_ready():
print('Starting loop task')
update_channel.start()
print('pwnbot is now ready for commands.')
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.errors.CommandNotFound):
await ctx.send(':robot: *That command does not exist.\nTry:* `!help`')
return
raise error
# removes default help command
bot.remove_command('help');
# contex is passed in automatically
@bot.command()
async def help(ctx):
await ctx.send(
'```\n' +
'---------------------------------- Help ----------------------------------\n\n' +
'Usage: !<command>\n\n' +
'Event Commands:\n' +
' * !events - Displays ongoing ctf events within the week.\n' +
' * !events all - Displays ctf events for the past and next week.\n' +
' * !events next - Displays upcoming ctf events for next week.\n' +
' * !events past - Displays finished ctf events from the past week.\n\n' +
'Clear Commands:\n' +
' * !clear - Clears the last 20 messages from pwnbot in current channel.\n' +
' * !clear all - Clears all messages from pwnbot in current channel.\n' +
' * !clear last - Clears last message from pwnbot in current channel.\n\n' +
'Util Commands:\n' +
' * !ping - Checks the latency for pwnbot with date/time\n\n' +
'Misc Commands:\n' +
' * !celebrate - Celebration!!\n' +
' * !facepalm - Sometimes you just have to facepalm ...\n\n' +
'--------------------------------------------------------------------------\n' +
'```'
)
@bot.command()
async def events(ctx, arg=None):
embed_msgs = []
current_time = int(datetime.now().timestamp())
SEVEN_DAYS = ctftime.days_to_secs(7)
if arg == 'all':
start = current_time - SEVEN_DAYS
finish = current_time + SEVEN_DAYS
# checks previous json events to see if its the same as the newly fetched events
embed_msgs = ctftime.get_events(start, finish)
# if there are new events, embed the new events and send to current channel
if len(embed_msgs) == 0:
await ctx.send(':robot: *There are no events happening from last week to next week.*')
return
elif arg == 'next':
start = current_time
finish = current_time + SEVEN_DAYS
embed_msgs = ctftime.get_events(start, finish, status='upcoming')
if len(embed_msgs) == 0:
await ctx.send(':robot: *There are no upcoming events next week.*')
return
elif arg == 'past':
start = current_time - SEVEN_DAYS
finish = current_time
embed_msgs = ctftime.get_events(start, finish, status='finished')
if len(embed_msgs) == 0:
await ctx.send(':robot: *There are no finished events from last week.*')
return
else:
start = current_time - SEVEN_DAYS
finish = current_time + SEVEN_DAYS
embed_msgs = ctftime.get_events(start, finish, status='update')
if len(embed_msgs) == 0:
await ctx.send(':robot: **There are no ongoing events.**')
return
for embed in embed_msgs:
await ctx.send(embed=embed)
@bot.command()
async def clear(ctx, arg=None):
if arg == 'all':
await ctx.channel.purge(limit=200, check=is_bot)
if arg == 'last':
await ctx.channel.purge(limit=1, check=is_bot)
else:
await ctx.channel.purge(limit=20, check=is_bot)
@bot.command()
async def ping(ctx):
await ctx.send(f"Pong!\n[**{round(bot.latency * 1000)}ms**]: *Current date/time: {datetime.now()}*")
@bot.command()
async def celebrate(ctx):
await ctx.send('\o/ :confetti_ball: :tada:')
@bot.command()
async def facepalm(ctx):
await ctx.send(':man_facepalming:')
@bot.command()
async def pwnbot(ctx):
#-5 removes #0000 at the end of username for discord
user = str(ctx.author)[:-5]
responses = [
f"*Oh my! You caught me by surprise! How can I help, {user}?*",
f"**BRUTEFORCE!**",
f"*get pwned {user}* :computer:",
f"*You must be bored. Check out: `!events` for current ctf events :robot:*",
f"*pwnbot at your service!*",
f"!{user}",
f"*-thinks of a quirky comment-*",
f"*What do you want?*",
f"*-currently sleeping-*",
f"*At your service, {user}!*",
f"*'UNO is the best'* -pwnbot 2020",
f":robot: ||*NTk2Zjc1MjA2ZDc1NzM3NDIwNjI2NTIwNzY2NTcyNzkyMDYzNmM2NTc2NjU3MjJlMjA0ZDc5MjA2ZTYxNmQ2NTIwNjk3MzIwNzA3NzZlNjI2Zjc0MjEyMDNhMjkK*||",
f"*Beware of this command!! :robot:*",
f"*Ah yes, some human interaction. How can I assist you?*"
]
await ctx.send(random.choice(responses))
# checks if message is from bot or a command to bot. helper for clear command
def is_bot(msg):
return msg.author == bot.user or msg.content[0] == '!'
def diff_events(curr, prev):
if len(curr) != len(prev):
return True
for curr_event, prev_event in zip(curr, prev):
if curr_event.title != prev_event.title:
return True
return False
# sends update to #bot-channel with embedded new events. if events are the same as the old update,
# dont update. checks every 30 minutes for new content
prev_update = ''
bot_msg = ''
@tasks.loop(minutes=30)
async def update_channel():
channel = bot.get_channel(BOT_CHANNEL)
SEVEN_DAYS = ctftime.days_to_secs(7)
current_time = int(datetime.now().timestamp())
start = current_time - SEVEN_DAYS
finish = current_time + SEVEN_DAYS
curr_events = ctftime.get_events(start, finish, status='update')
# prev_events is used to self check bot for new events
global prev_update, bot_msg
if prev_update == '' or diff_events(curr_events, prev_update):
prev_update = curr_events
if len(curr_events) == 0 and bot_msg == '':
bot_msg = ':robot: *There are no ongoing/upcoming events. I will update this channel when I see new events.*'
await channel.send(bot_msg)
if curr_events != None:
for embed in curr_events:
await channel.send(embed=embed)
bot_msg = ''
bot.run(TOKEN)
| [
"random.choice",
"discord.ext.commands.Bot",
"datetime.datetime.now",
"ctftime.get_events",
"discord.ext.tasks.loop",
"ctftime.days_to_secs"
] | [((408, 440), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""!"""'}), "(command_prefix='!')\n", (420, 440), False, 'from discord.ext import commands, tasks\n'), ((5482, 5504), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'minutes': '(30)'}), '(minutes=30)\n', (5492, 5504), False, 'from discord.ext import commands, tasks\n'), ((2140, 2163), 'ctftime.days_to_secs', 'ctftime.days_to_secs', (['(7)'], {}), '(7)\n', (2160, 2163), False, 'import ctftime\n'), ((5589, 5612), 'ctftime.days_to_secs', 'ctftime.days_to_secs', (['(7)'], {}), '(7)\n', (5609, 5612), False, 'import ctftime\n'), ((5753, 5803), 'ctftime.get_events', 'ctftime.get_events', (['start', 'finish'], {'status': '"""update"""'}), "(start, finish, status='update')\n", (5771, 5803), False, 'import ctftime\n'), ((2363, 2396), 'ctftime.get_events', 'ctftime.get_events', (['start', 'finish'], {}), '(start, finish)\n', (2381, 2396), False, 'import ctftime\n'), ((2718, 2770), 'ctftime.get_events', 'ctftime.get_events', (['start', 'finish'], {'status': '"""upcoming"""'}), "(start, finish, status='upcoming')\n", (2736, 2770), False, 'import ctftime\n'), ((4907, 4931), 'random.choice', 'random.choice', (['responses'], {}), '(responses)\n', (4920, 4931), False, 'import random\n'), ((2097, 2111), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2109, 2111), False, 'from datetime import datetime\n'), ((2993, 3045), 'ctftime.get_events', 'ctftime.get_events', (['start', 'finish'], {'status': '"""finished"""'}), "(start, finish, status='finished')\n", (3011, 3045), False, 'import ctftime\n'), ((3272, 3322), 'ctftime.get_events', 'ctftime.get_events', (['start', 'finish'], {'status': '"""update"""'}), "(start, finish, status='update')\n", (3290, 3322), False, 'import ctftime\n'), ((5634, 5648), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5646, 5648), False, 'from datetime import datetime\n'), ((3866, 3880), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', 
(3878, 3880), False, 'from datetime import datetime\n')] |
import os
import sys
NAME = 'multipla'
PACKAGE = __import__(NAME)
AUTHOR, EMAIL = PACKAGE.__author__.rsplit(' ', 1)
with open('docs/index.rst', 'r') as INDEX:
DESCRIPTION = INDEX.readline()
with open('README.rst', 'r') as README:
LONG_DESCRIPTION = README.read()
URL = 'https://github.com/monkeython/%s' % NAME
EGG = {
'name': NAME,
'version': PACKAGE.__version__,
'author': AUTHOR,
'author_email': EMAIL.strip('<>'),
'url': URL,
'description': DESCRIPTION,
'long_description': LONG_DESCRIPTION,
'classifiers': PACKAGE.__classifiers__,
'license': 'BSD',
'keywords': PACKAGE.__keywords__,
'py_modules': [NAME],
'tests_require': ['genty'],
'test_suite': 'test_{}'.format(NAME)
}
if __name__ == '__main__':
import setuptools
setuptools.setup(**EGG)
| [
"setuptools.setup"
] | [((796, 819), 'setuptools.setup', 'setuptools.setup', ([], {}), '(**EGG)\n', (812, 819), False, 'import setuptools\n')] |
# Generic-DiscordBot
# author: github/adibarra
# imports
import os
import time
import uuid
import enum
import glob
import traceback
from zipfile import ZipFile
from DB_prefsloader import PreferenceLoader
class Importance(enum.IntEnum):
""" Enum to keep track of logger message importance """
CRIT = 0
WARN = 1
INFO = 2
DBUG = 3
class Transaction:
""" Class to keep all same transactions together """
logs = []
transaction_ID = None
def __init__(self, transaction_ID: uuid):
self.logs = []
self.transaction_ID = transaction_ID
def addMessage(self, message: str):
self.logs.append(message)
def getMessages(self):
return self.logs
class Logger:
""" Class to handle logging """
transaction_cache = []
# log message to logfile
def log(message: str, importance: int, transactionID: uuid = None, final=False):
if not PreferenceLoader.logger_enabled:
return
else:
# if logs folder does not exist then create it
try:
if not os.path.exists(os.path.dirname(os.path.realpath(__file__))+'/../logs'):
original_umask = os.umask(0)
os.makedirs(os.path.dirname(os.path.realpath(__file__))+'/../logs')
os.umask(original_umask)
except Exception as e:
print('There was an error while trying to create the logs directory:')
print(e)
# if logfile for today does not exist then create it
filePath = os.path.dirname(os.path.realpath(__file__))+'/../logs/'+time.strftime('log-%Y-%m-%d')+'.log'
if not os.path.isfile(filePath):
try:
open(filePath, 'a').close()
except Exception as e:
print('There was an error while trying to create the logfile:')
print(e)
# let transactions bunch up until final or special ones come through
if importance != None and transactionID != None and importance <= Importance[PreferenceLoader.verbositySetting].value:
found = False
for transaction in Logger.transaction_cache:
if transaction.transaction_ID == transactionID:
transaction.addMessage(time.strftime('%Y-%m-%d %H:%M:%S')+' '+(str(transactionID)[:13]+' ['+Importance(importance).name+'] '+message))
found = True
break
if not found:
newTransaction = Transaction(transactionID)
newTransaction.addMessage(time.strftime('%Y-%m-%d %H:%M:%S')+' '+(str(transactionID)[:13]+' ['+Importance(importance).name+'] '+message))
Logger.transaction_cache.append(newTransaction)
# final transaction for batch has come through, write it to the file
if final or importance == None or transactionID == None:
try:
to_write = ''
with open(filePath, 'a') as (logFile):
# if importance or transactionID are None then immediately write to logfile
if importance == None or transactionID == None:
logFile.write(message+'\n')
return
# else build to_write str from transaction_cache
else:
for transaction in Logger.transaction_cache:
if transaction.transaction_ID == transactionID:
# get matching transactions and build to_write str
for trans_message in transaction.getMessages():
if importance <= Importance[PreferenceLoader.verbositySetting].value:
to_write += trans_message+'\n'
# remove transaction from cache
Logger.transaction_cache.remove(transaction)
break
# write to logfile
if to_write != '':
logFile.write(to_write+'\n')
# if logfile gets too big (10 MB), rename current logfile and later autocreate another
if os.stat(filePath).st_size > 1e+7:
log_number = 0
# iterate through logs for the day and find largest logfile number
for name in glob.glob(filePath[:len(filePath)-4]+'*'):
if len(name.split('/')[-1].split('.')) > 2:
num = int(name.split('/')[-1].split('.')[1])
if num > log_number:
log_number = num
# rename current log to largest log number +1 then zip and delete original
fileName = (filePath[:len(filePath)-4]+'.'+str(log_number+1)+'.log').split('/')[-1]
os.rename(filePath, filePath[:len(filePath)-4]+'.'+str(log_number+1)+'.log')
with ZipFile(filePath[:len(filePath)-4]+'.'+str(log_number+1)+'.log.zip', 'w') as zip:
zip.write(filePath[:len(filePath)-4]+'.'+str(log_number+1)+'.log', fileName)
os.remove(filePath[:len(filePath)-4]+'.'+str(log_number+1)+'.log')
except Exception as e:
print('There was an error when reading or writing a file:')
print(traceback.format_exc())
| [
"traceback.format_exc",
"time.strftime",
"os.path.isfile",
"os.path.realpath",
"os.umask",
"os.stat"
] | [((1689, 1713), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (1703, 1713), False, 'import os\n'), ((1196, 1207), 'os.umask', 'os.umask', (['(0)'], {}), '(0)\n', (1204, 1207), False, 'import os\n'), ((1316, 1340), 'os.umask', 'os.umask', (['original_umask'], {}), '(original_umask)\n', (1324, 1340), False, 'import os\n'), ((1633, 1662), 'time.strftime', 'time.strftime', (['"""log-%Y-%m-%d"""'], {}), "('log-%Y-%m-%d')\n", (1646, 1662), False, 'import time\n'), ((1593, 1619), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1609, 1619), False, 'import os\n'), ((4463, 4480), 'os.stat', 'os.stat', (['filePath'], {}), '(filePath)\n', (4470, 4480), False, 'import os\n'), ((5723, 5745), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5743, 5745), False, 'import traceback\n'), ((1118, 1144), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1134, 1144), False, 'import os\n'), ((1256, 1282), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1272, 1282), False, 'import os\n'), ((2675, 2709), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (2688, 2709), False, 'import time\n'), ((2355, 2389), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (2368, 2389), False, 'import time\n')] |
from django.urls import path
from django.conf.urls import url
from store import views
from .views import getStore, addStore, updateStore, deleteStore
urlpatterns = [
url(r'getStore/', views.getStore),
url(r'addStore/', views.addStore),
url(r'updateStore/', views.updateStore),
url(r'deleteStore/', views.deleteStore),
url(r'getStoreByOwnerId/(?P<ownerId>[0-9]+)$', views.getStoreByOwnerId),
]
| [
"django.conf.urls.url"
] | [((171, 203), 'django.conf.urls.url', 'url', (['"""getStore/"""', 'views.getStore'], {}), "('getStore/', views.getStore)\n", (174, 203), False, 'from django.conf.urls import url\n'), ((210, 242), 'django.conf.urls.url', 'url', (['"""addStore/"""', 'views.addStore'], {}), "('addStore/', views.addStore)\n", (213, 242), False, 'from django.conf.urls import url\n'), ((249, 287), 'django.conf.urls.url', 'url', (['"""updateStore/"""', 'views.updateStore'], {}), "('updateStore/', views.updateStore)\n", (252, 287), False, 'from django.conf.urls import url\n'), ((294, 332), 'django.conf.urls.url', 'url', (['"""deleteStore/"""', 'views.deleteStore'], {}), "('deleteStore/', views.deleteStore)\n", (297, 332), False, 'from django.conf.urls import url\n'), ((339, 409), 'django.conf.urls.url', 'url', (['"""getStoreByOwnerId/(?P<ownerId>[0-9]+)$"""', 'views.getStoreByOwnerId'], {}), "('getStoreByOwnerId/(?P<ownerId>[0-9]+)$', views.getStoreByOwnerId)\n", (342, 409), False, 'from django.conf.urls import url\n')] |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>, SE; tamalone1
"""
import unittest
from PyNite import FEModel3D
import sys
from io import StringIO
class Test_Spring_Elements(unittest.TestCase):
''' Tests of spring members.'''
def setUp(self):
# Suppress printed output temporarily
sys.stdout = StringIO()
def tearDown(self):
# Reset the print function to normal
sys.stdout = sys.__stdout__
def test_spring_elements(self):
# A First Course in the Finite Element Method, 4th Edition
# <NAME>
# Example 2.1
# Units for this model are pounds and inches
system = FEModel3D()
system.add_node('1', 0, 0, 0)
system.add_node('2', 30, 0, 0)
system.add_node('3', 10, 0, 0)
system.add_node('4', 20, 0, 0)
# Add spring members
system.add_spring('S1', '1', '3', 1000)
system.add_spring('S2', '3', '4', 2000)
system.add_spring('S3', '4', '2', 3000)
# Define supports
system.def_support('1', True, True, True, True, True, True)
system.def_support('2', True, True, True, True, True, True)
system.def_support('3', False, True, True, True, True, True)
system.def_support('4', False, True, True, True, True, True)
# Add node loads
system.add_node_load('4', 'FX', 5000)
system.analyze(True)
# Check results
# correct_values = [('3', 0.9090909090909092),
# ('4', 1.3636363636363638),
# ('1', -909.0909090909091),
# ('2', -4090.9090909090914)]
n3_DX = system.Nodes['3'].DX['Combo 1']
self.assertAlmostEqual(n3_DX/ 0.9090909090909092, 1.0, 2)
n4_DX = system.Nodes['4'].DX['Combo 1']
self.assertAlmostEqual(n4_DX/1.3636363636363638, 1.0, 2)
n1_rxn = system.Nodes['1'].RxnFX['Combo 1']
self.assertAlmostEqual(n1_rxn/-909.0909090909091, 1.0, 2)
n2_rxn = system.Nodes['2'].RxnFX['Combo 1']
self.assertAlmostEqual(n2_rxn/-4090.9090909090914, 1.0, 2)
| [
"io.StringIO",
"PyNite.FEModel3D"
] | [((340, 350), 'io.StringIO', 'StringIO', ([], {}), '()\n', (348, 350), False, 'from io import StringIO\n'), ((679, 690), 'PyNite.FEModel3D', 'FEModel3D', ([], {}), '()\n', (688, 690), False, 'from PyNite import FEModel3D\n')] |
"""
Likelihood maximization script. This program is designed to be entirely separable from ATESA in that it can be called
manually to perform likelihood maximization to user specifications and with arbitrary input files; however, it is
required by ATESA's aimless shooting information error convergence criterion.
"""
import sys
import os
import numpy
import time
import math
import itertools
import argparse
import warnings
import pickle
import numdifftools
import statsmodels
from scipy import optimize
from scipy import stats
from scipy.special import erf
import matplotlib.pyplot as plt
try:
import gnuplotlib
gnuplot = True
except FileNotFoundError: # gnuplot not installed
gnuplot = False
def update_progress(progress, message='Progress', eta=0, quiet=False):
    """
    Write a single-line text progress bar to stdout, overwriting it in place.

    Adapted from a stackoverflow answer: https://stackoverflow.com/questions/3160699/python-progress-bar

    Parameters
    ----------
    progress : float
        Fractional completeness between 0 and 1. A value below 0 renders a
        'Halt' message; a value at or above 1 renders 'Done'.
    message : str
        Label printed before the bar itself (so as to indicate what is progressing)
    eta : int
        Estimated seconds to completion; rendered as HH:MM:SS when non-zero.
    quiet : bool
        If True, suppresses output entirely

    Returns
    -------
    None
    """
    if quiet:
        return None
    bar_length = 10  # number of bar segments; modify to change the bar's width
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1
        status = "Done! \r\n"
    filled = int(round(bar_length * progress))
    bar = "#" * filled + "-" * (bar_length - filled)
    text = "\r" + message + ": [{0}] {1}% {2}".format(bar, round(progress * 100, 2), status)
    if eta:
        # Convert the ETA in seconds into HH:MM:SS, zero-padding minutes and
        # seconds to two digits each (seconds keep a trailing space).
        eta_h = str(math.floor(eta / 3600))
        eta_m = str(math.floor((eta % 3600) / 60)).zfill(2)
        eta_s = str(math.floor((eta % 3600) % 60)).zfill(2) + ' '
        text += " ETA: " + eta_h + ':' + eta_m + ':' + eta_s
    sys.stdout.write(text)
    sys.stdout.flush()
def objective_function(params, A_data, B_data):
    """
    Evaluate the negative log likelihood function for the given parameters and lists of observations.

    The goodness of fit is measured against an error function ansatz, as described in
    Peters, 2012. Chem. Phys. Lett. 554: 248. Intended to be handed to an optimization
    routine, which supplies candidate parameter vectors to obtain the best fit.

    Parameters
    ----------
    params : list
        Parameters for the current model to be tested: a constant term followed by one
        coefficient per CV.
    A_data : list
        List of observations from aimless shooting that committed to basin "A" (usually the reactants)
    B_data : list
        List of observations from aimless shooting that committed to basin "B" (usually the products)

    Returns
    -------
    negative_log_likelihood : float
        The negative log likelihood of the fit to the ansatz for the given parameters and observations
    """
    def saturated_erf(arg):
        # Treat |arg| > 5.7 as fully saturated (erf is +/-1 to machine precision there);
        # relies on numpy broadcasting the scalar 1.0/-1.0 to arg's shape.
        return numpy.where(arg > 5.7, 1.0, numpy.where(arg < -5.7, -1.0, erf(arg)))

    if A_data and not B_data:
        q_vals = params[0] + numpy.inner(params[1:], A_data)
        log_likelihood = numpy.sum(numpy.log((1 - saturated_erf(q_vals)) / 2))
    elif B_data and not A_data:
        q_vals = params[0] + numpy.inner(params[1:], B_data)
        log_likelihood = numpy.sum(numpy.log((1 + saturated_erf(q_vals)) / 2))
    else:
        q_a = params[0] + numpy.inner(params[1:], A_data)
        q_b = params[0] + numpy.inner(params[1:], B_data)
        log_likelihood = (numpy.sum(numpy.log((1 - saturated_erf(q_a)) / 2))
                          + numpy.sum(numpy.log((1 + saturated_erf(q_b)) / 2)))
    return -1 * log_likelihood
def two_line_test_func(results, plots, two_line_threshold=0.5):
    """
    Perform a double linear regression on intersecting subsets of the data in results to determine whether to terminate
    and how many dimensions to return in the RC during two_line_test.

    Can only be called with len(results) >= 5.

    Parameters
    ----------
    results : list
        List of objects indexed by step of two_line_test, each possessing attribute 'fun' giving the optimization
        score for that step (and 'x' giving the optimized parameters, used only when plots is True)
    plots : bool
        If True, print the fitted line data (and plot it with gnuplot, when gnuplot is available)
    two_line_threshold : float
        Ratio of second slope to first slope (as a fraction) below which the two-line test can pass

    Returns
    -------
    out : int
        Index of selected 'best' RC from two-line test; or, -1 if no best RC could be determined
    """
    if len(results) < 5:
        raise RuntimeError('two_line_test can only be called with at least 5 optimized models')

    best_closest = []  # result for which the two-line intersection is closest to the lines' shared point
    for test_index in range(len(results) - 2):  # - 2 to account for minimum of two points in each line
        # Fit one line through the first (2 + test_index) scores and a second line through the remainder;
        # the two segments share the point at 1-indexed position test_index + 2
        first_segment = range(1, 3 + test_index)
        second_segment = range(first_segment[-1], len(results) + 1)
        opt1 = stats.linregress(first_segment, [results[i - 1].fun for i in first_segment])
        opt2 = stats.linregress(second_segment, [results[i - 1].fun for i in second_segment])

        # Now evaluate the point in results closest (Euclidean distance) to the intersection of the two lines
        x_intersect = (opt1.intercept - opt2.intercept) / (opt2.slope - opt1.slope)
        y_intersect = (opt1.slope * x_intersect) + opt1.intercept
        min_diff = -1   # initialize smallest distance between intersection and point
        closest = 0     # initialize [x value, distance] of closest point to intersection
        for x_val, result in enumerate(results, start=1):
            diff = numpy.sqrt((result.fun - y_intersect) ** 2 + (x_val - x_intersect) ** 2)
            if min_diff < 0 or diff < min_diff:
                min_diff = diff
                closest = [x_val, diff]

        # Keep this split only if the intersection's nearest point is the point shared by both lines,
        # and it is the nearest such intersection seen so far
        if closest[0] == test_index + 2:
            if not best_closest or closest[1] < best_closest[0][1]:
                best_closest = [closest, opt1, opt2]

    if plots and best_closest:
        # BUGFIX(review): this plot data was previously computed only when gnuplot was importable, so requesting
        # plots without gnuplot installed raised a NameError on points1 below (and an IndexError when best_closest
        # was empty). The data is now always computed when plotting is requested and a best split exists, and
        # gnuplotlib is invoked only if available.
        if len(results[0].x) + 2 == len(results[1].x):  # if this is True, results include rate-of-change terms
            min_dims = (len(results[0].x) - 1) / 2  # smallest model dimensionality to be plotted (-1 for constant)
        else:   # no rate-of-change terms
            min_dims = len(results[0].x) - 1
        points1 = [[i + min_dims for i in range(len(results))],
                   [best_closest[1].slope * (i + 1) + best_closest[1].intercept for i in range(len(results))]]
        points2 = [[i + min_dims for i in range(len(results))],
                   [best_closest[2].slope * (i + 1) + best_closest[2].intercept for i in range(len(results))]]
        if gnuplot:
            gnuplotlib.plot((numpy.asarray([item + min_dims for item in range(len(results))]),
                             numpy.asarray([result.fun for result in results])),
                            (numpy.asarray(points1[0]), numpy.asarray(points1[1]), {'legend': '1st slope: ' + '%.3f' % best_closest[1].slope}),
                            (numpy.asarray(points2[0]), numpy.asarray(points2[1]), {'legend': '2nd slope: ' + '%.3f' % best_closest[2].slope}),
                            _with='lines', terminal='dumb 80,40', unset='grid')
        print('Two_line_test plot data:')
        print(' Model scores: ' + str(numpy.asarray([result.fun for result in results])))
        print(' First line values: ' + str(points1[1]))
        print(' Second line values: ' + str(points2[1]))

    if not best_closest:    # no pairs of lines whose intersection was closest to their shared point
        print('Two line test: found no suitable model, performing an additional optimization step and retrying')
        return -1

    slope_fract = best_closest[2].slope / best_closest[1].slope
    if slope_fract > two_line_threshold:    # best point does not meet threshold for relative difference in slopes
        print('Two line test: best model has ratio of slopes ' + str(slope_fract) + ', which does not meet threshold ' +
              str(two_line_threshold) + '; performing an additional optimization step and retrying')
        return -1
    else:   # DOES meet threshold; return the index of the passing result
        return best_closest[0][0] - 1   # - 1 because of different indexing standards
def eval_rc(params, obs):
    """
    Return the reaction coordinate value for the given parameters and a single observation.

    params[0] is a constant offset; each subsequent parameter multiplies the
    corresponding entry of obs.
    """
    params = list(params)
    total = params[0]
    for idx, value in enumerate(obs):
        total += params[idx + 1] * value
    return total
def main(**kwargs):
    """
    Main runtime function of lmax.py.
    Assembles lists of models to optimize in the form of lists of CVs, passes them to optimize, interprets results, and
    repeats or terminates in accordance with argument-dependent termination criteria.
    Parameters
    ----------
    kwargs : dict
        Dictionary object containing arguments
    Returns
    -------
    None
    """
    # Ensure existence and validity of input file
    input_file = kwargs['i'][0]
    if not os.path.exists(input_file):
        raise FileNotFoundError('could not find input file: ' + input_file)
    input_file_lines = open(input_file, 'r').readlines()
    open(input_file, 'r').close()
    # Every line must start with the basin label 'A' or 'B' (aimless shooting output format)
    if False in [char == 'A' or char == 'B' for char in [line[0] for line in input_file_lines]]:
        raise RuntimeError('input file ' + input_file + ' does not have \'A\' or \'B\' as the first character in each '
                           'line. Is this the correct file? Be sure to remove any blank lines.')
    # Bring in other arguments, just for neatness
    dims = kwargs['k'][0]
    fixed = kwargs['f']  # we actually want this one to stay a list
    qdot = kwargs['q'][0]
    running = kwargs['r'][0]
    output_file = kwargs['o'][0]
    two_line_test = kwargs['two_line_test']
    plots = kwargs['plots']
    quiet = kwargs['quiet']
    two_line_threshold = kwargs['two_line_threshold'][0]
    skip = kwargs['s']  # this one also a list
    hist_bins = kwargs['hist_bins'][0]
    if not fixed == [None] and running == 0 and not two_line_test and len(fixed) > dims:
        raise RuntimeError('value of k must be less than or equal to number of fixed (-f) dimensions.')
    if not fixed == [None] and not skip == [None]:
        if any([f in skip for f in fixed]) or any([s in fixed for s in skip]):
            raise RuntimeError('the same CV cannot be indicated with both the -s and -f options at the same time.')
    # Ignore arguments as described in documentation
    if running:
        if fixed == [None]:
            fixed = []
        dims = running
    if two_line_test:
        if fixed == [None]:
            fixed = []
        dims = -1
        running = 0
    # Load settings object from .pkl file if present, to check for information error override and max_dims
    information_error_max_dims = -1
    if two_line_test:
        try:
            settings = pickle.load(open('settings.pkl', 'rb'))
            if not quiet:
                print('Loaded settings.pkl...')
            try:
                information_error_override = settings.information_error_override
                if not quiet:
                    print('Setting information_error_override = ' + str(information_error_override))
            except AttributeError:
                information_error_override = False
                if not quiet:
                    print('information_error_override is not set; defaulting to False')
            try:
                information_error_max_dims = settings.information_error_max_dims
                if not quiet:
                    print('Setting maximum number of two_line_test dimensions to: ' + str(int(information_error_max_dims)))
            except AttributeError:
                if not quiet:
                    print('information_error_max_dims is not set; defaulting to no limit')
        except FileNotFoundError:
            pass
    # Get data from input file, and determine minimum and maximum values for each CV, reduce data
    input_data = [[float(item) for item in
                   line.replace('A <- ', '').replace('B <- ', '').replace(' \n', '').replace('\n', '').split(' ')]
                  for line in input_file_lines]  # [[obs1cv1, obs1cv2], [obs2cv1, obs2cv2]]
    A_data = [[float(item) for item in line.replace('A <- ', '').replace(' \n', '').replace('\n', '').split(' ')] for
              line in input_file_lines if line[0] == 'A']
    B_data = [[float(item) for item in line.replace('B <- ', '').replace(' \n', '').replace('\n', '').split(' ')] for
              line in input_file_lines if line[0] == 'B']
    mapped = list(map(list, zip(*input_data)))  # [[obs1cv1, obs2cv1], [obs1cv2, obs2cv2]]
    minmax = [[numpy.min(item) for item in mapped], [numpy.max(item) for item in mapped]]  # [[mincv1, mincv2], [maxcv1, maxcv2]]
    N = len(input_file_lines)  # number of observations
    NA = len(A_data)  # number of observations that committed to A...
    NB = len(B_data)  # ... and to B
    num_cvs = len(minmax[0])  # number of CVs recorded in each observation
    # Min-max reduce each CV into [0, 1]; the fitted RC coefficients therefore refer to reduced CV values
    reduced_A = [[(A_data[jj][ii] - minmax[0][ii]) / (minmax[1][ii] - minmax[0][ii]) for ii in range(num_cvs)] for jj in range(NA)]
    reduced_B = [[(B_data[jj][ii] - minmax[0][ii]) / (minmax[1][ii] - minmax[0][ii]) for ii in range(num_cvs)] for jj in range(NB)]
    if qdot == 'present' or qdot == 'ignore':
        # Each line is assumed to hold the CVs followed by one rate-of-change ("q") term per CV
        if not num_cvs % 2 == 0:
            raise RuntimeError('likelihood maximization was attempted with input file: ' + input_file + ' and '
                               'include_qdot (q) = True, but this input file has an odd number of entries per line. Are'
                               ' you sure it includes rate-of-change data?')
        num_cvs = int(num_cvs / 2)
    if two_line_test and not quiet:
        print('Two line test requires at least five optimizations, so there will be five progress bars before testing.')
    # Prepare for and then enter optimization loop
    termination = False  # initialize primary termination criterion flag
    termination_2 = False  # additional termination flag for use with qdot = 'present', to perform final optimization
    reached_maximum = False  # indicates whether the maximum number of allowed dimensions has been reached by two_line_test
    two_line_result = -1  # initialize current model dimensionality for two_line_test
    cv_combs = [[]]  # initialize list of CV combinations to iterate through
    results = []  # initialize for two_line_test
    while not termination and len(cv_combs[0]) <= N:
        # Initialize current best result
        current_best = [argparse.Namespace(), [0], [], []]
        current_best[0].fun = math.inf
        # Assemble list of RCs to optimize
        if not fixed == [None] and len(fixed) == dims:
            cv_combs = [fixed]
        elif running or two_line_test:
            cv_combs = [fixed + [new] for new in range(1, num_cvs + 1) if (not new in fixed) and (not new in skip)]
        else:
            cv_combs = [comb for comb in itertools.combinations(range(1, num_cvs + 1), dims) if (fixed == [None] or set(fixed).issubset(comb)) and (skip == [None] or not any([skipped in comb for skipped in skip]))]
        if qdot == 'present' and not termination_2:
            # Inertial likelihood maximization: pair each candidate CV index with its rate-of-change
            # term, which sits num_cvs entries later on each input line
            cv_combs_temp = cv_combs
            cv_combs = []
            for comb in cv_combs_temp:
                cv_combs.append([])
                for item in comb:
                    cv_combs[-1].append(item)
                    cv_combs[-1].append(item + num_cvs)
        # Perform optimization
        start_params = [0 for null in range(len(cv_combs[0]) + 1)]  # + 1 for constant term
        count = 0
        count_to = len(cv_combs)
        update_progress(0, 'Optimizing ' + str(count_to) + ' combination(s) of CVs', quiet=quiet)
        speed_data = [0,0]  # [mean seconds per optimization, number of optimizations timed]
        for comb in cv_combs:
            t = time.time()
            this_A = []
            this_B = []
            for index in comb:  # produce k-by-len(A_data) matrices (list of lists) for the selected CVs
                try:
                    this_A.append([obs[index - 1] for obs in reduced_A])
                except TypeError:
                    print(comb)
                    print(index)
                    raise RuntimeError('user-defined')
                this_B.append([obs[index - 1] for obs in reduced_B])
            this_A = list(map(list, zip(*this_A)))  # transpose the matrices to get desired format
            this_B = list(map(list, zip(*this_B)))
            this_result = optimize.minimize(objective_function, numpy.asarray(start_params), (this_A, this_B),
                                            method='BFGS', options={"disp": False, "maxiter": 20000 * (len(comb) + 1)})  # try SR1?
            if this_result.fun < current_best[0].fun:
                current_best = [this_result, comb, this_A, this_B]
            this_speed = time.time() - t
            # Running mean of per-optimization wall time, used for the progress bar's ETA
            speed_data = [(speed_data[1] * speed_data[0] + this_speed) / (speed_data[1] + 1), speed_data[1] + 1]
            count += 1
            eta = (count_to - count) * speed_data[0]
            update_progress(count / count_to, 'Optimizing ' + str(count_to) + ' combination(s) of CVs', eta, quiet=quiet)
        # Update fixed and results parameters as needed
        if two_line_test:
            results.append(current_best)
        if running or two_line_test:
            fixed = current_best[1]
            if qdot == 'present':
                # NOTE(review): removing from 'fixed' while iterating over it skips the element after each
                # removal; this works here only because qdot terms alternate with their parent CVs in the
                # list as built above — fragile if that ordering ever changes
                for item in fixed:
                    if item > num_cvs:  # remove qdot terms from fixed
                        fixed.remove(item)
        # Check termination criteria
        if not running and not two_line_test:
            termination = True
        elif running and not two_line_test:
            if int(len(current_best[1])) == running:
                termination = True
        elif two_line_test and not termination_2:
            if len(results) >= 5:  # can only confidently check for convergence with at least 5 points
                two_line_result = two_line_test_func([result[0] for result in results], plots, two_line_threshold)
                if two_line_result >= 0:
                    termination = True
                    current_best = results[two_line_result]
        if two_line_test and len(cv_combs[0]) == information_error_max_dims and not termination_2:
            termination = True
            reached_maximum = True
            current_best = results[-1]
        if termination_2:
            termination = True
        if qdot == 'present' and termination and not termination_2:
            # Inertial LMAX: run one final optimization over the chosen CVs with their qdot terms
            # stripped out, so the reported RC contains no rate-of-change terms
            termination = False
            termination_2 = True
            fixed = current_best[1]
            for item in fixed:
                if item > num_cvs:  # remove qdot terms from fixed
                    fixed.remove(item)
            dims = len(fixed)
    if two_line_test and (two_line_result < 0 and not reached_maximum):  # ran out of CVs to append and two_line_test never passed
        err = RuntimeError('The two_line_test termination criterion was never satisfied even after including every '
                           'candidate CV in the model reaction coordinate.\nThis almost certainly indicates that either'
                           ' one or more key CVs are absent from the aimless shooting output file supplied, or that not'
                           ' enough unimportant CVs were included to give context to the important ones. Either way you'
                           ' should add more CVs to the list.\nThis error can by bypassed by running lmax.py in a '
                           'directory containing a settings.pkl file with the line "information_error_override = True" '
                           '(without quotes). If you did supply this setting, then you are seeing this message because '
                           'the settings.pkl file could not be found.')
        # information_error_override is only bound if settings.pkl was loaded above; a NameError here
        # means it was absent, in which case the error is raised
        try:
            if information_error_override:
                pass
            else:
                raise err
        except NameError:
            raise err
    # Calculate hess and jaco using the model in current_best (current_best[2] and [3] are corresponding this_A and this_B)
    l_objective_function = lambda x: objective_function(x, current_best[2], current_best[3])
    hess = numdifftools.Hessian(l_objective_function)(current_best[0].x)
    # jaco has to be a sum of the jacobian transpose times the jacobian over each individual observation in the data
    if not quiet:
        count = 0
        update_progress(0, 'Calculating mean information error')
        total_len = len(current_best[2]) + len(current_best[3])
    jaco = 0
    for this_A in current_best[2]:
        # The lambda is evaluated immediately within this iteration, so the loop-variable binding is safe
        l_objective_function = lambda x: objective_function(x, [this_A], [])
        this_jaco = numdifftools.Jacobian(l_objective_function)(current_best[0].x)
        jaco += numpy.matmul(numpy.transpose(this_jaco), this_jaco)
        if not quiet:
            count += 1
            update_progress(count/total_len, 'Calculating mean information error')
    for this_B in current_best[3]:
        l_objective_function = lambda x: objective_function(x, [], [this_B])
        this_jaco = numdifftools.Jacobian(l_objective_function)(current_best[0].x)
        jaco += numpy.matmul(numpy.transpose(this_jaco), this_jaco)
        if not quiet:
            count += 1
            update_progress(count/total_len, 'Calculating mean information error')
    V = numpy.matmul(numpy.matmul(numpy.linalg.inv(numpy.negative(hess)), jaco), numpy.linalg.inv(numpy.negative(hess)))  # Godambe Information
    weights = [0] + [1 / (len(V[0]) - 1) for null in range(len(V[0]) - 1)]  # weights for mean excluding constant term
    mean_std = numpy.inner(weights, [numpy.sqrt(item) for item in numpy.diag(V)])  # mean of estimated standard errors
    # Return output in desired format
    rc_string = str('%.3f' % current_best[0].x[0]) + ' + ' + ' + '.join(['%.3f' % current_best[0].x[i+1] + '*CV' +
                                                                         str(current_best[1][i]) for i in range(len(current_best[1]))])
    output_string = 'Likelihood maximization complete!\n' \
                    'The optimized reaction coordinate (with CVs indexed from 1) is: ' + rc_string + '\n' \
                    'The negative log likelihood of this model is: ' + '%.3f' % current_best[0].fun + '\n' \
                    'The mean information error for this model is: ' + '%.3f' % mean_std
    if output_file:
        open(output_file, 'w').write(output_string)
    else:
        print(output_string)
    ## Deprecated development tool
    # if not os.path.exists('rc_stderr.out'):
    #     open('rc_stderr.out', 'w').close()
    # open('rc_stderr.out', 'a').write(str(input_file) + ' ' + str(mean_std) + '\n')
    if plots:
        # Build the committor sigmoid histogram: bin RC values and measure, per bin, the fraction of
        # observations that committed to the forward (B) basin
        A_results = []
        for obs in current_best[2]:  # iterate over A observations
            A_results.append(eval_rc(current_best[0].x, obs))
        B_results = []
        for obs in current_best[3]:  # iterate over B observations
            B_results.append(eval_rc(current_best[0].x, obs))
        hist_result = numpy.histogram(A_results + B_results, hist_bins)  # this step just to bin, not the final histogram
        rc_values = []  # initialize results list
        probs = []  # initialize results list
        for bin_index in range(len(hist_result[0])):
            A_count = 0
            B_count = 0
            # NOTE(review): the strict '<' on the upper edge means an observation exactly equal to the
            # topmost bin edge (the maximum RC value) is counted in no bin, unlike numpy.histogram's
            # inclusive top bin — confirm this is intended
            for result in A_results:
                if hist_result[1][bin_index] <= result < hist_result[1][bin_index + 1]:
                    A_count += 1
            for result in B_results:
                if hist_result[1][bin_index] <= result < hist_result[1][bin_index + 1]:
                    B_count += 1
            if A_count or B_count:  # if there is data in this bin
                count_ratio = B_count / (A_count + B_count)
            else:
                raise RuntimeError('attempted to build sigmoid plot, but one or more histogram bins is empty. This '
                                   'may indicate insufficient data in the input file. All other results from this call '
                                   'to lmax.py have been written, but proceed with caution, and consider trying again '
                                   'with a smaller value given for --hist_bins (the default is 10). This error can also'
                                   ' occur when one or more of the CVs making up the final RC takes on discrete values '
                                   'instead of continuous ones.')
            rc_values.append(numpy.mean([hist_result[1][bin_index + 1], hist_result[1][bin_index]]))
            probs.append(count_ratio)
        fig = plt.figure()  # initialize matplotlib figure
        ax = fig.add_subplot(111)  # add axes to the figure
        plt.ylabel('Probability of Commitment to Forward Basin', weight='bold')
        plt.xlabel('Reaction Coordinate', weight='bold')
        ax.bar(rc_values, probs, width=0.9*(rc_values[1] - rc_values[0]), color='#00274C')
        ax.plot(rc_values, (1 + erf(numpy.array([value for value in rc_values])))/2, color='#FFCB05', linewidth=3)
        ax.legend(['Ideal', 'Observed'])
        print('Committor sigmoid histogram data:')
        print(' RC values: ' + str(rc_values))
        print(' Observed probabilities of commitment to the forward basin: ' + str(probs))
        print(' Ideal committor sigmoid: ' + str(list((1 + erf(numpy.array([value for value in rc_values])))/2)))
        fig.canvas.draw()
        plt.show()
if __name__ == "__main__":
    # Build the command-line interface; argument order is preserved so --help output is unchanged
    cli = argparse.ArgumentParser(description='Perform LMAX on the given input data')
    cli.add_argument('-i', metavar='input_file', type=str, nargs=1, default=['as_decorr.out'],
                     help='input filename (output from aimless shooting). Default=as_decorr.out')
    cli.add_argument('-k', metavar='dimensionality', type=int, nargs=1, default=[1],
                     help='number of CVs to include in RC. Default=1')
    cli.add_argument('-f', metavar='fixed', type=int, nargs='*', default=[None],
                     help='CVs to require inside the RC. Default=none')
    cli.add_argument('-s', metavar='skip', type=int, nargs='*', default=[None],
                     help='CVs to skip (not consider in RC). Default=none')
    cli.add_argument('-q', metavar='include_qdot', type=str, nargs=1, default=['present'],
                     help='valid options are: "present", "absent", and "ignore" (quotes excluded). If "present" or '
                          '"ignore", the input file is assumed to include rate-of-change ("q") data for each CV '
                          '(formatted as in e.g., "A <- CV0 CV1 q0 q1"); in the former case, q terms will be used to'
                          'select the RC (but will not appear in the final RC), implementing inertial likelihood '
                          'maximization. In the latter, rate of change terms are not used. Finally, if "absent", the'
                          ' q data will be assumed not to be present in the input file at all. Default=present')
    cli.add_argument('-r', metavar='running', type=int, nargs=1, default=[0],
                     help='if > 0, runs from k = 1 to "running" using the previously obtained k - 1 results as the '
                          'argument for f, ignoring the arguments passed for k and f. Default=0')
    cli.add_argument('-o', metavar='output_file', type=str, nargs=1, default=[''],
                     help='Prints output to a new file whose name is given with this argument, instead of directly '
                          'to the terminal. The file will be overwritten if it exists. Default=none')
    cli.add_argument('--quiet', action='store_true',
                     help='If this option is given, progress messages outputted to the terminal are suppressed and '
                          'only the final result is written (either to the terminal or the output file.)')
    cli.add_argument('--two_line_test', action='store_true', default=False,
                     help='If this option is given, arguments passed for k, f, and r are ignored, and the RC is '
                          'chosen based on the two-line method (see documentation).')
    cli.add_argument('--plots', action='store_true', default=False,
                     help='If True, plots the final fit between the model and data committor sigmoid. '
                          'If this option is given alongside two_line_test, gnuplot will be used to write plots to '
                          'the terminal during evaluations of the two_line_test termination criterion (if it is '
                          'installed). The sigmoid data is also printed to the terminal or output file.')
    cli.add_argument('--two_line_threshold', metavar='two_line_threshold', type=float, nargs=1, default=[0.5],
                     help='If this option is given alongside two_line_test, sets the maximum ratio of slopes in the'
                          'two-line test. See the documentation for two_line_test for details. Default=0.5')
    cli.add_argument('--hist_bins', metavar='hist_bins', type=int, nargs=1, default=[10],
                     help='If this option is given alongside plots, sets the number of reaction coordinate bins for'
                          'the sigmoid committor histogram. Production of the histogram will fail if any of the '
                          'bins have zero samples in them, which is more likely for larger values of hist_bins. '
                          'Default = 10')

    cli_args = vars(cli.parse_args())  # retrieve arguments as a dictionary object

    # Suppress numpy.log and numdifftools/limits.py warnings that occur frequently during normal operation
    for _warning_message in ('invalid value encountered in less',
                             'invalid value encountered in greater',
                             'divide by zero encountered in log',
                             'invalid value encountered in double_scalars',
                             'invalid value encountered in subtract',
                             'divide by zero encountered in double_scalars'):
        warnings.filterwarnings('ignore', category=RuntimeWarning, message=_warning_message)

    main(**cli_args)
| [
"scipy.stats.linregress",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"math.floor",
"numpy.array",
"argparse.Namespace",
"os.path.exists",
"numpy.histogram",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.max",
"numpy.min",
"sys.stdout.flush",
... | [((2677, 2699), 'sys.stdout.write', 'sys.stdout.write', (['text'], {}), '(text)\n', (2693, 2699), False, 'import sys\n'), ((2704, 2722), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2720, 2722), False, 'import sys\n'), ((26829, 26904), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform LMAX on the given input data"""'}), "(description='Perform LMAX on the given input data')\n", (26852, 26904), False, 'import argparse\n'), ((31142, 31250), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""invalid value encountered in less"""'}), "('ignore', category=RuntimeWarning, message=\n 'invalid value encountered in less')\n", (31165, 31250), False, 'import warnings\n'), ((31250, 31361), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""invalid value encountered in greater"""'}), "('ignore', category=RuntimeWarning, message=\n 'invalid value encountered in greater')\n", (31273, 31361), False, 'import warnings\n'), ((31361, 31469), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""divide by zero encountered in log"""'}), "('ignore', category=RuntimeWarning, message=\n 'divide by zero encountered in log')\n", (31384, 31469), False, 'import warnings\n'), ((31469, 31587), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""invalid value encountered in double_scalars"""'}), "('ignore', category=RuntimeWarning, message=\n 'invalid value encountered in double_scalars')\n", (31492, 31587), False, 'import warnings\n'), ((31587, 31699), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""invalid value encountered in subtract"""'}), "('ignore', category=RuntimeWarning, message=\n 'invalid value 
encountered in subtract')\n", (31610, 31699), False, 'import warnings\n'), ((31699, 31818), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""divide by zero encountered in double_scalars"""'}), "('ignore', category=RuntimeWarning, message=\n 'divide by zero encountered in double_scalars')\n", (31722, 31818), False, 'import warnings\n'), ((5691, 5767), 'scipy.stats.linregress', 'stats.linregress', (['first_segment', '[results[i - 1].fun for i in first_segment]'], {}), '(first_segment, [results[i - 1].fun for i in first_segment])\n', (5707, 5767), False, 'from scipy import stats\n'), ((5783, 5861), 'scipy.stats.linregress', 'stats.linregress', (['second_segment', '[results[i - 1].fun for i in second_segment]'], {}), '(second_segment, [results[i - 1].fun for i in second_segment])\n', (5799, 5861), False, 'from scipy import stats\n'), ((10230, 10256), 'os.path.exists', 'os.path.exists', (['input_file'], {}), '(input_file)\n', (10244, 10256), False, 'import os\n'), ((21550, 21592), 'numdifftools.Hessian', 'numdifftools.Hessian', (['l_objective_function'], {}), '(l_objective_function)\n', (21570, 21592), False, 'import numdifftools\n'), ((24342, 24391), 'numpy.histogram', 'numpy.histogram', (['(A_results + B_results)', 'hist_bins'], {}), '(A_results + B_results, hist_bins)\n', (24357, 24391), False, 'import numpy\n'), ((25932, 25944), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25942, 25944), True, 'import matplotlib.pyplot as plt\n'), ((26061, 26132), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability of Commitment to Forward Basin"""'], {'weight': '"""bold"""'}), "('Probability of Commitment to Forward Basin', weight='bold')\n", (26071, 26132), True, 'import matplotlib.pyplot as plt\n'), ((26141, 26189), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Reaction Coordinate"""'], {'weight': '"""bold"""'}), "('Reaction Coordinate', weight='bold')\n", (26151, 26189), True, 'import 
matplotlib.pyplot as plt\n'), ((26776, 26786), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26784, 26786), True, 'import matplotlib.pyplot as plt\n'), ((2074, 2096), 'math.floor', 'math.floor', (['(eta / 3600)'], {}), '(eta / 3600)\n', (2084, 2096), False, 'import math\n'), ((2116, 2143), 'math.floor', 'math.floor', (['(eta % 3600 / 60)'], {}), '(eta % 3600 / 60)\n', (2126, 2143), False, 'import math\n'), ((3906, 3937), 'numpy.inner', 'numpy.inner', (['params[1:]', 'A_data'], {}), '(params[1:], A_data)\n', (3917, 3937), False, 'import numpy\n'), ((6519, 6556), 'numpy.sqrt', 'numpy.sqrt', (['(y_diff ** 2 + x_diff ** 2)'], {}), '(y_diff ** 2 + x_diff ** 2)\n', (6529, 6556), False, 'import numpy\n'), ((13935, 13950), 'numpy.min', 'numpy.min', (['item'], {}), '(item)\n', (13944, 13950), False, 'import numpy\n'), ((13973, 13988), 'numpy.max', 'numpy.max', (['item'], {}), '(item)\n', (13982, 13988), False, 'import numpy\n'), ((15869, 15889), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '()\n', (15887, 15889), False, 'import argparse\n'), ((17129, 17140), 'time.time', 'time.time', ([], {}), '()\n', (17138, 17140), False, 'import time\n'), ((22036, 22079), 'numdifftools.Jacobian', 'numdifftools.Jacobian', (['l_objective_function'], {}), '(l_objective_function)\n', (22057, 22079), False, 'import numdifftools\n'), ((22128, 22154), 'numpy.transpose', 'numpy.transpose', (['this_jaco'], {}), '(this_jaco)\n', (22143, 22154), False, 'import numpy\n'), ((22427, 22470), 'numdifftools.Jacobian', 'numdifftools.Jacobian', (['l_objective_function'], {}), '(l_objective_function)\n', (22448, 22470), False, 'import numdifftools\n'), ((22519, 22545), 'numpy.transpose', 'numpy.transpose', (['this_jaco'], {}), '(this_jaco)\n', (22534, 22545), False, 'import numpy\n'), ((22785, 22805), 'numpy.negative', 'numpy.negative', (['hess'], {}), '(hess)\n', (22799, 22805), False, 'import numpy\n'), ((22987, 23003), 'numpy.sqrt', 'numpy.sqrt', (['item'], {}), '(item)\n', (22997, 
23003), False, 'import numpy\n'), ((2167, 2194), 'math.floor', 'math.floor', (['(eta % 3600 % 60)'], {}), '(eta % 3600 % 60)\n', (2177, 2194), False, 'import math\n'), ((3839, 3847), 'scipy.special.erf', 'erf', (['arg'], {}), '(arg)\n', (3842, 3847), False, 'from scipy.special import erf\n'), ((4053, 4084), 'numpy.inner', 'numpy.inner', (['params[1:]', 'B_data'], {}), '(params[1:], B_data)\n', (4064, 4084), False, 'import numpy\n'), ((4178, 4209), 'numpy.inner', 'numpy.inner', (['params[1:]', 'A_data'], {}), '(params[1:], A_data)\n', (4189, 4209), False, 'import numpy\n'), ((4235, 4266), 'numpy.inner', 'numpy.inner', (['params[1:]', 'B_data'], {}), '(params[1:], B_data)\n', (4246, 4266), False, 'import numpy\n'), ((7955, 8004), 'numpy.asarray', 'numpy.asarray', (['[result.fun for result in results]'], {}), '([result.fun for result in results])\n', (7968, 8004), False, 'import numpy\n'), ((8032, 8057), 'numpy.asarray', 'numpy.asarray', (['points1[0]'], {}), '(points1[0])\n', (8045, 8057), False, 'import numpy\n'), ((8059, 8084), 'numpy.asarray', 'numpy.asarray', (['points1[1]'], {}), '(points1[1])\n', (8072, 8084), False, 'import numpy\n'), ((8172, 8197), 'numpy.asarray', 'numpy.asarray', (['points2[0]'], {}), '(points2[0])\n', (8185, 8197), False, 'import numpy\n'), ((8199, 8224), 'numpy.asarray', 'numpy.asarray', (['points2[1]'], {}), '(points2[1])\n', (8212, 8224), False, 'import numpy\n'), ((17825, 17852), 'numpy.asarray', 'numpy.asarray', (['start_params'], {}), '(start_params)\n', (17838, 17852), False, 'import numpy\n'), ((18149, 18160), 'time.time', 'time.time', ([], {}), '()\n', (18158, 18160), False, 'import time\n'), ((22738, 22758), 'numpy.negative', 'numpy.negative', (['hess'], {}), '(hess)\n', (22752, 22758), False, 'import numpy\n'), ((23016, 23029), 'numpy.diag', 'numpy.diag', (['V'], {}), '(V)\n', (23026, 23029), False, 'import numpy\n'), ((25807, 25877), 'numpy.mean', 'numpy.mean', (['[hist_result[1][bin_index + 1], hist_result[1][bin_index]]'], 
{}), '([hist_result[1][bin_index + 1], hist_result[1][bin_index]])\n', (25817, 25877), False, 'import numpy\n'), ((8457, 8506), 'numpy.asarray', 'numpy.asarray', (['[result.fun for result in results]'], {}), '([result.fun for result in results])\n', (8470, 8506), False, 'import numpy\n'), ((26317, 26360), 'numpy.array', 'numpy.array', (['[value for value in rc_values]'], {}), '([value for value in rc_values])\n', (26328, 26360), False, 'import numpy\n'), ((26690, 26733), 'numpy.array', 'numpy.array', (['[value for value in rc_values]'], {}), '([value for value in rc_values])\n', (26701, 26733), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Splunk specific dependencies
import sys, os
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger
# Command specific dependencies
import requests
from requests.auth import HTTPDigestAuth
import json
# TODOs
# - Switch paramMap over to payload -> but keep it backward compatible
# - Switch error handling over to raising specific exceptions (raise XY)
# - https://www.tutorialspoint.com/python/python_exceptions.htm
# - Add logging via logger
# - Do not delete older builds
@Configuration(type='reporting')
class curlCommand(GeneratingCommand):
    """Generating command that performs an HTTP GET/POST (curl-like) and
    yields the response as a single search result event.

    Options:
        url        -- target URL (required)
        method     -- 'get' or 'post' (default 'get')
        payload    -- request parameters/body as a JSON-ish string
        output     -- 'json' to parse the response, anything else for raw
        timeout    -- request timeout in seconds (default 10)
        auth       -- 'method,user,pass' where method is basic or digest
        headers    -- request headers as a JSON-ish string
        proxies    -- 'http-proxy,https-proxy'
        unsetProxy -- drop the HTTP/HTTPS proxy environment variables first
        paramMap   -- deprecated predecessor of payload
    """
    url = Option(require=True)
    method = Option(require=False, default='get')
    payload = Option(require=False)
    output = Option(require=False, default='json')
    timeout = Option(require=False, default=10, validate=validators.Integer())
    auth = Option(require=False)
    headers = Option(require=False)
    proxies = Option(require=False)
    unsetProxy = Option(require=False, validate=validators.Boolean())
    # Deprecated
    paramMap = Option(require=False)
    # /Deprecated
    def generate(self):
        """Perform the configured HTTP request and yield one event.

        Yields the parsed JSON response (output=json), the raw response
        body otherwise, or an {"Error:": ...} event when the request fails.
        """
        url = self.url
        method = self.method
        payload = self.parseJSONStrToJSON(self.payload) if self.payload != None else None
        output = self.output
        timeout = self.timeout if self.timeout != None else None
        auth = self.parseAuth(self.auth) if self.auth != None else None
        headers = self.parseJSONStrToJSON(self.headers) if self.headers != None else None
        proxies = self.parseProxies(self.proxies) if self.proxies != None else None
        unsetProxy = self.unsetProxy
        # Deprecated: fall back to paramMap when payload is not given
        paramMap = self.parseParamMap(self.paramMap) if self.paramMap != None else None
        if payload == None:
            payload = paramMap
        # /Deprecated
        # Unset proxy environment variables, if unsetProxy = True
        if unsetProxy == True:
            if 'HTTP' in os.environ.keys():
                del os.environ['HTTP']
            if 'HTTPS' in os.environ.keys():
                del os.environ['HTTPS']
        # Load data from REST API
        event = {}
        try:
            if method == 'get':
                request = requests.get(
                    url,
                    params=payload,
                    auth=auth,
                    headers=headers,
                    timeout=timeout,
                    proxies=proxies
                )
            elif method == 'post':
                request = requests.post(
                    url,
                    data=payload,
                    auth=auth,
                    headers=headers,
                    timeout=timeout,
                    proxies=proxies
                )
            else:
                raise ValueError('Only get and post are valid methods.')
            # Choose right output format
            if output == 'json':
                event = request.json()
            else:
                # NOTE: the field name 'reponse' (sic) is kept for backward
                # compatibility with existing searches that reference it.
                event = {'reponse': request.content}
        except requests.exceptions.RequestException as err:
            event = ({"Error:": err})
        yield event
    ''' HELPERS '''
    '''
    Convert a JSON-ish string into a dict
    :jsonStr string: JSON string that may use single quotes
    :return dict
    '''
    def parseJSONStrToJSON(self, jsonStr):
        # Replace single quotes with double quotes for valid json.
        # NOTE(review): this breaks values that legitimately contain
        # apostrophes; acceptable for simple option strings.
        return json.loads(
            jsonStr.replace('\'', '"')
        )
    '''
    Parse proxy option into python dict
    :proxies string: Comma separated proxies -> http,https
    :return dict
    '''
    def parseProxies(self, proxies):
        proxies = proxies.split(',')
        return {
            'http': proxies[0].strip(),
            'https': proxies[1].strip()
        }
    '''
    Parse auth option into the object requests expects for the given method
    :auth string: Comma separated auth params -> method,user,pass
    :return tuple/object/bool
    '''
    def parseAuth(self, auth):
        # Split from the LEFT so a password containing commas stays intact
        # (rsplit would cut the password apart instead).
        auth = auth.split(',', 2)
        if len(auth) < 3:
            return False
        method = auth[0].strip().lower()
        # Use correct auth method
        if method == 'basic':
            return (auth[1].strip(), auth[2].strip())
        elif method == 'digest':
            # Bug fix: previously the method name (auth[0]) was passed as
            # the user name; user/password are auth[1]/auth[2].
            return HTTPDigestAuth(auth[1].strip(), auth[2].strip())
        # Return false in case of no valid method
        return False
    ''' DEPRECATED '''
    '''
    Parse paramMap into a query string
    :paramMap string: Pattern 'foo=bar, hello=world, ...'; '\,' escapes a
        literal comma inside a value
    :return string
    '''
    def parseParamMap(self, paramMap):
        # Protect escaped commas with a placeholder BEFORE splitting, so
        # '\,' survives as a literal comma inside a parameter value.
        placeholder = '\x00'
        params = paramMap.replace(r'\,', placeholder).split(',')
        return '&'.join(
            param.replace(placeholder, ',').strip() for param in params
        )
dispatch(curlCommand, sys.argv, sys.stdin, sys.stdout, __name__) | [
"os.environ.keys",
"requests.post",
"splunklib.searchcommands.validators.Boolean",
"requests.get",
"splunklib.searchcommands.dispatch",
"splunklib.searchcommands.validators.Integer",
"splunklib.searchcommands.Option",
"splunklib.searchcommands.Configuration"
] | [((565, 596), 'splunklib.searchcommands.Configuration', 'Configuration', ([], {'type': '"""reporting"""'}), "(type='reporting')\n", (578, 596), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((4507, 4571), 'splunklib.searchcommands.dispatch', 'dispatch', (['curlCommand', 'sys.argv', 'sys.stdin', 'sys.stdout', '__name__'], {}), '(curlCommand, sys.argv, sys.stdin, sys.stdout, __name__)\n', (4515, 4571), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((650, 670), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(True)'}), '(require=True)\n', (656, 670), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((686, 722), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)', 'default': '"""get"""'}), "(require=False, default='get')\n", (692, 722), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((738, 759), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)'}), '(require=False)\n', (744, 759), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((775, 812), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)', 'default': '"""json"""'}), "(require=False, default='json')\n", (781, 812), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((908, 929), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)'}), '(require=False)\n', (914, 929), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, 
splunklib_logger as logger\n'), ((945, 966), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)'}), '(require=False)\n', (951, 966), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((982, 1003), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)'}), '(require=False)\n', (988, 1003), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((1105, 1126), 'splunklib.searchcommands.Option', 'Option', ([], {'require': '(False)'}), '(require=False)\n', (1111, 1126), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((871, 891), 'splunklib.searchcommands.validators.Integer', 'validators.Integer', ([], {}), '()\n', (889, 891), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((1050, 1070), 'splunklib.searchcommands.validators.Boolean', 'validators.Boolean', ([], {}), '()\n', (1068, 1070), False, 'from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger\n'), ((1947, 1964), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (1962, 1964), False, 'import sys, os\n'), ((2017, 2034), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (2032, 2034), False, 'import sys, os\n'), ((2177, 2277), 'requests.get', 'requests.get', (['url'], {'params': 'payload', 'auth': 'auth', 'headers': 'headers', 'timeout': 'timeout', 'proxies': 'proxies'}), '(url, params=payload, auth=auth, headers=headers, timeout=\n timeout, proxies=proxies)\n', (2189, 2277), False, 'import requests\n'), ((2390, 2489), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'auth': 'auth', 'headers': 'headers', 'timeout': 'timeout', 
'proxies': 'proxies'}), '(url, data=payload, auth=auth, headers=headers, timeout=\n timeout, proxies=proxies)\n', (2403, 2489), False, 'import requests\n')] |
"""Tests for RandoPony admin views and functionality.
"""
from datetime import datetime
import unittest
from unittest.mock import patch
from pyramid import testing
from pyramid_mailer import get_mailer
from sqlalchemy import create_engine
from randopony.models.meta import (
Base,
DBSession,
)
class TestCoreAdminViews(unittest.TestCase):
    """Unit tests for core admin interface views.
    """
    # Helper: resolve the view class lazily so import errors surface in the
    # tests rather than at module import time.
    def _get_target_class(self):
        from randopony.views.admin.core import AdminViews
        return AdminViews
    # Helper: instantiate the view class under test.
    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)
    def setUp(self):
        # Fresh Pyramid test config and an in-memory SQLite database per test.
        self.config = testing.setUp()
        engine = create_engine('sqlite://')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
    def tearDown(self):
        # Discard the scoped session and the Pyramid test config.
        DBSession.remove()
        testing.tearDown()
    def test_home(self):
        """admin home view has expected template variables
        """
        from randopony import __pkg_metadata__ as version
        request = testing.DummyRequest()
        admin = self._make_one(request)
        tmpl_vars = admin.home()
        self.assertEqual(
            tmpl_vars, {'version': version.number + version.release})
    def test_wranglers_list(self):
        """admin wranglers view has expected template variables
        """
        from randopony import __pkg_metadata__ as version
        request = testing.DummyRequest()
        request.matchdict['list'] = 'wranglers'
        admin = self._make_one(request)
        tmpl_vars = admin.items_list()
        self.assertEqual(
            tmpl_vars['version'], version.number + version.release)
        self.assertEqual(tmpl_vars['list'], 'wranglers')
        self.assertEqual(tmpl_vars['list_title'], 'Pony Wranglers')
        self.assertEqual(tmpl_vars['action'], 'edit')
    def test_wranglers_list_order(self):
        """admin wranglers list is alpha ordered by email
        """
        from randopony.models import Administrator
        admin1 = Administrator(email='<EMAIL>', password_hash='<PASSWORD>')
        admin2 = Administrator(email='<EMAIL>', password_hash='<PASSWORD>')
        DBSession.add_all((admin1, admin2))
        request = testing.DummyRequest()
        request.matchdict['list'] = 'wranglers'
        admin = self._make_one(request)
        tmpl_vars = admin.items_list()
        # 'items' is a query object, hence the .all() call.
        admins = [a.email for a in tmpl_vars['items'].all()]
        self.assertEqual(
            admins, '<EMAIL> <EMAIL>'.split())
    def test_delete_cancel(self):
        """admin delete cancel leaves item in database
        """
        from randopony.models import Administrator
        admin = Administrator(email='<EMAIL>', password_hash='<PASSWORD>')
        DBSession.add(admin)
        self.config.add_route('admin.list', '/admin/{list}/')
        # POSTing 'cancel' must leave the wrangler untouched.
        request = testing.DummyRequest(post={'cancel': 'cancel'})
        request.matchdict['list'] = 'wranglers'
        request.matchdict['item'] = '<EMAIL>'
        admin = self._make_one(request)
        admin.delete()
        wrangler = DBSession.query(Administrator).first()
        self.assertEqual(wrangler.email, '<EMAIL>')
    def test_delete_wrangler_confirmation(self):
        """admin delete confirmation view for wrangler has exp template vars
        """
        from randopony import __pkg_metadata__ as version
        self.config.add_route('admin.list', '/admin/{list}/')
        # GET (no 'delete'/'cancel' in POST) renders the confirmation page.
        request = testing.DummyRequest()
        request.matchdict['list'] = 'wranglers'
        request.matchdict['item'] = '<EMAIL>'
        admin = self._make_one(request)
        tmpl_vars = admin.delete()
        self.assertEqual(
            tmpl_vars,
            {
                'version': version.number + version.release,
                'list': 'wranglers',
                'item': '<EMAIL>',
                'item_type': 'administrator',
            })
    def test_delete_wrangler(self):
        """admin delete for wrangler deletes item from database
        """
        from sqlalchemy.orm.exc import NoResultFound
        from randopony.models import Administrator
        admin = Administrator(email='<EMAIL>', password_hash='<PASSWORD>')
        DBSession.add(admin)
        self.config.add_route('admin.list', '/admin/{list}/')
        request = testing.DummyRequest(post={'delete': 'delete'})
        request.matchdict['list'] = 'wranglers'
        request.matchdict['item'] = '<EMAIL>'
        admin = self._make_one(request)
        admin.delete()
        query = DBSession.query(Administrator)
        # Row should be gone, so .one() raises.
        with self.assertRaises(NoResultFound):
            query.filter_by(email='<EMAIL>').one()
    def test_delete_brevet(self):
        """admin delete for brevet deletes item from database
        """
        from sqlalchemy.orm.exc import NoResultFound
        from randopony.models import core
        from randopony.models import Brevet
        brevet = Brevet(
            region='LM',
            distance=200,
            date_time=datetime(2012, 11, 11, 7, 0, 0),
            route_name='11th Hour',
            start_locn='Bean Around the World Coffee, Lonsdale Quay, '
                '123 Carrie Cates Ct, North Vancouver',
            organizer_email='<EMAIL>',
        )
        DBSession.add(brevet)
        self.config.add_route('admin.list', '/admin/{list}/')
        request = testing.DummyRequest(post={'delete': 'delete'})
        request.matchdict['list'] = 'brevets'
        request.matchdict['item'] = str(brevet)
        # Freeze core.datetime.today() to before the brevet date —
        # presumably so the 2012-11-11 brevet counts as "current" during
        # the delete; confirm against Brevet.get_current's filter.
        with patch.object(core, 'datetime') as mock_datetime:
            mock_datetime.today.return_value = datetime(2012, 11, 1, 12, 55, 42)
            admin = self._make_one(request)
            admin.delete()
        with self.assertRaises(NoResultFound):
            Brevet.get_current().one()
class TestEmailToOrganizer(unittest.TestCase):
    """Unit tests for email_to_organizer admin function re: event URLs.
    """
    # Helper: lazily import and call the function under test.
    def _call_email_to_organizer(self, *args, **kwargs):
        from randopony.views.admin.core import email_to_organizer
        return email_to_organizer(*args, **kwargs)
    def setUp(self):
        from randopony.models import EmailAddress
        self.config = testing.setUp(
            settings={
                'mako.directories': 'randopony:templates',
            })
        # pyramid_mailer.testing provides a mock mailer that collects
        # messages in .outbox instead of sending them.
        self.config.include('pyramid_mailer.testing')
        self.config.include('pyramid_mako')
        # Routes referenced by the URLs in the organizer email.
        # NOTE(review): route name 'admin.populaires.view' maps to a
        # /admin/brevet/ path here — presumably intentional for this
        # fixture; verify against the app's real route config.
        self.config.add_route(
            'admin.populaires.view', '/admin/brevet/{item}')
        self.config.add_route(
            'brevet', '/brevets/{region}/{distance}/{date}')
        self.config.add_route(
            'brevet.rider_emails',
            '/brevets/{region}/{distance}/{date}/rider_emails/{uuid}')
        # In-memory SQLite database, fresh per test.
        engine = create_engine('sqlite://')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
        # Email address fixtures looked up by the function under test.
        from_randopony = EmailAddress(
            key='from_randopony',
            email='<EMAIL>',
        )
        admin_email = EmailAddress(
            key='admin_email',
            email='<EMAIL>',
        )
        DBSession.add_all((from_randopony, admin_email))
    def tearDown(self):
        DBSession.remove()
        testing.tearDown()
    def test_email_to_organizer_catches_missing_google_doc_id(self):
        """email_to_organizer return error flash if google_doc_id not set
        """
        from randopony.models import Brevet
        # Brevet deliberately created WITHOUT a google_doc_id.
        brevet = Brevet(
            region='VI',
            distance=200,
            date_time=datetime(2013, 3, 3, 7, 0),
            route_name='Chilly 200',
            start_locn='<NAME>, 3131 Millgrove St, Victoria',
            organizer_email='<EMAIL>',
            registration_end=datetime(2013, 3, 2, 12, 0),
        )
        DBSession.add(brevet)
        request = testing.DummyRequest()
        request.matchdict.update({
            'region': 'VI',
            'distance': '200',
            'date': '03Mar2013',
        })
        date = '03Mar2013'
        event_page_url = request.route_url(
            'brevet', region=brevet.region, distance=brevet.distance,
            date=date)
        rider_emails_url = request.route_url(
            'brevet.rider_emails', region=brevet.region,
            distance=brevet.distance, date=date, uuid=brevet.uuid)
        flash = self._call_email_to_organizer(
            request, brevet, event_page_url, rider_emails_url)
        self.assertEqual(
            flash, [
                'error',
                'Google Drive rider list must be created before email to '
                'organizer(s) can be sent'
            ])
    def test_email_to_organizer_sends_email(self):
        """email_to_organizer sends message & sets expected flash message
        """
        from randopony.models import Brevet
        brevet = Brevet(
            region='VI',
            distance=200,
            date_time=datetime(2013, 3, 3, 7, 0),
            route_name='<NAME>',
            start_locn='<NAME>, 3131 Millgrove St, Victoria',
            organizer_email='<EMAIL>',
            registration_end=datetime(2013, 3, 2, 12, 0),
            google_doc_id='spreadsheet:1234',
        )
        DBSession.add(brevet)
        request = testing.DummyRequest()
        request.matchdict.update({
            'region': 'VI',
            'distance': '200',
            'date': '03Mar2013',
        })
        date = '03Mar2013'
        event_page_url = request.route_url(
            'brevet', region=brevet.region, distance=brevet.distance,
            date=date)
        rider_emails_url = request.route_url(
            'brevet.rider_emails', region=brevet.region,
            distance=brevet.distance, date=date, uuid=brevet.uuid)
        mailer = get_mailer(request)
        flash = self._call_email_to_organizer(
            request, brevet, event_page_url, rider_emails_url)
        # Exactly one message captured by the testing mailer.
        self.assertEqual(len(mailer.outbox), 1)
        self.assertEqual(
            flash,
            ['success', 'Email sent to VI200 03Mar2013 organizer(s)'])
    def test_email_to_organizer_message(self):
        """email_to_organizer message has expected content
        """
        from randopony.models import (
            EmailAddress,
            Brevet,
        )
        brevet = Brevet(
            region='VI',
            distance=200,
            date_time=datetime(2013, 3, 3, 7, 0),
            route_name='Chilly 200',
            start_locn='<NAME>, 3131 Millgrove St, Victoria',
            organizer_email='<EMAIL>',
            registration_end=datetime(2013, 3, 2, 12, 0),
            google_doc_id='spreadsheet:123'
        )
        DBSession.add(brevet)
        request = testing.DummyRequest()
        request.matchdict.update({
            'region': 'VI',
            'distance': '200',
            'date': '03Mar2013',
        })
        date = '03Mar2013'
        event_page_url = request.route_url(
            'brevet', region=brevet.region, distance=brevet.distance,
            date=date)
        rider_emails_url = request.route_url(
            'brevet.rider_emails', region=brevet.region,
            distance=brevet.distance, date=date, uuid=brevet.uuid)
        mailer = get_mailer(request)
        self._call_email_to_organizer(
            request, brevet, event_page_url, rider_emails_url)
        msg = mailer.outbox[0]
        self.assertEqual(msg.subject, 'RandoPony URLs for VI200 03Mar2013')
        # Sender must match the from_randopony fixture added in setUp.
        from_randopony = (
            DBSession.query(EmailAddress)
            .filter_by(key='from_randopony').first().email)
        self.assertEqual(msg.sender, from_randopony)
        self.assertEqual(msg.recipients, ['<EMAIL>'])
        self.assertIn(
            'The URL is <http://example.com/brevets/VI/200/03Mar2013>.',
            msg.body)
        self.assertIn(
            'rider list URL is <https://spreadsheets.google.com/ccc?key=123>.',
            msg.body)
        self.assertIn(
            'email address list URL is <http://example.com/brevets/'
            'VI/200/03Mar2013/rider_emails/'
            'ba8e8e00-dd42-5c6c-9b30-b65ce9c8df26>.',
            msg.body)
        self.assertIn(
            'Pre-registration on the pony closes at 12:00 on 2013-03-02',
            msg.body)
        self.assertIn('send email to <<EMAIL>>.', msg.body)
    def test_email_to_organizer_multi_organizer(self):
        """email to organizer has expected to list for multi-organizer event
        """
        from randopony.models import Brevet
        # organizer_email holds a comma separated list of addresses.
        brevet = Brevet(
            region='VI',
            distance=200,
            date_time=datetime(2013, 3, 3, 7, 0),
            route_name='Chilly 200',
            start_locn='Chez Croy, 3131 Millgrove St, Victoria',
            organizer_email='<EMAIL>, <EMAIL>',
            registration_end=datetime(2013, 3, 2, 12, 0),
            google_doc_id='spreadsheet:1234'
        )
        DBSession.add(brevet)
        request = testing.DummyRequest()
        request.matchdict.update({
            'region': 'VI',
            'distance': '200',
            'date': '03Mar2013',
        })
        date = '03Mar2013'
        event_page_url = request.route_url(
            'brevet', region=brevet.region, distance=brevet.distance,
            date=date)
        rider_emails_url = request.route_url(
            'brevet.rider_emails', region=brevet.region,
            distance=brevet.distance, date=date, uuid=brevet.uuid)
        mailer = get_mailer(request)
        self._call_email_to_organizer(
            request, brevet, event_page_url, rider_emails_url)
        msg = mailer.outbox[0]
        self.assertEqual(
            msg.recipients, ['<EMAIL>', '<EMAIL>'])
class TestEmailToWebmaster(unittest.TestCase):
    """Unit tests for email_to_webmaster admin function re: event page URL.
    """
    # Helper: lazily import and call the function under test.
    def _call_email_to_webmaster(self, *args, **kwargs):
        from randopony.views.admin.core import email_to_webmaster
        return email_to_webmaster(*args, **kwargs)
    def setUp(self):
        from randopony.models import EmailAddress
        self.config = testing.setUp(
            settings={
                'mako.directories': 'randopony:templates',
            })
        # pyramid_mailer.testing provides a mock mailer that collects
        # messages in .outbox instead of sending them.
        self.config.include('pyramid_mailer.testing')
        self.config.include('pyramid_mako')
        self.config.add_route(
            'admin.populaires.view', '/admin/populaire/{item}')
        self.config.add_route(
            'populaire', '/populaires/{short_name}')
        # In-memory SQLite database, fresh per test.
        engine = create_engine('sqlite://')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
        # Email address fixtures looked up by the function under test.
        from_randopony = EmailAddress(
            key='from_randopony',
            email='<EMAIL>',
        )
        club_webmaster = EmailAddress(
            key='club_webmaster',
            email='<EMAIL>',
        )
        admin_email = EmailAddress(
            key='admin_email',
            email='<EMAIL>',
        )
        DBSession.add_all((from_randopony, club_webmaster, admin_email))
    def tearDown(self):
        DBSession.remove()
        testing.tearDown()
    def test_email_to_webmaster_sends_email(self):
        """email_to_webmaster sends message & sets expected flash message
        """
        from randopony.models import Populaire
        populaire = Populaire(
            event_name='Victoria Populaire',
            short_name='VicPop',
            distance='50 km, 100 km',
            date_time=datetime(2011, 3, 27, 10, 0),
            start_locn='University of Victoria, Parking Lot #2 '
                '(Gabriola Road, near McKinnon Gym)',
            organizer_email='<EMAIL>',
            registration_end=datetime(2011, 3, 24, 12, 0),
            entry_form_url='http://www.randonneurs.bc.ca/VicPop/'
                'VicPop11_registration.pdf',
            google_doc_id='spreadsheet:1234'
        )
        DBSession.add(populaire)
        request = testing.DummyRequest()
        request.matchdict['item'] = 'VicPop'
        event_page_url = request.route_url(
            'populaire', short_name=populaire.short_name)
        mailer = get_mailer(request)
        flash = self._call_email_to_webmaster(
            request, populaire, event_page_url)
        # Exactly one message captured by the testing mailer.
        self.assertEqual(len(mailer.outbox), 1)
        self.assertEqual(
            flash,
            ['success', 'Email with VicPop page URL sent to webmaster'])
    def test_email_to_webmaster_message(self):
        """email_to_webmaster message has expected content
        """
        from randopony.models import Populaire
        populaire = Populaire(
            event_name='Victoria Populaire',
            short_name='VicPop',
            distance='50 km, 100 km',
            date_time=datetime(2011, 3, 27, 10, 0),
            start_locn='University of Victoria, Parking Lot #2 '
                '(Gabriola Road, near McKinnon Gym)',
            organizer_email='<EMAIL>',
            registration_end=datetime(2011, 3, 24, 12, 0),
            entry_form_url='http://www.randonneurs.bc.ca/VicPop/'
                'VicPop11_registration.pdf',
            google_doc_id='spreadsheet:1234'
        )
        DBSession.add(populaire)
        request = testing.DummyRequest()
        request.matchdict['item'] = 'VicPop'
        event_page_url = request.route_url(
            'populaire', short_name=populaire.short_name)
        mailer = get_mailer(request)
        self._call_email_to_webmaster(request, populaire, event_page_url)
        msg = mailer.outbox[0]
        self.assertEqual(
            msg.subject, 'RandoPony Pre-registration page for VicPop')
        self.assertEqual(msg.sender, '<EMAIL>')
        self.assertEqual(msg.recipients, ['<EMAIL>'])
        self.assertIn('page for the VicPop event has been added', msg.body)
        self.assertIn(
            'The URL is <http://example.com/populaires/VicPop>.', msg.body)
        self.assertIn('send email to <<EMAIL>>.', msg.body)
class TestFinalizeFlashMsg(unittest.TestCase):
    """Unit tests for the finalize_flash_msg admin helper."""
    def _call_finalize_flash_msg(self, *args, **kwargs):
        # Deferred import so failures show up in the tests, not at
        # module import time.
        from randopony.views.admin.core import finalize_flash_msg
        return finalize_flash_msg(*args, **kwargs)
    def test_finalize_flash_msg_error(self):
        """flash 1st element is error when error present in flash list"""
        req = testing.DummyRequest()
        self._call_finalize_flash_msg(
            req, ['success', 'foo', 'error', 'bar'])
        queued = req.session.pop_flash()
        self.assertEqual(queued[0], 'error')
    def test_finalize_flash_msg_success(self):
        """flash 1st element is success when error not present in flash list"""
        req = testing.DummyRequest()
        # NOTE(review): 'sucess' looks like a fixture typo but is kept as-is;
        # the test only requires that 'error' be absent from the list.
        self._call_finalize_flash_msg(
            req, ['success', 'foo', 'sucess', 'bar'])
        queued = req.session.pop_flash()
        self.assertEqual(queued[0], 'success')
    def test_finalize_flash_msg_content(self):
        """flash[1:] are msgs w/o error or success elements of flash list"""
        req = testing.DummyRequest()
        self._call_finalize_flash_msg(
            req, ['success', 'foo', 'error', 'bar'])
        queued = req.session.pop_flash()
        self.assertEqual(queued[1:], ['foo', 'bar'])
| [
"pyramid.testing.setUp",
"pyramid_mailer.get_mailer",
"randopony.views.admin.core.email_to_organizer",
"randopony.models.meta.Base.metadata.create_all",
"datetime.datetime",
"randopony.models.Brevet.get_current",
"pyramid.testing.tearDown",
"sqlalchemy.create_engine",
"randopony.views.admin.core.ema... | [((670, 685), 'pyramid.testing.setUp', 'testing.setUp', ([], {}), '()\n', (683, 685), False, 'from pyramid import testing\n'), ((703, 729), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (716, 729), False, 'from sqlalchemy import create_engine\n'), ((738, 770), 'randopony.models.meta.DBSession.configure', 'DBSession.configure', ([], {'bind': 'engine'}), '(bind=engine)\n', (757, 770), False, 'from randopony.models.meta import Base, DBSession\n'), ((779, 811), 'randopony.models.meta.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (803, 811), False, 'from randopony.models.meta import Base, DBSession\n'), ((845, 863), 'randopony.models.meta.DBSession.remove', 'DBSession.remove', ([], {}), '()\n', (861, 863), False, 'from randopony.models.meta import Base, DBSession\n'), ((872, 890), 'pyramid.testing.tearDown', 'testing.tearDown', ([], {}), '()\n', (888, 890), False, 'from pyramid import testing\n'), ((1064, 1086), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (1084, 1086), False, 'from pyramid import testing\n'), ((1444, 1466), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (1464, 1466), False, 'from pyramid import testing\n'), ((2047, 2105), 'randopony.models.Administrator', 'Administrator', ([], {'email': '"""<EMAIL>"""', 'password_hash': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password_hash='<PASSWORD>')\n", (2060, 2105), False, 'from randopony.models import Administrator\n'), ((2123, 2181), 'randopony.models.Administrator', 'Administrator', ([], {'email': '"""<EMAIL>"""', 'password_hash': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password_hash='<PASSWORD>')\n", (2136, 2181), False, 'from randopony.models import Administrator\n'), ((2190, 2225), 'randopony.models.meta.DBSession.add_all', 'DBSession.add_all', (['(admin1, admin2)'], {}), '((admin1, admin2))\n', (2207, 2225), False, 'from 
randopony.models.meta import Base, DBSession\n'), ((2244, 2266), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (2264, 2266), False, 'from pyramid import testing\n'), ((2697, 2755), 'randopony.models.Administrator', 'Administrator', ([], {'email': '"""<EMAIL>"""', 'password_hash': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password_hash='<PASSWORD>')\n", (2710, 2755), False, 'from randopony.models import Administrator\n'), ((2764, 2784), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['admin'], {}), '(admin)\n', (2777, 2784), False, 'from randopony.models.meta import Base, DBSession\n'), ((2865, 2912), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {'post': "{'cancel': 'cancel'}"}), "(post={'cancel': 'cancel'})\n", (2885, 2912), False, 'from pyramid import testing\n'), ((3457, 3479), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (3477, 3479), False, 'from pyramid import testing\n'), ((4139, 4197), 'randopony.models.Administrator', 'Administrator', ([], {'email': '"""<EMAIL>"""', 'password_hash': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password_hash='<PASSWORD>')\n", (4152, 4197), False, 'from randopony.models import Administrator\n'), ((4206, 4226), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['admin'], {}), '(admin)\n', (4219, 4226), False, 'from randopony.models.meta import Base, DBSession\n'), ((4307, 4354), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {'post': "{'delete': 'delete'}"}), "(post={'delete': 'delete'})\n", (4327, 4354), False, 'from pyramid import testing\n'), ((4528, 4558), 'randopony.models.meta.DBSession.query', 'DBSession.query', (['Administrator'], {}), '(Administrator)\n', (4543, 4558), False, 'from randopony.models.meta import Base, DBSession\n'), ((5263, 5284), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['brevet'], {}), '(brevet)\n', (5276, 5284), False, 'from randopony.models.meta import Base, DBSession\n'), ((5365, 
5412), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {'post': "{'delete': 'delete'}"}), "(post={'delete': 'delete'})\n", (5385, 5412), False, 'from pyramid import testing\n'), ((6070, 6105), 'randopony.views.admin.core.email_to_organizer', 'email_to_organizer', (['*args'], {}), '(*args, **kwargs)\n', (6088, 6105), False, 'from randopony.views.admin.core import email_to_organizer\n'), ((6200, 6267), 'pyramid.testing.setUp', 'testing.setUp', ([], {'settings': "{'mako.directories': 'randopony:templates'}"}), "(settings={'mako.directories': 'randopony:templates'})\n", (6213, 6267), False, 'from pyramid import testing\n'), ((6748, 6774), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (6761, 6774), False, 'from sqlalchemy import create_engine\n'), ((6783, 6815), 'randopony.models.meta.DBSession.configure', 'DBSession.configure', ([], {'bind': 'engine'}), '(bind=engine)\n', (6802, 6815), False, 'from randopony.models.meta import Base, DBSession\n'), ((6824, 6856), 'randopony.models.meta.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (6848, 6856), False, 'from randopony.models.meta import Base, DBSession\n'), ((6882, 6933), 'randopony.models.EmailAddress', 'EmailAddress', ([], {'key': '"""from_randopony"""', 'email': '"""<EMAIL>"""'}), "(key='from_randopony', email='<EMAIL>')\n", (6894, 6933), False, 'from randopony.models import EmailAddress\n'), ((6991, 7039), 'randopony.models.EmailAddress', 'EmailAddress', ([], {'key': '"""admin_email"""', 'email': '"""<EMAIL>"""'}), "(key='admin_email', email='<EMAIL>')\n", (7003, 7039), False, 'from randopony.models import EmailAddress\n'), ((7083, 7131), 'randopony.models.meta.DBSession.add_all', 'DBSession.add_all', (['(from_randopony, admin_email)'], {}), '((from_randopony, admin_email))\n', (7100, 7131), False, 'from randopony.models.meta import Base, DBSession\n'), ((7165, 7183), 'randopony.models.meta.DBSession.remove', 
'DBSession.remove', ([], {}), '()\n', (7181, 7183), False, 'from randopony.models.meta import Base, DBSession\n'), ((7192, 7210), 'pyramid.testing.tearDown', 'testing.tearDown', ([], {}), '()\n', (7208, 7210), False, 'from pyramid import testing\n'), ((7751, 7772), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['brevet'], {}), '(brevet)\n', (7764, 7772), False, 'from randopony.models.meta import Base, DBSession\n'), ((7791, 7813), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (7811, 7813), False, 'from pyramid import testing\n'), ((9165, 9186), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['brevet'], {}), '(brevet)\n', (9178, 9186), False, 'from randopony.models.meta import Base, DBSession\n'), ((9205, 9227), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (9225, 9227), False, 'from pyramid import testing\n'), ((9717, 9736), 'pyramid_mailer.get_mailer', 'get_mailer', (['request'], {}), '(request)\n', (9727, 9736), False, 'from pyramid_mailer import get_mailer\n'), ((10609, 10630), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['brevet'], {}), '(brevet)\n', (10622, 10630), False, 'from randopony.models.meta import Base, DBSession\n'), ((10649, 10671), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (10669, 10671), False, 'from pyramid import testing\n'), ((11161, 11180), 'pyramid_mailer.get_mailer', 'get_mailer', (['request'], {}), '(request)\n', (11171, 11180), False, 'from pyramid_mailer import get_mailer\n'), ((12847, 12868), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['brevet'], {}), '(brevet)\n', (12860, 12868), False, 'from randopony.models.meta import Base, DBSession\n'), ((12887, 12909), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (12907, 12909), False, 'from pyramid import testing\n'), ((13399, 13418), 'pyramid_mailer.get_mailer', 'get_mailer', (['request'], {}), '(request)\n', (13409, 13418), False, 
'from pyramid_mailer import get_mailer\n'), ((13901, 13936), 'randopony.views.admin.core.email_to_webmaster', 'email_to_webmaster', (['*args'], {}), '(*args, **kwargs)\n', (13919, 13936), False, 'from randopony.views.admin.core import email_to_webmaster\n'), ((14031, 14098), 'pyramid.testing.setUp', 'testing.setUp', ([], {'settings': "{'mako.directories': 'randopony:templates'}"}), "(settings={'mako.directories': 'randopony:templates'})\n", (14044, 14098), False, 'from pyramid import testing\n'), ((14437, 14463), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (14450, 14463), False, 'from sqlalchemy import create_engine\n'), ((14472, 14504), 'randopony.models.meta.DBSession.configure', 'DBSession.configure', ([], {'bind': 'engine'}), '(bind=engine)\n', (14491, 14504), False, 'from randopony.models.meta import Base, DBSession\n'), ((14513, 14545), 'randopony.models.meta.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (14537, 14545), False, 'from randopony.models.meta import Base, DBSession\n'), ((14571, 14622), 'randopony.models.EmailAddress', 'EmailAddress', ([], {'key': '"""from_randopony"""', 'email': '"""<EMAIL>"""'}), "(key='from_randopony', email='<EMAIL>')\n", (14583, 14622), False, 'from randopony.models import EmailAddress\n'), ((14683, 14734), 'randopony.models.EmailAddress', 'EmailAddress', ([], {'key': '"""club_webmaster"""', 'email': '"""<EMAIL>"""'}), "(key='club_webmaster', email='<EMAIL>')\n", (14695, 14734), False, 'from randopony.models import EmailAddress\n'), ((14792, 14840), 'randopony.models.EmailAddress', 'EmailAddress', ([], {'key': '"""admin_email"""', 'email': '"""<EMAIL>"""'}), "(key='admin_email', email='<EMAIL>')\n", (14804, 14840), False, 'from randopony.models import EmailAddress\n'), ((14884, 14948), 'randopony.models.meta.DBSession.add_all', 'DBSession.add_all', (['(from_randopony, club_webmaster, admin_email)'], {}), '((from_randopony, club_webmaster, 
admin_email))\n', (14901, 14948), False, 'from randopony.models.meta import Base, DBSession\n'), ((14982, 15000), 'randopony.models.meta.DBSession.remove', 'DBSession.remove', ([], {}), '()\n', (14998, 15000), False, 'from randopony.models.meta import Base, DBSession\n'), ((15009, 15027), 'pyramid.testing.tearDown', 'testing.tearDown', ([], {}), '()\n', (15025, 15027), False, 'from pyramid import testing\n'), ((15821, 15845), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['populaire'], {}), '(populaire)\n', (15834, 15845), False, 'from randopony.models.meta import Base, DBSession\n'), ((15864, 15886), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (15884, 15886), False, 'from pyramid import testing\n'), ((16051, 16070), 'pyramid_mailer.get_mailer', 'get_mailer', (['request'], {}), '(request)\n', (16061, 16070), False, 'from pyramid_mailer import get_mailer\n'), ((17106, 17130), 'randopony.models.meta.DBSession.add', 'DBSession.add', (['populaire'], {}), '(populaire)\n', (17119, 17130), False, 'from randopony.models.meta import Base, DBSession\n'), ((17149, 17171), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (17169, 17171), False, 'from pyramid import testing\n'), ((17336, 17355), 'pyramid_mailer.get_mailer', 'get_mailer', (['request'], {}), '(request)\n', (17346, 17355), False, 'from pyramid_mailer import get_mailer\n'), ((18141, 18176), 'randopony.views.admin.core.finalize_flash_msg', 'finalize_flash_msg', (['*args'], {}), '(*args, **kwargs)\n', (18159, 18176), False, 'from randopony.views.admin.core import finalize_flash_msg\n'), ((18324, 18346), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (18344, 18346), False, 'from pyramid import testing\n'), ((18670, 18692), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], {}), '()\n', (18690, 18692), False, 'from pyramid import testing\n'), ((19029, 19051), 'pyramid.testing.DummyRequest', 'testing.DummyRequest', ([], 
{}), '()\n', (19049, 19051), False, 'from pyramid import testing\n'), ((5520, 5550), 'unittest.mock.patch.object', 'patch.object', (['core', '"""datetime"""'], {}), "(core, 'datetime')\n", (5532, 5550), False, 'from unittest.mock import patch\n'), ((5616, 5649), 'datetime.datetime', 'datetime', (['(2012)', '(11)', '(1)', '(12)', '(55)', '(42)'], {}), '(2012, 11, 1, 12, 55, 42)\n', (5624, 5649), False, 'from datetime import datetime\n'), ((3089, 3119), 'randopony.models.meta.DBSession.query', 'DBSession.query', (['Administrator'], {}), '(Administrator)\n', (3104, 3119), False, 'from randopony.models.meta import Base, DBSession\n'), ((5003, 5034), 'datetime.datetime', 'datetime', (['(2012)', '(11)', '(11)', '(7)', '(0)', '(0)'], {}), '(2012, 11, 11, 7, 0, 0)\n', (5011, 5034), False, 'from datetime import datetime\n'), ((7509, 7535), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(3)', '(7)', '(0)'], {}), '(2013, 3, 3, 7, 0)\n', (7517, 7535), False, 'from datetime import datetime\n'), ((7704, 7731), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(2)', '(12)', '(0)'], {}), '(2013, 3, 2, 12, 0)\n', (7712, 7731), False, 'from datetime import datetime\n'), ((8881, 8907), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(3)', '(7)', '(0)'], {}), '(2013, 3, 3, 7, 0)\n', (8889, 8907), False, 'from datetime import datetime\n'), ((9072, 9099), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(2)', '(12)', '(0)'], {}), '(2013, 3, 2, 12, 0)\n', (9080, 9099), False, 'from datetime import datetime\n'), ((10323, 10349), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(3)', '(7)', '(0)'], {}), '(2013, 3, 3, 7, 0)\n', (10331, 10349), False, 'from datetime import datetime\n'), ((10518, 10545), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(2)', '(12)', '(0)'], {}), '(2013, 3, 2, 12, 0)\n', (10526, 10545), False, 'from datetime import datetime\n'), ((12548, 12574), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(3)', '(7)', '(0)'], {}), '(2013, 3, 
3, 7, 0)\n', (12556, 12574), False, 'from datetime import datetime\n'), ((12755, 12782), 'datetime.datetime', 'datetime', (['(2013)', '(3)', '(2)', '(12)', '(0)'], {}), '(2013, 3, 2, 12, 0)\n', (12763, 12782), False, 'from datetime import datetime\n'), ((15382, 15410), 'datetime.datetime', 'datetime', (['(2011)', '(3)', '(27)', '(10)', '(0)'], {}), '(2011, 3, 27, 10, 0)\n', (15390, 15410), False, 'from datetime import datetime\n'), ((15606, 15634), 'datetime.datetime', 'datetime', (['(2011)', '(3)', '(24)', '(12)', '(0)'], {}), '(2011, 3, 24, 12, 0)\n', (15614, 15634), False, 'from datetime import datetime\n'), ((16667, 16695), 'datetime.datetime', 'datetime', (['(2011)', '(3)', '(27)', '(10)', '(0)'], {}), '(2011, 3, 27, 10, 0)\n', (16675, 16695), False, 'from datetime import datetime\n'), ((16891, 16919), 'datetime.datetime', 'datetime', (['(2011)', '(3)', '(24)', '(12)', '(0)'], {}), '(2011, 3, 24, 12, 0)\n', (16899, 16919), False, 'from datetime import datetime\n'), ((5776, 5796), 'randopony.models.Brevet.get_current', 'Brevet.get_current', ([], {}), '()\n', (5794, 5796), False, 'from randopony.models import Brevet\n'), ((11429, 11458), 'randopony.models.meta.DBSession.query', 'DBSession.query', (['EmailAddress'], {}), '(EmailAddress)\n', (11444, 11458), False, 'from randopony.models.meta import Base, DBSession\n')] |
"""
WSGI entrypoint.
"""
from nunaserver.server import app
if __name__ == "__main__":
app.run()
| [
"nunaserver.server.app.run"
] | [((91, 100), 'nunaserver.server.app.run', 'app.run', ([], {}), '()\n', (98, 100), False, 'from nunaserver.server import app\n')] |
import os
import pytest
import csv_diff
import logging
import torch
from unit_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__, __tmp_dir__
from ivadomed.loader import utils as imed_loader_utils
from ivadomed.loader import loader as imed_loader
logger = logging.getLogger(__name__)
def setup_function():
    """pytest per-test setup hook: give each test a fresh temporary directory."""
    create_tmp_dir()
@pytest.mark.parametrize('loader_parameters', [{
    "path_data": [os.path.join(__data_testing_dir__, "microscopy_png")],
    "bids_config": "ivadomed/config/config_bids.json",
    "target_suffix": [["_seg-myelin-manual", "_seg-axon-manual"]],
    "extensions": [".png"],
    "roi_params": {"suffix": None, "slice_filter_roi": None},
    "contrast_params": {"contrast_lst": []}
    }])
def test_bids_df_microscopy_png(loader_parameters):
    """
    Test for microscopy png file format
    Test for _sessions.tsv and _scans.tsv files
    Test for target_suffix as a nested list
    Test for when no contrast_params are provided

    Builds a BidsDataframe from the microscopy test dataset and compares it
    (minus columns not yet present in the reference CSV) against df_ref.csv.
    """
    bids_df = imed_loader_utils.BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    df_test = bids_df.df.drop(columns=['path'])
    # TODO: modify df_ref.csv file in data-testing dataset to include "participant_id"
    # and "sample_id" columns, then delete next line
    df_test = df_test.drop(columns=['participant_id', 'sample_id'])
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = os.path.join(loader_parameters["path_data"][0], "df_ref.csv")
    csv_test = os.path.join(loader_parameters["path_data"][0], "df_test.csv")
    df_test.to_csv(csv_test, index=False)
    # Fix: close the CSV files deterministically.  The original passed bare
    # open() results straight to csv_diff.load_csv(), leaking the file
    # descriptors until garbage collection.
    with open(csv_ref) as f_ref, open(csv_test) as f_test:
        diff = csv_diff.compare(csv_diff.load_csv(f_ref), csv_diff.load_csv(f_test))
    assert diff == {'added': [], 'removed': [], 'changed': [], 'columns_added': [], 'columns_removed': []}
@pytest.mark.parametrize('loader_parameters', [{
    "path_data": [__data_testing_dir__],
    "target_suffix": ["_seg-manual"],
    "extensions": [],
    "roi_params": {"suffix": None, "slice_filter_roi": None},
    "contrast_params": {"contrast_lst": ["T1w", "T2w"]}
    }])
def test_bids_df_anat(loader_parameters):
    """
    Test for MRI anat nii.gz file format
    Test for when no file extensions are provided
    Test for multiple target_suffix
    TODO: modify test and "df_ref.csv" file in data-testing dataset to test behavior when "roi_suffix" is not None

    Builds a BidsDataframe from the anat test dataset and compares it
    (minus columns not yet present in the reference CSV) against df_ref.csv.
    """
    bids_df = imed_loader_utils.BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    df_test = bids_df.df.drop(columns=['path'])
    # TODO: modify df_ref.csv file in data-testing dataset to include "participant_id"
    # column then delete next line
    df_test = df_test.drop(columns=['participant_id'])
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = os.path.join(loader_parameters["path_data"][0], "df_ref.csv")
    csv_test = os.path.join(loader_parameters["path_data"][0], "df_test.csv")
    df_test.to_csv(csv_test, index=False)
    # Fix: close the CSV files deterministically (the original leaked the
    # bare open() handles passed to csv_diff.load_csv()).
    with open(csv_ref) as f_ref, open(csv_test) as f_test:
        diff = csv_diff.compare(csv_diff.load_csv(f_ref), csv_diff.load_csv(f_test))
    assert diff == {'added': [], 'removed': [], 'changed': [],
                    'columns_added': [], 'columns_removed': []}
# TODO: add a test to ensure the loader can read in multiple entries in path_data
@pytest.mark.parametrize('seg_pair', [
    {"input": torch.rand((2, 5, 5))},
    {"input": torch.rand((1, 5, 5))},
    {"input": torch.rand((5, 5, 5, 5))},
    {"input": (torch.rand((5, 5, 5, 3)) * torch.tensor([1, 0, 1], dtype=torch.float)).transpose(0, -1)},
    {"input": (torch.rand((7, 7, 4)) * torch.tensor([1, 0, 0, 0], dtype=torch.float)).transpose(0, -1)}
    ])
def test_dropout_input(seg_pair):
    """Apply dropout_input and inspect how many channels come back empty."""
    n_channels = seg_pair['input'].size(0)
    seg_pair = imed_loader.dropout_input(seg_pair)
    # A channel counts as "empty" when it holds a single unique value.
    n_empty = sum(len(torch.unique(channel)) == 1 for channel in seg_pair['input'])
    if n_channels > 1:
        # Verify that there is still at least one channel remaining.
        # NOTE(review): this assertion can never fail -- at most n_channels
        # of the n_channels channels can be empty.  The comment suggests the
        # intended check was `n_empty < n_channels`; confirm against
        # dropout_input's guarantee (some parametrized inputs already contain
        # all-zero channels) before tightening.
        assert n_empty <= n_channels
    else:
        # Single-channel inputs must never be dropped.
        assert n_empty == 0
def teardown_function():
    """pytest per-test teardown hook: remove the temporary directory."""
    remove_tmp_dir()
| [
"logging.getLogger",
"torch.unique",
"ivadomed.loader.loader.dropout_input",
"os.path.join",
"pytest.mark.parametrize",
"unit_tests.t_utils.remove_tmp_dir",
"torch.tensor",
"ivadomed.loader.utils.BidsDataframe",
"torch.rand",
"unit_tests.t_utils.create_tmp_dir"
] | [((279, 306), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (296, 306), False, 'import logging\n'), ((1820, 2081), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""loader_parameters"""', "[{'path_data': [__data_testing_dir__], 'target_suffix': ['_seg-manual'],\n 'extensions': [], 'roi_params': {'suffix': None, 'slice_filter_roi':\n None}, 'contrast_params': {'contrast_lst': ['T1w', 'T2w']}}]"], {}), "('loader_parameters', [{'path_data': [\n __data_testing_dir__], 'target_suffix': ['_seg-manual'], 'extensions':\n [], 'roi_params': {'suffix': None, 'slice_filter_roi': None},\n 'contrast_params': {'contrast_lst': ['T1w', 'T2w']}}])\n", (1843, 2081), False, 'import pytest\n'), ((335, 351), 'unit_tests.t_utils.create_tmp_dir', 'create_tmp_dir', ([], {}), '()\n', (349, 351), False, 'from unit_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__, __tmp_dir__\n'), ((1005, 1091), 'ivadomed.loader.utils.BidsDataframe', 'imed_loader_utils.BidsDataframe', (['loader_parameters', '__tmp_dir__'], {'derivatives': '(True)'}), '(loader_parameters, __tmp_dir__, derivatives\n =True)\n', (1036, 1091), True, 'from ivadomed.loader import utils as imed_loader_utils\n'), ((1431, 1492), 'os.path.join', 'os.path.join', (["loader_parameters['path_data'][0]", '"""df_ref.csv"""'], {}), "(loader_parameters['path_data'][0], 'df_ref.csv')\n", (1443, 1492), False, 'import os\n'), ((1508, 1570), 'os.path.join', 'os.path.join', (["loader_parameters['path_data'][0]", '"""df_test.csv"""'], {}), "(loader_parameters['path_data'][0], 'df_test.csv')\n", (1520, 1570), False, 'import os\n'), ((2410, 2496), 'ivadomed.loader.utils.BidsDataframe', 'imed_loader_utils.BidsDataframe', (['loader_parameters', '__tmp_dir__'], {'derivatives': '(True)'}), '(loader_parameters, __tmp_dir__, derivatives\n =True)\n', (2441, 2496), True, 'from ivadomed.loader import utils as imed_loader_utils\n'), ((2807, 2868), 'os.path.join', 'os.path.join', 
(["loader_parameters['path_data'][0]", '"""df_ref.csv"""'], {}), "(loader_parameters['path_data'][0], 'df_ref.csv')\n", (2819, 2868), False, 'import os\n'), ((2884, 2946), 'os.path.join', 'os.path.join', (["loader_parameters['path_data'][0]", '"""df_test.csv"""'], {}), "(loader_parameters['path_data'][0], 'df_test.csv')\n", (2896, 2946), False, 'import os\n'), ((3758, 3793), 'ivadomed.loader.loader.dropout_input', 'imed_loader.dropout_input', (['seg_pair'], {}), '(seg_pair)\n', (3783, 3793), True, 'from ivadomed.loader import loader as imed_loader\n'), ((4132, 4148), 'unit_tests.t_utils.remove_tmp_dir', 'remove_tmp_dir', ([], {}), '()\n', (4146, 4148), False, 'from unit_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__, __tmp_dir__\n'), ((3351, 3372), 'torch.rand', 'torch.rand', (['(2, 5, 5)'], {}), '((2, 5, 5))\n', (3361, 3372), False, 'import torch\n'), ((3389, 3410), 'torch.rand', 'torch.rand', (['(1, 5, 5)'], {}), '((1, 5, 5))\n', (3399, 3410), False, 'import torch\n'), ((3427, 3451), 'torch.rand', 'torch.rand', (['(5, 5, 5, 5)'], {}), '((5, 5, 5, 5))\n', (3437, 3451), False, 'import torch\n'), ((421, 473), 'os.path.join', 'os.path.join', (['__data_testing_dir__', '"""microscopy_png"""'], {}), "(__data_testing_dir__, 'microscopy_png')\n", (433, 473), False, 'import os\n'), ((3820, 3844), 'torch.unique', 'torch.unique', (['input_data'], {}), '(input_data)\n', (3832, 3844), False, 'import torch\n'), ((3469, 3493), 'torch.rand', 'torch.rand', (['(5, 5, 5, 3)'], {}), '((5, 5, 5, 3))\n', (3479, 3493), False, 'import torch\n'), ((3496, 3538), 'torch.tensor', 'torch.tensor', (['[1, 0, 1]'], {'dtype': 'torch.float'}), '([1, 0, 1], dtype=torch.float)\n', (3508, 3538), False, 'import torch\n'), ((3574, 3595), 'torch.rand', 'torch.rand', (['(7, 7, 4)'], {}), '((7, 7, 4))\n', (3584, 3595), False, 'import torch\n'), ((3598, 3643), 'torch.tensor', 'torch.tensor', (['[1, 0, 0, 0]'], {'dtype': 'torch.float'}), '([1, 0, 0, 0], dtype=torch.float)\n', 
(3610, 3643), False, 'import torch\n')] |
from pymongo import MongoClient
from bson.objectid import ObjectId
from datetime import datetime as dt
import pprint
# Module-level MongoDB handles shared by random_word_requester() and the
# __main__ demo below.  MongoClient() with no arguments connects to the
# driver's default server (localhost:27017).
client = MongoClient()
db = client['mongo_db_lab']
defs = db['definitions']  # collection of word/definition documents
def random_word_requester():
    """Return one randomly sampled word document and stamp its access time.

    Draws a single document from the ``definitions`` collection using the
    ``$sample`` aggregation stage, appends the current UTC timestamp (ISO
    format) to that document's ``dates`` list in the database, and returns
    the sampled document.
    """
    sampled = defs.aggregate([{'$sample': {'size': 1}}])
    word = list(sampled)[0]
    accessed_at = dt.utcnow().isoformat()
    defs.update_one({'_id': word.get('_id')}, {'$push': {'dates': accessed_at}})
    return word
if __name__ == '__main__':
    # Demo: keep drawing random words until the same word is sampled twice
    # (a birthday-style collision), then print that word's full document.
    # create a temporary collection to check for duplicates
    dupes = db['dupes']
    item = random_word_requester()
    # we keep getting random words until we find a dupe
    while dupes.count_documents({'_id': item.get('_id')}) <= 0:
        dupes.insert_one(item)
        item = random_word_requester()
    # get the dupe in the original collection
    duped = defs.find_one({'_id': item.get('_id')})
    pprint.pprint(duped)
    # remove the duplicate collection and remove
    # the dates field before exiting (undoes the timestamps pushed by
    # random_word_requester, leaving the collection as it was found)
    dupes.drop()
    defs.update_many({}, {'$unset': {'dates': ''}})
client.close() | [
"pymongo.MongoClient",
"pprint.pprint",
"datetime.datetime.utcnow"
] | [((126, 139), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (137, 139), False, 'from pymongo import MongoClient\n'), ((1150, 1170), 'pprint.pprint', 'pprint.pprint', (['duped'], {}), '(duped)\n', (1163, 1170), False, 'import pprint\n'), ((628, 639), 'datetime.datetime.utcnow', 'dt.utcnow', ([], {}), '()\n', (637, 639), True, 'from datetime import datetime as dt\n')] |
# Generated by Django 2.1.7 on 2019-05-20 19:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional ``first_name``/``last_name`` columns to the Lead model."""

    dependencies = [
        ('leads', '0011_auto_20190520_1217'),
    ]

    operations = [
        migrations.AddField(
            model_name='lead',
            name='first_name',
            # null=True lets existing Lead rows migrate without a default value.
            field=models.CharField(max_length=255, null=True, verbose_name='First name'),
        ),
        migrations.AddField(
            model_name='lead',
            name='last_name',
            field=models.CharField(max_length=255, null=True, verbose_name='Last name'),
        ),
    ]
| [
"django.db.models.CharField"
] | [((334, 404), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""First name"""'}), "(max_length=255, null=True, verbose_name='First name')\n", (350, 404), False, 'from django.db import migrations, models\n'), ((525, 594), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""Last name"""'}), "(max_length=255, null=True, verbose_name='Last name')\n", (541, 594), False, 'from django.db import migrations, models\n')] |
import sys
import torch
from args import get_argparser, parse_args, get_aligner, get_bbox
from os.path import join
if __name__ == '__main__':
    parser = get_argparser()
    parser.add_argument('--align_start',
        help='align without vector voting the 2nd & 3rd sections, otherwise copy them', action='store_true')
    args = parse_args(parser)
    args.tgt_path = join(args.dst_path, 'image')
    # only compute matches to previous sections
    args.serial_operation = True
    a = get_aligner(args)
    bbox = get_bbox(args)

    # z indexes the section (slice) axis of the bounding box.
    block_start = args.bbox_start[2]
    z_range = range(args.bbox_start[2], args.bbox_stop[2])
    # The *_cv handles look like CloudVolume-style read/write handles --
    # presumably; confirm in the aligner implementation.
    a.dst[0].add_composed_cv(block_start, inverse=False)
    field_k = a.dst[0].get_composed_key(block_start, inverse=False)
    field_cv = a.dst[0].for_read(field_k)
    dst_cv = a.dst[0].for_write('dst_img')
    z_offset = 1
    uncomposed_field_cv = a.dst[z_offset].for_read('field')
    mip = args.mip

    # Partition z_range into four phases.  Without --align_start every
    # section goes through the composed (vector-voted) path.
    copy_range = []
    uncomposed_range = []
    overlap_range = []
    composed_range = z_range
    if args.align_start:
        overlap = args.tgt_radius
        copy_range = z_range[0:1]                   # first section: copied verbatim
        uncomposed_range = z_range[1:overlap]       # pairwise aligned, no voting
        overlap_range = z_range[overlap:2*overlap]  # transition into voted sections
        composed_range = z_range[2*overlap:]        # fully vector-voted sections
    # copy first section
    for z in copy_range:
        print('Copying z={0}'.format(z))
        a.copy_section(z, dst_cv, z, bbox, mip)
        a.downsample(dst_cv, z, bbox, a.render_low_mip, a.render_high_mip)
    # align without vector voting
    for z in uncomposed_range:
        print('Aligning without vector voting z={0}'.format(z))
        src_z = z
        tgt_z = z-1
        a.compute_section_pair_residuals(src_z, tgt_z, bbox)
        a.render_section_all_mips(src_z, uncomposed_field_cv, src_z,
                                  dst_cv, src_z, bbox, mip)
    # align overlap to the uncomposed
    print('align overlap pairwise to previous aligned')
    for k, z in enumerate(overlap_range):
        # Offsets k+1 .. tgt_radius reach back past the overlap into the
        # sections aligned above.
        for z_offset in range(k+1, args.tgt_radius+1):
            src_z = z
            tgt_z = src_z - z_offset
            a.compute_section_pair_residuals(src_z, tgt_z, bbox)

    # setup an aligner to run pairwise (targets are now the raw source)
    args.tgt_path = args.src_path
    args.serial_operation = False
    a = get_aligner(args)
    # align overlap to the composed
    print('align overlap pairwise to previous unaligned')
    for k, z in enumerate(overlap_range):
        for z_offset in range(1, k+1):
            src_z = z
            tgt_z = src_z - z_offset
            a.compute_section_pair_residuals(src_z, tgt_z, bbox)
    print('align overlap pairwise to future unaligned')
    for k, z in enumerate(overlap_range):
        # Negative offsets target sections *after* src_z (tgt_z > src_z).
        for z_offset in range(-args.tgt_radius, 0):
            src_z = z
            tgt_z = src_z - z_offset
            a.compute_section_pair_residuals(src_z, tgt_z, bbox)
    # multi-match for all of the composed
    a.generate_pairwise(composed_range, bbox, render_match=False)
    pairwise_range = list(overlap_range) + list(composed_range)
    # compose from block_start
    a.compose_pairwise(pairwise_range, block_start, bbox, mip,
                       forward_compose=True,
                       inverse_compose=False)
    # render all of overlap and compose
    for z in pairwise_range:
        a.render_section_all_mips(z, field_cv, z, dst_cv, z, bbox, mip)

    # # compose and regularized all of the composed
    # for block_start in range(z_start, z_stop, args.block_size):
    #   compose_range = range(block_start, block_start + args.block_size + overlap)
    #   print('Composing for z_range {0}'.format(compose_range))
    #   a.compose_pairwise(compose_range, block_start, bbox, mip,
    #                      forward_compose=args.forward_compose,
    #                      inverse_compose=args.inverse_compose)
    #   reg_range = range(block_start, block_start + args.block_size)
    #   print('Regularizing for z_range {0}'.format(reg_range))
    #   a.regularize_z_chunkwise(reg_range, z_start, bbox, mip, sigma=args.sigma)
    #   for z in reg_range:
    #     a.render_section_all_mips(z, field_cv, z, dst_cv, z, bbox, mip)
| [
"args.get_argparser",
"args.get_bbox",
"os.path.join",
"args.get_aligner",
"args.parse_args"
] | [((154, 169), 'args.get_argparser', 'get_argparser', ([], {}), '()\n', (167, 169), False, 'from args import get_argparser, parse_args, get_aligner, get_bbox\n'), ((323, 341), 'args.parse_args', 'parse_args', (['parser'], {}), '(parser)\n', (333, 341), False, 'from args import get_argparser, parse_args, get_aligner, get_bbox\n'), ((360, 388), 'os.path.join', 'join', (['args.dst_path', '"""image"""'], {}), "(args.dst_path, 'image')\n", (364, 388), False, 'from os.path import join\n'), ((472, 489), 'args.get_aligner', 'get_aligner', (['args'], {}), '(args)\n', (483, 489), False, 'from args import get_argparser, parse_args, get_aligner, get_bbox\n'), ((499, 513), 'args.get_bbox', 'get_bbox', (['args'], {}), '(args)\n', (507, 513), False, 'from args import get_argparser, parse_args, get_aligner, get_bbox\n'), ((2161, 2178), 'args.get_aligner', 'get_aligner', (['args'], {}), '(args)\n', (2172, 2178), False, 'from args import get_argparser, parse_args, get_aligner, get_bbox\n')] |
from cms.extensions.toolbar import ExtensionToolbar
from cms.utils import get_language_list
from django.utils.encoding import force_text
from django.utils.translation import get_language_info
class TitleExtensionToolbar(ExtensionToolbar):
    """CMS toolbar extension that adds modal edit entries for a title
    extension model, one per page language.

    Subclasses set ``model`` (the extension model class) and
    ``insert_after`` (the display name of the existing menu item the new
    entries should follow).
    """

    model = None         # extension model class set by the subclass
    insert_after = None  # name of the existing menu entry to insert after

    def get_item_position(self, menu):
        """Return the menu position just after the ``insert_after`` item.

        Scans cms's private item registry (``menu._memo``) for an item whose
        name equals ``insert_after`` (with or without a trailing "...") and
        returns its position + 1, or None when no item matches.
        NOTE(review): ``break`` only exits the inner loop, so a match in a
        later registry group overwrites an earlier one -- confirm intended.
        """
        position = None
        for items in menu._memo.values():
            for item in items:
                if force_text(getattr(item, 'name', None)) in (
                        force_text(self.insert_after),
                        '{0}...'.format(self.insert_after)
                ):
                    position = menu._item_position(item) + 1
                    break
        return position

    def populate(self):
        """Build the toolbar entries for the current page, if any."""
        current_page_menu = self._setup_extension_toolbar()
        # Nothing to do without a page menu or a current page.
        if not current_page_menu or not self.page:
            return
        languages = get_language_list(self.current_site.pk)
        is_single_lang = len(languages) < 2
        position = self.get_item_position(current_page_menu)
        urls = self.get_title_extension_admin()
        page = self._get_page()
        titleset = page.title_set.filter(language__in=languages)
        # The toolbar attribute name varies across django-cms versions;
        # support both ``edit_mode_active`` and ``edit_mode``.
        if hasattr(self.toolbar, 'edit_mode_active'):
            not_edit_mode = not self.toolbar.edit_mode_active
        else:
            not_edit_mode = not self.toolbar.edit_mode
        # Single language: add items directly to the page menu.
        # Multiple languages: group them under a per-model submenu.
        extended_menu = current_page_menu if is_single_lang else (
            current_page_menu.get_or_create_menu(
                key='{0}_menu'.format(self.model._meta.db_table),
                verbose_name=self.model._meta.verbose_name,
                position=position, disabled=not_edit_mode))
        # Pair each (extension instance, admin url) with its page title.
        # NOTE(review): zip() assumes get_title_extension_admin() yields
        # entries in the same order as ``titleset`` -- confirm upstream.
        nodes = [(title_extension, url, title) for (
            (title_extension, url), title) in zip(urls, titleset)]
        for title_extension, url, title in nodes:
            item_position = position if is_single_lang else None
            language_str = get_language_info(title.language)['name_translated']
            # Prefix the entry with the language name when multilingual.
            name = '{0}{1}'.format(
                '' if is_single_lang else (language_str + ' '),
                self.model._meta.verbose_name)
            extended_menu.add_modal_item(
                name, url=url, disabled=not_edit_mode, position=item_position)
| [
"django.utils.translation.get_language_info",
"django.utils.encoding.force_text",
"cms.utils.get_language_list"
] | [((896, 935), 'cms.utils.get_language_list', 'get_language_list', (['self.current_site.pk'], {}), '(self.current_site.pk)\n', (913, 935), False, 'from cms.utils import get_language_list\n'), ((1940, 1973), 'django.utils.translation.get_language_info', 'get_language_info', (['title.language'], {}), '(title.language)\n', (1957, 1973), False, 'from django.utils.translation import get_language_info\n'), ((503, 532), 'django.utils.encoding.force_text', 'force_text', (['self.insert_after'], {}), '(self.insert_after)\n', (513, 532), False, 'from django.utils.encoding import force_text\n')] |
import datetime, os, pkg_resources, re, setuptools_scm
from .. import __name__ as package_name
try:
if int(os.environ.get("_ASTROPATH_VERSION_NO_GIT", 0)):
env_var_no_git = True
raise LookupError
env_var_no_git = False
astropathversion = "v"+setuptools_scm.get_version(root="../..", relative_to=__file__)
except LookupError:
try:
astropathversion = "v"+pkg_resources.get_distribution(package_name).version
except pkg_resources.DistributionNotFound:
astropathversion = "v0.0.0.dev0+g0000000.d"+datetime.date.today().strftime("%Y%m%d")
astropathversionmatch = re.match(r"v(?P<version>[0-9]+(?:\.[0-9]+)*)(?P<dev>\.dev[0-9]+\+g[0-9a-f]+)?(?P<date>\.d[0-9]+)?", astropathversion)
if not astropathversionmatch:
raise RuntimeError(f"got a version number '{astropathversion}' that doesn't match the desired regex")
| [
"re.match",
"os.environ.get",
"setuptools_scm.get_version",
"datetime.date.today",
"pkg_resources.get_distribution"
] | [((586, 716), 're.match', 're.match', (['"""v(?P<version>[0-9]+(?:\\\\.[0-9]+)*)(?P<dev>\\\\.dev[0-9]+\\\\+g[0-9a-f]+)?(?P<date>\\\\.d[0-9]+)?"""', 'astropathversion'], {}), "(\n 'v(?P<version>[0-9]+(?:\\\\.[0-9]+)*)(?P<dev>\\\\.dev[0-9]+\\\\+g[0-9a-f]+)?(?P<date>\\\\.d[0-9]+)?'\n , astropathversion)\n", (594, 716), False, 'import datetime, os, pkg_resources, re, setuptools_scm\n'), ((110, 156), 'os.environ.get', 'os.environ.get', (['"""_ASTROPATH_VERSION_NO_GIT"""', '(0)'], {}), "('_ASTROPATH_VERSION_NO_GIT', 0)\n", (124, 156), False, 'import datetime, os, pkg_resources, re, setuptools_scm\n'), ((257, 319), 'setuptools_scm.get_version', 'setuptools_scm.get_version', ([], {'root': '"""../.."""', 'relative_to': '__file__'}), "(root='../..', relative_to=__file__)\n", (283, 319), False, 'import datetime, os, pkg_resources, re, setuptools_scm\n'), ((374, 418), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['package_name'], {}), '(package_name)\n', (404, 418), False, 'import datetime, os, pkg_resources, re, setuptools_scm\n'), ((520, 541), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (539, 541), False, 'import datetime, os, pkg_resources, re, setuptools_scm\n')] |
###
### Precios de casas en boston
###
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.neighbors import KNeighborsRegressor
boston = load_boston()
# Visualiza el nombre de los valores de los datos.
print(boston.keys())
# Existen 506 casas con 13 características.
print(boston.data.shape)
X_ent, X_test, Y_ent, Y_test = train_test_split(boston.data, boston.target)
knn=KNeighborsRegressor(n_neighbors=4)
# Entrenar
knn.fit(X_ent, Y_ent)
# Para ver que tal ha aprendido el algoritmo.
print('Aprendizaje de vecinos cercanos: %d', knn.score(X_test, Y_test))
rl=LinearRegression()
rl.fit(X_ent, Y_ent)
print ('Aprendizaja Lineal: %d', rl.score(X_test, Y_test))
ridge=Ridge()
ridge.fit(X_ent, Y_ent)
print ('Aprendizaje Ridge: %d', ridge.score(X_test, Y_test))
| [
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.datasets.load_boston",
"sklearn.linear_model.Ridge",
"sklearn.linear_model.LinearRegression"
] | [((251, 264), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (262, 264), False, 'from sklearn.datasets import load_boston\n'), ((440, 484), 'sklearn.model_selection.train_test_split', 'train_test_split', (['boston.data', 'boston.target'], {}), '(boston.data, boston.target)\n', (456, 484), False, 'from sklearn.model_selection import train_test_split\n'), ((490, 524), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {'n_neighbors': '(4)'}), '(n_neighbors=4)\n', (509, 524), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((682, 700), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (698, 700), False, 'from sklearn.linear_model import LinearRegression, Ridge\n'), ((789, 796), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (794, 796), False, 'from sklearn.linear_model import LinearRegression, Ridge\n')] |
"""Ui."""
import logging
import logging.config
import os.path
from datetime import datetime
from PyQt5 import QtCore, QtGui
from .lib import (
EquityChart,
OptimizatimizedResultsTable,
OptimizationTable,
Portfolio,
QuotesChart,
ResultsTable,
Settings,
Symbol,
TradesTable,
get_quotes,
get_symbols,
strategies_from_file,
)
__all__ = ('MainWidget',)
logger = logging.getLogger(__name__)
DEFAULT_TICKER = 'AAPL'
SYMBOL_COLUMNS = ['Symbol', 'Security Name']
class SymbolsLoaderThread(QtCore.QThread):
symbols_loaded = QtCore.pyqtSignal(object)
def run(self):
symbols = get_symbols()
self.symbols_loaded.emit(symbols[SYMBOL_COLUMNS].values)
class DataTabWidget(QtGui.QWidget):
data_updated = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.select_source = QtGui.QTabWidget(self)
self.select_source.setGeometry(210, 50, 340, 200)
self.init_shares_tab_ui()
self.init_external_tab_ui()
self.symbols_loader = SymbolsLoaderThread()
self.symbols_loader.started.connect(self.on_symbols_loading)
self.symbols_loader.symbols_loaded.connect(
self.on_symbols_loaded, QtCore.Qt.QueuedConnection
)
self.symbols_loader.start()
self.date_from = self.shares_date_from.date().toPyDate()
self.date_to = self.shares_date_to.date().toPyDate()
def init_external_tab_ui(self):
"""External data."""
self.external_tab = QtGui.QWidget()
self.external_tab.setEnabled(False)
self.external_layout = QtGui.QVBoxLayout(self.external_tab)
self.import_data_name = QtGui.QLabel('Import External Data')
self.import_data_label = QtGui.QLabel('...')
self.import_data_btn = QtGui.QPushButton('Import')
self.import_data_btn.clicked.connect(self.open_file)
self.external_layout.addWidget(
self.import_data_name, 0, QtCore.Qt.AlignCenter
)
self.external_layout.addWidget(
self.import_data_label, 0, QtCore.Qt.AlignCenter
)
self.external_layout.addWidget(
self.import_data_btn, 0, QtCore.Qt.AlignCenter
)
self.select_source.addTab(self.external_tab, 'Custom data')
def init_shares_tab_ui(self):
"""Shares."""
self.shares_tab = QtGui.QWidget()
self.shares_layout = QtGui.QFormLayout(self.shares_tab)
today = datetime.today()
self.shares_date_from = QtGui.QDateEdit()
self.shares_date_from.setMinimumDate(QtCore.QDate(1900, 1, 1))
self.shares_date_from.setMaximumDate(QtCore.QDate(2030, 12, 31))
self.shares_date_from.setDate(QtCore.QDate(today.year, 1, 1))
self.shares_date_from.setDisplayFormat('dd.MM.yyyy')
self.shares_date_to = QtGui.QDateEdit()
self.shares_date_to.setMinimumDate(QtCore.QDate(1900, 1, 1))
self.shares_date_to.setMaximumDate(QtCore.QDate(2030, 12, 31))
self.shares_date_to.setDate(
QtCore.QDate(today.year, today.month, today.day)
)
self.shares_date_to.setDisplayFormat('dd.MM.yyyy')
self.shares_symbol_list = QtGui.QComboBox()
self.shares_symbol_list.setFocusPolicy(QtCore.Qt.StrongFocus)
self.shares_symbol_list.setMaxVisibleItems(20)
self.shares_symbol_list.setEditable(True)
self.shares_show_btn = QtGui.QPushButton('Load')
self.shares_show_btn.clicked.connect(self.update_data)
self.shares_layout.addRow('From', self.shares_date_from)
self.shares_layout.addRow('To', self.shares_date_to)
self.shares_layout.addRow('Symbol', self.shares_symbol_list)
self.shares_layout.addRow(None, self.shares_show_btn)
self.select_source.addTab(self.shares_tab, 'Shares/Futures/ETFs')
def on_symbols_loading(self):
self.shares_symbol_list.addItem('Loading...')
self.shares_symbol_list.setEnabled(False)
def on_symbols_loaded(self, symbols):
self.shares_symbol_list.clear()
self.shares_symbol_list.setEnabled(True)
# self.symbols = ['%s/%s' % (ticker, name) for ticker, name in symbols]
# self.shares_symbol_list.addItems(self.symbols)
model = QtGui.QStandardItemModel()
model.setHorizontalHeaderLabels(SYMBOL_COLUMNS)
for irow, (ticker, name) in enumerate(symbols):
model.setItem(irow, 0, QtGui.QStandardItem(ticker))
model.setItem(irow, 1, QtGui.QStandardItem(name))
table_view = QtGui.QTableView()
table_view.setModel(model)
table_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
table_view.verticalHeader().setVisible(False)
table_view.setAutoScroll(False)
table_view.setShowGrid(False)
table_view.resizeRowsToContents()
table_view.setColumnWidth(0, 60)
table_view.setColumnWidth(1, 240)
table_view.setMinimumWidth(300)
completer = QtGui.QCompleter(model)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
completer.setModel(model)
self.symbols = symbols
self.shares_symbol_list.setModel(model)
self.shares_symbol_list.setView(table_view)
self.shares_symbol_list.setCompleter(completer)
# set default symbol
self.shares_symbol_list.setCurrentIndex(
self.shares_symbol_list.findText(DEFAULT_TICKER)
)
def open_file(self):
filename = QtGui.QFileDialog.getOpenFileName(
parent=None,
caption='Open a source of data',
directory=QtCore.QDir.currentPath(),
filter='All (*);;Text (*.txt)',
)
self.import_data_label.setText('Loading %s' % filename)
with open(filename, 'r', encoding='utf-8') as f:
self.data = f.readlines()
def update_data(self, ticker=None):
ticker = ticker or self.shares_symbol_list.currentText()
self.symbol = Symbol(ticker=ticker, mode=Symbol.SHARES)
self.date_from = self.shares_date_from.date().toPyDate()
self.date_to = self.shares_date_to.date().toPyDate()
get_quotes(
symbol=self.symbol.ticker,
date_from=self.date_from,
date_to=self.date_to,
)
self.data_updated.emit(self.symbol)
class StrategyBoxWidget(QtGui.QGroupBox):
run_backtest = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setTitle('Strategy')
self.setAlignment(QtCore.Qt.AlignCenter)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.list = QtGui.QComboBox()
self.add_btn = QtGui.QPushButton('+')
self.add_btn.clicked.connect(self.add_strategies)
self.start_btn = QtGui.QPushButton('Start Backtest')
self.start_btn.clicked.connect(self.load_strategy)
self.layout.addWidget(self.list, stretch=2)
self.layout.addWidget(self.add_btn, stretch=0)
self.layout.addWidget(self.start_btn, stretch=0)
self.load_strategies_from_settings()
def reload_strategies(self):
"""Reload user's file to get actual version of the strategies."""
self.strategies = strategies_from_file(self.strategies_path)
def reload_list(self):
self.list.clear()
self.list.addItems([s.get_name() for s in self.strategies])
def load_strategies_from_settings(self):
filename = Settings.value('strategies/path', None)
if not filename or not os.path.exists(filename):
return
self.strategies_path = filename
self.reload_strategies()
self.reload_list()
def save_strategies_to_settings(self):
Settings.setValue('strategies/path', self.strategies_path)
def add_strategies(self):
filename, _filter = QtGui.QFileDialog.getOpenFileName(
self,
caption='Open Strategy.',
directory=QtCore.QDir.currentPath(),
filter='Python modules (*.py)',
)
if not filename:
return
self.strategies_path = filename
self.save_strategies_to_settings()
self.reload_strategies()
self.reload_list()
def load_strategy(self):
self.reload_strategies()
self.run_backtest.emit(self.strategies[self.list.currentIndex()])
class QuotesTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.toolbar_layout = QtGui.QHBoxLayout()
self.toolbar_layout.setContentsMargins(10, 10, 15, 0)
self.chart_layout = QtGui.QHBoxLayout()
self.init_timeframes_ui()
self.init_strategy_ui()
self.layout.addLayout(self.toolbar_layout)
self.layout.addLayout(self.chart_layout)
def init_timeframes_ui(self):
self.tf_layout = QtGui.QHBoxLayout()
self.tf_layout.setSpacing(0)
self.tf_layout.setContentsMargins(0, 12, 0, 0)
time_frames = ('1M', '5M', '15M', '30M', '1H', '1D', '1W', 'MN')
btn_prefix = 'TF'
for tf in time_frames:
btn_name = ''.join([btn_prefix, tf])
btn = QtGui.QPushButton(tf)
# TODO:
btn.setEnabled(False)
setattr(self, btn_name, btn)
self.tf_layout.addWidget(btn)
self.toolbar_layout.addLayout(self.tf_layout)
def init_strategy_ui(self):
self.strategy_box = StrategyBoxWidget(self)
self.toolbar_layout.addWidget(self.strategy_box)
def update_chart(self, symbol):
if not self.chart_layout.isEmpty():
self.chart_layout.removeWidget(self.chart)
self.chart = QuotesChart()
self.chart.plot(symbol)
self.chart_layout.addWidget(self.chart)
def add_signals(self):
self.chart.add_signals()
class EquityTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def update_chart(self):
if not self.layout.isEmpty():
self.layout.removeWidget(self.chart)
self.chart = EquityChart()
self.chart.plot()
self.layout.addWidget(self.chart)
class ResultsTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def update_table(self):
if not self.layout.isEmpty():
self.layout.removeWidget(self.table)
self.table = ResultsTable()
self.table.plot()
self.layout.addWidget(self.table)
class TradesTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def update_table(self):
if not self.layout.isEmpty():
self.layout.removeWidget(self.table)
self.table = TradesTable()
self.table.plot()
self.layout.addWidget(self.table)
class OptimizationTabWidget(QtGui.QWidget):
optimization_done = QtCore.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.table_layout = QtGui.QHBoxLayout()
self.top_layout = QtGui.QHBoxLayout()
self.top_layout.setContentsMargins(0, 10, 0, 0)
self.start_optimization_btn = QtGui.QPushButton('Start')
self.start_optimization_btn.clicked.connect(self.start_optimization)
self.top_layout.addWidget(
self.start_optimization_btn, alignment=QtCore.Qt.AlignRight
)
self.layout.addLayout(self.top_layout)
self.layout.addLayout(self.table_layout)
def update_table(self, strategy):
if not self.table_layout.isEmpty():
# close() to avoid an UI issue with duplication of the table
self.table.close()
self.table_layout.removeWidget(self.table)
self.table = OptimizationTable()
self.table.plot(strategy)
self.table_layout.addWidget(self.table)
def start_optimization(self, *args, **kwargs):
logger.debug('Start optimization')
self.table.optimize()
self.optimization_done.emit()
logger.debug('Optimization is done')
class OptimizatimizedResultsTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.table = OptimizatimizedResultsTable()
self.table.plot()
self.layout.addWidget(self.table)
class MainWidget(QtGui.QTabWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.setDocumentMode(True)
self.data_tab = DataTabWidget(self)
self.data_tab.data_updated.connect(self._update_quotes_chart)
self.addTab(self.data_tab, 'Data')
def _add_quotes_tab(self):
if self.count() >= 2: # quotes tab is already exists
return
self.quotes_tab = QuotesTabWidget(self)
self.quotes_tab.strategy_box.run_backtest.connect(self._run_backtest)
self.addTab(self.quotes_tab, 'Quotes')
def _add_result_tabs(self):
if self.count() >= 3: # tabs are already exist
return
self.equity_tab = EquityTabWidget(self)
self.results_tab = ResultsTabWidget(self)
self.trades_tab = TradesTabWidget(self)
self.optimization_tab = OptimizationTabWidget(self)
self.optimization_tab.optimization_done.connect(
self._add_optimized_results
) # noqa
self.addTab(self.equity_tab, 'Equity')
self.addTab(self.results_tab, 'Results')
self.addTab(self.trades_tab, 'Trades')
self.addTab(self.optimization_tab, 'Optimization')
def _update_quotes_chart(self, symbol):
self._add_quotes_tab()
self.symbol = symbol
self.quotes_tab.update_chart(self.symbol)
self.setCurrentIndex(1)
def _run_backtest(self, strategy):
logger.debug('Run backtest')
Portfolio.clear()
stg = strategy(symbols=[self.symbol])
stg.run()
Portfolio.summarize()
self.quotes_tab.add_signals()
self._add_result_tabs()
self.equity_tab.update_chart()
self.results_tab.update_table()
self.trades_tab.update_table()
self.optimization_tab.update_table(strategy=stg)
logger.debug(
'Count positions in the portfolio: %d', Portfolio.position_count()
)
def _add_optimized_results(self):
self.addTab(OptimizatimizedResultsTabWidget(self), 'Optimized Results')
self.setCurrentIndex(self.count() - 1)
def plot_test_data(self):
logger.debug('Plot test data')
self.data_tab.update_data(ticker=DEFAULT_TICKER)
self.quotes_tab.strategy_box.load_strategy()
| [
"logging.getLogger",
"PyQt5.QtCore.QDir.currentPath",
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtGui.QPushButton",
"PyQt5.QtGui.QStandardItemModel",
"PyQt5.QtGui.QStandardItem",
"PyQt5.QtGui.QHBoxLayout",
"PyQt5.QtGui.QTableView",
"PyQt5.QtGui.QLabel",
"PyQt5.QtGui.QFormLayout",
"PyQt5.QtGui.QVBoxLayou... | [((411, 438), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (428, 438), False, 'import logging\n'), ((576, 601), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object'], {}), '(object)\n', (593, 601), False, 'from PyQt5 import QtCore, QtGui\n'), ((777, 802), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object'], {}), '(object)\n', (794, 802), False, 'from PyQt5 import QtCore, QtGui\n'), ((6470, 6495), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['object'], {}), '(object)\n', (6487, 6495), False, 'from PyQt5 import QtCore, QtGui\n'), ((11413, 11432), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ([], {}), '()\n', (11430, 11432), False, 'from PyQt5 import QtCore, QtGui\n'), ((903, 925), 'PyQt5.QtGui.QTabWidget', 'QtGui.QTabWidget', (['self'], {}), '(self)\n', (919, 925), False, 'from PyQt5 import QtCore, QtGui\n'), ((1559, 1574), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (1572, 1574), False, 'from PyQt5 import QtCore, QtGui\n'), ((1650, 1686), 'PyQt5.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self.external_tab'], {}), '(self.external_tab)\n', (1667, 1686), False, 'from PyQt5 import QtCore, QtGui\n'), ((1720, 1756), 'PyQt5.QtGui.QLabel', 'QtGui.QLabel', (['"""Import External Data"""'], {}), "('Import External Data')\n", (1732, 1756), False, 'from PyQt5 import QtCore, QtGui\n'), ((1790, 1809), 'PyQt5.QtGui.QLabel', 'QtGui.QLabel', (['"""..."""'], {}), "('...')\n", (1802, 1809), False, 'from PyQt5 import QtCore, QtGui\n'), ((1841, 1868), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Import"""'], {}), "('Import')\n", (1858, 1868), False, 'from PyQt5 import QtCore, QtGui\n'), ((2413, 2428), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (2426, 2428), False, 'from PyQt5 import QtCore, QtGui\n'), ((2458, 2492), 'PyQt5.QtGui.QFormLayout', 'QtGui.QFormLayout', (['self.shares_tab'], {}), '(self.shares_tab)\n', (2475, 2492), False, 'from PyQt5 import QtCore, 
QtGui\n'), ((2509, 2525), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2523, 2525), False, 'from datetime import datetime\n'), ((2559, 2576), 'PyQt5.QtGui.QDateEdit', 'QtGui.QDateEdit', ([], {}), '()\n', (2574, 2576), False, 'from PyQt5 import QtCore, QtGui\n'), ((2883, 2900), 'PyQt5.QtGui.QDateEdit', 'QtGui.QDateEdit', ([], {}), '()\n', (2898, 2900), False, 'from PyQt5 import QtCore, QtGui\n'), ((3243, 3260), 'PyQt5.QtGui.QComboBox', 'QtGui.QComboBox', ([], {}), '()\n', (3258, 3260), False, 'from PyQt5 import QtCore, QtGui\n'), ((3468, 3493), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Load"""'], {}), "('Load')\n", (3485, 3493), False, 'from PyQt5 import QtCore, QtGui\n'), ((4314, 4340), 'PyQt5.QtGui.QStandardItemModel', 'QtGui.QStandardItemModel', ([], {}), '()\n', (4338, 4340), False, 'from PyQt5 import QtCore, QtGui\n'), ((4601, 4619), 'PyQt5.QtGui.QTableView', 'QtGui.QTableView', ([], {}), '()\n', (4617, 4619), False, 'from PyQt5 import QtCore, QtGui\n'), ((5049, 5072), 'PyQt5.QtGui.QCompleter', 'QtGui.QCompleter', (['model'], {}), '(model)\n', (5065, 5072), False, 'from PyQt5 import QtCore, QtGui\n'), ((6672, 6695), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self'], {}), '(self)\n', (6689, 6695), False, 'from PyQt5 import QtCore, QtGui\n'), ((6768, 6785), 'PyQt5.QtGui.QComboBox', 'QtGui.QComboBox', ([], {}), '()\n', (6783, 6785), False, 'from PyQt5 import QtCore, QtGui\n'), ((6810, 6832), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""+"""'], {}), "('+')\n", (6827, 6832), False, 'from PyQt5 import QtCore, QtGui\n'), ((6917, 6952), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Start Backtest"""'], {}), "('Start Backtest')\n", (6934, 6952), False, 'from PyQt5 import QtCore, QtGui\n'), ((8623, 8646), 'PyQt5.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self'], {}), '(self)\n', (8640, 8646), False, 'from PyQt5 import QtCore, QtGui\n'), ((8728, 8747), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), 
'()\n', (8745, 8747), False, 'from PyQt5 import QtCore, QtGui\n'), ((8838, 8857), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (8855, 8857), False, 'from PyQt5 import QtCore, QtGui\n'), ((9086, 9105), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (9103, 9105), False, 'from PyQt5 import QtCore, QtGui\n'), ((10194, 10217), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self'], {}), '(self)\n', (10211, 10217), False, 'from PyQt5 import QtCore, QtGui\n'), ((10621, 10644), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self'], {}), '(self)\n', (10638, 10644), False, 'from PyQt5 import QtCore, QtGui\n'), ((11048, 11071), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self'], {}), '(self)\n', (11065, 11071), False, 'from PyQt5 import QtCore, QtGui\n'), ((11526, 11549), 'PyQt5.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self'], {}), '(self)\n', (11543, 11549), False, 'from PyQt5 import QtCore, QtGui\n'), ((11629, 11648), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (11646, 11648), False, 'from PyQt5 import QtCore, QtGui\n'), ((11675, 11694), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (11692, 11694), False, 'from PyQt5 import QtCore, QtGui\n'), ((11790, 11816), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Start"""'], {}), "('Start')\n", (11807, 11816), False, 'from PyQt5 import QtCore, QtGui\n'), ((12829, 12852), 'PyQt5.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self'], {}), '(self)\n', (12846, 12852), False, 'from PyQt5 import QtCore, QtGui\n'), ((2622, 2646), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (2634, 2646), False, 'from PyQt5 import QtCore, QtGui\n'), ((2693, 2719), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(2030)', '(12)', '(31)'], {}), '(2030, 12, 31)\n', (2705, 2719), False, 'from PyQt5 import QtCore, QtGui\n'), ((2759, 2789), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['today.year', '(1)', '(1)'], {}), '(today.year, 
1, 1)\n', (2771, 2789), False, 'from PyQt5 import QtCore, QtGui\n'), ((2944, 2968), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (2956, 2968), False, 'from PyQt5 import QtCore, QtGui\n'), ((3013, 3039), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['(2030)', '(12)', '(31)'], {}), '(2030, 12, 31)\n', (3025, 3039), False, 'from PyQt5 import QtCore, QtGui\n'), ((3090, 3138), 'PyQt5.QtCore.QDate', 'QtCore.QDate', (['today.year', 'today.month', 'today.day'], {}), '(today.year, today.month, today.day)\n', (3102, 3138), False, 'from PyQt5 import QtCore, QtGui\n'), ((9395, 9416), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['tf'], {}), '(tf)\n', (9412, 9416), False, 'from PyQt5 import QtCore, QtGui\n'), ((4488, 4515), 'PyQt5.QtGui.QStandardItem', 'QtGui.QStandardItem', (['ticker'], {}), '(ticker)\n', (4507, 4515), False, 'from PyQt5 import QtCore, QtGui\n'), ((4552, 4577), 'PyQt5.QtGui.QStandardItem', 'QtGui.QStandardItem', (['name'], {}), '(name)\n', (4571, 4577), False, 'from PyQt5 import QtCore, QtGui\n'), ((5681, 5706), 'PyQt5.QtCore.QDir.currentPath', 'QtCore.QDir.currentPath', ([], {}), '()\n', (5704, 5706), False, 'from PyQt5 import QtCore, QtGui\n'), ((8086, 8111), 'PyQt5.QtCore.QDir.currentPath', 'QtCore.QDir.currentPath', ([], {}), '()\n', (8109, 8111), False, 'from PyQt5 import QtCore, QtGui\n')] |
import os
import shutil
import re
from collections import OrderedDict
import subprocess
import numpy as np
import atexit
class Result:
checkpoint = None
log = None
tarball = None
board = None
if __name__ == '__main__':
results = OrderedDict()
def load_files():
files = os.listdir()
for f in files:
res = re.match('ckpt-([0-9]{6}-[0-9]{6})', f)
if res is not None:
val = results.get(res.group(1), Result())
val.checkpoint = f
results[res.group(1)] = val
res = re.match('Result-([0-9]{6}-[0-9]{6})', f)
if res is not None:
val = results.get(res.group(1), Result())
val.log = f
results[res.group(1)] = val
res = re.match('result-([0-9]{6}-[0-9]{6}).tar.gz', f)
if res is not None:
val = results.get(res.group(1), Result())
val.tarball = f
results[res.group(1)] = val
def get_size(start, human = True):
ts = 0
unit = 0
if os.path.islink(start):
return ts, unit
if os.path.isdir(start):
for dirpath, dirnames, filenames in os.walk(start):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
ts += os.path.getsize(fp)
else:
ts = os.path.getsize(start)
if human:
while ts >= 1024:
ts /= 1024
unit += 1
return ts, unit
def result_list():
print('ID:\tTime\t\tC L T B')
print('Size:\tC\tL\tT')
units = ['B', 'KB', 'MB', 'GB', 'TB', '']
for i, (d, r) in enumerate(results.items()):
print(
'{}:\t{}\t{} {} {} {}'.format(
i,
d,
'C' if r.checkpoint is not None else '-',
'L' if r.log is not None else '-',
'T' if r.tarball is not None else '-',
'B' if r.board is not None else '-'
)
)
if r.checkpoint is not None:
cs, cu = get_size(r.checkpoint)
else:
cs, cu = 0, 0
if r.log is not None:
ls, lu = get_size(r.log)
else:
ls, lu = 0, 0
if r.tarball is not None:
ts, tu = get_size(r.tarball)
else:
ts, tu = 0, 0
print(
'\t{:.1f}{}\t{:.1f}{}\t{:.1f}{}'.format(
cs, units[cu],
ls, units[lu],
ts, units[tu]
)
)
def get_key_val(sid, keys, show=True):
if len(sid.split('-')) == 1:
id = int(sid)
if id >= len(results):
if show:
print('Unknown index', sid)
return None, None
key = keys[id]
else:
key = sid
try:
val = results[key]
except:
if show:
print('Unknown time', key)
return None, None
return key, val
print('Checking files')
load_files()
print('Results:')
result_list()
while True:
cmd = input('> ')
values = list(results.values())
keys = list(results.keys())
if cmd.strip() == '':
continue
if cmd == 'ls' or cmd == 'list':
result_list()
continue
if cmd == 'exit':
break
res = re.match('rm( checkpoint| log| tarball)*( [0-9]+| [0-9]{6}-[0-9]{6})+\s*$', cmd)
if res is not None:
actions = 0
dkeys = []
for m in re.finditer('checkpoint|log|tarball', cmd):
act = m.group(0)
if act == 'checkpoint':
actions |= 1
continue
if act == 'log':
actions |= 2
continue
if act == 'tarball':
actions |= 4
continue
for m in re.finditer(' [0-9]+| [0-9]{6}-[0-9]{6}', cmd):
sid = m.group(0)
key, val = get_key_val(sid.strip(), keys)
if key is None:
continue
dkeys.append(key)
if actions == 0:
actions = 7
dkeys = np.unique(dkeys)
if len(dkeys) == 0:
print('Has nothing to delete')
continue
print('Deleting the{}{}{} of the following results:'.format(
' checkpoint' if actions & 1 != 0 else '',
' log' if actions & 2 != 0 else '',
' tarball' if actions & 4 != 0 else ''
))
for key in dkeys:
print(key)
ck = input('[y/N] ')
if ck.upper() == 'Y':
for key in dkeys:
val = results[key]
if actions & 1 != 0 and val.checkpoint is not None:
print('Deleting', val.checkpoint)
shutil.rmtree(val.checkpoint)
val.checkpoint = None
if actions & 2 != 0 and val.log is not None:
if val.board is not None:
print('Closing the tensorboard process of result {}'.format(key))
val.board.terminate()
val.board.wait(10)
if val.board.poll() is None:
val.board.kill()
val.board = None
print('Deleting', val.log)
shutil.rmtree(val.log)
val.log = None
if actions & 4 != 0 and val.tarball is not None:
print('Deleting', val.tarball)
os.remove(val.tarball)
val.tarball = None
if val.checkpoint is not None\
or val.log is not None\
or val.tarball is not None:
results[key] = val
else:
results.pop(key)
load_files()
continue
res = re.match('board ([0-9]+|[0-9]{6}-[0-9]{6})\s*$', cmd)
if res is not None:
sid = res.group(1)
key, val = get_key_val(sid, keys)
if key is None:
continue
if val.board is not None:
print('board of {} is running'.format(key))
continue
if val.log is None:
print('log of {} does not exists'.format(key))
continue
subp = subprocess.Popen(
['tensorboard', '--logdir='+val.log, '--bind_all'],
shell = False,
stdout = subprocess.DEVNULL
)
val.board = subp
results[key] = val
continue
res = re.match('stop ([0-9]+|[0-9]{6}-[0-9]{6})\s*$', cmd)
if res is not None:
sid = res.group(1)
key, val = get_key_val(sid, keys)
if key is None:
continue
if val.board is None:
print('board of {} is not running'.format(key))
continue
val.board.terminate()
val.board.wait()
val.board = None
results[key] = val
continue
res = re.match('pack ([0-9]+|[0-9]{6}-[0-9]{6})\s*$', cmd)
if res is not None:
sid = res.group(1)
key, val = get_key_val(sid, keys)
if key is None:
continue
if val.tarball is not None:
print('tarball of {} has already existed'.format(key))
continue
subp = subprocess.Popen(
'tar czvf result-{}.tar.gz {} {}'.format(
key, val.checkpoint or '', val.log or ''
),
shell = True
)
subp.wait()
val.tarball = 'result-{}.tar.gz'.format(key)
results[key] = val
continue
res = re.match('unpack ([0-9]+|[0-9]{6}-[0-9]{6})\s*$', cmd)
if res is not None:
sid = res.group(1)
key, val = get_key_val(sid, keys)
if key is None:
continue
if val.tarball is None:
print('tarball of {} does not exist'.format(key))
continue
subp = subprocess.Popen(
'tar xzvf {}'.format(val.tarball),
shell = True
)
subp.wait()
load_files()
continue
if cmd != 'help':
print('Unknown command', cmd)
print('''Usage:
help: show this message
ls: list the status of results with format \'ID: time has_checkpoint has_log has_tarball if_tensorboard_running\'
rm [checkpoint] [log] [tarball] id/time[ id/time[ ...]]: remove the results listed (double check needed)
board id/time: execute tensorboard to visualize the result specified
stop id/time: stop tensorboard of that result
pack id/time: pack the result into tar ball
unpack id/time: unpack the tar ball of result
exit: exit'''
)
| [
"collections.OrderedDict",
"os.listdir",
"os.path.getsize",
"numpy.unique",
"subprocess.Popen",
"re.match",
"os.path.join",
"os.path.isdir",
"re.finditer",
"shutil.rmtree",
"os.path.islink",
"os.walk",
"os.remove"
] | [((251, 264), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (262, 264), False, 'from collections import OrderedDict\n'), ((304, 316), 'os.listdir', 'os.listdir', ([], {}), '()\n', (314, 316), False, 'import os\n'), ((1110, 1131), 'os.path.islink', 'os.path.islink', (['start'], {}), '(start)\n', (1124, 1131), False, 'import os\n'), ((1172, 1192), 'os.path.isdir', 'os.path.isdir', (['start'], {}), '(start)\n', (1185, 1192), False, 'import os\n'), ((3717, 3802), 're.match', 're.match', (['"""rm( checkpoint| log| tarball)*( [0-9]+| [0-9]{6}-[0-9]{6})+\\\\s*$"""', 'cmd'], {}), "('rm( checkpoint| log| tarball)*( [0-9]+| [0-9]{6}-[0-9]{6})+\\\\s*$',\n cmd)\n", (3725, 3802), False, 'import re\n'), ((6533, 6587), 're.match', 're.match', (['"""board ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$"""', 'cmd'], {}), "('board ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$', cmd)\n", (6541, 6587), False, 'import re\n'), ((7278, 7331), 're.match', 're.match', (['"""stop ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$"""', 'cmd'], {}), "('stop ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$', cmd)\n", (7286, 7331), False, 'import re\n'), ((7771, 7824), 're.match', 're.match', (['"""pack ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$"""', 'cmd'], {}), "('pack ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$', cmd)\n", (7779, 7824), False, 'import re\n'), ((8484, 8539), 're.match', 're.match', (['"""unpack ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$"""', 'cmd'], {}), "('unpack ([0-9]+|[0-9]{6}-[0-9]{6})\\\\s*$', cmd)\n", (8492, 8539), False, 'import re\n'), ((359, 398), 're.match', 're.match', (['"""ckpt-([0-9]{6}-[0-9]{6})"""', 'f'], {}), "('ckpt-([0-9]{6}-[0-9]{6})', f)\n", (367, 398), False, 'import re\n'), ((586, 627), 're.match', 're.match', (['"""Result-([0-9]{6}-[0-9]{6})"""', 'f'], {}), "('Result-([0-9]{6}-[0-9]{6})', f)\n", (594, 627), False, 'import re\n'), ((808, 856), 're.match', 're.match', (['"""result-([0-9]{6}-[0-9]{6}).tar.gz"""', 'f'], {}), "('result-([0-9]{6}-[0-9]{6}).tar.gz', f)\n", (816, 856), False, 'import re\n'), ((1242, 1256), 
'os.walk', 'os.walk', (['start'], {}), '(start)\n', (1249, 1256), False, 'import os\n'), ((1472, 1494), 'os.path.getsize', 'os.path.getsize', (['start'], {}), '(start)\n', (1487, 1494), False, 'import os\n'), ((3894, 3936), 're.finditer', 're.finditer', (['"""checkpoint|log|tarball"""', 'cmd'], {}), "('checkpoint|log|tarball', cmd)\n", (3905, 3936), False, 'import re\n'), ((4288, 4334), 're.finditer', 're.finditer', (['""" [0-9]+| [0-9]{6}-[0-9]{6}"""', 'cmd'], {}), "(' [0-9]+| [0-9]{6}-[0-9]{6}', cmd)\n", (4299, 4334), False, 'import re\n'), ((4599, 4615), 'numpy.unique', 'np.unique', (['dkeys'], {}), '(dkeys)\n', (4608, 4615), True, 'import numpy as np\n'), ((7007, 7121), 'subprocess.Popen', 'subprocess.Popen', (["['tensorboard', '--logdir=' + val.log, '--bind_all']"], {'shell': '(False)', 'stdout': 'subprocess.DEVNULL'}), "(['tensorboard', '--logdir=' + val.log, '--bind_all'],\n shell=False, stdout=subprocess.DEVNULL)\n", (7023, 7121), False, 'import subprocess\n'), ((1319, 1343), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (1331, 1343), False, 'import os\n'), ((1371, 1389), 'os.path.islink', 'os.path.islink', (['fp'], {}), '(fp)\n', (1385, 1389), False, 'import os\n'), ((1421, 1440), 'os.path.getsize', 'os.path.getsize', (['fp'], {}), '(fp)\n', (1436, 1440), False, 'import os\n'), ((5325, 5354), 'shutil.rmtree', 'shutil.rmtree', (['val.checkpoint'], {}), '(val.checkpoint)\n', (5338, 5354), False, 'import shutil\n'), ((5933, 5955), 'shutil.rmtree', 'shutil.rmtree', (['val.log'], {}), '(val.log)\n', (5946, 5955), False, 'import shutil\n'), ((6143, 6165), 'os.remove', 'os.remove', (['val.tarball'], {}), '(val.tarball)\n', (6152, 6165), False, 'import os\n')] |
import os
import time
import logging
from typing import List, Dict
from collections import deque
# from search import app
from elasticsearch import Elasticsearch
from elasticsearch.helpers import parallel_bulk
from datetime import datetime
# from config import ELASTICSEARCH_URL
import threading
from .utils import Utils
import traceback
ELASTICSEARCH_URL = os.environ.get("ELASTICSEARCH_URL", "http://localhost:9200/")
class ESIndexingUtils:
"""
Class to handle Elastic Search related indexing
and search utilities
"""
GLOBAL_DIMENSIONS_NAMES_INDEX_NAME = (
"cuesearch_global_dimensions_names_for_search_index"
)
GLOBAL_DIMENSIONS_INDEX_NAME = "global_dimensions_name_index_cuesearch"
GLOBAL_DIMENSIONS_INDEX_DATA = "cuesearch_global_dimensions_data_index"
AUTO_GLOBAL_DIMENSIONS_INDEX_DATA = "cuesearch_auto_global_dimensions_data_index"
AUTO_GLOBAL_DIMENSIONS_INDEX_DATA_SEARCH_SUGGESTION = (
"cuesearch_auto_global_dimensions_search_suggestion_data_index"
)
GLOBAL_DIMENSIONS_INDEX_SEARCH_SUGGESTION_DATA = (
"cuesearch_global_dimensions_search_suggestion_data_index"
)
DATASET_MEASURES_INDEX_NAME = "dataset_measures_index_cuesearch"
@staticmethod
def _getESClient() -> Elasticsearch:
"""
Method to get the ES Client
"""
esHost = ELASTICSEARCH_URL
esClient = Elasticsearch(hosts=[esHost], timeout=30)
return esClient
@staticmethod
def initializeIndex(indexName: str, indexDefinition: dict) -> str:
"""
Method to name the index in Elasticsearch
:indexName: the index name to be used for index creation
:indexDefinition: the index definition - dict.
"""
esClient = ESIndexingUtils._getESClient()
logging.info("intializing Index here ...")
currentIndexVersion = "_" + str(int(round(time.time() * 1000)))
aliasIndex = indexName + currentIndexVersion
logging.info("Creating index of: %s", aliasIndex)
esClient.indices.create(index=aliasIndex, body=indexDefinition)
return aliasIndex
@staticmethod
def ingestIndex(documentsToIndex: List[Dict], aliasIndex: str):
"""
Method to ingest data into the index
:param documentsToIndex The documents that need to be indexed.e.g,
List of Cards or List of Global Dimensions
:aliasIndex: the index name to be used for ingestion
"""
esClient = ESIndexingUtils._getESClient()
for documentToIndex in documentsToIndex:
documentToIndex["_index"] = aliasIndex
documentToIndex["_op_type"] = "index"
logging.debug("Parallel indexing process starting.")
deque(parallel_bulk(esClient, documentsToIndex), maxlen=0)
logging.info("Alias index created at: %s", aliasIndex)
@staticmethod
def deleteOldIndex(indexName: str, aliasIndex: str):
"""
Method to ingest data into the index
:param documentsToIndex The documents that need to be indexed.e.g,
List of Cards or List of Global Dimensions
:aliasIndex: the index name to be used for ingestion
"""
esClient = ESIndexingUtils._getESClient()
logging.info(
"Now point the alias index: { %s } to { %s }", aliasIndex, indexName
)
esClient.indices.put_alias(index=aliasIndex, name=indexName)
logging.info("Now delete the older indices. They are of no use now.")
# Now delete the older indices following a certain pattern.
# Those indices are old indices and are of no use.
allAliases = esClient.indices.get_alias("*")
for key, value in allAliases.items():
logging.debug("Checking for index: %s", key)
# delete only the indexes matching the given pattern,
# retain all the other indexes they may be coming from some other source
if indexName in key:
# do not delete the current index
if aliasIndex == key:
continue
logging.info("Deleting the index: %s", key)
esClient.indices.delete(index=key, ignore=[400, 404])
@staticmethod
def deleteAllIndex():
logging.info("Deleting all indexes")
esClient = ESIndexingUtils._getESClient()
allAliases = esClient.indices.get_alias("*")
for key, value in allAliases.items():
logging.info("Deleting the index: %s", key)
esClient.indices.delete(index=key, ignore=[400, 404])
logging.info("All indexes deleted !")
@staticmethod
def _createIndex(
documentsToIndex: List[Dict], indexName: str, indexDefinition: dict
):
"""
Method to create an index in Elasticsearch
:param documentsToIndex The documents that need to be indexed.e.g,
List of Cards or List of Global Dimensions
:indexName: the index name to be used for index creation
:indexDefinition: the index definition - dict.
"""
aliasIndex = ESIndexingUtils.initializeIndex(indexName, indexDefinition)
# ingest entries in the initialized index
ESIndexingUtils.ingestIndex(documentsToIndex, aliasIndex)
# at this stage index has been created at a new location
# now change the alias of the main Index to point to the new index
ESIndexingUtils.deleteOldIndex(indexName, aliasIndex)
@staticmethod
def runAllIndexDimension():
"""
Method to spawn a thread to index global dimension into elasticsearch existing indices
The child thread assumes an index existing with a predefined unaltered indexDefinition
"""
logging.info("Indexing starts on global dimension action")
cardIndexer1 = threading.Thread(
target=ESIndexingUtils.indexGlobalDimensionsDataForSearchSuggestion
)
cardIndexer1.start()
cardIndexer2 = threading.Thread(
target=ESIndexingUtils.indexGlobalDimensionsData
)
cardIndexer2.start()
cardIndexer3 = threading.Thread(
target=ESIndexingUtils.indexNonGlobalDimensionsDataForSearchSuggestion
)
cardIndexer3.start()
cardIndexer4 = threading.Thread(
target=ESIndexingUtils.indexNonGlobalDimensionsData()
)
cardIndexer4.start()
logging.info("Indexing completed !! ")
@staticmethod
def fetchGlobalDimensionsValueForIndexing(globalDimensionGroup):
"""
Method to fetch the global dimensions and the dimension values.
:return List of Documents to be indexed
"""
indexingDocuments = []
dimension = ""
logging.info("global dimension group in fetch %s", globalDimensionGroup)
globalDimensionName = globalDimensionGroup["name"]
logging.debug("Starting fetch for global dimension: %s", globalDimensionName)
globalDimensionId = globalDimensionGroup["id"]
dimensionObjs = globalDimensionGroup["values"] # dimensional values
logging.info(
"Merging dimensions Value percentile with mulitple values in list of dimensionValues"
)
for dmObj in dimensionObjs:
displayValue = ""
dimension = dmObj["dimension"]
dataset = dmObj["dataset"]
datasetId = dmObj["datasetId"]
res = Utils.getDimensionalValuesForDimension(datasetId, dimension)
dimensionValues = res.get("data", [])
for values in dimensionValues:
if values:
logging.info("Dimensional value is %s", values)
displayValue = values
elasticsearchUniqueId = (
str(globalDimensionId)
+ "_"
+ str(displayValue)
+ "_"
+ str(dataset)
)
document = {
"_id": elasticsearchUniqueId,
"globalDimensionValue": str(displayValue).lower(),
"globalDimensionDisplayValue": str(displayValue),
"globalDimensionName": str(globalDimensionName),
"globalDimensionId": globalDimensionId,
"dimension": dimension,
"dataset": dataset,
"datasetId": datasetId,
}
indexingDocuments.append(document)
logging.debug("Document to index: %s", document)
return indexingDocuments
@staticmethod
def indexGlobalDimensionsData(joblogger=None):
"""
Method to index global dimensions data
"""
logging.info(
"****************** Indexing Starts for Global Dimension values **************** "
)
response = Utils.getGlobalDimensionForIndex()
if response["success"]:
globalDimensions = response.get("data", [])
logging.debug("Global dimensions Fetched ")
indexDefinition = {
"settings": {
"analysis": {
"analyzer": {
"my_analyzer": {
"tokenizer": "my_tokenizer",
"filter": ["lowercase"],
}
},
"default_search": {"type": "my_analyzer"},
"tokenizer": {
"my_tokenizer": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 10,
"token_chars": ["letter", "digit"],
}
},
}
},
"mappings": {
"properties": {
"globalDimensionId": {"type": "integer"},
"globalDimensionDisplayValue": {"type": "keyword"},
"globalDimensionValue": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"globalDimensionName": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dimension": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dataset": {"type": "text"},
"datasetId": {"type": "integer"},
}
},
}
indexName = ESIndexingUtils.GLOBAL_DIMENSIONS_INDEX_DATA
aliasIndex = ESIndexingUtils.initializeIndex(indexName, indexDefinition)
logging.info("IndexName %s", indexName)
logging.info("aliasIndex %s", aliasIndex)
for globalDimensionGroup in globalDimensions:
logging.info("globaldimensionGroup %s", globalDimensionGroup)
# globalDimensionGroup is an array
try:
documentsToIndex = (
ESIndexingUtils.fetchGlobalDimensionsValueForIndexing(
globalDimensionGroup
)
)
ESIndexingUtils.ingestIndex(documentsToIndex, aliasIndex)
except (Exception) as error:
logging.error(str(error))
pass
ESIndexingUtils.deleteOldIndex(indexName, aliasIndex)
logging.info(
"****************** Indexing Completed for Global Dimension values **************** "
)
else:
logging.error("Error in fetching global dimensions.")
raise RuntimeError("Error in fetching global dimensions")
@staticmethod
def fetchGlobalDimensionsValueForSearchSuggestionIndexing(globalDimensionGroup):
"""
Method to fetch the global dimensions and the dimension values.
:return List of Documents to be indexed
"""
indexingDocuments = []
globalDimensionName = globalDimensionGroup["name"]
logging.debug("Starting fetch for global dimension: %s", globalDimensionName)
globalDimensionId = globalDimensionGroup["id"]
dimensionObjs = globalDimensionGroup["values"] # dimensional values
logging.info(
"Merging dimensions Value with mulitple values in list of dimensionValues"
)
for dmObj in dimensionObjs:
displayValue = ""
dimension = dmObj["dimension"]
dataset = dmObj["dataset"]
datasetId = dmObj["datasetId"]
res = Utils.getDimensionalValuesForDimension(datasetId, dimension)
dimensionValues = res.get("data", [])
if dimensionValues:
for values in dimensionValues:
if values:
logging.info("Dimensional values is %s", values)
displayValue = values
elasticsearchUniqueId = (
str(globalDimensionId) + "_" + str(displayValue)
)
document = {
"_id": elasticsearchUniqueId,
"globalDimensionValue": str(displayValue).lower(),
"globalDimensionDisplayValue": str(displayValue),
"globalDimensionName": str(globalDimensionName),
"globalDimensionId": globalDimensionId,
"dataset": dataset,
"datasetId": datasetId,
}
indexingDocuments.append(document)
logging.debug("Document to index: %s", document)
return indexingDocuments
# Below function is used for search suggestion / To avoid duplicates in search dropdown(Temparory)
def indexGlobalDimensionsDataForSearchSuggestion(joblogger=None):
"""
Indexing is being done for dropdown suggestion
"""
logging.info(
"*************************** Indexing starts of Global Dimension Values for Search Suggestion **************************"
)
response = Utils.getGlobalDimensionForIndex()
if response["success"]:
globalDimensions = response.get("data", [])
logging.debug("Global dimensions: %s", globalDimensions)
indexDefinition = {
"settings": {
"analysis": {
"analyzer": {
"my_analyzer": {
"tokenizer": "my_tokenizer",
"filter": ["lowercase"],
}
},
"default_search": {"type": "my_analyzer"},
"tokenizer": {
"my_tokenizer": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 10,
"token_chars": ["letter", "digit"],
}
},
}
},
"mappings": {
"properties": {
"globalDimensionId": {"type": "integer"},
"globalDimensionDisplayValue": {"type": "text"},
"globalDimensionValue": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"globalDimensionName": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dataset": {"type": "text"},
"datasetId": {"type": "integer"},
}
},
}
indexName = ESIndexingUtils.GLOBAL_DIMENSIONS_INDEX_SEARCH_SUGGESTION_DATA
aliasIndex = ESIndexingUtils.initializeIndex(indexName, indexDefinition)
logging.info("IndexName %s", indexName)
logging.info("aliasIndex %s", aliasIndex)
for globalDimensionGroup in globalDimensions:
# globalDimensionGroup is an array
logging.info("globaldimensionGroup %s", globalDimensionGroup)
try:
documentsToIndex = ESIndexingUtils.fetchGlobalDimensionsValueForSearchSuggestionIndexing(
globalDimensionGroup
)
ESIndexingUtils.ingestIndex(documentsToIndex, aliasIndex)
except (Exception) as error:
logging.error(str(error))
pass
ESIndexingUtils.deleteOldIndex(indexName, aliasIndex)
logging.info(
"*************************** Indexing Completed of Global Dimension Values for Search Suggestion **************************"
)
else:
logging.error("Error in fetching global dimensions.")
raise RuntimeError("Error in fetching global dimensions")
@staticmethod
def indexNonGlobalDimensionsDataForSearchSuggestion(joblogger=None):
"""
Method to index global dimensions data
"""
from cueSearch.services import GlobalDimensionServices
logging.info(
"*************************** Indexing Starts of Non Global Dimension Values for Search Suggestion **************************"
)
response = GlobalDimensionServices.nonGlobalDimensionForIndexing()
if response["success"]:
datsetDimensions = response.get("data", [])
logging.debug("Dataset dimensions: %s", datsetDimensions)
indexDefinition = {
"settings": {
"analysis": {
"analyzer": {
"my_analyzer": {
"tokenizer": "my_tokenizer",
"filter": ["lowercase"],
}
},
"default_search": {"type": "my_analyzer"},
"tokenizer": {
"my_tokenizer": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 10,
"token_chars": ["letter", "digit"],
}
},
}
},
"mappings": {
"properties": {
"globalDimensionId": {"type": "text"},
"globalDimensionDisplayValue": {"type": "text"},
"globalDimensionValue": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"globalDimensionName": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dimension": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dataset": {"type": "text"},
"datasetId": {"type": "integer"},
}
},
}
indexName = (
ESIndexingUtils.AUTO_GLOBAL_DIMENSIONS_INDEX_DATA_SEARCH_SUGGESTION
)
aliasIndex = ESIndexingUtils.initializeIndex(indexName, indexDefinition)
logging.info("IndexName %s", indexName)
logging.info("aliasIndex %s", aliasIndex)
# datsetDimensions is an array
try:
documentsToIndex = (
ESIndexingUtils.fetchNonGlobalDimensionsValueForIndexing(
datsetDimensions
)
)
ESIndexingUtils.ingestIndex(documentsToIndex, aliasIndex)
except (Exception) as error:
logging.error(str(error))
pass
ESIndexingUtils.deleteOldIndex(indexName, aliasIndex)
logging.info(
"*************************** Indexing Completed of Non Dimensional Values for Search Suggestion **************************"
)
else:
logging.error("Error in fetching global dimensions.")
raise RuntimeError("Error in fetching global dimensions")
@staticmethod
def fetchNonGlobalDimensionsValueForIndexing(datasetDimensions: list):
"""
Method to fetch the global dimensions and the dimension values.
:return List of Documents to be indexed
"""
indexingDocuments = []
dimension = ""
globalDimensionName = ""
globalDimensionId = ""
dimensionObjs = datasetDimensions
logging.info(
"Merging dimensions Value percentile with mulitple values in list of dimensionValues"
)
for dmObj in dimensionObjs:
displayValue = ""
dimension = dmObj["dimension"]
dataset = dmObj["dataset"]
datasetId = dmObj["datasetId"]
res = Utils.getDimensionalValuesForDimension(datasetId, dimension)
dimensionValues = res.get("data", [])
for values in dimensionValues:
if values:
logging.info(
" Non global dimensional values %s",
values,
)
displayValue = values
globalDimensionId = (
str(dimension) + "_" + str(displayValue) + "_" + str(datasetId)
)
globalDimensionName = str(dataset) + "_" + str(dimension)
elasticsearchUniqueId = (
str(globalDimensionId)
+ "_"
+ str(displayValue)
+ "_"
+ str(dataset)
)
document = {
"_id": elasticsearchUniqueId,
"globalDimensionValue": str(displayValue).lower(),
"globalDimensionDisplayValue": str(displayValue),
"globalDimensionName": str(globalDimensionName),
"globalDimensionId": globalDimensionId,
"dimension": dimension,
"dataset": dataset,
"datasetId": datasetId,
}
indexingDocuments.append(document)
logging.info(
"Indexing Documents length of non global dimension %s",
len(indexingDocuments),
)
return indexingDocuments
@staticmethod
def indexNonGlobalDimensionsData(joblogger=None):
"""
Method to index Non global dimensions data
"""
from cueSearch.services import GlobalDimensionServices
logging.info(
"*************************** Indexing Starts of Non Global Dimension Data **************************"
)
response = GlobalDimensionServices.nonGlobalDimensionForIndexing()
if response["success"]:
datsetDimensions = response.get("data", [])
logging.debug("Dataset dimensions: %s", datsetDimensions)
indexDefinition = {
"settings": {
"analysis": {
"analyzer": {
"my_analyzer": {
"tokenizer": "my_tokenizer",
"filter": ["lowercase"],
}
},
"default_search": {"type": "my_analyzer"},
"tokenizer": {
"my_tokenizer": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 10,
"token_chars": ["letter", "digit"],
}
},
}
},
"mappings": {
"properties": {
"globalDimensionId": {"type": "text"},
"globalDimensionDisplayValue": {"type": "keyword"},
"globalDimensionValue": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"globalDimensionName": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dimension": {
"type": "text",
"search_analyzer": "my_analyzer",
"analyzer": "my_analyzer",
"fields": {
"ngram": {"type": "text", "analyzer": "my_analyzer"}
},
},
"dataset": {"type": "text"},
"datasetId": {"type": "integer"},
}
},
}
indexName = ESIndexingUtils.AUTO_GLOBAL_DIMENSIONS_INDEX_DATA
aliasIndex = ESIndexingUtils.initializeIndex(indexName, indexDefinition)
logging.info("IndexName %s", indexName)
logging.info("aliasIndex %s", aliasIndex)
# datsetDimensions is an array
try:
documentsToIndex = (
ESIndexingUtils.fetchNonGlobalDimensionsValueForIndexing(
datsetDimensions
)
)
ESIndexingUtils.ingestIndex(documentsToIndex, aliasIndex)
except (Exception) as error:
logging.error(str(error))
pass
ESIndexingUtils.deleteOldIndex(indexName, aliasIndex)
logging.info(
"*************************** Indexing Completed of Non Dimensional Data **************************"
)
else:
logging.error("Error in fetching global dimensions.")
raise RuntimeError("Error in fetching global dimensions")
| [
"logging.debug",
"elasticsearch.Elasticsearch",
"elasticsearch.helpers.parallel_bulk",
"os.environ.get",
"cueSearch.services.GlobalDimensionServices.nonGlobalDimensionForIndexing",
"time.time",
"threading.Thread",
"logging.info",
"logging.error"
] | [((362, 423), 'os.environ.get', 'os.environ.get', (['"""ELASTICSEARCH_URL"""', '"""http://localhost:9200/"""'], {}), "('ELASTICSEARCH_URL', 'http://localhost:9200/')\n", (376, 423), False, 'import os\n'), ((1400, 1441), 'elasticsearch.Elasticsearch', 'Elasticsearch', ([], {'hosts': '[esHost]', 'timeout': '(30)'}), '(hosts=[esHost], timeout=30)\n', (1413, 1441), False, 'from elasticsearch import Elasticsearch\n'), ((1808, 1850), 'logging.info', 'logging.info', (['"""intializing Index here ..."""'], {}), "('intializing Index here ...')\n", (1820, 1850), False, 'import logging\n'), ((1985, 2034), 'logging.info', 'logging.info', (['"""Creating index of: %s"""', 'aliasIndex'], {}), "('Creating index of: %s', aliasIndex)\n", (1997, 2034), False, 'import logging\n'), ((2686, 2738), 'logging.debug', 'logging.debug', (['"""Parallel indexing process starting."""'], {}), "('Parallel indexing process starting.')\n", (2699, 2738), False, 'import logging\n'), ((2816, 2870), 'logging.info', 'logging.info', (['"""Alias index created at: %s"""', 'aliasIndex'], {}), "('Alias index created at: %s', aliasIndex)\n", (2828, 2870), False, 'import logging\n'), ((3262, 3349), 'logging.info', 'logging.info', (['"""Now point the alias index: { %s } to { %s }"""', 'aliasIndex', 'indexName'], {}), "('Now point the alias index: { %s } to { %s }', aliasIndex,\n indexName)\n", (3274, 3349), False, 'import logging\n'), ((3446, 3515), 'logging.info', 'logging.info', (['"""Now delete the older indices. They are of no use now."""'], {}), "('Now delete the older indices. 
They are of no use now.')\n", (3458, 3515), False, 'import logging\n'), ((4285, 4321), 'logging.info', 'logging.info', (['"""Deleting all indexes"""'], {}), "('Deleting all indexes')\n", (4297, 4321), False, 'import logging\n'), ((4603, 4640), 'logging.info', 'logging.info', (['"""All indexes deleted !"""'], {}), "('All indexes deleted !')\n", (4615, 4640), False, 'import logging\n'), ((5763, 5821), 'logging.info', 'logging.info', (['"""Indexing starts on global dimension action"""'], {}), "('Indexing starts on global dimension action')\n", (5775, 5821), False, 'import logging\n'), ((5845, 5935), 'threading.Thread', 'threading.Thread', ([], {'target': 'ESIndexingUtils.indexGlobalDimensionsDataForSearchSuggestion'}), '(target=ESIndexingUtils.\n indexGlobalDimensionsDataForSearchSuggestion)\n', (5861, 5935), False, 'import threading\n'), ((6005, 6071), 'threading.Thread', 'threading.Thread', ([], {'target': 'ESIndexingUtils.indexGlobalDimensionsData'}), '(target=ESIndexingUtils.indexGlobalDimensionsData)\n', (6021, 6071), False, 'import threading\n'), ((6146, 6239), 'threading.Thread', 'threading.Thread', ([], {'target': 'ESIndexingUtils.indexNonGlobalDimensionsDataForSearchSuggestion'}), '(target=ESIndexingUtils.\n indexNonGlobalDimensionsDataForSearchSuggestion)\n', (6162, 6239), False, 'import threading\n'), ((6440, 6478), 'logging.info', 'logging.info', (['"""Indexing completed !! """'], {}), "('Indexing completed !! 
')\n", (6452, 6478), False, 'import logging\n'), ((6773, 6845), 'logging.info', 'logging.info', (['"""global dimension group in fetch %s"""', 'globalDimensionGroup'], {}), "('global dimension group in fetch %s', globalDimensionGroup)\n", (6785, 6845), False, 'import logging\n'), ((6913, 6990), 'logging.debug', 'logging.debug', (['"""Starting fetch for global dimension: %s"""', 'globalDimensionName'], {}), "('Starting fetch for global dimension: %s', globalDimensionName)\n", (6926, 6990), False, 'import logging\n'), ((7131, 7240), 'logging.info', 'logging.info', (['"""Merging dimensions Value percentile with mulitple values in list of dimensionValues"""'], {}), "(\n 'Merging dimensions Value percentile with mulitple values in list of dimensionValues'\n )\n", (7143, 7240), False, 'import logging\n'), ((8854, 8960), 'logging.info', 'logging.info', (['"""****************** Indexing Starts for Global Dimension values **************** """'], {}), "(\n '****************** Indexing Starts for Global Dimension values **************** '\n )\n", (8866, 8960), False, 'import logging\n'), ((13126, 13203), 'logging.debug', 'logging.debug', (['"""Starting fetch for global dimension: %s"""', 'globalDimensionName'], {}), "('Starting fetch for global dimension: %s', globalDimensionName)\n", (13139, 13203), False, 'import logging\n'), ((13344, 13437), 'logging.info', 'logging.info', (['"""Merging dimensions Value with mulitple values in list of dimensionValues"""'], {}), "(\n 'Merging dimensions Value with mulitple values in list of dimensionValues')\n", (13356, 13437), False, 'import logging\n'), ((15110, 15255), 'logging.info', 'logging.info', (['"""*************************** Indexing starts of Global Dimension Values for Search Suggestion **************************"""'], {}), "(\n '*************************** Indexing starts of Global Dimension Values for Search Suggestion **************************'\n )\n", (15122, 15255), False, 'import logging\n'), ((18955, 19104), 
'logging.info', 'logging.info', (['"""*************************** Indexing Starts of Non Global Dimension Values for Search Suggestion **************************"""'], {}), "(\n '*************************** Indexing Starts of Non Global Dimension Values for Search Suggestion **************************'\n )\n", (18967, 19104), False, 'import logging\n'), ((19137, 19192), 'cueSearch.services.GlobalDimensionServices.nonGlobalDimensionForIndexing', 'GlobalDimensionServices.nonGlobalDimensionForIndexing', ([], {}), '()\n', (19190, 19192), False, 'from cueSearch.services import GlobalDimensionServices\n'), ((23268, 23377), 'logging.info', 'logging.info', (['"""Merging dimensions Value percentile with mulitple values in list of dimensionValues"""'], {}), "(\n 'Merging dimensions Value percentile with mulitple values in list of dimensionValues'\n )\n", (23280, 23377), False, 'import logging\n'), ((25439, 25564), 'logging.info', 'logging.info', (['"""*************************** Indexing Starts of Non Global Dimension Data **************************"""'], {}), "(\n '*************************** Indexing Starts of Non Global Dimension Data **************************'\n )\n", (25451, 25564), False, 'import logging\n'), ((25597, 25652), 'cueSearch.services.GlobalDimensionServices.nonGlobalDimensionForIndexing', 'GlobalDimensionServices.nonGlobalDimensionForIndexing', ([], {}), '()\n', (25650, 25652), False, 'from cueSearch.services import GlobalDimensionServices\n'), ((2754, 2795), 'elasticsearch.helpers.parallel_bulk', 'parallel_bulk', (['esClient', 'documentsToIndex'], {}), '(esClient, documentsToIndex)\n', (2767, 2795), False, 'from elasticsearch.helpers import parallel_bulk\n'), ((3755, 3799), 'logging.debug', 'logging.debug', (['"""Checking for index: %s"""', 'key'], {}), "('Checking for index: %s', key)\n", (3768, 3799), False, 'import logging\n'), ((4485, 4528), 'logging.info', 'logging.info', (['"""Deleting the index: %s"""', 'key'], {}), "('Deleting the index: %s', 
key)\n", (4497, 4528), False, 'import logging\n'), ((9127, 9170), 'logging.debug', 'logging.debug', (['"""Global dimensions Fetched """'], {}), "('Global dimensions Fetched ')\n", (9140, 9170), False, 'import logging\n'), ((11705, 11744), 'logging.info', 'logging.info', (['"""IndexName %s"""', 'indexName'], {}), "('IndexName %s', indexName)\n", (11717, 11744), False, 'import logging\n'), ((11757, 11798), 'logging.info', 'logging.info', (['"""aliasIndex %s"""', 'aliasIndex'], {}), "('aliasIndex %s', aliasIndex)\n", (11769, 11798), False, 'import logging\n'), ((12499, 12608), 'logging.info', 'logging.info', (['"""****************** Indexing Completed for Global Dimension values **************** """'], {}), "(\n '****************** Indexing Completed for Global Dimension values **************** '\n )\n", (12511, 12608), False, 'import logging\n'), ((12656, 12709), 'logging.error', 'logging.error', (['"""Error in fetching global dimensions."""'], {}), "('Error in fetching global dimensions.')\n", (12669, 12709), False, 'import logging\n'), ((15422, 15478), 'logging.debug', 'logging.debug', (['"""Global dimensions: %s"""', 'globalDimensions'], {}), "('Global dimensions: %s', globalDimensions)\n", (15435, 15478), False, 'import logging\n'), ((17645, 17684), 'logging.info', 'logging.info', (['"""IndexName %s"""', 'indexName'], {}), "('IndexName %s', indexName)\n", (17657, 17684), False, 'import logging\n'), ((17697, 17738), 'logging.info', 'logging.info', (['"""aliasIndex %s"""', 'aliasIndex'], {}), "('aliasIndex %s', aliasIndex)\n", (17709, 17738), False, 'import logging\n'), ((18400, 18548), 'logging.info', 'logging.info', (['"""*************************** Indexing Completed of Global Dimension Values for Search Suggestion **************************"""'], {}), "(\n '*************************** Indexing Completed of Global Dimension Values for Search Suggestion **************************'\n )\n", (18412, 18548), False, 'import logging\n'), ((18596, 18649), 
'logging.error', 'logging.error', (['"""Error in fetching global dimensions."""'], {}), "('Error in fetching global dimensions.')\n", (18609, 18649), False, 'import logging\n'), ((19293, 19350), 'logging.debug', 'logging.debug', (['"""Dataset dimensions: %s"""', 'datsetDimensions'], {}), "('Dataset dimensions: %s', datsetDimensions)\n", (19306, 19350), False, 'import logging\n'), ((21934, 21973), 'logging.info', 'logging.info', (['"""IndexName %s"""', 'indexName'], {}), "('IndexName %s', indexName)\n", (21946, 21973), False, 'import logging\n'), ((21986, 22027), 'logging.info', 'logging.info', (['"""aliasIndex %s"""', 'aliasIndex'], {}), "('aliasIndex %s', aliasIndex)\n", (21998, 22027), False, 'import logging\n'), ((22543, 22690), 'logging.info', 'logging.info', (['"""*************************** Indexing Completed of Non Dimensional Values for Search Suggestion **************************"""'], {}), "(\n '*************************** Indexing Completed of Non Dimensional Values for Search Suggestion **************************'\n )\n", (22555, 22690), False, 'import logging\n'), ((22738, 22791), 'logging.error', 'logging.error', (['"""Error in fetching global dimensions."""'], {}), "('Error in fetching global dimensions.')\n", (22751, 22791), False, 'import logging\n'), ((25753, 25810), 'logging.debug', 'logging.debug', (['"""Dataset dimensions: %s"""', 'datsetDimensions'], {}), "('Dataset dimensions: %s', datsetDimensions)\n", (25766, 25810), False, 'import logging\n'), ((28347, 28386), 'logging.info', 'logging.info', (['"""IndexName %s"""', 'indexName'], {}), "('IndexName %s', indexName)\n", (28359, 28386), False, 'import logging\n'), ((28399, 28440), 'logging.info', 'logging.info', (['"""aliasIndex %s"""', 'aliasIndex'], {}), "('aliasIndex %s', aliasIndex)\n", (28411, 28440), False, 'import logging\n'), ((28956, 29079), 'logging.info', 'logging.info', (['"""*************************** Indexing Completed of Non Dimensional Data **************************"""'], {}), 
"(\n '*************************** Indexing Completed of Non Dimensional Data **************************'\n )\n", (28968, 29079), False, 'import logging\n'), ((29127, 29180), 'logging.error', 'logging.error', (['"""Error in fetching global dimensions."""'], {}), "('Error in fetching global dimensions.')\n", (29140, 29180), False, 'import logging\n'), ((4118, 4161), 'logging.info', 'logging.info', (['"""Deleting the index: %s"""', 'key'], {}), "('Deleting the index: %s', key)\n", (4130, 4161), False, 'import logging\n'), ((11873, 11934), 'logging.info', 'logging.info', (['"""globaldimensionGroup %s"""', 'globalDimensionGroup'], {}), "('globaldimensionGroup %s', globalDimensionGroup)\n", (11885, 11934), False, 'import logging\n'), ((17864, 17925), 'logging.info', 'logging.info', (['"""globaldimensionGroup %s"""', 'globalDimensionGroup'], {}), "('globaldimensionGroup %s', globalDimensionGroup)\n", (17876, 17925), False, 'import logging\n'), ((7663, 7710), 'logging.info', 'logging.info', (['"""Dimensional value is %s"""', 'values'], {}), "('Dimensional value is %s', values)\n", (7675, 7710), False, 'import logging\n'), ((8622, 8670), 'logging.debug', 'logging.debug', (['"""Document to index: %s"""', 'document'], {}), "('Document to index: %s', document)\n", (8635, 8670), False, 'import logging\n'), ((23800, 23857), 'logging.info', 'logging.info', (['""" Non global dimensional values %s"""', 'values'], {}), "(' Non global dimensional values %s', values)\n", (23812, 23857), False, 'import logging\n'), ((13909, 13957), 'logging.info', 'logging.info', (['"""Dimensional values is %s"""', 'values'], {}), "('Dimensional values is %s', values)\n", (13921, 13957), False, 'import logging\n'), ((14765, 14813), 'logging.debug', 'logging.debug', (['"""Document to index: %s"""', 'document'], {}), "('Document to index: %s', document)\n", (14778, 14813), False, 'import logging\n'), ((1901, 1912), 'time.time', 'time.time', ([], {}), '()\n', (1910, 1912), False, 'import time\n')] |
"""
A simple example of NATDiscovery between ByteBlower ports.
To discover the public IP address we will send a single packet
upstream, capture this packet at the WAN side and finally
pick it apart.
This example demonstrates:
* How to transmit a single custom packet.
* How to capture this packet and dissect it with SCAPY.
In this script we assume that ByteBlower ports are configured through DHCP.
To keep things easy we'll also assume that no one else is using
these ByteBlower interfaces.
"""
import byteblowerll.byteblower as byteblower
from scapy.all import *
import time
# Minimal config parameters.
# Adapt to your setup when necessary.
# Address of the ByteBlower server that owns both test interfaces.
SERVER_ADDRESS = 'byteblower-tutorial-3100.lab.byteblower.excentis.com'
# UDP ports used for the probe packet (source on the LAN side,
# destination on the WAN side).
UDP_SRC_PORT = 9000
UDP_DEST_PORT = 1000
# Public (WAN) endpoint: MAC address and ByteBlower interface name.
WAN_MAC = '00:BB:23:22:55:12'
WAN_BB_INTERFACE = 'nontrunk-1'
# Private (LAN) endpoint: MAC address and ByteBlower interface name.
LAN_MAC = '00:BB:23:21:55:13'
LAN_BB_INTERFACE = 'trunk-1-81'
# ByteBlower part of the test: connect to the server.
api = byteblower.ByteBlower.InstanceGet()
server = api.ServerAdd(SERVER_ADDRESS)
def create_port(interface, mac_addr):
    """Create a ByteBlower port on *interface*, assign *mac_addr* to its
    Ethernet layer and configure IPv4 through DHCP.

    Returns the fully configured port object.
    """
    new_port = server.PortCreate(interface)
    # Layer 2: set the requested MAC address.
    new_port.Layer2EthIISet().AddressSet(mac_addr)
    # Layer 3: obtain an IPv4 configuration via DHCP.
    ipv4_layer = new_port.Layer3IPv4Set()
    ipv4_layer.ProtocolDhcpGet().Perform()
    return new_port
# Create both endpoints and read back their DHCP-assigned addresses.
wan_port = create_port(WAN_BB_INTERFACE, WAN_MAC)
wan_ip = wan_port.Layer3IPv4Get().IpGet()
lan_port = create_port(LAN_BB_INTERFACE, LAN_MAC)
lan_ip = lan_port.Layer3IPv4Get().IpGet()
# Immediately start capturing traffic on the WAN side so the probe
# packet cannot be missed.
cap = wan_port.RxCaptureBasicAdd()
cap.FilterSet('ip and udp')
cap.Start()
# Next configure the probing traffic.
# Build the probe packet with SCAPY: Ethernet / IP / UDP / payload.
resolved_mac = lan_port.Layer3IPv4Get().Resolve(wan_ip)
stream = lan_port.TxStreamAdd()
bb_frame = stream.FrameAdd()
sc_frame = (Ether(src=LAN_MAC, dst=resolved_mac) / IP(
    src=lan_ip, dst=wan_ip) / UDP(dport=UDP_DEST_PORT, sport=UDP_SRC_PORT) /
    'Excentis NAT Discovery packet')
# ByteBlower expects the frame content as a hex string.
frameContent = bytearray(bytes(sc_frame))
hexbytes = ''.join((format(b, "02x") for b in frameContent))
# Send a single probing frame.
bb_frame.BytesSet(hexbytes)
stream.NumberOfFramesSet(1)
stream.InterFrameGapSet(1000 * 1000)  # 1 millisecond in nanos.
stream.Start()
# Poll until the capture has received at least one packet.
while True:
    sniffed = cap.ResultGet()
    sniffed.Refresh()
    if sniffed.PacketCountGet() > 0:
        break
    time.sleep(0.01)
# The capture needs to be stopped explicitly.
cap.Stop()
# Process the response: dissect the captured packet(s) with SCAPY and
# read the public (post-NAT) source IP and UDP port.
for f in sniffed.FramesGet():
    data = bytearray(f.BufferGet())
    raw = Ether(data)
    if IP in raw and UDP in raw:
        discovered_ip = raw['IP'].getfieldval('src')
        discovered_udp_port = raw['UDP'].getfieldval('sport')
        print('Discovered IP: %s' % discovered_ip)
        print('Discovered UDP port: %s' % discovered_udp_port)
        break
else:
    print('No packet received')
# Cleanup the Server. The API will implicitly clean up
# the create objects.
api.ServerRemove(server)
| [
"byteblowerll.byteblower.ByteBlower.InstanceGet",
"time.sleep"
] | [((979, 1014), 'byteblowerll.byteblower.ByteBlower.InstanceGet', 'byteblower.ByteBlower.InstanceGet', ([], {}), '()\n', (1012, 1014), True, 'import byteblowerll.byteblower as byteblower\n'), ((2387, 2403), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2397, 2403), False, 'import time\n')] |
from rest_framework import serializers
from game.serializers.question_serializer import QuestionSerializer
class QuestionWithAnswerSerializer(QuestionSerializer):
    """QuestionSerializer extended with the question's correct answer.

    Use only where exposing the answer is intended (e.g. grading or
    admin views) -- NOTE(review): confirm against the calling views.
    """
    # Free-text correct answer, serialized alongside the fields
    # inherited from QuestionSerializer.
    correct_answer = serializers.CharField()
| [
"rest_framework.serializers.CharField"
] | [((187, 210), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (208, 210), False, 'from rest_framework import serializers\n')] |
from sklearn import svm
from ..data_wrappers import reject
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.mixture import GMM
from sklearn.neighbors import KernelDensity
class DensityEstimators(object):
    """Per-class confidence estimation via known-vs-reject classifiers.

    For every class, an SVM separates the real training points from
    artificially generated "reject" points; a second, aggregating SVM is
    trained on the per-class scores to yield a single confidence value.

    Fix: ``np.alen`` was removed from NumPy (>= 1.23); row counts are now
    taken with ``np.shape(...)[0]``.
    """
    def __init__(self):
        # Per-class confidence models (class label -> fitted classifier).
        self.models = {}
        # Per-class artificially generated reject ("unknown") samples.
        self.unknown = {}
        self.known = {}

    def train_confidence_model(self, X_kno, X_unk):
        """Train a classifier of training points

        Returns a classifier that predicts high probability values for training
        points and low probability values for reject points.
        """
        model = svm.SVC(probability=True)
        #model = tree.DecisionTreeClassifier(max_depth=5)

        X_kno_unk = np.vstack((X_kno,X_unk))
        # Known points are labelled 1, reject points 0.
        y = np.hstack((np.ones(np.shape(X_kno)[0]),
                       np.zeros(np.shape(X_unk)[0]))).T

        model.fit(X_kno_unk, y)
        return model

    def _train_aggregation_model(self, X):
        """Train the second-stage model on the per-class score vectors."""
        scores_kno = self.predict_proba(X)
        self.scores_agg_unk = reject.create_reject_data(scores_kno,
                        proportion=1, method='uniform_hsphere', pca=True,
                        pca_variance=0.99, pca_components=0, hshape_cov=0,
                        hshape_prop_in=0.99, hshape_multiplier=1.5)
        model_agg = self.train_confidence_model(scores_kno,self.scores_agg_unk)
        return model_agg

    def train(self,X,Y):
        """
        TODO for PCA to work we need more instances than features, if we
        reduce the problem to M binary subproblems the number of instances
        is reduced by M while the number of features remains constant.
        This can be a problem for MNIST, CIFAR and ImageNet.
        """
        self.classes = np.unique(Y)
        self.accuracies = {}
        for y in self.classes:
            # Reject samples are generated per class, around that
            # class's own training points.
            x = X[Y==y]
            self.unknown[y] = reject.create_reject_data(x, proportion=1,
                    method='uniform_hsphere', pca=True, pca_variance=0.99,
                    pca_components=0, hshape_cov=0, hshape_prop_in=0.99,
                    hshape_multiplier=1.5)

            self.models[y] = self.train_confidence_model(x,self.unknown[y])

        self.model_agg = self._train_aggregation_model(X)

    def predict_proba(self,X):
        """Per-class confidence scores, shape (n_samples, n_classes)."""
        scores = np.zeros((np.shape(X)[0], len(self.classes)))
        for index, y in enumerate(self.classes):
            scores[:,index] = self.models[y].predict_proba(X)[:,1]
        return scores

    def predict_confidence(self,X):
        """Aggregate the per-class scores into one confidence per sample."""
        scores = self.predict_proba(X)
        return self.model_agg.predict_proba(scores)[:,1]
class MyGMM(GMM):
    # NOTE(review): sklearn.mixture.GMM was removed in scikit-learn 0.20;
    # this class only imports/works with an old pinned scikit-learn --
    # confirm the environment before touching it.
    def score(self, X):
        """Return per-sample likelihoods (exponentiates the parent's
        log-likelihood scores)."""
        return np.exp(super(MyGMM, self).score(X))
class MyMultivariateNormal(object):
    """Multivariate normal density with optional diagonal covariance.

    Unlike a plain density it also handles singular covariance matrices
    by falling back to a pseudo-determinant / pseudo-inverse.

    Fixes relative to the original:
    - ``np.alen`` (removed from NumPy >= 1.23) replaced by an explicit
      row count.
    - ``np.rank`` (removed from NumPy; it returned the *number of array
      dimensions*, always 2 for a matrix) replaced by
      ``np.linalg.matrix_rank`` in the pseudo-determinant formula.
    - ``maximum`` now evaluates the density at the mean as a row vector;
      the previous column reshape produced a meaningless broadcast for
      dimension > 1.
    """
    def __init__(self, mean=None, cov=None, min_covar=1e-10,
                 covariance_type='diag'):
        """
        Parameters
        ----------
        mean : array-like, optional
            Mean vector; together with `cov` makes the density usable
            without calling `fit`.
        cov : array-like, optional
            Covariance matrix (must be non-singular when given here).
        min_covar : float
            Replaces exact zeros in the fitted covariance to avoid
            singularities caused by constant features.
        covariance_type : {'full', 'diag'}
            'diag' keeps only the diagonal of the fitted covariance.
        """
        if mean is not None:
            self.mu = mean
            self.size = len(self.mu)
        # TODO assess that the parameters mean and cov are correct
        if cov is not None:
            # TODO create a function that computes det, norm_const and inv
            self.sigma = cov
            self.det = np.linalg.det(self.sigma)
            self.norm_const = 1.0/ ( np.power((2*np.pi),float(self.size)/2) *
                                     np.sqrt(self.det) )
            self.inv = np.linalg.inv(self.sigma)

        self.min_covar = min_covar
        self.covariance_type = covariance_type
        # Small ridge added to singular covariances for the
        # pseudo-determinant limit.
        self.alpha = np.float32(1e-32)
        if covariance_type not in ['full', 'diag',]:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)

    @staticmethod
    def _n_rows(a):
        """Number of rows of `a` (1 for 0-d arrays).

        Drop-in replacement for the removed ``np.alen``.
        """
        a = np.asarray(a)
        return a.shape[0] if a.ndim else 1

    def pseudo_determinant(self, A, alpha):
        """Pseudo-determinant of a possibly singular matrix `A`:
        det(A + alpha*I) / alpha**(n - rank(A)) for small alpha."""
        n = len(A)
        rank = np.linalg.matrix_rank(A)
        return np.linalg.det(A + np.eye(n)*alpha) / np.power(alpha, n - rank)

    def fit(self, x):
        """Estimate mean and (biased) covariance from samples `x` (n, d)."""
        self.mu = x.mean(axis=0)
        self.sigma = np.cov(x.T, bias=1) # bias=0 (N-1), bias=1 (N)
        # Guard against zero entries (constant features).
        self.sigma[self.sigma==0] = self.min_covar
        if(self.covariance_type == 'diag'):
            self.sigma = np.eye(self._n_rows(self.sigma))*self.sigma

        if len(self.mu.shape) == 0:
            self.size = 1
        else:
            self.size = self.mu.shape[0]
        self.det = np.linalg.det(self.sigma)
        # If sigma is singular, fall back to pseudo-determinant/inverse.
        if self.det == 0:
            self.pseudo_det = self.pseudo_determinant(self.sigma*2*np.pi, self.alpha)
            self.norm_const = 1.0/ np.sqrt(self.pseudo_det)
            self.inv = np.linalg.pinv(self.sigma)
        else:
            self.norm_const = 1.0/ ( np.power((2*np.pi),float(self.size)/2) *
                                     np.sqrt(self.det) )
            self.inv = np.linalg.inv(self.sigma)

    def score(self,x):
        """Density (pdf) evaluated at each row of `x` (n, d)."""
        x_mu = np.subtract(x,self.mu)
        result = np.exp(-0.5 * np.diag(np.dot(x_mu,np.dot(self.inv,x_mu.T))))
        return self.norm_const * result

    # FIXME: look for an appropriate name
    def log_likelihood(self,x):
        """norm_const times the log of the exponential term.

        NOTE(review): this is *not* the true log-density (which would be
        log(norm_const) + exponent); kept as-is for backward
        compatibility with existing callers.
        """
        x_mu = np.subtract(x,self.mu)
        result = -0.5 * np.diag(np.dot(x_mu,np.dot(self.inv,x_mu.T)))
        return self.norm_const * result

    @property
    def means_(self):
        """Fitted mean vector (sklearn-style accessor)."""
        return self.mu

    @property
    def covars_(self):
        """Diagonal vector for 'diag' models, full matrix otherwise."""
        if self.covariance_type == 'diag':
            return np.diag(self.sigma)
        return self.sigma

    def sample(self, n):
        """Draw `n` samples from the fitted distribution."""
        return np.random.multivariate_normal(self.mu, self.sigma, n)

    @property
    def maximum(self):
        """Density at the mode (which for a Gaussian is the mean)."""
        # The mean must be passed as a single row, shape (1, d).
        return self.score(np.array(self.mu).reshape(1,-1))
class MultivariateNormal(object):
    """Thin wrapper around scipy.stats.multivariate_normal with a
    fit/score interface and optional diagonal covariance.

    Fix: ``np.alen`` (removed from NumPy >= 1.23) replaced with an
    explicit row count.
    """
    def __init__(self, mean=None, cov=None, allow_singular=True,
                 covariance_type='diag'):
        """
        Parameters
        ----------
        mean, cov : array-like, optional
            Pre-set parameters; otherwise call `fit`.
        allow_singular : bool
            Passed through to scipy's multivariate_normal.
        covariance_type : {'full', 'diag'}
            'diag' keeps only the diagonal of the fitted covariance.
        """
        if mean is not None:
            self.mu = mean
        if cov is not None:
            self.sigma = cov
        self.allow_singular = allow_singular
        self.covariance_type = covariance_type

    def fit(self, x):
        """Estimate mean and (biased) covariance from samples `x` (n, d)."""
        self.mu = x.mean(axis=0)
        self.sigma = np.cov(x.T, bias=1) # bias=0 (N-1), bias=1 (N)
        if self.covariance_type == 'diag':
            # atleast_2d keeps the 0-d (univariate) corner case working,
            # matching the removed np.alen's behaviour.
            self.sigma = np.eye(len(np.atleast_2d(self.sigma)))*self.sigma
        self.model = multivariate_normal(mean=self.mu, cov=self.sigma,
                                       allow_singular=self.allow_singular)

    def score(self,x):
        """Density (pdf) evaluated at `x`."""
        return self.model.pdf(x)

    @property
    def means_(self):
        """Fitted mean vector (sklearn-style accessor)."""
        return self.mu

    @property
    def covars_(self):
        """Diagonal vector for 'diag' models, full matrix otherwise."""
        if self.covariance_type == 'diag':
            return np.diag(self.sigma)
        return self.sigma

    def sample(self, n):
        """Draw `n` samples from the fitted distribution."""
        return np.random.multivariate_normal(self.mu, self.sigma, n)
class MyMultivariateKernelDensity(object):
    """Product of independent per-feature 1-D kernel density estimates.

    One sklearn KernelDensity is fitted per column; `score` returns the
    sum of the per-feature log-densities (i.e. the log-density under an
    independence assumption).

    Fix: ``np.alen`` (removed from NumPy >= 1.23) replaced with the
    explicit row count ``X.shape[0]``.
    """
    def __init__(self, kernel='gaussian', bandwidth=1.0):
        self._kernel = kernel
        self._bandwidth = bandwidth
        self._estimators = []

    def fit(self, X):
        """Fit one univariate KDE per column of `X` (n, p)."""
        p = X.shape[1]
        for feature in np.arange(p):
            kd = KernelDensity(kernel=self._kernel, bandwidth=self._bandwidth)
            kd.fit(X[:, feature].reshape(-1, 1))
            self._estimators.append(kd)

    def score(self, X):
        """Summed per-feature log-densities for each row of `X` (n, p)."""
        p = len(self._estimators)
        scores = np.zeros((X.shape[0], p))
        for feature in np.arange(p):
            s = self._estimators[feature].score_samples(
                X[:, feature].reshape(-1, 1))
            scores[:, feature] = s
        return scores.sum(axis=1)
| [
"numpy.sqrt",
"numpy.linalg.pinv",
"scipy.stats.multivariate_normal",
"numpy.array",
"numpy.cov",
"numpy.arange",
"numpy.rank",
"sklearn.neighbors.KernelDensity",
"numpy.subtract",
"numpy.dot",
"numpy.vstack",
"numpy.eye",
"numpy.random.multivariate_normal",
"numpy.alen",
"sklearn.svm.SV... | [((604, 629), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (611, 629), False, 'from sklearn import svm\n'), ((709, 734), 'numpy.vstack', 'np.vstack', (['(X_kno, X_unk)'], {}), '((X_kno, X_unk))\n', (718, 734), True, 'import numpy as np\n'), ((1689, 1701), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (1698, 1701), True, 'import numpy as np\n'), ((3437, 3454), 'numpy.float32', 'np.float32', (['(1e-32)'], {}), '(1e-32)\n', (3447, 3454), True, 'import numpy as np\n'), ((3849, 3868), 'numpy.cov', 'np.cov', (['x.T'], {'bias': '(1)'}), '(x.T, bias=1)\n', (3855, 3868), True, 'import numpy as np\n'), ((4193, 4218), 'numpy.linalg.det', 'np.linalg.det', (['self.sigma'], {}), '(self.sigma)\n', (4206, 4218), True, 'import numpy as np\n'), ((4692, 4715), 'numpy.subtract', 'np.subtract', (['x', 'self.mu'], {}), '(x, self.mu)\n', (4703, 4715), True, 'import numpy as np\n'), ((4924, 4947), 'numpy.subtract', 'np.subtract', (['x', 'self.mu'], {}), '(x, self.mu)\n', (4935, 4947), True, 'import numpy as np\n'), ((5305, 5358), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mu', 'self.sigma', 'n'], {}), '(self.mu, self.sigma, n)\n', (5334, 5358), True, 'import numpy as np\n'), ((5881, 5900), 'numpy.cov', 'np.cov', (['x.T'], {'bias': '(1)'}), '(x.T, bias=1)\n', (5887, 5900), True, 'import numpy as np\n'), ((6057, 6147), 'scipy.stats.multivariate_normal', 'multivariate_normal', ([], {'mean': 'self.mu', 'cov': 'self.sigma', 'allow_singular': 'self.allow_singular'}), '(mean=self.mu, cov=self.sigma, allow_singular=self.\n allow_singular)\n', (6076, 6147), False, 'from scipy.stats import multivariate_normal\n'), ((6463, 6516), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mu', 'self.sigma', 'n'], {}), '(self.mu, self.sigma, n)\n', (6492, 6516), True, 'import numpy as np\n'), ((6785, 6797), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (6794, 6797), True, 
'import numpy as np\n'), ((7092, 7104), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (7101, 7104), True, 'import numpy as np\n'), ((4445, 4471), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.sigma'], {}), '(self.sigma)\n', (4459, 4471), True, 'import numpy as np\n'), ((4627, 4652), 'numpy.linalg.inv', 'np.linalg.inv', (['self.sigma'], {}), '(self.sigma)\n', (4640, 4652), True, 'import numpy as np\n'), ((5218, 5237), 'numpy.diag', 'np.diag', (['self.sigma'], {}), '(self.sigma)\n', (5225, 5237), True, 'import numpy as np\n'), ((6376, 6395), 'numpy.diag', 'np.diag', (['self.sigma'], {}), '(self.sigma)\n', (6383, 6395), True, 'import numpy as np\n'), ((6816, 6877), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': 'self._kernel', 'bandwidth': 'self._bandwidth'}), '(kernel=self._kernel, bandwidth=self._bandwidth)\n', (6829, 6877), False, 'from sklearn.neighbors import KernelDensity\n'), ((2256, 2266), 'numpy.alen', 'np.alen', (['X'], {}), '(X)\n', (2263, 2266), True, 'import numpy as np\n'), ((3129, 3154), 'numpy.linalg.det', 'np.linalg.det', (['self.sigma'], {}), '(self.sigma)\n', (3142, 3154), True, 'import numpy as np\n'), ((3308, 3333), 'numpy.linalg.inv', 'np.linalg.inv', (['self.sigma'], {}), '(self.sigma)\n', (3321, 3333), True, 'import numpy as np\n'), ((4397, 4421), 'numpy.sqrt', 'np.sqrt', (['self.pseudo_det'], {}), '(self.pseudo_det)\n', (4404, 4421), True, 'import numpy as np\n'), ((7053, 7063), 'numpy.alen', 'np.alen', (['X'], {}), '(X)\n', (7060, 7063), True, 'import numpy as np\n'), ((3760, 3770), 'numpy.rank', 'np.rank', (['A'], {}), '(A)\n', (3767, 3770), True, 'import numpy as np\n'), ((4023, 4042), 'numpy.alen', 'np.alen', (['self.sigma'], {}), '(self.sigma)\n', (4030, 4042), True, 'import numpy as np\n'), ((4584, 4601), 'numpy.sqrt', 'np.sqrt', (['self.det'], {}), '(self.det)\n', (4591, 4601), True, 'import numpy as np\n'), ((4991, 5015), 'numpy.dot', 'np.dot', (['self.inv', 'x_mu.T'], {}), '(self.inv, x_mu.T)\n', (4997, 
5015), True, 'import numpy as np\n'), ((5423, 5440), 'numpy.array', 'np.array', (['self.mu'], {}), '(self.mu)\n', (5431, 5440), True, 'import numpy as np\n'), ((6003, 6022), 'numpy.alen', 'np.alen', (['self.sigma'], {}), '(self.sigma)\n', (6010, 6022), True, 'import numpy as np\n'), ((765, 779), 'numpy.alen', 'np.alen', (['X_kno'], {}), '(X_kno)\n', (772, 779), True, 'import numpy as np\n'), ((791, 805), 'numpy.alen', 'np.alen', (['X_unk'], {}), '(X_unk)\n', (798, 805), True, 'import numpy as np\n'), ((3261, 3278), 'numpy.sqrt', 'np.sqrt', (['self.det'], {}), '(self.det)\n', (3268, 3278), True, 'import numpy as np\n'), ((3724, 3733), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (3730, 3733), True, 'import numpy as np\n'), ((4766, 4790), 'numpy.dot', 'np.dot', (['self.inv', 'x_mu.T'], {}), '(self.inv, x_mu.T)\n', (4772, 4790), True, 'import numpy as np\n')] |
from pyparsing import LineEnd, LineStart, SkipTo, Regex

from regparser.grammar import atomic, unified

# A section header: section marker + part/section citation; the rest
# of the line (the title) is consumed up to the end of line.
section = (
    atomic.section_marker.copy().leaveWhitespace()
    + unified.part_section
    + SkipTo(LineEnd())
)

# A section number followed directly by a depth-1 paragraph citation;
# the rest of the line is consumed.
par = (
    atomic.section.copy().leaveWhitespace()
    + unified.depth1_p
    + SkipTo(LineEnd())
)

# Same as `par` but introduced by an explicit paragraph marker.
marker_par = (
    atomic.paragraph_marker.copy().leaveWhitespace()
    + atomic.section
    + unified.depth1_p
)

# This matches an appendix name in an appendix header. Here we'll match
# something with a dash in the appendix name (i.e. AA-1) but we'll
# remove the dash. The effect of this is that, for label purposes only,
# the appendix becomes known as 'AA1', and therefore we don't have weird
# label collisions with a node labeled '1' underneath the appendix.
appendix = (
    atomic.appendix_marker.copy().leaveWhitespace()
    + Regex(r"[A-Z]+-?[0-9]*\b").setResultsName("appendix").setParseAction(
        lambda r: r[0].replace('-', '')).setResultsName("appendix")
    + SkipTo(LineEnd())
)

# Top-level parser: any of the header forms, anchored at line start.
# NOTE: marker_par is tried before par, so the marker form wins.
parser = LineStart() + (section | marker_par | par | appendix)
| [
"regparser.grammar.atomic.section.copy",
"pyparsing.Regex",
"pyparsing.LineEnd",
"regparser.grammar.atomic.section_marker.copy",
"regparser.grammar.atomic.appendix_marker.copy",
"pyparsing.LineStart",
"regparser.grammar.atomic.paragraph_marker.copy"
] | [((1039, 1050), 'pyparsing.LineStart', 'LineStart', ([], {}), '()\n', (1048, 1050), False, 'from pyparsing import LineEnd, LineStart, SkipTo, Regex\n'), ((208, 217), 'pyparsing.LineEnd', 'LineEnd', ([], {}), '()\n', (215, 217), False, 'from pyparsing import LineEnd, LineStart, SkipTo, Regex\n'), ((311, 320), 'pyparsing.LineEnd', 'LineEnd', ([], {}), '()\n', (318, 320), False, 'from pyparsing import LineEnd, LineStart, SkipTo, Regex\n'), ((1015, 1024), 'pyparsing.LineEnd', 'LineEnd', ([], {}), '()\n', (1022, 1024), False, 'from pyparsing import LineEnd, LineStart, SkipTo, Regex\n'), ((121, 149), 'regparser.grammar.atomic.section_marker.copy', 'atomic.section_marker.copy', ([], {}), '()\n', (147, 149), False, 'from regparser.grammar import atomic, unified\n'), ((235, 256), 'regparser.grammar.atomic.section.copy', 'atomic.section.copy', ([], {}), '()\n', (254, 256), False, 'from regparser.grammar import atomic, unified\n'), ((345, 375), 'regparser.grammar.atomic.paragraph_marker.copy', 'atomic.paragraph_marker.copy', ([], {}), '()\n', (373, 375), False, 'from regparser.grammar import atomic, unified\n'), ((810, 839), 'regparser.grammar.atomic.appendix_marker.copy', 'atomic.appendix_marker.copy', ([], {}), '()\n', (837, 839), False, 'from regparser.grammar import atomic, unified\n'), ((864, 890), 'pyparsing.Regex', 'Regex', (['"""[A-Z]+-?[0-9]*\\\\b"""'], {}), "('[A-Z]+-?[0-9]*\\\\b')\n", (869, 890), False, 'from pyparsing import LineEnd, LineStart, SkipTo, Regex\n')] |
import argparse
def commandLineArgs():
    """Parse the program's command-line options.

    Fixes relative to the original: the parser description was a
    copy-paste from the argparse docs ("Process some integers."); the
    27 near-identical add_argument calls are now driven by a spec
    table.  All flags, defaults, help texts and destination names are
    unchanged (argparse derives each ``dest`` from the long option
    exactly as the removed explicit ``dest=`` arguments did).

    Returns
    -------
    dict
        Mapping from option name (dashes replaced by underscores) to
        its parsed value.
    """
    parser = argparse.ArgumentParser(
        description='Training and evaluation options.')

    # Shared kwargs for boolean flags (all default to False).
    BOOL = dict(default=False, action='store_true')

    # (flag, add_argument kwargs) -- order matches the original so the
    # --help listing is unchanged.
    specs = [
        ('--restrict-types', dict(BOOL)),
        ('--test', dict(BOOL,
                        help='True if we want to just test an existing model')),
        ('--limit-overfit', dict(BOOL)),
        ('--use-cuda', dict(BOOL)),
        ('--verbose', dict(BOOL)),
        ('--rnn-decode', dict(BOOL)),
        ('--batch-size', dict(type=int, default=32)),
        ('--lr', dict(type=float, default=0.001)),
        ('--weight-decay', dict(type=float, default=0.0)),
        ('--beta', dict(type=float, default=0.0)),
        ('--epochs-per-replay', dict(type=int, default=0)),
        ('--beam-width', dict(type=int, default=128)),
        ('--epsilon', dict(type=float, default=0.3)),
        ('--num-cpus', dict(type=int, default=1)),
        ('--num-cycles', dict(type=int, default=1)),
        # something >> number of distinct programs per tasks
        ('--max-p-per-task', dict(type=int, default=1000000)),
        ('--seed', dict(type=int, default=0)),
        ('--jumpstart', dict(BOOL,
                             help='Whether to jumpstart by training on set of '
                                  'ground truth programs first')),
        ('--num-iter-beam-search', dict(type=int, default=1)),
        ('--num-epochs-start', dict(type=int, default=1)),
        ('--resume-iter', dict(type=int, default=0)),
        ('--test-decode-time', dict(type=int, default=0)),
        ('--fixed-epoch-pretrain', dict(type=int, default=0)),
        ('--preload-frontiers', dict(type=str, default=None)),
        ('--resume', dict(type=str, default=None)),
        ('--no-nl', dict(BOOL,
                         help='Whether to condition on natural language '
                              'description i.e. use NL as input to encoder')),
        ('--no-io', dict(BOOL, help='Whether to use IO as input to encoder')),
    ]
    for flag, kwargs in specs:
        parser.add_argument(flag, **kwargs)

    return vars(parser.parse_args())
| [
"argparse.ArgumentParser"
] | [((53, 114), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (76, 114), False, 'import argparse\n')] |
"""
fakedata.py
====================================
Generate artificial pupil-data.
"""
import numpy as np
import scipy.stats as stats
from .baseline import *
from .pupil import *
def generate_pupil_data(event_onsets, fs=1000, pad=5000, baseline_lowpass=0.2,
                        evoked_response_perc=0.02, response_fluct_sd=1,
                        prf_npar=(10.35,0), prf_tmax=(917.0,0),
                        prop_spurious_events=0.2, noise_amp=0.0005):
    """
    Generate artificial pupil data as a sum of slow baseline-fluctuations
    on which event-evoked responses are "riding".

    Fixes relative to the original: ``np.int`` (removed in NumPy 1.24)
    replaced by the builtin ``int``; the float ``high`` passed to
    ``np.random.randint`` is now cast to ``int`` explicitly.

    Parameters
    -----------

    event_onsets: list
        list of all events that evoke a response (in seconds)

    fs: float
        sampling rate in Hz

    pad: float
        append `pad` milliseconds of signal after the last event is decayed

    baseline_lowpass: float
        cutoff for the lowpass-filter that defines the baseline
        (highest allowed frequency in the baseline fluctuations)

    evoked_response_perc: float
        amplitude of the pupil-response as proportion of the baseline

    response_fluct_sd: float
        How much do the amplitudes of the individual events fluctuate?
        This is determined by drawing each individual pupil-response to
        a single event from a (positive) normal distribution with mean as determined
        by `evoked_response_perc` and sd `response_fluct_sd` (in units of
        `evoked_response_perc`).

    prf_npar: tuple (float,float)
        (mean,std) of the npar parameter from :py:func:`pypillometry.pupil.pupil_kernel()`.
        If the std is exactly zero, then the mean is used for all pupil-responses.
        If the std is positive, npar is taken i.i.d. from ~ normal(mean,std) for each event.

    prf_tmax: tuple (float,float)
        (mean,std) of the tmax parameter from :py:func:`pypillometry.pupil.pupil_kernel()`.
        If the std is exactly zero, then the mean is used for all pupil-responses.
        If the std is positive, tmax is taken i.i.d. from ~ normal(mean,std) for each event.

    prop_spurious_events: float
        Add random events to the pupil signal. `prop_spurious_events` is expressed
        as proportion of the number of real events.

    noise_amp: float
        Amplitude of random gaussian noise that sits on top of the simulated signal.
        Expressed in units of mean baseline pupil diameter.

    Returns
    --------

    tx, sy: np.array
        time and simulated pupil-dilation (n)
    x0: np.array
        baseline (n)
    delta_weights: np.array
        pupil-response strengths (len(event_onsets))
    """
    nevents=len(event_onsets)

    ## npar
    if prf_npar[1]==0: # deterministic parameter
        npars=np.ones(nevents)*prf_npar[0]
    else:
        npars=np.random.randn(nevents)*prf_npar[1]+prf_npar[0]

    ## tmax
    if prf_tmax[1]==0: # deterministic parameter
        tmaxs=np.ones(nevents)*prf_tmax[0]
    else:
        tmaxs=np.random.randn(nevents)*prf_tmax[1]+prf_tmax[0]

    if np.any(npars<=0):
        raise ValueError("npar must be >0")
    if np.any(tmaxs<=0):
        raise ValueError("tmax must be >0")

    # get maximum duration of one of the PRFs
    maxdur=pupil_get_max_duration(npars.min(), tmaxs.max())

    T=np.array(event_onsets).max()+maxdur+pad # stop pad millisec after last event
    n=int(np.ceil(T/1000.*fs)) # number of sampling points
    sy=np.zeros(n)        # pupil diameter
    tx=np.linspace(0,T,n) # time-vector in milliseconds

    # create baseline-signal
    slack=int(0.50*n) # add slack to avoid edge effects of the filter
    x0=butter_lowpass_filter(np.random.rand(n+slack), baseline_lowpass, fs, 2)[slack:(n+slack)]
    x0=x0*1000+5000 # scale it up to a scale as usually obtained from eyetracker

    ### real events regressor
    ## scaling
    # np.int was removed from NumPy; the builtin int has the same effect.
    event_ix=(np.array(event_onsets)/1000.*fs).astype(int)
    #a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    delta_weights=stats.truncnorm.rvs(-1/response_fluct_sd,np.inf, loc=1, scale=response_fluct_sd, size=event_ix.size)
    x1=np.zeros_like(sy)

    for i,ev in enumerate(event_onsets):
        # create kernel and delta-functions for events
        kernel=pupil_kernel(duration=maxdur,fs=fs,npar=npars[i], tmax=tmaxs[i])
        x1[event_ix[i]:(event_ix[i]+kernel.size)]=x1[event_ix[i]:(event_ix[i]+kernel.size)]+kernel*delta_weights[i]

    ## spurious events regressor
    # randint requires an integer bound; np.ceil returns a float.
    sp_event_ix=np.random.randint(low=0,high=int(np.ceil((T-maxdur-pad)/1000.*fs)),size=int( nevents*prop_spurious_events ))
    sp_events=tx[ sp_event_ix ]
    n_sp_events=sp_events.size

    ## npar
    if prf_npar[1]==0: # deterministic parameter
        npars=np.ones(n_sp_events)*prf_npar[0]
    else:
        npars=np.random.randn(n_sp_events)*prf_npar[1]+prf_npar[0]

    ## tmax
    if prf_tmax[1]==0: # deterministic parameter
        tmaxs=np.ones(n_sp_events)*prf_tmax[0]
    else:
        tmaxs=np.random.randn(n_sp_events)*prf_tmax[1]+prf_tmax[0]

    ## scaling
    sp_delta_weights=stats.truncnorm.rvs(-1/response_fluct_sd,np.inf, loc=1, scale=response_fluct_sd, size=sp_event_ix.size)
    x2=np.zeros_like(sy)

    for i,ev in enumerate(sp_events):
        # create kernel and delta-functions for events
        kernel=pupil_kernel(duration=maxdur,fs=fs,npar=npars[i], tmax=tmaxs[i])
        x2[sp_event_ix[i]:(sp_event_ix[i]+kernel.size)]=x2[sp_event_ix[i]:(sp_event_ix[i]+kernel.size)]+kernel*sp_delta_weights[i]

    amp=np.mean(x0)*evoked_response_perc # mean amplitude for the evoked response
    noise=noise_amp*np.mean(x0)*np.random.randn(n)

    sy = x0 + amp*x1 + amp*x2 + noise

    return (tx,sy,x0,delta_weights)
def get_dataset(ntrials=100, isi=2000, rtdist=(1000,500),fs=1000,pad=5000, **kwargs):
    """
    Convenience function to run :py:func:`generate_pupil_data()` with standard parameters.

    Simulates `ntrials` trials; each trial contributes a stimulus onset
    (spaced `isi` apart) and a response onset (stimulus + a truncated-normal
    reaction time).

    Parameters
    -----------
    ntrials:int
        number of trials
    isi: float
        inter-stimulus interval in milliseconds
    rtdist: tuple (float,float)
        mean and std of a (truncated at zero) normal distribution to generate response times
        (in milliseconds)
    fs: float
        sampling rate
    pad: float
        padding before the first and after the last event, in milliseconds
        (passed through to :py:func:`generate_pupil_data()`)
    kwargs: dict
        arguments for :py:func:`pypillometry.fakedata.generate_pupil_data()`

    Returns
    --------

    tx, sy: np.array
        time and simulated pupil-dilation (n)
    baseline: np.array
        baseline (n)
    event_onsets: np.array
        timing of the simulated event-onsets (stimuli and responses not separated)
    response_coef: np.array
        pupil-response strengths (len(event_onsets))
    """
    # Stimuli at regular intervals, shifted by the initial padding.
    stim_onsets=np.arange(ntrials)*isi+pad
    # Reaction times ~ truncated normal (never negative).
    rts=stats.truncnorm.rvs( (0-rtdist[0])/rtdist[1], np.inf, loc=rtdist[0], scale=rtdist[1], size=ntrials)
    resp_onsets=stim_onsets+rts
    # Stimulus and response onsets are merged into a single event list.
    event_onsets=np.concatenate( (stim_onsets, resp_onsets) )

    kwargs.update({"fs":fs})
    tx,sy,baseline,response_coef=generate_pupil_data(event_onsets, **kwargs)
    return tx,sy,baseline,event_onsets, response_coef
| [
"numpy.mean",
"numpy.ceil",
"numpy.ones",
"numpy.random.rand",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.random.randn",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.arange",
"scipy.stats.truncnorm.rvs"
] | [((3093, 3111), 'numpy.any', 'np.any', (['(npars <= 0)'], {}), '(npars <= 0)\n', (3099, 3111), True, 'import numpy as np\n'), ((3162, 3180), 'numpy.any', 'np.any', (['(tmaxs <= 0)'], {}), '(tmaxs <= 0)\n', (3168, 3180), True, 'import numpy as np\n'), ((3481, 3492), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3489, 3492), True, 'import numpy as np\n'), ((3524, 3544), 'numpy.linspace', 'np.linspace', (['(0)', 'T', 'n'], {}), '(0, T, n)\n', (3535, 3544), True, 'import numpy as np\n'), ((4055, 4163), 'scipy.stats.truncnorm.rvs', 'stats.truncnorm.rvs', (['(-1 / response_fluct_sd)', 'np.inf'], {'loc': '(1)', 'scale': 'response_fluct_sd', 'size': 'event_ix.size'}), '(-1 / response_fluct_sd, np.inf, loc=1, scale=\n response_fluct_sd, size=event_ix.size)\n', (4074, 4163), True, 'import scipy.stats as stats\n'), ((4163, 4180), 'numpy.zeros_like', 'np.zeros_like', (['sy'], {}), '(sy)\n', (4176, 4180), True, 'import numpy as np\n'), ((5102, 5213), 'scipy.stats.truncnorm.rvs', 'stats.truncnorm.rvs', (['(-1 / response_fluct_sd)', 'np.inf'], {'loc': '(1)', 'scale': 'response_fluct_sd', 'size': 'sp_event_ix.size'}), '(-1 / response_fluct_sd, np.inf, loc=1, scale=\n response_fluct_sd, size=sp_event_ix.size)\n', (5121, 5213), True, 'import scipy.stats as stats\n'), ((5213, 5230), 'numpy.zeros_like', 'np.zeros_like', (['sy'], {}), '(sy)\n', (5226, 5230), True, 'import numpy as np\n'), ((6813, 6919), 'scipy.stats.truncnorm.rvs', 'stats.truncnorm.rvs', (['((0 - rtdist[0]) / rtdist[1])', 'np.inf'], {'loc': 'rtdist[0]', 'scale': 'rtdist[1]', 'size': 'ntrials'}), '((0 - rtdist[0]) / rtdist[1], np.inf, loc=rtdist[0],\n scale=rtdist[1], size=ntrials)\n', (6832, 6919), True, 'import scipy.stats as stats\n'), ((6962, 7004), 'numpy.concatenate', 'np.concatenate', (['(stim_onsets, resp_onsets)'], {}), '((stim_onsets, resp_onsets))\n', (6976, 7004), True, 'import numpy as np\n'), ((3425, 3449), 'numpy.ceil', 'np.ceil', (['(T / 1000.0 * fs)'], {}), '(T / 1000.0 * fs)\n', (3432, 3449), 
True, 'import numpy as np\n'), ((5545, 5556), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (5552, 5556), True, 'import numpy as np\n'), ((5651, 5669), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (5666, 5669), True, 'import numpy as np\n'), ((2805, 2821), 'numpy.ones', 'np.ones', (['nevents'], {}), '(nevents)\n', (2812, 2821), True, 'import numpy as np\n'), ((2983, 2999), 'numpy.ones', 'np.ones', (['nevents'], {}), '(nevents)\n', (2990, 2999), True, 'import numpy as np\n'), ((3707, 3732), 'numpy.random.rand', 'np.random.rand', (['(n + slack)'], {}), '(n + slack)\n', (3721, 3732), True, 'import numpy as np\n'), ((4554, 4595), 'numpy.ceil', 'np.ceil', (['((T - maxdur - pad) / 1000.0 * fs)'], {}), '((T - maxdur - pad) / 1000.0 * fs)\n', (4561, 4595), True, 'import numpy as np\n'), ((4768, 4788), 'numpy.ones', 'np.ones', (['n_sp_events'], {}), '(n_sp_events)\n', (4775, 4788), True, 'import numpy as np\n'), ((4954, 4974), 'numpy.ones', 'np.ones', (['n_sp_events'], {}), '(n_sp_events)\n', (4961, 4974), True, 'import numpy as np\n'), ((5639, 5650), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (5646, 5650), True, 'import numpy as np\n'), ((6778, 6796), 'numpy.arange', 'np.arange', (['ntrials'], {}), '(ntrials)\n', (6787, 6796), True, 'import numpy as np\n'), ((2858, 2882), 'numpy.random.randn', 'np.random.randn', (['nevents'], {}), '(nevents)\n', (2873, 2882), True, 'import numpy as np\n'), ((3036, 3060), 'numpy.random.randn', 'np.random.randn', (['nevents'], {}), '(nevents)\n', (3051, 3060), True, 'import numpy as np\n'), ((4825, 4853), 'numpy.random.randn', 'np.random.randn', (['n_sp_events'], {}), '(n_sp_events)\n', (4840, 4853), True, 'import numpy as np\n'), ((5011, 5039), 'numpy.random.randn', 'np.random.randn', (['n_sp_events'], {}), '(n_sp_events)\n', (5026, 5039), True, 'import numpy as np\n'), ((3338, 3360), 'numpy.array', 'np.array', (['event_onsets'], {}), '(event_onsets)\n', (3346, 3360), True, 'import numpy as np\n'), ((3916, 3938), 
'numpy.array', 'np.array', (['event_onsets'], {}), '(event_onsets)\n', (3924, 3938), True, 'import numpy as np\n')] |
"""
<NAME>
Advent of Code Day 5
Challenge 1
"""
import sys
import hashlib
def md5_func(string):
    """Return the hexadecimal MD5 digest of *string* (encoded as UTF-8)."""
    return hashlib.md5(string.encode('utf-8')).hexdigest()
INTEGER_ID = 0
PASSWORD = ""

if len(sys.argv) < 2:
    print("Please pass the puzzle input as a command line argument.")
    exit(0)

# Brute-force increasing integer suffixes: every MD5 digest of
# <puzzle input><id> that starts with five zeros contributes its sixth
# hex character to the 8-character password (AoC 2016, day 5 part 1).
while len(PASSWORD) < 8:
    temp_md5 = md5_func(sys.argv[1] + str(INTEGER_ID))
    if temp_md5[:5] == "00000":
        print(INTEGER_ID)
        # Bugfix: the corrupted token "temp_<PASSWORD>[5]" restored to
        # the sixth character of the qualifying digest.
        PASSWORD += temp_md5[5]
    INTEGER_ID += 1

print("The password for the door is:" + PASSWORD)
sys.stdout.flush()
"sys.stdout.flush",
"hashlib.md5"
] | [((590, 608), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (606, 608), False, 'import sys\n'), ((114, 127), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (125, 127), False, 'import hashlib\n')] |
# Generated by Django 3.0.5 on 2020-12-07 16:56
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema of the ``api`` app: classification tables (Category,
    # Genre), the Title catalogue, and user-generated content (Review,
    # Comment) tied to the configured user model.
    initial = True
    dependencies = [
        # Review.author / Comment.author reference the (possibly swapped)
        # project user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Category: classification table; ``slug`` is the unique URL key.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.SlugField(unique=True)),
            ],
        ),
        # Genre: same shape as Category; linked to Title many-to-many.
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.SlugField(unique=True)),
            ],
        ),
        # Title: the main catalogue entry. ``category`` is nullable and
        # PROTECTed (a category with titles cannot be deleted).
        migrations.CreateModel(
            name='Title',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('year', models.IntegerField()),
                ('rating', models.IntegerField(blank=True, null=True)),
                ('description', models.TextField(max_length=200)),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api.Category')),
                ('genre', models.ManyToManyField(to='api.Genre')),
            ],
        ),
        # Review: a user's scored review of a Title; deleted in cascade
        # with either its author or its title.
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(max_length=255)),
                ('score', models.IntegerField(default=0)),
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='api.Title')),
            ],
        ),
        # Comment: a user's comment attached to a Review; cascades with
        # its author or its review.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(max_length=255)),
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('review', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='api.Review')),
            ],
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((437, 530), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (453, 530), False, 'from django.db import migrations, models\n'), ((554, 586), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (570, 586), False, 'from django.db import migrations, models\n'), ((614, 643), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (630, 643), False, 'from django.db import migrations, models\n'), ((774, 867), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (790, 867), False, 'from django.db import migrations, models\n'), ((891, 923), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (907, 923), False, 'from django.db import migrations, models\n'), ((951, 980), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (967, 980), False, 'from django.db import migrations, models\n'), ((1111, 1204), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1127, 1204), False, 'from django.db import migrations, models\n'), ((1228, 1260), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(100)'}), '(max_length=100)\n', (1244, 1260), False, 'from django.db import migrations, models\n'), ((1288, 1309), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1307, 1309), False, 'from django.db import migrations, models\n'), ((1339, 1381), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1358, 1381), False, 'from django.db import migrations, models\n'), ((1416, 1448), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1432, 1448), False, 'from django.db import migrations, models\n'), ((1480, 1576), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""api.Category"""'}), "(null=True, on_delete=django.db.models.deletion.PROTECT,\n to='api.Category')\n", (1497, 1576), False, 'from django.db import migrations, models\n'), ((1601, 1639), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""api.Genre"""'}), "(to='api.Genre')\n", (1623, 1639), False, 'from django.db import migrations, models\n'), ((1771, 1864), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1787, 1864), False, 'from django.db import migrations, models\n'), ((1888, 1920), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1904, 1920), False, 'from django.db import migrations, models\n'), ((1949, 1979), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1968, 1979), False, 'from django.db import migrations, models\n'), ((2011, 2050), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), 
'(auto_now_add=True)\n', (2031, 2050), False, 'from django.db import migrations, models\n'), ((2080, 2176), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (2097, 2176), False, 'from django.db import migrations, models\n'), ((2200, 2307), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""reviews"""', 'to': '"""api.Title"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='reviews', to='api.Title')\n", (2217, 2307), False, 'from django.db import migrations, models\n'), ((2435, 2528), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2451, 2528), False, 'from django.db import migrations, models\n'), ((2552, 2584), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2568, 2584), False, 'from django.db import migrations, models\n'), ((2616, 2655), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2636, 2655), False, 'from django.db import migrations, models\n'), ((2685, 2781), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (2702, 2781), False, 'from django.db import migrations, models\n'), ((2806, 2915), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comments"""', 'to': '"""api.Review"""'}), 
"(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comments', to='api.Review')\n", (2823, 2915), False, 'from django.db import migrations, models\n')] |
"""proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
# from hello_world.views import hello_world
from django.views.generic import RedirectView
# from hello_world.views import create_aut_book_view
from hello_world import views as view_hello_world
from books import views as view_book
from books.views import BookDetailView, show_book_by_pk_view, ShowBookListView
from proj import auth_views
from django.conf.urls.static import static
from django.conf import settings
# URL routing table for the project.
# Fix: the ``bk/create/seris`` and ``bk/create/genre`` routes were each
# registered twice; Django always resolves to the first matching pattern,
# so the duplicate entries were dead weight and have been removed.
urlpatterns = [
    # Root redirects to the book list view.
    path('', RedirectView.as_view(url='/lists-view/')),
    path('admin/', admin.site.urls),
    path('cart/', include('orders.urls', namespace='orders')),
    # Class-based book views from the ``books`` app.
    path('lists-view/', view_book.ShowBookListView.as_view(), name='lists-view'),
    path('book-view/<int:book_id>/', show_book_by_pk_view, name="book-view"),
    path('book-update-View/<int:pk>/', view_book.UpdateBookView.as_view(), name="book-update"),
    path('book-delete-View/<int:pk>/', view_book.DeleteBookView.as_view(), name="book-delete"),
    path('template/', view_book.StaticView.as_view()),
    path('Create-View/', view_book.CreateBookView.as_view()),
    path('update-View/<int:pk>/', view_book.UpdateBookView.as_view()),
    path('delete-View/<int:pk>/', view_book.DeleteBookView.as_view()),
    # Function-based CRUD views for the ``hello_world`` app models.
    path('bk/create/seris', view_hello_world.create_seris_view),
    path('bk/create/genre', view_hello_world.create_genre_view),
    path('bk/create/aut_book', view_hello_world.create_aut_book_view),
    path('bk/create/publish', view_hello_world.create_publish_view),
    path('bk/update/publish/<int:pk>/', view_hello_world.update_publish_view),
    path('bk/update/aut_book/<int:pk>/', view_hello_world.update_aut_book_view),
    path('bk/update/seris/<int:pk>/', view_hello_world.update_seris_view),
    path('bk/update/gener/<int:pk>/', view_hello_world.update_gener_view),
    path('bk/delete/aut_book/<int:pk>/', view_hello_world.delete_aut_book_view),
    path('bk/delete/publish/<int:pk>/', view_hello_world.delete_publish_view),
    path('bk/delete/seris/<int:pk>/', view_hello_world.delete_seris_view),
    path('bk/delete/genre/<int:pk>/', view_hello_world.delete_genre_view),
    # Function-based book CRUD views.
    path('book/create', view_book.create_book_view),
    path('book/update/<int:pk>/', view_book.update_book_view),
    path('book/delete/<int:pk>/', view_book.delete_book_view),
    # Authentication and detail views.
    path('auth/login/', auth_views.MyLoginView.as_view(), name='login'),
    path('book-detail/<int:pk>/', view_book.BookDetailView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media files
| [
"django.urls.include",
"books.views.BookDetailView.as_view",
"books.views.DeleteBookView.as_view",
"books.views.UpdateBookView.as_view",
"books.views.CreateBookView.as_view",
"django.conf.urls.static.static",
"proj.auth_views.MyLoginView.as_view",
"django.views.generic.RedirectView.as_view",
"books.... | [((3301, 3362), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (3307, 3362), False, 'from django.conf.urls.static import static\n'), ((1224, 1255), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1228, 1255), False, 'from django.urls import path\n'), ((1407, 1479), 'django.urls.path', 'path', (['"""book-view/<int:book_id>/"""', 'show_book_by_pk_view'], {'name': '"""book-view"""'}), "('book-view/<int:book_id>/', show_book_by_pk_view, name='book-view')\n", (1411, 1479), False, 'from django.urls import path\n'), ((1948, 2007), 'django.urls.path', 'path', (['"""bk/create/seris"""', 'view_hello_world.create_seris_view'], {}), "('bk/create/seris', view_hello_world.create_seris_view)\n", (1952, 2007), False, 'from django.urls import path\n'), ((2013, 2072), 'django.urls.path', 'path', (['"""bk/create/genre"""', 'view_hello_world.create_genre_view'], {}), "('bk/create/genre', view_hello_world.create_genre_view)\n", (2017, 2072), False, 'from django.urls import path\n'), ((2079, 2144), 'django.urls.path', 'path', (['"""bk/create/aut_book"""', 'view_hello_world.create_aut_book_view'], {}), "('bk/create/aut_book', view_hello_world.create_aut_book_view)\n", (2083, 2144), False, 'from django.urls import path\n'), ((2150, 2213), 'django.urls.path', 'path', (['"""bk/create/publish"""', 'view_hello_world.create_publish_view'], {}), "('bk/create/publish', view_hello_world.create_publish_view)\n", (2154, 2213), False, 'from django.urls import path\n'), ((2219, 2278), 'django.urls.path', 'path', (['"""bk/create/seris"""', 'view_hello_world.create_seris_view'], {}), "('bk/create/seris', view_hello_world.create_seris_view)\n", (2223, 2278), False, 'from django.urls import path\n'), ((2284, 2343), 'django.urls.path', 'path', (['"""bk/create/genre"""', 'view_hello_world.create_genre_view'], {}), 
"('bk/create/genre', view_hello_world.create_genre_view)\n", (2288, 2343), False, 'from django.urls import path\n'), ((2350, 2423), 'django.urls.path', 'path', (['"""bk/update/publish/<int:pk>/"""', 'view_hello_world.update_publish_view'], {}), "('bk/update/publish/<int:pk>/', view_hello_world.update_publish_view)\n", (2354, 2423), False, 'from django.urls import path\n'), ((2429, 2504), 'django.urls.path', 'path', (['"""bk/update/aut_book/<int:pk>/"""', 'view_hello_world.update_aut_book_view'], {}), "('bk/update/aut_book/<int:pk>/', view_hello_world.update_aut_book_view)\n", (2433, 2504), False, 'from django.urls import path\n'), ((2510, 2579), 'django.urls.path', 'path', (['"""bk/update/seris/<int:pk>/"""', 'view_hello_world.update_seris_view'], {}), "('bk/update/seris/<int:pk>/', view_hello_world.update_seris_view)\n", (2514, 2579), False, 'from django.urls import path\n'), ((2585, 2654), 'django.urls.path', 'path', (['"""bk/update/gener/<int:pk>/"""', 'view_hello_world.update_gener_view'], {}), "('bk/update/gener/<int:pk>/', view_hello_world.update_gener_view)\n", (2589, 2654), False, 'from django.urls import path\n'), ((2661, 2736), 'django.urls.path', 'path', (['"""bk/delete/aut_book/<int:pk>/"""', 'view_hello_world.delete_aut_book_view'], {}), "('bk/delete/aut_book/<int:pk>/', view_hello_world.delete_aut_book_view)\n", (2665, 2736), False, 'from django.urls import path\n'), ((2742, 2815), 'django.urls.path', 'path', (['"""bk/delete/publish/<int:pk>/"""', 'view_hello_world.delete_publish_view'], {}), "('bk/delete/publish/<int:pk>/', view_hello_world.delete_publish_view)\n", (2746, 2815), False, 'from django.urls import path\n'), ((2821, 2890), 'django.urls.path', 'path', (['"""bk/delete/seris/<int:pk>/"""', 'view_hello_world.delete_seris_view'], {}), "('bk/delete/seris/<int:pk>/', view_hello_world.delete_seris_view)\n", (2825, 2890), False, 'from django.urls import path\n'), ((2896, 2965), 'django.urls.path', 'path', (['"""bk/delete/genre/<int:pk>/"""', 
'view_hello_world.delete_genre_view'], {}), "('bk/delete/genre/<int:pk>/', view_hello_world.delete_genre_view)\n", (2900, 2965), False, 'from django.urls import path\n'), ((2972, 3019), 'django.urls.path', 'path', (['"""book/create"""', 'view_book.create_book_view'], {}), "('book/create', view_book.create_book_view)\n", (2976, 3019), False, 'from django.urls import path\n'), ((3025, 3082), 'django.urls.path', 'path', (['"""book/update/<int:pk>/"""', 'view_book.update_book_view'], {}), "('book/update/<int:pk>/', view_book.update_book_view)\n", (3029, 3082), False, 'from django.urls import path\n'), ((3088, 3145), 'django.urls.path', 'path', (['"""book/delete/<int:pk>/"""', 'view_book.delete_book_view'], {}), "('book/delete/<int:pk>/', view_book.delete_book_view)\n", (3092, 3145), False, 'from django.urls import path\n'), ((1177, 1217), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/lists-view/"""'}), "(url='/lists-view/')\n", (1197, 1217), False, 'from django.views.generic import RedirectView\n'), ((1275, 1317), 'django.urls.include', 'include', (['"""orders.urls"""'], {'namespace': '"""orders"""'}), "('orders.urls', namespace='orders')\n", (1282, 1317), False, 'from django.urls import include\n'), ((1345, 1381), 'books.views.ShowBookListView.as_view', 'view_book.ShowBookListView.as_view', ([], {}), '()\n', (1379, 1381), True, 'from books import views as view_book\n'), ((1520, 1554), 'books.views.UpdateBookView.as_view', 'view_book.UpdateBookView.as_view', ([], {}), '()\n', (1552, 1554), True, 'from books import views as view_book\n'), ((1616, 1650), 'books.views.DeleteBookView.as_view', 'view_book.DeleteBookView.as_view', ([], {}), '()\n', (1648, 1650), True, 'from books import views as view_book\n'), ((1705, 1735), 'books.views.StaticView.as_view', 'view_book.StaticView.as_view', ([], {}), '()\n', (1733, 1735), True, 'from books import views as view_book\n'), ((1764, 1798), 'books.views.CreateBookView.as_view', 
'view_book.CreateBookView.as_view', ([], {}), '()\n', (1796, 1798), True, 'from books import views as view_book\n'), ((1835, 1869), 'books.views.UpdateBookView.as_view', 'view_book.UpdateBookView.as_view', ([], {}), '()\n', (1867, 1869), True, 'from books import views as view_book\n'), ((1906, 1940), 'books.views.DeleteBookView.as_view', 'view_book.DeleteBookView.as_view', ([], {}), '()\n', (1938, 1940), True, 'from books import views as view_book\n'), ((3171, 3203), 'proj.auth_views.MyLoginView.as_view', 'auth_views.MyLoginView.as_view', ([], {}), '()\n', (3201, 3203), False, 'from proj import auth_views\n'), ((3259, 3293), 'books.views.BookDetailView.as_view', 'view_book.BookDetailView.as_view', ([], {}), '()\n', (3291, 3293), True, 'from books import views as view_book\n')] |
"""
Default audio settings.
"""
import numpy as np
from modules.socket.settings import PACKAGE_SIZE
# Number of sound channels (2 = stereo).
CHANNELS = 2

# Streaming buffer size in frames. One network package must hold
# CHUNK_SIZE frames of CHANNELS interleaved 16-bit integer samples,
# hence the division by bytes-per-frame (CHANNELS * sizeof(int16)).
CHUNK_SIZE = PACKAGE_SIZE // (CHANNELS * np.dtype(np.int16).itemsize)

# Sound device frame rate: 44.1 kHz.
FRAME_RATE = 44100
| [
"numpy.dtype"
] | [((264, 282), 'numpy.dtype', 'np.dtype', (['np.int16'], {}), '(np.int16)\n', (272, 282), True, 'import numpy as np\n')] |
import ast
import base64
import datetime
import logging
import random
from functools import reduce

import jinja2
import werkzeug

from odoo import SUPERUSER_ID
from odoo import _, api, http
from odoo.exceptions import UserError
from odoo.http import request
from odoo.modules import get_module_resource
from odoo.addons.web.controllers.main import binary_content, Home
# Module-level logger for this controller.
_logger = logging.getLogger(__name__)
# Server start timestamp ('YYYYMMDD-HHMM'); handed to the service-worker
# template as a cache-busting version string.
SERVER_START = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d-%H%M')
class KickerController(Home):
    """HTTP/JSON controller for the kicker (table football) companion app.

    Serves the public availability page, the client application under
    ``/app``, the JSON endpoints the client calls, the avatar/static
    helpers, the service worker, and the IoT ping endpoint.
    """

    # Number of background images available for the availability page.
    NUM_BG = 10

    @http.route(['/free', '/free/<model("kicker.kicker"):kicker>'], type='http', auth="public")
    def is_the_kicker_free(self, kicker=None, **kw):
        """Render a public page saying whether ``kicker`` is available.

        Without an explicit kicker in the URL, falls back to the first
        registered kicker; 404 when none exists.
        """
        if not kicker:
            kicker = request.env['kicker.kicker'].sudo().search([], limit=1)
            if not kicker:
                return request.not_found()
        # Pick a random background image matching the availability status.
        rand_bg = random.randrange(0, self.NUM_BG - 1, step=1)
        return request.render('kicker.page_is_free', {
            'is_free': kicker.is_available,
            'bg': ('yes_%s' if kicker.is_available else 'no_%s') % rand_bg,
        })

    @http.route(['/kicker/ping'], auth='none', csrf=False)
    def ping(self, token=False, status="", **kw):
        """Record an availability ping sent by a kicker IoT box.

        TEST URL:
        /kicker/ping?token=<token>&status={"available": True,"temperature":"15.4"}
        """
        with api.Environment.manage():
            if token:
                try:
                    ip_address = request.httprequest.environ['REMOTE_ADDR']
                    payload = ast.literal_eval(status)
                    # Bug fix: read the flag from the parsed ``payload``
                    # dict; the original called ``.get`` on the raw
                    # ``status`` string, which always raised.
                    available = payload.get('available', False)
                    return request.env['kicker.ping'].sudo().ping(token, available, ip_address)
                except Exception:
                    _logger.exception("Kicker ping failed when evaluating status")
                    return False

    @http.route(['/app/', "/app/<path:route>"], auth="user")
    def app(self, **kw):
        """Render the main kicker client application page."""
        return request.render('kicker.app', {'body_classname': 'o_kicker_app', 'user': request.env.user})

    @http.route(['/app/static/<path:route>'], auth="none")
    def static(self, route, **kw):
        """Serve static files via the /app route for caching purposes (serviceworker scope)."""
        return werkzeug.utils.redirect('/kicker/static/' + route)

    # JSON routes
    @http.route('/app/json/dashboard', type='json', auth='user', csrf=False)
    def dashboard(self, **kw):
        """Return the current user's dashboard statistics."""
        partner = request.env.user.partner_id
        return partner._dashboard_stats()

    @http.route('/app/json/rankings', type='json', auth='user', csrf=False)
    def rankings(self, period='month', **kw):
        """Return player rankings for the given ``period`` (default month)."""
        partner = request.env.user.partner_id.sudo()
        return partner._get_rankings(period=period)

    @http.route('/app/json/community', type='json', auth='user', csrf=False)
    def community(self, **kw):
        """Return community-wide statistics for the current user."""
        partner = request.env.user.partner_id
        return partner._community_stats()

    @http.route(['/app/json/player', '/app/json/player/<int:player_id>'], type='json', auth='user')
    def player_info(self, player_id=None, **kw):
        """Return the public profile fields of a player.

        Defaults to the current user's partner when ``player_id`` is not
        given; raises 404 for an unknown player.
        """
        if not player_id:
            player_id = request.env.user.partner_id.id
        partner = request.env['res.partner'].browse(player_id)
        if not partner:
            raise werkzeug.exceptions.NotFound()
        fields = ['id', 'name', 'email', 'main_kicker_id', 'tagline',
                  'wins', 'losses', 'win_ratio', 'weekly_wins', 'weekly_losses', 'weekly_win_ratio']
        return partner.sudo().read(fields)[0]

    @http.route('/app/json/update_profile', type='json', auth='user', methods=['POST'], csrf=False)
    def update_profile(self, name, tagline, main_kicker, avatar=None, **kw):
        """Update the current user's profile; ``main_kicker == -1`` clears
        the favorite kicker, ``avatar`` (optional) replaces the image."""
        partner = request.env.user.partner_id
        vals = {
            'name': name,
            'tagline': tagline,
            'main_kicker_id': False if int(main_kicker) == -1 else int(main_kicker),
        }
        if avatar:
            vals['image'] = avatar
        partner.write(vals)
        return {'success': True, 'player': partner.read(['id', 'name', 'email', 'main_kicker_id', 'tagline'])[0]}

    @http.route(['/app/json/players'], type='json', auth='user')
    def list_players(self, **kw):
        """Return id/name of every partner flagged as a kicker player."""
        return request.env['res.partner'].search_read([('kicker_player', '=', True)], fields=['id', 'name'])

    @http.route(['/app/json/kickers'], type='json', auth='user')
    def list_kickers(self, **kw):
        """Return all kickers plus the current user's default kicker id."""
        kickers = request.env['kicker.kicker'].sudo().search_read([], fields=['id', 'name'])
        default = request.env.user.partner_id.main_kicker_id.id
        return {'kickers': kickers, 'default': default}

    @http.route(['/kicker/score/submit'], type='json', auth='user', methods=['POST'], csrf=False)
    def submit_score(self, **post):
        """Record a finished game: two teams of up to two players each and
        their scores. Player 1 of each team is mandatory."""
        Partner = request.env['res.partner']
        player11 = post.get('player11') and Partner.browse(int(post.get('player11')))
        player21 = post.get('player21') and Partner.browse(int(post.get('player21')))
        # Bug fix: the original condition ``not player11 and player21`` only
        # fired when team 1 was empty AND team 2 was set; each team needs at
        # least one player.
        if not player11 or not player21:
            raise UserError(_('There must be at least one player per team.'))
        player12 = post.get('player12') and Partner.browse(int(post.get('player12')))
        player22 = post.get('player22') and Partner.browse(int(post.get('player22')))
        kicker = request.env['kicker.kicker'].browse(int(post.get('kicker_id')))
        game = request.env['kicker.game'].sudo().create({
            'kicker_id': kicker.id,
            'score_1': post.get('score1'),
            'score_2': post.get('score2'),
            'session_ids': [(0, False, {'player_id': player11.id, 'team': 'team_1'}),
                            (0, False, {'player_id': player12.id, 'team': 'team_1'}),
                            (0, False, {'player_id': player21.id, 'team': 'team_2'}),
                            (0, False, {'player_id': player22.id, 'team': 'team_2'}),],
        })
        return {'success': True, 'game_id': game.id}

    # Non-json routes
    @http.route(['/app/avatar', '/app/avatar/<int:player_id>'], type='http', auth="public")
    def avatar(self, player_id=None, **kw):
        """Serve a player's avatar image, falling back to the web
        placeholder when the partner has no image."""
        if not player_id:
            player_id = request.env.user.partner_id.id
        status, headers, content = binary_content(model='res.partner', id=player_id, field='image_medium', default_mimetype='image/png', env=request.env(user=SUPERUSER_ID))
        if not content:
            # No image on the partner: serve the generic placeholder.
            img_path = get_module_resource('web', 'static/src/img', 'placeholder.png')
            with open(img_path, 'rb') as f:
                image = f.read()
            content = base64.b64encode(image)
        if status == 304:
            # Client cache is still valid.
            return werkzeug.wrappers.Response(status=304)
        image_base64 = base64.b64decode(content)
        headers.append(('Content-Length', len(image_base64)))
        response = request.make_response(image_base64, headers)
        response.status = str(status)
        return response

    @http.route('/app/sw.js', type='http', auth='public')
    def serviceworker(self, **kw):
        """Render the service worker script listing the asset-bundle URLs
        to pre-cache; SERVER_START acts as the cache version."""
        bundles = ['web.assets_common', 'web.assets_frontend']
        attachments = request.env['ir.attachment']
        for bundle in bundles:
            attachments += attachments.search([
                ('url', '=like', '/web/content/%-%/{0}%'.format(bundle))
            ])
        urls = attachments.mapped('url')
        js = request.env['ir.ui.view'].render_template('kicker.service_worker', values={'urls': urls, 'version': SERVER_START})
        headers = {
            'Content-Type': 'text/javascript',
        }
        response = http.request.make_response(js, headers=headers)
        return response

    # ------------------------------------------------------
    # Login - overwrite of the web login so that regular users are redirected to the backend
    # while portal users are redirected to the kicker app
    # ------------------------------------------------------
    @http.route(auth="public")
    def web_login(self, redirect=None, *args, **kw):
        """After a successful login without an explicit redirect, send
        internal users to the backend and portal users to /app."""
        response = super(KickerController, self).web_login(redirect=redirect, *args, **kw)
        if not redirect and request.params['login_success']:
            if request.env['res.users'].browse(request.uid).has_group('base.group_user'):
                redirect = b'/web?' + request.httprequest.query_string
            else:
                redirect = '/app'
            return http.redirect_with_hash(redirect)
        return response
"logging.getLogger",
"odoo.http.request.render",
"random.randrange",
"odoo.http.request.env",
"odoo.http.request.make_response",
"odoo.modules.get_module_resource",
"base64.b64encode",
"base64.b64decode",
"odoo.http.route",
"werkzeug.wrappers.Response",
"datetime.datetime.now",
"ast.literal_ev... | [((377, 404), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (394, 404), False, 'import logging\n'), ((448, 471), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (469, 471), False, 'import datetime\n'), ((543, 637), 'odoo.http.route', 'http.route', (['[\'/free\', \'/free/<model("kicker.kicker"):kicker>\']'], {'type': '"""http"""', 'auth': '"""public"""'}), '([\'/free\', \'/free/<model("kicker.kicker"):kicker>\'], type=\'http\',\n auth=\'public\')\n', (553, 637), False, 'from odoo import api, http\n'), ((1104, 1157), 'odoo.http.route', 'http.route', (["['/kicker/ping']"], {'auth': '"""none"""', 'csrf': '(False)'}), "(['/kicker/ping'], auth='none', csrf=False)\n", (1114, 1157), False, 'from odoo import api, http\n'), ((1870, 1925), 'odoo.http.route', 'http.route', (["['/app/', '/app/<path:route>']"], {'auth': '"""user"""'}), "(['/app/', '/app/<path:route>'], auth='user')\n", (1880, 1925), False, 'from odoo import api, http\n'), ((2063, 2116), 'odoo.http.route', 'http.route', (["['/app/static/<path:route>']"], {'auth': '"""none"""'}), "(['/app/static/<path:route>'], auth='none')\n", (2073, 2116), False, 'from odoo import api, http\n'), ((2338, 2409), 'odoo.http.route', 'http.route', (['"""/app/json/dashboard"""'], {'type': '"""json"""', 'auth': '"""user"""', 'csrf': '(False)'}), "('/app/json/dashboard', type='json', auth='user', csrf=False)\n", (2348, 2409), False, 'from odoo import api, http\n'), ((2535, 2605), 'odoo.http.route', 'http.route', (['"""/app/json/rankings"""'], {'type': '"""json"""', 'auth': '"""user"""', 'csrf': '(False)'}), "('/app/json/rankings', type='json', auth='user', csrf=False)\n", (2545, 2605), False, 'from odoo import api, http\n'), ((2764, 2835), 'odoo.http.route', 'http.route', (['"""/app/json/community"""'], {'type': '"""json"""', 'auth': '"""user"""', 'csrf': '(False)'}), "('/app/json/community', type='json', auth='user', csrf=False)\n", (2774, 2835), False, 'from odoo 
import api, http\n'), ((2961, 3060), 'odoo.http.route', 'http.route', (["['/app/json/player', '/app/json/player/<int:player_id>']"], {'type': '"""json"""', 'auth': '"""user"""'}), "(['/app/json/player', '/app/json/player/<int:player_id>'], type=\n 'json', auth='user')\n", (2971, 3060), False, 'from odoo import api, http\n'), ((3545, 3644), 'odoo.http.route', 'http.route', (['"""/app/json/update_profile"""'], {'type': '"""json"""', 'auth': '"""user"""', 'methods': "['POST']", 'csrf': '(False)'}), "('/app/json/update_profile', type='json', auth='user', methods=[\n 'POST'], csrf=False)\n", (3555, 3644), False, 'from odoo import api, http\n'), ((4134, 4193), 'odoo.http.route', 'http.route', (["['/app/json/players']"], {'type': '"""json"""', 'auth': '"""user"""'}), "(['/app/json/players'], type='json', auth='user')\n", (4144, 4193), False, 'from odoo import api, http\n'), ((4343, 4402), 'odoo.http.route', 'http.route', (["['/app/json/kickers']"], {'type': '"""json"""', 'auth': '"""user"""'}), "(['/app/json/kickers'], type='json', auth='user')\n", (4353, 4402), False, 'from odoo import api, http\n'), ((4656, 4753), 'odoo.http.route', 'http.route', (["['/kicker/score/submit']"], {'type': '"""json"""', 'auth': '"""user"""', 'methods': "['POST']", 'csrf': '(False)'}), "(['/kicker/score/submit'], type='json', auth='user', methods=[\n 'POST'], csrf=False)\n", (4666, 4753), False, 'from odoo import api, http\n'), ((5985, 6075), 'odoo.http.route', 'http.route', (["['/app/avatar', '/app/avatar/<int:player_id>']"], {'type': '"""http"""', 'auth': '"""public"""'}), "(['/app/avatar', '/app/avatar/<int:player_id>'], type='http',\n auth='public')\n", (5995, 6075), False, 'from odoo import api, http\n'), ((6932, 6984), 'odoo.http.route', 'http.route', (['"""/app/sw.js"""'], {'type': '"""http"""', 'auth': '"""public"""'}), "('/app/sw.js', type='http', auth='public')\n", (6942, 6984), False, 'from odoo import api, http\n'), ((7918, 7943), 'odoo.http.route', 'http.route', ([], {'auth': 
'"""public"""'}), "(auth='public')\n", (7928, 7943), False, 'from odoo import api, http\n'), ((867, 911), 'random.randrange', 'random.randrange', (['(0)', '(self.NUM_BG - 1)'], {'step': '(1)'}), '(0, self.NUM_BG - 1, step=1)\n', (883, 911), False, 'import random\n'), ((927, 1066), 'odoo.http.request.render', 'request.render', (['"""kicker.page_is_free"""', "{'is_free': kicker.is_available, 'bg': ('yes_%s' if kicker.is_available else\n 'no_%s') % rand_bg}"], {}), "('kicker.page_is_free', {'is_free': kicker.is_available, 'bg':\n ('yes_%s' if kicker.is_available else 'no_%s') % rand_bg})\n", (941, 1066), False, 'from odoo.http import request\n'), ((1966, 2060), 'odoo.http.request.render', 'request.render', (['"""kicker.app"""', "{'body_classname': 'o_kicker_app', 'user': request.env.user}"], {}), "('kicker.app', {'body_classname': 'o_kicker_app', 'user':\n request.env.user})\n", (1980, 2060), False, 'from odoo.http import request\n'), ((2263, 2313), 'werkzeug.utils.redirect', 'werkzeug.utils.redirect', (["('/kicker/static/' + route)"], {}), "('/kicker/static/' + route)\n", (2286, 2313), False, 'import werkzeug\n'), ((2670, 2704), 'odoo.http.request.env.user.partner_id.sudo', 'request.env.user.partner_id.sudo', ([], {}), '()\n', (2702, 2704), False, 'from odoo.http import request\n'), ((6712, 6737), 'base64.b64decode', 'base64.b64decode', (['content'], {}), '(content)\n', (6728, 6737), False, 'import base64\n'), ((6819, 6863), 'odoo.http.request.make_response', 'request.make_response', (['image_base64', 'headers'], {}), '(image_base64, headers)\n', (6840, 6863), False, 'from odoo.http import request\n'), ((7566, 7613), 'odoo.http.request.make_response', 'http.request.make_response', (['js'], {'headers': 'headers'}), '(js, headers=headers)\n', (7592, 7613), False, 'from odoo import api, http\n'), ((829, 848), 'odoo.http.request.not_found', 'request.not_found', ([], {}), '()\n', (846, 848), False, 'from odoo.http import request\n'), ((1361, 1385), 
'odoo.api.Environment.manage', 'api.Environment.manage', ([], {}), '()\n', (1383, 1385), False, 'from odoo import api, http\n'), ((3291, 3321), 'werkzeug.exceptions.NotFound', 'werkzeug.exceptions.NotFound', ([], {}), '()\n', (3319, 3321), False, 'import werkzeug\n'), ((6418, 6481), 'odoo.modules.get_module_resource', 'get_module_resource', (['"""web"""', '"""static/src/img"""', '"""placeholder.png"""'], {}), "('web', 'static/src/img', 'placeholder.png')\n", (6437, 6481), False, 'from odoo.modules import get_module_resource\n'), ((6581, 6604), 'base64.b64encode', 'base64.b64encode', (['image'], {}), '(image)\n', (6597, 6604), False, 'import base64\n'), ((6650, 6688), 'werkzeug.wrappers.Response', 'werkzeug.wrappers.Response', ([], {'status': '(304)'}), '(status=304)\n', (6676, 6688), False, 'import werkzeug\n'), ((8381, 8414), 'odoo.http.redirect_with_hash', 'http.redirect_with_hash', (['redirect'], {}), '(redirect)\n', (8404, 8414), False, 'from odoo import api, http\n'), ((6338, 6368), 'odoo.http.request.env', 'request.env', ([], {'user': 'SUPERUSER_ID'}), '(user=SUPERUSER_ID)\n', (6349, 6368), False, 'from odoo.http import request\n'), ((1536, 1560), 'ast.literal_eval', 'ast.literal_eval', (['status'], {}), '(status)\n', (1552, 1560), False, 'import ast\n')] |
"""
Unit and regression test for the neuralxc package.
"""
import copy
import os
import sys
from abc import ABC, abstractmethod
import dill as pickle
import matplotlib.pyplot as plt
import numpy as np
import pytest
# Import package, test suite, and other packages as needed
import neuralxc as xc
from neuralxc.constants import Bohr, Hartree
# Optional dependencies: set feature flags that the skipif/parametrize marks
# below consult, so the suite degrades gracefully when extras are missing.
try:
    import ase
    ase_found = True
except ModuleNotFoundError:
    ase_found = False
try:
    import torch
    torch_found = True
except ModuleNotFoundError:
    torch_found = False
try:
    import pyscf
    pyscf_found = True
except ModuleNotFoundError:
    pyscf_found = False
# Directory containing this test module; reference pickles and data files
# (h2o.RHO, *.pckl, benzene_test/) are resolved relative to it.
test_dir = os.path.dirname(os.path.abspath(__file__))
# Developer switches: flip to True to regenerate the stored reference pickles
# instead of comparing against them.
save_siesta_density_getter = False
save_test_symmetrizer = False
save_grouped_transformer = False
@pytest.mark.fast
def test_siesta_density_getter():
    """Compare SiestaDensityGetter output for h2o.RHO against the stored reference."""
    getter = xc.utils.SiestaDensityGetter(binary=True)
    rho, unitcell, grid = getter.get_density(os.path.join(test_dir, 'h2o.RHO'))
    computed = {
        'rho_sum': np.sum(rho),
        'rho_norm': np.linalg.norm(rho.flatten()),
        'unitcell': unitcell,
        'grid': grid,
    }
    ref_path = os.path.join(test_dir, 'h2o_dens.pckl')
    if save_siesta_density_getter:
        # Regeneration mode: overwrite the stored reference instead of comparing.
        with open(ref_path, 'wb') as handle:
            pickle.dump(computed, handle)
    else:
        with open(ref_path, 'rb') as handle:
            reference = pickle.load(handle)
        for key in computed:
            assert np.allclose(reference[key], computed[key])
@pytest.mark.fast
def test_formatter():
    """Formatter round-trip (inverse_transform -> transform) must be the identity,
    both before and after fitting."""
    with open(os.path.join(test_dir, 'h2o_rep.pckl'), 'rb') as handle:
        C = pickle.load(handle)
    basis_set = {'O': {'n': 2, 'l': 3, 'r_o': 1}, 'H': {'n': 2, 'l': 2, 'r_o': 1.5}}
    formatter = xc.formatter.Formatter(basis_set)

    def assert_identity(candidate):
        # Every species block must survive the round trip unchanged.
        for spec in C:
            assert np.allclose(candidate[spec], C[spec])

    C_dict = formatter.inverse_transform(C)
    assert_identity(formatter.transform(C_dict))
    formatter.fit(C_dict)
    assert_identity(formatter.transform(C_dict))
@pytest.mark.fast
@pytest.mark.parametrize(['transformer', 'filepath'],
                         [[xc.ml.transformer.GroupedStandardScaler(),
                           os.path.join(test_dir, 'scaler.pckl')],
                          [xc.ml.transformer.GroupedVarianceThreshold(0.005),
                           os.path.join(test_dir, 'var09.pckl')]])
def test_grouped_transformers(transformer, filepath):
    """Fit each grouped transformer on the stored input and compare (or, in
    regeneration mode, overwrite) the reference output pickle."""
    # NOTE(review): `use_torch` is never referenced inside the loop body, so
    # when torch is installed the identical path simply runs twice and the
    # shared `transformer` instance gets re-fit. The flag was presumably meant
    # to switch the backend -- confirm intent before trusting torch coverage.
    for use_torch in [False, True] if torch_found else [False]:
        with open(os.path.join(test_dir, 'transformer_in.pckl'), 'rb') as file:
            C = pickle.load(file)
        transformer.fit(C)
        transformed = transformer.transform(C)
        if save_grouped_transformer:
            # Regeneration mode: write the new reference to disk.
            with open(filepath, 'wb') as file:
                pickle.dump(transformed, file)
        else:
            with open(filepath, 'rb') as file:
                ref = pickle.load(file)
            for spec in transformed:
                assert np.allclose(transformed[spec], ref[spec])
def test_species_grouper():
    """SpeciesGrouper round-trip (inverse_transform -> transform) is the identity."""
    with open(os.path.join(test_dir, 'h2o_rep.pckl'), 'rb') as handle:
        raw = pickle.load(handle)
    # Add a leading sample axis so the grouper sees a batch of one system.
    batched = [{spec: raw[spec].reshape(1, -1, raw[spec].shape[-1]) for spec in raw}]
    basis_set = {'O': {'n': 2, 'l': 3, 'r_o': 1}, 'H': {'n': 2, 'l': 2, 'r_o': 1.5}}
    grouper = xc.formatter.SpeciesGrouper(basis_set, ['OHH'])
    ungrouped = grouper.inverse_transform(batched, np.array([[0]]))
    round_trip = grouper.transform(ungrouped)[0][0]
    original = batched[0]
    for spec in original:
        assert np.allclose(original[spec], round_trip[spec])
@pytest.mark.skipif(not ase_found, reason='requires ase')
@pytest.mark.realspace
def test_neuralxc_benzene():
    """End-to-end real-space check: evaluate the jit-compiled benzene model on a
    stored SIESTA density and compare potential and forces to stored references."""
    benzene_nxc = xc.NeuralXC(os.path.join(test_dir, 'benzene_test', 'benzene.jit'))
    benzene_traj = ase.io.read(os.path.join(test_dir, 'benzene_test', 'benzene.xyz'), '0')
    density_getter = xc.utils.SiestaDensityGetter(binary=True)
    rho, unitcell, grid = density_getter.get_density(os.path.join(test_dir, 'benzene_test', 'benzene.RHOXC'))
    # Dividing ase positions by Bohr presumably converts Angstrom -> atomic
    # units expected by the model -- TODO confirm against NeuralXC docs.
    positions = benzene_traj.get_positions() / Bohr
    species = benzene_traj.get_chemical_symbols()
    benzene_nxc.initialize(unitcell=unitcell, grid=grid, positions=positions, species=species)
    V, forces = benzene_nxc.get_V(rho, calc_forces=True)[1]
    # Rescale model output before comparing to the stored reference arrays.
    V = V / Hartree
    forces = forces / Hartree * Bohr
    assert np.allclose(V, np.load(os.path.join(test_dir, 'benzene_test', 'V_benzene.npy')))
    # NOTE(review): the last three force rows are excluded on both sides --
    # presumably non-atomic entries (e.g. cell/stress terms); confirm.
    assert np.allclose(forces[:-3], np.load(os.path.join(test_dir, 'benzene_test', 'forces_benzene.npy'))[:-3])
| [
"numpy.allclose",
"neuralxc.utils.SiestaDensityGetter",
"neuralxc.formatter.SpeciesGrouper",
"os.path.join",
"neuralxc.formatter.Formatter",
"numpy.sum",
"numpy.array",
"neuralxc.ml.transformer.GroupedVarianceThreshold",
"pytest.mark.skipif",
"os.path.abspath",
"neuralxc.ml.transformer.GroupedSt... | [((3614, 3670), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not ase_found)'], {'reason': '"""requires ase"""'}), "(not ase_found, reason='requires ase')\n", (3632, 3670), False, 'import pytest\n'), ((658, 683), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (673, 683), False, 'import os\n'), ((860, 901), 'neuralxc.utils.SiestaDensityGetter', 'xc.utils.SiestaDensityGetter', ([], {'binary': '(True)'}), '(binary=True)\n', (888, 901), True, 'import neuralxc as xc\n'), ((1719, 1752), 'neuralxc.formatter.Formatter', 'xc.formatter.Formatter', (['basis_set'], {}), '(basis_set)\n', (1741, 1752), True, 'import neuralxc as xc\n'), ((3345, 3392), 'neuralxc.formatter.SpeciesGrouper', 'xc.formatter.SpeciesGrouper', (['basis_set', "['OHH']"], {}), "(basis_set, ['OHH'])\n", (3372, 3392), True, 'import neuralxc as xc\n'), ((3921, 3962), 'neuralxc.utils.SiestaDensityGetter', 'xc.utils.SiestaDensityGetter', ([], {'binary': '(True)'}), '(binary=True)\n', (3949, 3962), True, 'import neuralxc as xc\n'), ((955, 988), 'os.path.join', 'os.path.join', (['test_dir', '"""h2o.RHO"""'], {}), "(test_dir, 'h2o.RHO')\n", (967, 988), False, 'import os\n'), ((1017, 1028), 'numpy.sum', 'np.sum', (['rho'], {}), '(rho)\n', (1023, 1028), True, 'import numpy as np\n'), ((1600, 1617), 'dill.load', 'pickle.load', (['file'], {}), '(file)\n', (1611, 1617), True, 'import dill as pickle\n'), ((1870, 1902), 'numpy.allclose', 'np.allclose', (['C_id[spec]', 'C[spec]'], {}), '(C_id[spec], C[spec])\n', (1881, 1902), True, 'import numpy as np\n'), ((2002, 2034), 'numpy.allclose', 'np.allclose', (['C_id[spec]', 'C[spec]'], {}), '(C_id[spec], C[spec])\n', (2013, 2034), True, 'import numpy as np\n'), ((3145, 3162), 'dill.load', 'pickle.load', (['file'], {}), '(file)\n', (3156, 3162), True, 'import dill as pickle\n'), ((3572, 3610), 'numpy.allclose', 'np.allclose', (['C[spec]', 're_grouped[spec]'], {}), '(C[spec], re_grouped[spec])\n', (3583, 3610), 
True, 'import numpy as np\n'), ((3754, 3807), 'os.path.join', 'os.path.join', (['test_dir', '"""benzene_test"""', '"""benzene.jit"""'], {}), "(test_dir, 'benzene_test', 'benzene.jit')\n", (3766, 3807), False, 'import os\n'), ((3840, 3893), 'os.path.join', 'os.path.join', (['test_dir', '"""benzene_test"""', '"""benzene.xyz"""'], {}), "(test_dir, 'benzene_test', 'benzene.xyz')\n", (3852, 3893), False, 'import os\n'), ((4016, 4071), 'os.path.join', 'os.path.join', (['test_dir', '"""benzene_test"""', '"""benzene.RHOXC"""'], {}), "(test_dir, 'benzene_test', 'benzene.RHOXC')\n", (4028, 4071), False, 'import os\n'), ((1231, 1257), 'dill.dump', 'pickle.dump', (['results', 'file'], {}), '(results, file)\n', (1242, 1257), True, 'import dill as pickle\n'), ((1368, 1385), 'dill.load', 'pickle.load', (['file'], {}), '(file)\n', (1379, 1385), True, 'import dill as pickle\n'), ((1433, 1476), 'numpy.allclose', 'np.allclose', (['results_ref[key]', 'results[key]'], {}), '(results_ref[key], results[key])\n', (1444, 1476), True, 'import numpy as np\n'), ((1533, 1571), 'os.path.join', 'os.path.join', (['test_dir', '"""h2o_rep.pckl"""'], {}), "(test_dir, 'h2o_rep.pckl')\n", (1545, 1571), False, 'import os\n'), ((2606, 2623), 'dill.load', 'pickle.load', (['file'], {}), '(file)\n', (2617, 2623), True, 'import dill as pickle\n'), ((2136, 2177), 'neuralxc.ml.transformer.GroupedStandardScaler', 'xc.ml.transformer.GroupedStandardScaler', ([], {}), '()\n', (2175, 2177), True, 'import neuralxc as xc\n'), ((2206, 2243), 'os.path.join', 'os.path.join', (['test_dir', '"""scaler.pckl"""'], {}), "(test_dir, 'scaler.pckl')\n", (2218, 2243), False, 'import os\n'), ((2273, 2322), 'neuralxc.ml.transformer.GroupedVarianceThreshold', 'xc.ml.transformer.GroupedVarianceThreshold', (['(0.005)'], {}), '(0.005)\n', (2315, 2322), True, 'import neuralxc as xc\n'), ((2351, 2387), 'os.path.join', 'os.path.join', (['test_dir', '"""var09.pckl"""'], {}), "(test_dir, 'var09.pckl')\n", (2363, 2387), False, 'import 
os\n'), ((3078, 3116), 'os.path.join', 'os.path.join', (['test_dir', '"""h2o_rep.pckl"""'], {}), "(test_dir, 'h2o_rep.pckl')\n", (3090, 3116), False, 'import os\n'), ((4423, 4478), 'os.path.join', 'os.path.join', (['test_dir', '"""benzene_test"""', '"""V_benzene.npy"""'], {}), "(test_dir, 'benzene_test', 'V_benzene.npy')\n", (4435, 4478), False, 'import os\n'), ((1163, 1202), 'os.path.join', 'os.path.join', (['test_dir', '"""h2o_dens.pckl"""'], {}), "(test_dir, 'h2o_dens.pckl')\n", (1175, 1202), False, 'import os\n'), ((1286, 1325), 'os.path.join', 'os.path.join', (['test_dir', '"""h2o_dens.pckl"""'], {}), "(test_dir, 'h2o_dens.pckl')\n", (1298, 1325), False, 'import os\n'), ((2528, 2573), 'os.path.join', 'os.path.join', (['test_dir', '"""transformer_in.pckl"""'], {}), "(test_dir, 'transformer_in.pckl')\n", (2540, 2573), False, 'import os\n'), ((2800, 2830), 'dill.dump', 'pickle.dump', (['transformed', 'file'], {}), '(transformed, file)\n', (2811, 2830), True, 'import dill as pickle\n'), ((2914, 2931), 'dill.load', 'pickle.load', (['file'], {}), '(file)\n', (2925, 2931), True, 'import dill as pickle\n'), ((2992, 3033), 'numpy.allclose', 'np.allclose', (['transformed[spec]', 'ref[spec]'], {}), '(transformed[spec], ref[spec])\n', (3003, 3033), True, 'import numpy as np\n'), ((3473, 3488), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (3481, 3488), True, 'import numpy as np\n'), ((4525, 4585), 'os.path.join', 'os.path.join', (['test_dir', '"""benzene_test"""', '"""forces_benzene.npy"""'], {}), "(test_dir, 'benzene_test', 'forces_benzene.npy')\n", (4537, 4585), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-25 09:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: attach Chinese verbose names (and an ordering
    for ``column``) to the focus app models. Metadata-only; no schema change."""

    dependencies = [
        ('focus', '0003_auto_20190125_1721'),
    ]

    # One AlterModelOptions per model, built from (model_name, options) pairs.
    operations = [
        migrations.AlterModelOptions(name=model_name, options=opts)
        for model_name, opts in (
            ('article', {'verbose_name': '文章', 'verbose_name_plural': '文章'}),
            ('author', {'verbose_name': '作者', 'verbose_name_plural': '作者'}),
            ('column', {'ordering': ['name'], 'verbose_name': '类别', 'verbose_name_plural': '类别'}),
            ('comment', {'verbose_name': '评论', 'verbose_name_plural': '评论'}),
            ('poll', {'verbose_name': '点赞', 'verbose_name_plural': '点赞'}),
        )
    ]
| [
"django.db.migrations.AlterModelOptions"
] | [((291, 400), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""article"""', 'options': "{'verbose_name': '文章', 'verbose_name_plural': '文章'}"}), "(name='article', options={'verbose_name': '文章',\n 'verbose_name_plural': '文章'})\n", (319, 400), False, 'from django.db import migrations\n'), ((441, 549), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""author"""', 'options': "{'verbose_name': '作者', 'verbose_name_plural': '作者'}"}), "(name='author', options={'verbose_name': '作者',\n 'verbose_name_plural': '作者'})\n", (469, 549), False, 'from django.db import migrations\n'), ((590, 720), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""column"""', 'options': "{'ordering': ['name'], 'verbose_name': '类别', 'verbose_name_plural': '类别'}"}), "(name='column', options={'ordering': ['name'],\n 'verbose_name': '类别', 'verbose_name_plural': '类别'})\n", (618, 720), False, 'from django.db import migrations\n'), ((761, 870), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""comment"""', 'options': "{'verbose_name': '评论', 'verbose_name_plural': '评论'}"}), "(name='comment', options={'verbose_name': '评论',\n 'verbose_name_plural': '评论'})\n", (789, 870), False, 'from django.db import migrations\n'), ((911, 1017), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""poll"""', 'options': "{'verbose_name': '点赞', 'verbose_name_plural': '点赞'}"}), "(name='poll', options={'verbose_name': '点赞',\n 'verbose_name_plural': '点赞'})\n", (939, 1017), False, 'from django.db import migrations\n')] |
import os

import numpy as np
from PIL import Image
nets = ["caffenet", "googlenet", "vggf", "vgg16", "vgg19"]
def load(nets):
    """Load each network's universal perturbation and save it as a JPEG.

    Args:
        nets: iterable of network names; each must have a matching
            ``perturbations/perturbation_<net>.npy`` file on disk.

    Returns:
        List of PIL.Image objects, one per network; each is also written
        to ``imgs/<net>.jpg``.
    """
    # Fix: Image.save fails with FileNotFoundError if imgs/ does not exist.
    os.makedirs("imgs", exist_ok=True)
    res = []
    for net in nets:
        data_path = "perturbations/perturbation_%s.npy" % net
        imgs = np.load(data_path, allow_pickle=True, encoding="latin1")
        # The original np.transpose(imgs[0], (0, 1, 2)) was an identity
        # permutation, i.e. a no-op, so it is dropped.
        img = imgs[0]
        im = Image.fromarray(np.uint8(img))
        im.save("imgs/%s.jpg" % net)
        res.append(im)
    return res
def connet(imgs, rate=1):
    """Stitch images horizontally (with a 5%-of-width gap) into imgs/result.jpg.

    Args:
        imgs: list of PIL.Image objects placed side by side; all are resized
            to the first image's (scaled) dimensions. Empty input is a no-op.
        rate: scale factor applied to the first image's width/height.
    """
    # Fix: empty input previously raised IndexError on imgs[0].
    if not imgs:
        return
    n = len(imgs)
    first = imgs[0]
    width = int(first.size[0] * rate)
    height = int(first.size[1] * rate)
    interval = int(0.05 * width)
    canvas = Image.new("RGB", (n * width + interval * (n - 1), height), "white")
    for i, im in enumerate(imgs):
        # Fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
        # same filter under its current name.
        im = im.resize((width, height), Image.LANCZOS)
        canvas.paste(im, (i * (width + interval), 0))
    # Ensure the output directory exists before saving.
    os.makedirs("imgs", exist_ok=True)
    canvas.save("imgs/result.jpg")
if __name__ == "__main__":
connet(load(nets))
| [
"numpy.uint8",
"PIL.Image.new",
"numpy.transpose",
"numpy.load"
] | [((717, 784), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(n * width + interval * (n - 1), height)', '"""white"""'], {}), "('RGB', (n * width + interval * (n - 1), height), 'white')\n", (726, 784), False, 'from PIL import Image\n'), ((230, 286), 'numpy.load', 'np.load', (['data_path'], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(data_path, allow_pickle=True, encoding='latin1')\n", (237, 286), True, 'import numpy as np\n'), ((329, 361), 'numpy.transpose', 'np.transpose', (['imgs[0]', '(0, 1, 2)'], {}), '(imgs[0], (0, 1, 2))\n', (341, 361), True, 'import numpy as np\n'), ((391, 404), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (399, 404), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code from different mains."""
import jax.numpy as jnp
import numpy as np
STEPS_PER_EPOCH = 4500
def create_learning_rate_scheduler(
    factors='constant * linear_warmup * rsqrt_decay',
    base_learning_rate=0.5,
    warmup_steps=1000,
    decay_factor=0.5,
    steps_per_decay=20000,
    steps_per_cycle=100000,
    init_step=0,
    finetune_lr=False):
  """Creates learning rate schedule.

  Interprets factors in the factors string which can consist of:
  * constant: interpreted as the constant value,
  * linear_warmup: interpreted as linear warmup until warmup_steps,
  * rsqrt_decay: divide by square root of max(step, warmup_steps)
  * rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
  * decay_every: Every k steps decay the learning rate by decay_factor.
  * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.

  Args:
    factors: string, factors separated by "*" that defines the schedule.
    base_learning_rate: float, the starting constant for the lr schedule.
    warmup_steps: int, how many steps to warm up for in the warmup schedule.
    decay_factor: float, the amount to decay the learning rate by.
    steps_per_decay: int, how often to decay the learning rate.
    steps_per_cycle: int, steps per cycle when using cosine decay.
    init_step: int, first step of this run. Used with finetune_lr
    finetune_lr: bool, modify step count for finetuning smaller datasets

  Returns:
    a function learning_rate(step) -> jnp.float32 scalar, the step-dependent
    learning rate. (Fixed: the previous docstring claimed a
    {"learning_rate": float} dict was returned, but the function returns the
    scalar itself -- see the jnp.asarray at the end of step_fn.)

  Raises:
    ValueError: (from the returned function) if `factors` contains an
      unknown factor name.
  """
  factors = [n.strip() for n in factors.split('*')]

  def step_fn(step):
    """Step to learning rate function."""
    ret = 1.0
    if finetune_lr:
      # Rescale the step count so a smaller finetuning dataset traverses the
      # schedule at the same epoch-relative pace as the original run.
      steps_this_run = step - init_step
      multiplier = STEPS_PER_EPOCH / steps_per_cycle
      finetune_steps = steps_this_run * multiplier
      step = init_step + finetune_steps
    # Factors multiply together; each name contributes one term.
    for name in factors:
      if name == 'constant':
        ret *= base_learning_rate
      elif name == 'linear_warmup':
        ret *= jnp.minimum(1.0, step / warmup_steps)
      elif name == 'rsqrt_decay':
        ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif name == 'rsqrt_normalized_decay':
        ret *= jnp.sqrt(warmup_steps)
        ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif name == 'decay_every':
        ret *= decay_factor**(step // steps_per_decay)
      elif name == 'cosine_decay':
        progress = jnp.maximum(0.0,
                               (step - warmup_steps) / float(steps_per_cycle))
        ret *= jnp.maximum(0.0,
                          0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
      else:
        raise ValueError('Unknown factor %s.' % name)
    return jnp.asarray(ret, dtype=jnp.float32)

  return step_fn
def pad_examples(x, desired_batch_size):
  """Expand batch to desired size by repeating the last example.

  Generalized from the original 2-D-only implementation
  (np.tile(x[-1], (pad, 1))) to arrays of any rank: np.repeat on the
  slice x[-1:] keeps all trailing dimensions intact.

  Args:
    x: array whose leading axis is the batch dimension.
    desired_batch_size: target leading-axis size; must be >= x.shape[0].

  Returns:
    Array with leading axis desired_batch_size whose extra rows are
    copies of x[-1].
  """
  batch_pad = desired_batch_size - x.shape[0]
  return np.concatenate([x, np.repeat(x[-1:], batch_pad, axis=0)], axis=0)
def tohost(x):
  """Collect batches from all devices to host and flatten batch dimensions.

  The leading (device, per-device-batch) axes are merged into one; all
  trailing axes are preserved.
  """
  device_count, per_device, *rest = x.shape
  merged_shape = (device_count * per_device, *rest)
  return np.array(x).reshape(merged_shape)
| [
"numpy.tile",
"jax.numpy.cos",
"jax.numpy.sqrt",
"jax.numpy.asarray",
"numpy.array",
"jax.numpy.maximum",
"jax.numpy.minimum"
] | [((3342, 3377), 'jax.numpy.asarray', 'jnp.asarray', (['ret'], {'dtype': 'jnp.float32'}), '(ret, dtype=jnp.float32)\n', (3353, 3377), True, 'import jax.numpy as jnp\n'), ((3575, 3605), 'numpy.tile', 'np.tile', (['x[-1]', '(batch_pad, 1)'], {}), '(x[-1], (batch_pad, 1))\n', (3582, 3605), True, 'import numpy as np\n'), ((3768, 3779), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3776, 3779), True, 'import numpy as np\n'), ((2646, 2683), 'jax.numpy.minimum', 'jnp.minimum', (['(1.0)', '(step / warmup_steps)'], {}), '(1.0, step / warmup_steps)\n', (2657, 2683), True, 'import jax.numpy as jnp\n'), ((2742, 2773), 'jax.numpy.maximum', 'jnp.maximum', (['step', 'warmup_steps'], {}), '(step, warmup_steps)\n', (2753, 2773), True, 'import jax.numpy as jnp\n'), ((2835, 2857), 'jax.numpy.sqrt', 'jnp.sqrt', (['warmup_steps'], {}), '(warmup_steps)\n', (2843, 2857), True, 'import jax.numpy as jnp\n'), ((2882, 2913), 'jax.numpy.maximum', 'jnp.maximum', (['step', 'warmup_steps'], {}), '(step, warmup_steps)\n', (2893, 2913), True, 'import jax.numpy as jnp\n'), ((3228, 3262), 'jax.numpy.cos', 'jnp.cos', (['(jnp.pi * (progress % 1.0))'], {}), '(jnp.pi * (progress % 1.0))\n', (3235, 3262), True, 'import jax.numpy as jnp\n')] |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
import torch
import torch.distributed as dist
from datasets import CustomDataset
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN
from timm.data.constants import IMAGENET_DEFAULT_STD
from timm.data.transforms import _pil_interp
from torchvision import transforms
from .cached_image_folder import CachedImageFolder
from .samplers import SubsetRandomSampler
def build_loader(config, manifest_info):
    """Build train/val datasets, distributed samplers, and DataLoaders.

    Requires torch.distributed to be initialized (dist.get_rank() /
    dist.get_world_size() are called unconditionally).

    Args:
        config: frozen config node supporting defrost()/freeze()
            (yacs-style -- presumably; confirm against the project config).
        manifest_info: two-element sequence; index 0 feeds the train
            dataset, index 1 the validation dataset.

    Returns:
        (dataset_train, dataset_val, data_loader_train, data_loader_val)
    """
    # Unlock the config around the train-dataset build, then re-freeze.
    # NOTE(review): nothing in the visible build_dataset mutates config --
    # the defrost/freeze pair may be vestigial; confirm before removing.
    config.defrost()
    dataset_train, _ = build_dataset(
        is_train=True,
        config=config,
        manifest_info=manifest_info[0],
    )
    config.freeze()
    print(
        f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset",
    )
    dataset_val, _ = build_dataset(
        is_train=False,
        config=config,
        manifest_info=manifest_info[1],
    )
    print(
        f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset",
    )
    num_tasks = dist.get_world_size()
    global_rank = dist.get_rank()
    if config.DATA.ZIP_MODE and config.DATA.CACHE_MODE == "part":
        # arange(rank, len, world_size): this rank's strided slice of the
        # train set, shuffled locally by SubsetRandomSampler.
        indices = np.arange(
            dist.get_rank(),
            len(dataset_train),
            dist.get_world_size(),
        )
        sampler_train = SubsetRandomSampler(indices)
    else:
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train,
            num_replicas=num_tasks,
            rank=global_rank,
            shuffle=True,
        )
    # Validation always uses the rank-strided subset sampler.
    indices = np.arange(
        dist.get_rank(),
        len(dataset_val),
        dist.get_world_size(),
    )
    sampler_val = SubsetRandomSampler(indices)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler_train,
        batch_size=config.DATA.BATCH_SIZE,
        num_workers=config.DATA.NUM_WORKERS,
        pin_memory=config.DATA.PIN_MEMORY,
        drop_last=True,  # keep per-step batch size constant during training
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        sampler=sampler_val,
        batch_size=config.DATA.BATCH_SIZE,
        shuffle=False,
        num_workers=config.DATA.NUM_WORKERS,
        pin_memory=config.DATA.PIN_MEMORY,
        drop_last=False,  # evaluate every sample
    )
    return dataset_train, dataset_val, data_loader_train, data_loader_val
def build_dataset(is_train, config, manifest_info):
    """Create the ImageNet dataset (zip-cached or manifest-backed) and its
    class count.

    Returns:
        (dataset, nb_classes) -- nb_classes is fixed at 1000 (ImageNet).

    Raises:
        NotImplementedError: for any config.DATA.DATASET other than
            "imagenet".
    """
    transform = build_transform(is_train, config)
    if config.DATA.DATASET != "imagenet":
        raise NotImplementedError("We only support ImageNet Now.")
    split = "train" if is_train else "val"
    if config.DATA.ZIP_MODE:
        # Read images out of <split>.zip via the <split>_map.txt annotations.
        dataset = CachedImageFolder(
            config.DATA.DATA_PATH,
            split + "_map.txt",
            split + ".zip@/",
            transform,
            cache_mode=config.DATA.CACHE_MODE if is_train else "part",
        )
    else:
        dataset = CustomDataset(
            manifest_info=manifest_info,
            transform=transform,
        )
    return dataset, 1000
def build_transform(is_train, config):
resize_im = config.DATA.IMG_SIZE > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=config.DATA.IMG_SIZE,
is_training=True,
color_jitter=config.AUG.COLOR_JITTER
if config.AUG.COLOR_JITTER > 0
else None,
auto_augment=config.AUG.AUTO_AUGMENT
if config.AUG.AUTO_AUGMENT != "none"
else None,
re_prob=config.AUG.REPROB,
re_mode=config.AUG.REMODE,
re_count=config.AUG.RECOUNT,
interpolation=config.DATA.INTERPOLATION,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
config.DATA.IMG_SIZE,
padding=4,
)
return transform
t = []
if resize_im:
if config.TEST.CROP:
size = int((256 / 224) * config.DATA.IMG_SIZE)
t.append(
transforms.Resize(
size,
interpolation=_pil_interp(config.DATA.INTERPOLATION),
),
# to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(config.DATA.IMG_SIZE))
else:
t.append(
transforms.Resize(
(config.DATA.IMG_SIZE, config.DATA.IMG_SIZE),
interpolation=_pil_interp(config.DATA.INTERPOLATION),
),
)
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t)
| [
"torch.utils.data.DistributedSampler",
"torchvision.transforms.CenterCrop",
"torch.distributed.get_rank",
"datasets.CustomDataset",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"timm.data.transforms._pil_interp",
"torchvision.transforms.ToTe... | [((1275, 1296), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1294, 1296), True, 'import torch.distributed as dist\n'), ((1315, 1330), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1328, 1330), True, 'import torch.distributed as dist\n'), ((1971, 2167), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_train'], {'sampler': 'sampler_train', 'batch_size': 'config.DATA.BATCH_SIZE', 'num_workers': 'config.DATA.NUM_WORKERS', 'pin_memory': 'config.DATA.PIN_MEMORY', 'drop_last': '(True)'}), '(dataset_train, sampler=sampler_train,\n batch_size=config.DATA.BATCH_SIZE, num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY, drop_last=True)\n', (1998, 2167), False, 'import torch\n'), ((2238, 2448), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_val'], {'sampler': 'sampler_val', 'batch_size': 'config.DATA.BATCH_SIZE', 'shuffle': '(False)', 'num_workers': 'config.DATA.NUM_WORKERS', 'pin_memory': 'config.DATA.PIN_MEMORY', 'drop_last': '(False)'}), '(dataset_val, sampler=sampler_val, batch_size=\n config.DATA.BATCH_SIZE, shuffle=False, num_workers=config.DATA.\n NUM_WORKERS, pin_memory=config.DATA.PIN_MEMORY, drop_last=False)\n', (2265, 2448), False, 'import torch\n'), ((5331, 5352), 'torchvision.transforms.Compose', 'transforms.Compose', (['t'], {}), '(t)\n', (5349, 5352), False, 'from torchvision import transforms\n'), ((1619, 1729), 'torch.utils.data.DistributedSampler', 'torch.utils.data.DistributedSampler', (['dataset_train'], {'num_replicas': 'num_tasks', 'rank': 'global_rank', 'shuffle': '(True)'}), '(dataset_train, num_replicas=num_tasks,\n rank=global_rank, shuffle=True)\n', (1654, 1729), False, 'import torch\n'), ((1819, 1834), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1832, 1834), True, 'import torch.distributed as dist\n'), ((1870, 1891), 'torch.distributed.get_world_size', 
'dist.get_world_size', ([], {}), '()\n', (1889, 1891), True, 'import torch.distributed as dist\n'), ((3738, 4109), 'timm.data.create_transform', 'create_transform', ([], {'input_size': 'config.DATA.IMG_SIZE', 'is_training': '(True)', 'color_jitter': '(config.AUG.COLOR_JITTER if config.AUG.COLOR_JITTER > 0 else None)', 'auto_augment': "(config.AUG.AUTO_AUGMENT if config.AUG.AUTO_AUGMENT != 'none' else None)", 're_prob': 'config.AUG.REPROB', 're_mode': 'config.AUG.REMODE', 're_count': 'config.AUG.RECOUNT', 'interpolation': 'config.DATA.INTERPOLATION'}), "(input_size=config.DATA.IMG_SIZE, is_training=True,\n color_jitter=config.AUG.COLOR_JITTER if config.AUG.COLOR_JITTER > 0 else\n None, auto_augment=config.AUG.AUTO_AUGMENT if config.AUG.AUTO_AUGMENT !=\n 'none' else None, re_prob=config.AUG.REPROB, re_mode=config.AUG.REMODE,\n re_count=config.AUG.RECOUNT, interpolation=config.DATA.INTERPOLATION)\n", (3754, 4109), False, 'from timm.data import create_transform\n'), ((5217, 5238), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5236, 5238), False, 'from torchvision import transforms\n'), ((5253, 5318), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['IMAGENET_DEFAULT_MEAN', 'IMAGENET_DEFAULT_STD'], {}), '(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)\n', (5273, 5318), False, 'from torchvision import transforms\n'), ((1438, 1453), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1451, 1453), True, 'import torch.distributed as dist\n'), ((1499, 1520), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1518, 1520), True, 'import torch.distributed as dist\n'), ((3305, 3368), 'datasets.CustomDataset', 'CustomDataset', ([], {'manifest_info': 'manifest_info', 'transform': 'transform'}), '(manifest_info=manifest_info, transform=transform)\n', (3318, 3368), False, 'from datasets import CustomDataset\n'), ((4399, 4453), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', 
(['config.DATA.IMG_SIZE'], {'padding': '(4)'}), '(config.DATA.IMG_SIZE, padding=4)\n', (4420, 4453), False, 'from torchvision import transforms\n'), ((948, 963), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (961, 963), True, 'import torch.distributed as dist\n'), ((1202, 1217), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1215, 1217), True, 'import torch.distributed as dist\n'), ((4914, 4957), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['config.DATA.IMG_SIZE'], {}), '(config.DATA.IMG_SIZE)\n', (4935, 4957), False, 'from torchvision import transforms\n'), ((4761, 4799), 'timm.data.transforms._pil_interp', '_pil_interp', (['config.DATA.INTERPOLATION'], {}), '(config.DATA.INTERPOLATION)\n', (4772, 4799), False, 'from timm.data.transforms import _pil_interp\n'), ((5130, 5168), 'timm.data.transforms._pil_interp', '_pil_interp', (['config.DATA.INTERPOLATION'], {}), '(config.DATA.INTERPOLATION)\n', (5141, 5168), False, 'from timm.data.transforms import _pil_interp\n')] |