code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import unittest
import pandas as pd
import supervise
import data
class test_supervise(unittest.TestCase):
    """Integration tests for supervise.multiclass on the iris dataset."""

    # Read in test Datafile
    def setUp(self):
        self.dataset = 'iris.data'
        self.headers = None
        self.classcolumn = 4
        self.folds = 2
        self.data, self.class_data, self.class_column = data.create_column_class(
            self.dataset, self.classcolumn, self.headers)

    def test_multiclass(self):
        """Every classifier's accuracy mean and stdev must lie in [0, 1]."""
        accuracies, classifiers = supervise.multiclass(
            self.folds, self.class_data, self.class_column)
        for clf_name, classifier in classifiers:
            # `True if cond else False` was redundant; assert the condition directly
            self.assertTrue(0 <= accuracies[clf_name]['stdev'] <= 1)
            self.assertTrue(0 <= accuracies[clf_name]['average'] <= 1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"supervise.multiclass",
"data.create_column_class"
] |
[((917, 932), 'unittest.main', 'unittest.main', ([], {}), '()\n', (930, 932), False, 'import unittest\n'), ((328, 398), 'data.create_column_class', 'data.create_column_class', (['self.dataset', 'self.classcolumn', 'self.headers'], {}), '(self.dataset, self.classcolumn, self.headers)\n', (352, 398), False, 'import data\n'), ((465, 533), 'supervise.multiclass', 'supervise.multiclass', (['self.folds', 'self.class_data', 'self.class_column'], {}), '(self.folds, self.class_data, self.class_column)\n', (485, 533), False, 'import supervise\n')]
|
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
def do_migration(db_dir):
    """Entry point: run the blobs.db schema migration in *db_dir*, with logging."""
    log.info("Doing the migration")
    migrate_blobs_db(db_dir)
    log.info("Migration succeeded")
def migrate_blobs_db(db_dir):
    """
    We migrate the blobs.db used in BlobManager to have a "should_announce" column,
    and set this to True for blobs that are sd_hash's or head blobs (first blob in stream)
    """
    blobs_db = os.path.join(db_dir, "blobs.db")
    lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')

    # skip migration on fresh installs
    if not os.path.isfile(blobs_db) and not os.path.isfile(lbryfile_info_db):
        return
    # if blobs.db doesn't exist, skip migration
    if not os.path.isfile(blobs_db):
        log.error("blobs.db was not found but lbryfile_info.db was found, skipping migration")
        return

    blobs_db_file = sqlite3.connect(blobs_db)
    blobs_db_cursor = blobs_db_file.cursor()

    # check if new column exists (it shouldn't) and create it
    try:
        blobs_db_cursor.execute("SELECT should_announce FROM blobs")
    except sqlite3.OperationalError:
        blobs_db_cursor.execute(
            "ALTER TABLE blobs ADD COLUMN should_announce integer NOT NULL DEFAULT 0")
    else:
        # log.warn is a deprecated alias of log.warning
        log.warning("should_announce already exists somehow, proceeding anyways")

    # if lbryfile_info.db doesn't exist, skip marking blobs as should_announce = True
    if not os.path.isfile(lbryfile_info_db):
        log.error("lbryfile_info.db was not found, skipping check for should_announce")
        # commit the ALTER TABLE (if any) and release the connection before
        # bailing out; the original returned here without committing or closing
        blobs_db_file.commit()
        blobs_db_file.close()
        return

    lbryfile_info_file = sqlite3.connect(lbryfile_info_db)
    lbryfile_info_cursor = lbryfile_info_file.cursor()

    # blobs that are stream descriptors (sd blobs)
    lbryfile_info_cursor.execute('SELECT * FROM lbry_file_descriptors')
    should_announce_blob_hashes = [(d[0],) for d in lbryfile_info_cursor.fetchall()]

    # blobs that are the first blob in a stream
    lbryfile_info_cursor.execute('SELECT * FROM lbry_file_blobs WHERE position = 0')
    should_announce_blob_hashes.extend((b[0],) for b in lbryfile_info_cursor.fetchall())

    # now mark them as should_announce = True
    blobs_db_cursor.executemany('UPDATE blobs SET should_announce=1 WHERE blob_hash=?',
                                should_announce_blob_hashes)

    # Now run some final checks here to make sure migration succeeded
    try:
        blobs_db_cursor.execute("SELECT should_announce FROM blobs")
    except sqlite3.OperationalError:
        raise Exception('Migration failed, cannot find should_announce')
    blobs_db_cursor.execute("SELECT * FROM blobs WHERE should_announce=1")
    blobs = blobs_db_cursor.fetchall()
    if len(blobs) != len(should_announce_blob_hashes):
        log.error("Some how not all blobs were marked as announceable")

    blobs_db_file.commit()
    blobs_db_file.close()
    lbryfile_info_file.close()
|
[
"os.path.isfile",
"sqlite3.connect",
"os.path.join",
"logging.getLogger"
] |
[((47, 74), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (64, 74), False, 'import logging\n'), ((443, 475), 'os.path.join', 'os.path.join', (['db_dir', '"""blobs.db"""'], {}), "(db_dir, 'blobs.db')\n", (455, 475), False, 'import os\n'), ((499, 539), 'os.path.join', 'os.path.join', (['db_dir', '"""lbryfile_info.db"""'], {}), "(db_dir, 'lbryfile_info.db')\n", (511, 539), False, 'import os\n'), ((890, 915), 'sqlite3.connect', 'sqlite3.connect', (['blobs_db'], {}), '(blobs_db)\n', (905, 915), False, 'import sqlite3\n'), ((1609, 1642), 'sqlite3.connect', 'sqlite3.connect', (['lbryfile_info_db'], {}), '(lbryfile_info_db)\n', (1624, 1642), False, 'import sqlite3\n'), ((733, 757), 'os.path.isfile', 'os.path.isfile', (['blobs_db'], {}), '(blobs_db)\n', (747, 757), False, 'import os\n'), ((1446, 1478), 'os.path.isfile', 'os.path.isfile', (['lbryfile_info_db'], {}), '(lbryfile_info_db)\n', (1460, 1478), False, 'import os\n'), ((591, 615), 'os.path.isfile', 'os.path.isfile', (['blobs_db'], {}), '(blobs_db)\n', (605, 615), False, 'import os\n'), ((624, 656), 'os.path.isfile', 'os.path.isfile', (['lbryfile_info_db'], {}), '(lbryfile_info_db)\n', (638, 656), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
""" Helper methods for OS related tasks. """
from getpass import getpass
import os
import platform
import re
import shlex
import subprocess
import urllib2
from snipsmanagercore import pretty_printer as pp
# Loose sanity-check pattern: something@something.something
email_regex = r"[^@]+@[^@]+\.[^@]+"
# Matches http(s)/ftp(s)/git(s) URLs: domain, localhost or dotted IPv4,
# optional port and path.
github_url_regex = re.compile(
    r'^(?:http|ftp|git)s?://' # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
    r'localhost|' #localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
    r'(?::\d+)?' # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def cmd_exists(cmd):
    """ Check if a command exists.

    :param cmd: the command to look for.
    :return: true if the command exists, false otherwise.
    """
    # `type` is a shell builtin; exit status 0 means the command is known
    retcode = subprocess.call("type " + cmd, shell=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return retcode == 0
def is_raspi_os():
    """ Check if the current system is Raspberry.

    :return: true if the current system is Raspberry.
    """
    uname_str = " ".join(os.uname())
    return 'arm' in uname_str
def is_mac_os():
    """ Check if the current system is OSX.

    :return: true if the current system is OSX.
    """
    system_name = platform.system()
    return 'Darwin' in system_name
def is_node_available():
    """Return True if both the node and npm commands are installed."""
    # all() short-circuits exactly like the original `and` chain
    return all(cmd_exists(tool) for tool in ('node', 'npm'))
def file_exists(file_path):
    """ Check if a path exists on disk (file or directory).

    :param file_path: the path to check.
    :return: True if the path exists.
    """
    return os.path.exists(file_path)
def create_dir(dir_name):
    """ Create directory in the current working directory, if it does
        not exist already.

    :param dir_name: the name of the directory.
    """
    if os.path.exists(dir_name):
        return
    os.makedirs(dir_name)
def create_dir_verbose(dir_name, indentation_level):
    """ Same as :func:`create_dir`; ``indentation_level`` is accepted for
    API compatibility but is not used. """
    create_dir(dir_name)
def write_text_file(output_file_path, text):
    """ Write *text* to *output_file_path*, replacing any existing content. """
    with open(output_file_path, "w") as out:
        out.write(text)
def write_binary_file(output_file_path, content):
    """ Write binary *content* to *output_file_path*, replacing any existing content. """
    with open(output_file_path, "wb") as out:
        out.write(content)
def read_file(file_path):
    """ Read a text file.

    :param file_path: the path to the file.
    :return: the file contents, or None if the file does not exist.
    """
    if not file_exists(file_path):
        return None
    with open(file_path, "r") as f:
        return f.read()
    # the original had an unreachable trailing `return None` here; removed
def write_text_file_verbose(output_file_path, text, indentation_level):
    """ Same as :func:`write_text_file`; ``indentation_level`` is unused. """
    write_text_file(output_file_path, text)
def execute_command(command, silent=False):
    """ Execute a shell command.

    :param command: the command to execute.
    :param silent: if True, do not output anything to terminal.
    :return: the (stdout, stderr) pair from ``Popen.communicate()``.
    """
    if silent:
        # the original opened two devnull handles and never closed them;
        # scope one handle to the process lifetime instead
        with open(os.devnull, 'w') as devnull:
            return subprocess.Popen(
                command.split(), stdout=devnull, stderr=devnull).communicate()
    return subprocess.Popen(
        command.split(), stdout=subprocess.PIPE,
        stderr=subprocess.PIPE).communicate()
def pipe_commands(first_command, second_command, silent):
    """ Execute piped commands: `first_command | second_command`.

    :param first_command: the first command to execute.
    :param second_command: the second command to execute.
    :param silent: if True, discard the output of the second command.
    """
    process1 = subprocess.Popen(first_command.split(), stdout=subprocess.PIPE)
    if silent:
        # the original leaked this devnull handle; close it when done
        with open(os.devnull, 'w') as FNULL:
            process2 = subprocess.Popen(
                second_command.split(), stdin=process1.stdout, stdout=FNULL)
            process1.stdout.close()
            process2.communicate()
        return
    process2 = subprocess.Popen(
        second_command.split(), stdin=process1.stdout)
    process1.stdout.close()
    process2.communicate()
def remove_file(file_path):
    """ Delete a file.

    :param file_path: the path to the file.
    """
    try:
        os.remove(file_path)
    except OSError:
        # best-effort: a missing file (or permission error) is silently ignored
        pass
def download_file(url, output_file):
    """ Download a file.

    :param url: the remote location of the file.
    :param output_file: the file to write to.
    """
    response = urllib2.urlopen(url)
    with open(output_file, 'wb') as output:
        output.write(response.read())
def ask_yes_no(question, default_value=None):
    """ Ask a yes/no question in the prompt.

    :param question: the question to ask.
    :param default_value: if given, returned immediately without prompting.
    :return: true if the user answered yes (or empty), false otherwise
    """
    if default_value is not None:
        return default_value
    answer = raw_input("{} [Y/n] ".format(question))
    if answer is None:
        return True
    # empty answer counts as yes, as does any variant of "y"
    return answer.strip() == "" or answer.lower() == "y"
def ask_for_input(question, default_value=None):
    """ Prompt the user for free-form input.

    :param question: the question to ask.
    :param default_value: value used when the user just hits enter.
    :return: the answer if non-blank, otherwise None.
    """
    if default_value and len(default_value) > 0:
        prompt = pp.generate_user_input_string("{} [{}] ".format(question, default_value))
        answer = raw_input(prompt)
        if len(answer) == 0:  # The user hit enter.
            answer = default_value
    else:
        prompt = pp.generate_user_input_string(question)
        answer = raw_input(prompt)
    if answer is None or answer.strip() == "":
        return None
    return answer
def ask_for_password(question):
    """ Prompt for a password without echoing it.

    :param question: the question to ask.
    :return: the stripped answer, or None if it was blank.
    """
    prompt = pp.generate_user_input_string(question)
    answer = getpass(prompt)
    if answer is None or answer.strip() == "":
        return None
    return answer.strip()
def which(command):
    """ Get full path for an executable.

    :param command: the executable command, e.g. 'node'.
    :return: the full path for the command, e.g. '/usr/local/bin/node'.
    """
    try:
        output = subprocess.check_output(['which', command])
    except subprocess.CalledProcessError:
        # non-zero exit status: the command is not on PATH
        return None
    return output.strip()
def reboot():
    """ Reboot the device by invoking `sudo reboot`. """
    execute_command("sudo reboot")
def get_os_name():
    """ Return the OS pretty name from /etc/os-release, or None if absent. """
    os_release = subprocess.check_output(['cat', '/etc/os-release'])
    for line in os_release.splitlines():
        if not line.startswith("PRETTY_NAME="):
            continue
        parts = line.split("=")
        if len(parts) > 1:
            # strip the surrounding double quotes
            return parts[1].replace("\"", "")
    return None
def get_revision():
    """ Read the hardware revision from /proc/cpuinfo via
    `cat /proc/cpuinfo | grep Revision | awk '{print $3}'`. """
    cat_proc = subprocess.Popen('cat /proc/cpuinfo'.split(), stdout=subprocess.PIPE)
    grep_rev = subprocess.Popen('grep Revision'.split(), stdin=cat_proc.stdout, stdout=subprocess.PIPE)
    awk_col = subprocess.Popen(['awk', '{print $3}'], stdin=grep_rev.stdout)
    cat_proc.stdout.close()
    grep_rev.stdout.close()
    # awk inherits stdout, so communicate() yields (None, None)
    return awk_col.communicate()
def get_sysinfo():
    """ Collect basic system information as a dict. """
    info = {}
    info["os_name"] = get_os_name()
    return info
def get_command_output(command_array):
    """ Run a command and return its captured standard output.

    :param command_array: the command and its arguments as a list.
    :return: the command's stdout.
    """
    return subprocess.check_output(command_array)
def get_user_email_git():
    """ Return the e-mail address configured in git, or None. """
    # guard clause instead of the original if/else nesting
    if not cmd_exists("git"):
        return None
    command = "git config user.email"
    output = get_command_output(command.split())
    if output is not None and len(output) > 0:
        return output.strip()
    return None
def email_is_valid(email):
    """ Check whether *email* loosely looks like an e-mail address.

    :param email: the address to check.
    :return: True if it matches ``email_regex``, False otherwise.
    """
    # `True if m else False` was redundant; test the match object directly
    return re.match(email_regex, email) is not None
def is_valid_github_url(url):
    """ Check whether *url* matches the repository URL pattern.

    :param url: the URL to check.
    :return: True if it matches ``github_url_regex``, False otherwise.
    """
    # `True if m else False` was redundant; test the match object directly
    return re.match(github_url_regex, url) is not None
|
[
"subprocess.Popen",
"os.remove",
"os.makedirs",
"getpass.getpass",
"snipsmanagercore.pretty_printer.generate_user_input_string",
"subprocess.check_output",
"os.path.exists",
"re.match",
"os.uname",
"subprocess.call",
"platform.system",
"urllib2.urlopen",
"re.compile"
] |
[((288, 511), 're.compile', 're.compile', (['"""^(?:http|ftp|git)s?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$"""', 're.IGNORECASE'], {}), "(\n '^(?:http|ftp|git)s?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$'\n , re.IGNORECASE)\n", (298, 511), False, 'import re\n'), ((1455, 1480), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1469, 1480), False, 'import os\n'), ((3793, 3813), 'urllib2.urlopen', 'urllib2.urlopen', (['url'], {}), '(url)\n', (3808, 3813), False, 'import urllib2\n'), ((4927, 4966), 'snipsmanagercore.pretty_printer.generate_user_input_string', 'pp.generate_user_input_string', (['question'], {}), '(question)\n', (4956, 4966), True, 'from snipsmanagercore import pretty_printer as pp\n'), ((4980, 4997), 'getpass.getpass', 'getpass', (['question'], {}), '(question)\n', (4987, 4997), False, 'from getpass import getpass\n'), ((5581, 5632), 'subprocess.check_output', 'subprocess.check_output', (["['cat', '/etc/os-release']"], {}), "(['cat', '/etc/os-release'])\n", (5604, 5632), False, 'import subprocess\n'), ((6117, 6179), 'subprocess.Popen', 'subprocess.Popen', (["['awk', '{print $3}']"], {'stdin': 'process2.stdout'}), "(['awk', '{print $3}'], stdin=process2.stdout)\n", (6133, 6179), False, 'import subprocess\n'), ((6395, 6433), 'subprocess.check_output', 'subprocess.check_output', (['command_array'], {}), '(command_array)\n', (6418, 6433), False, 'import subprocess\n'), ((876, 971), 'subprocess.call', 'subprocess.call', (["('type ' + cmd)"], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "('type ' + cmd, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n", (891, 971), False, 'import 
subprocess\n'), ((1317, 1334), 'platform.system', 'platform.system', ([], {}), '()\n', (1332, 1334), False, 'import platform\n'), ((1674, 1698), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (1688, 1698), False, 'import os\n'), ((1708, 1729), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (1719, 1729), False, 'import os\n'), ((3549, 3569), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (3558, 3569), False, 'import os\n'), ((4696, 4735), 'snipsmanagercore.pretty_printer.generate_user_input_string', 'pp.generate_user_input_string', (['question'], {}), '(question)\n', (4725, 4735), True, 'from snipsmanagercore import pretty_printer as pp\n'), ((6766, 6794), 're.match', 're.match', (['email_regex', 'email'], {}), '(email_regex, email)\n', (6774, 6794), False, 'import re\n'), ((6857, 6888), 're.match', 're.match', (['github_url_regex', 'url'], {}), '(github_url_regex, url)\n', (6865, 6888), False, 'import re\n'), ((1162, 1172), 'os.uname', 'os.uname', ([], {}), '()\n', (1170, 1172), False, 'import os\n'), ((5335, 5378), 'subprocess.check_output', 'subprocess.check_output', (["['which', command]"], {}), "(['which', command])\n", (5358, 5378), False, 'import subprocess\n')]
|
'''Physarum simulation example.
See https://sagejenson.com/physarum for the details.'''
import numpy as np
import taichi as ti
ti.init(arch=ti.gpu)

# Simulation parameters (see https://sagejenson.com/physarum)
PARTICLE_N = 1024            # number of simulated agents
GRID_SIZE = 512              # trail map is GRID_SIZE x GRID_SIZE, toroidal
SENSE_ANGLE = 0.20 * np.pi   # angular offset of the left/right sensors
SENSE_DIST = 4.0             # distance of the sensors ahead of the agent
EVAPORATION = 0.95           # trail decay factor applied during diffusion
MOVE_ANGLE = 0.1 * np.pi     # turning step per update
MOVE_STEP = 2.0              # distance moved per update
# double-buffered trail field: index 0/1 selects the read/write phase
grid = ti.field(dtype=ti.f32, shape=[2, GRID_SIZE, GRID_SIZE])
position = ti.Vector.field(2, dtype=ti.f32, shape=[PARTICLE_N])  # agent positions
heading = ti.field(dtype=ti.f32, shape=[PARTICLE_N])  # agent headings (radians)
@ti.kernel
def init():
    # clear both phases of the trail grid
    for p in ti.grouped(grid):
        grid[p] = 0.0
    # scatter agents uniformly over the grid with random headings
    for i in position:
        position[i] = ti.Vector([ti.random(), ti.random()]) * GRID_SIZE
        heading[i] = ti.random() * np.pi * 2.0
@ti.func
def sense(phase, pos, ang):
    # Sample the trail grid SENSE_DIST ahead of `pos` in direction `ang`,
    # wrapping indices so the grid behaves as a torus.
    p = pos + ti.Vector([ti.cos(ang), ti.sin(ang)]) * SENSE_DIST
    return grid[phase, p.cast(int) % GRID_SIZE]
@ti.kernel
def step(phase: ti.i32):
    # move: steer each agent toward the strongest of three trail readings
    for i in position:
        pos, ang = position[i], heading[i]
        l = sense(phase, pos, ang - SENSE_ANGLE)
        c = sense(phase, pos, ang)
        r = sense(phase, pos, ang + SENSE_ANGLE)
        if l < c < r:
            ang += MOVE_ANGLE
        elif l > c > r:
            ang -= MOVE_ANGLE
        elif c < l and c < r:
            # weaker trail straight ahead: turn randomly left or right
            ang += MOVE_ANGLE * (2 * (ti.random() < 0.5) - 1)
        pos += ti.Vector([ti.cos(ang), ti.sin(ang)]) * MOVE_STEP
        position[i], heading[i] = pos, ang
    # deposit: each agent strengthens the trail at its current cell
    for i in position:
        ipos = position[i].cast(int) % GRID_SIZE
        grid[phase, ipos] += 1.0
    # diffuse: 3x3 box blur with evaporation, written into the other phase
    for i, j in ti.ndrange(GRID_SIZE, GRID_SIZE):
        a = 0.0
        for di in ti.static(range(-1, 2)):
            for dj in ti.static(range(-1, 2)):
                a += grid[phase, (i + di) % GRID_SIZE, (j + dj) % GRID_SIZE]
        a *= EVAPORATION / 9.0
        grid[1 - phase, i, j] = a
print("[Hint] Press A/Z to change the simulation speed.")
gui = ti.GUI('Physarum')
init()
i = 0
step_per_frame = gui.slider('step_per_frame', 1, 100, 1)
while gui.running and not gui.get_event(gui.ESCAPE):
for _ in range(int(step_per_frame.value)):
step(i % 2)
i += 1
gui.set_image(grid.to_numpy()[0])
gui.show()
|
[
"taichi.field",
"taichi.GUI",
"taichi.Vector.field",
"taichi.sin",
"taichi.grouped",
"taichi.cos",
"taichi.init",
"taichi.ndrange",
"taichi.random"
] |
[((129, 149), 'taichi.init', 'ti.init', ([], {'arch': 'ti.gpu'}), '(arch=ti.gpu)\n', (136, 149), True, 'import taichi as ti\n'), ((297, 352), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': '[2, GRID_SIZE, GRID_SIZE]'}), '(dtype=ti.f32, shape=[2, GRID_SIZE, GRID_SIZE])\n', (305, 352), True, 'import taichi as ti\n'), ((364, 416), 'taichi.Vector.field', 'ti.Vector.field', (['(2)'], {'dtype': 'ti.f32', 'shape': '[PARTICLE_N]'}), '(2, dtype=ti.f32, shape=[PARTICLE_N])\n', (379, 416), True, 'import taichi as ti\n'), ((427, 469), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': '[PARTICLE_N]'}), '(dtype=ti.f32, shape=[PARTICLE_N])\n', (435, 469), True, 'import taichi as ti\n'), ((1895, 1913), 'taichi.GUI', 'ti.GUI', (['"""Physarum"""'], {}), "('Physarum')\n", (1901, 1913), True, 'import taichi as ti\n'), ((508, 524), 'taichi.grouped', 'ti.grouped', (['grid'], {}), '(grid)\n', (518, 524), True, 'import taichi as ti\n'), ((1547, 1579), 'taichi.ndrange', 'ti.ndrange', (['GRID_SIZE', 'GRID_SIZE'], {}), '(GRID_SIZE, GRID_SIZE)\n', (1557, 1579), True, 'import taichi as ti\n'), ((664, 675), 'taichi.random', 'ti.random', ([], {}), '()\n', (673, 675), True, 'import taichi as ti\n'), ((604, 615), 'taichi.random', 'ti.random', ([], {}), '()\n', (613, 615), True, 'import taichi as ti\n'), ((617, 628), 'taichi.random', 'ti.random', ([], {}), '()\n', (626, 628), True, 'import taichi as ti\n'), ((754, 765), 'taichi.cos', 'ti.cos', (['ang'], {}), '(ang)\n', (760, 765), True, 'import taichi as ti\n'), ((767, 778), 'taichi.sin', 'ti.sin', (['ang'], {}), '(ang)\n', (773, 778), True, 'import taichi as ti\n'), ((1314, 1325), 'taichi.cos', 'ti.cos', (['ang'], {}), '(ang)\n', (1320, 1325), True, 'import taichi as ti\n'), ((1327, 1338), 'taichi.sin', 'ti.sin', (['ang'], {}), '(ang)\n', (1333, 1338), True, 'import taichi as ti\n'), ((1264, 1275), 'taichi.random', 'ti.random', ([], {}), '()\n', (1273, 1275), True, 'import taichi as ti\n')]
|
from django.shortcuts import render
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from loans.models import Product, ProductConfig, InterestConfig
from api.serializers import LoanInterestConfigSerializer as InterestConfigSerializer
from rest_framework import status
from api.views.permissions_check import *
from core.utils import randomstr
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def createInterestConfig(request, product_id, product_config_id):
    """Create an InterestConfig under a product configuration.

    Requires 'day' and 'structure' in the request body; 'amount' and 'label'
    are optional. The caller must be an 'admin' member of the product's
    organization; every validation failure returns a 400 response.
    """
    user = request.user
    data = request.data
    # Check to make sure the product_config is valid
    product_config = ProductConfig.objects.filter(product_config_id=product_config_id, status='active').first()
    if not product_config:
        message = {'detail': 'Product configuration does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Check if the product is valid
    product = Product.objects.filter(product_id=product_id, status='active').first()
    if not product:
        message = {'detail': 'Product does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if product_config.product != product:
        message = {'detail': 'Product id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Verify the user has permission to be looking at and editing product details (must be part of the org and admin)
    organization_id = product.organization.organization_id
    user_organization = check_organization_permissions(user=user, organization_id=organization_id, roles=['admin'])
    if not user_organization['organization']:
        message = {'detail': user_organization['message']}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if product.organization != user_organization['organization']:
        message = {'detail': 'Organization id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    try:
        # Create the product configuration
        interest_config = InterestConfig.objects.create(
            interest_config_id=randomstr(),
            product_config=product_config,
            day=data['day'],
            structure=data['structure'],
            created_user=user,
        )
        # optional fields are applied (and persisted) only when supplied
        if 'amount' in data:
            interest_config.amount=data['amount']
            interest_config.save()
        if 'label' in data:
            interest_config.label=data['label']
            interest_config.save()
        serializer = InterestConfigSerializer(interest_config, many=False)
        return Response(serializer.data)
    except Exception as e:
        print(e)
        message = {'detail': 'There was an error creating this interest configuration'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getInterestConfigs(request, product_id, product_config_id):
    """List active InterestConfigs for a product configuration, paginated.

    Query params: 'page' (1-based, 5 items per page; invalid values fall back
    to the first/last page). The caller must be an 'admin' member of the
    product's organization; every validation failure returns a 400 response.
    """
    user = request.user
    # Check to make sure the product_config is valid
    product_config = ProductConfig.objects.filter(product_config_id=product_config_id, status='active').first()
    if not product_config:
        message = {'detail': 'Product configuration does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Check if the product is valid
    product = Product.objects.filter(product_id=product_id, status='active').first()
    if not product:
        message = {'detail': 'Product does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if product_config.product != product:
        message = {'detail': 'Product id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Verify the user has permission to be looking at and editing product details (must be part of the org and admin)
    organization_id = product.organization.organization_id
    user_organization = check_organization_permissions(user=user, organization_id=organization_id, roles=['admin'])
    if not user_organization['organization']:
        message = {'detail': user_organization['message']}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if product.organization != user_organization['organization']:
        message = {'detail': 'Organization id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    interest_configs = InterestConfig.objects.filter(
        product_config=product_config,
        status='active',
    ).order_by('day')
    page = request.query_params.get('page')
    paginator = Paginator(interest_configs, 5)
    try:
        interest_configs = paginator.page(page)
    except PageNotAnInteger:
        interest_configs = paginator.page(1)
    except EmptyPage:
        interest_configs = paginator.page(paginator.num_pages)
    # normalize the reported page number; removed the unused `data` and
    # `organization` locals and the stray debug print from the original
    if page is None:
        page = 1
    page = int(page)
    serializer = InterestConfigSerializer(interest_configs, many=True)
    return Response({'interest_configs': serializer.data, 'page': page, 'pages': paginator.num_pages})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getInterestConfig(request, product_id, product_config_id, interest_config_id):
    """Fetch a single active InterestConfig, validating the full id chain
    (interest config -> product config -> product -> organization)."""
    user = request.user
    # Resolve every resource in the chain; any miss short-circuits with a 400.
    interest_config = InterestConfig.objects.filter(interest_config_id=interest_config_id, status='active').first()
    if not interest_config:
        return Response({'detail': 'Interest configuration does not exist'}, status=status.HTTP_400_BAD_REQUEST)
    product_config = ProductConfig.objects.filter(product_config_id=product_config_id, status='active').first()
    if not product_config:
        return Response({'detail': 'Product configuration does not exist'}, status=status.HTTP_400_BAD_REQUEST)
    if interest_config.product_config != product_config:
        return Response({'detail': 'Product config id is not valid'}, status=status.HTTP_400_BAD_REQUEST)
    product = Product.objects.filter(product_id=product_id, status='active').first()
    if not product:
        return Response({'detail': 'Product does not exist'}, status=status.HTTP_400_BAD_REQUEST)
    if product_config.product != product:
        return Response({'detail': 'Product id is not valid'}, status=status.HTTP_400_BAD_REQUEST)
    # The caller must be an admin of the product's organization.
    user_organization = check_organization_permissions(
        user=user, organization_id=product.organization.organization_id, roles=['admin'])
    if not user_organization['organization']:
        return Response({'detail': user_organization['message']}, status=status.HTTP_400_BAD_REQUEST)
    if product.organization != user_organization['organization']:
        return Response({'detail': 'Organization id is not valid'}, status=status.HTTP_400_BAD_REQUEST)
    return Response(InterestConfigSerializer(interest_config, many=False).data)
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def updateInterestConfig(request, product_id, product_config_id, interest_config_id):
    """Update an active InterestConfig's 'label', 'day', 'structure' and/or
    'amount' fields (only those present in the request body).

    The full id chain is validated and the caller must be an 'admin' member
    of the product's organization; every failure returns a 400 response.
    """
    data = request.data
    user = request.user
    # Check to make sure the interest_config is valid
    interest_config = InterestConfig.objects.filter(interest_config_id=interest_config_id, status='active').first()
    if not interest_config:
        message = {'detail': 'Interest configuration does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Check to make sure the product_config is valid
    product_config = ProductConfig.objects.filter(product_config_id=product_config_id, status='active').first()
    if not product_config:
        message = {'detail': 'Product configuration does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if interest_config.product_config != product_config:
        message = {'detail': 'Product config id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Check if the product is valid
    product = Product.objects.filter(product_id=product_id, status='active').first()
    if not product:
        message = {'detail': 'Product does not exist'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if product_config.product != product:
        message = {'detail': 'Product id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # Verify the user has permission to be looking at and editing product details (must be part of the org and admin)
    organization_id = product.organization.organization_id
    user_organization = check_organization_permissions(user=user, organization_id=organization_id, roles=['admin'])
    if not user_organization['organization']:
        message = {'detail': user_organization['message']}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    if product.organization != user_organization['organization']:
        message = {'detail': 'Organization id is not valid'}
        return Response(message, status=status.HTTP_400_BAD_REQUEST)
    # apply only the supplied fields, then persist once
    if 'label' in data:
        interest_config.label=data['label']
    if 'day' in data:
        interest_config.day=data['day']
    if 'structure' in data:
        interest_config.structure=data['structure']
    if 'amount' in data:
        interest_config.amount=data['amount']
    interest_config.save()
    serializer = InterestConfigSerializer(interest_config, many=False)
    return Response(serializer.data)
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def archiveInterestConfig(request, product_id, product_config_id, interest_config_id):
    """Mark an active InterestConfig as 'archived' after validating the id
    chain and the caller's admin membership in the owning organization."""
    user = request.user
    # Resolve every resource in the chain; any miss short-circuits with a 400.
    interest_config = InterestConfig.objects.filter(interest_config_id=interest_config_id, status='active').first()
    if not interest_config:
        return Response({'detail': 'Interest configuration does not exist'}, status=status.HTTP_400_BAD_REQUEST)
    product_config = ProductConfig.objects.filter(product_config_id=product_config_id, status='active').first()
    if not product_config:
        return Response({'detail': 'Product configuration does not exist'}, status=status.HTTP_400_BAD_REQUEST)
    if interest_config.product_config != product_config:
        return Response({'detail': 'Product config id is not valid'}, status=status.HTTP_400_BAD_REQUEST)
    product = Product.objects.filter(product_id=product_id, status='active').first()
    if not product:
        return Response({'detail': 'Product does not exist'}, status=status.HTTP_400_BAD_REQUEST)
    if product_config.product != product:
        return Response({'detail': 'Product id is not valid'}, status=status.HTTP_400_BAD_REQUEST)
    # Only admins of the product's organization may archive.
    user_organization = check_organization_permissions(
        user=user, organization_id=product.organization.organization_id, roles=['admin'])
    if not user_organization['organization']:
        return Response({'detail': user_organization['message']}, status=status.HTTP_400_BAD_REQUEST)
    if product.organization != user_organization['organization']:
        return Response({'detail': 'Organization id is not valid'}, status=status.HTTP_400_BAD_REQUEST)
    interest_config.status = 'archived'
    interest_config.save()
    return Response({'detail': 'Interest configuration was archived'}, status=200)
|
[
"core.utils.randomstr",
"loans.models.ProductConfig.objects.filter",
"loans.models.InterestConfig.objects.filter",
"rest_framework.response.Response",
"django.core.paginator.Paginator",
"loans.models.Product.objects.filter",
"rest_framework.decorators.permission_classes",
"rest_framework.decorators.api_view",
"api.serializers.LoanInterestConfigSerializer"
] |
[((554, 572), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (562, 572), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((574, 611), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (592, 611), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((3023, 3040), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3031, 3040), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((3042, 3079), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (3060, 3079), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((5399, 5416), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (5407, 5416), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((5418, 5455), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (5436, 5455), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((7647, 7664), 'rest_framework.decorators.api_view', 'api_view', (["['PUT']"], {}), "(['PUT'])\n", (7655, 7664), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((7666, 7703), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (7684, 7703), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((10229, 10246), 'rest_framework.decorators.api_view', 'api_view', (["['PUT']"], {}), "(['PUT'])\n", (10237, 10246), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((10248, 10285), 'rest_framework.decorators.permission_classes', 'permission_classes', 
(['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (10266, 10285), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((4888, 4918), 'django.core.paginator.Paginator', 'Paginator', (['interest_configs', '(5)'], {}), '(interest_configs, 5)\n', (4897, 4918), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((5239, 5292), 'api.serializers.LoanInterestConfigSerializer', 'InterestConfigSerializer', (['interest_configs'], {'many': '(True)'}), '(interest_configs, many=True)\n', (5263, 5292), True, 'from api.serializers import LoanInterestConfigSerializer as InterestConfigSerializer\n'), ((5304, 5399), 'rest_framework.response.Response', 'Response', (["{'interest_configs': serializer.data, 'page': page, 'pages': paginator.\n num_pages}"], {}), "({'interest_configs': serializer.data, 'page': page, 'pages':\n paginator.num_pages})\n", (5312, 5399), False, 'from rest_framework.response import Response\n'), ((7545, 7598), 'api.serializers.LoanInterestConfigSerializer', 'InterestConfigSerializer', (['interest_config'], {'many': '(False)'}), '(interest_config, many=False)\n', (7569, 7598), True, 'from api.serializers import LoanInterestConfigSerializer as InterestConfigSerializer\n'), ((7610, 7635), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (7618, 7635), False, 'from rest_framework.response import Response\n'), ((10131, 10184), 'api.serializers.LoanInterestConfigSerializer', 'InterestConfigSerializer', (['interest_config'], {'many': '(False)'}), '(interest_config, many=False)\n', (10155, 10184), True, 'from api.serializers import LoanInterestConfigSerializer as InterestConfigSerializer\n'), ((10196, 10221), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (10204, 10221), False, 'from rest_framework.response import Response\n'), ((12500, 12529), 'rest_framework.response.Response', 'Response', (['message'], 
{'status': '(200)'}), '(message, status=200)\n', (12508, 12529), False, 'from rest_framework.response import Response\n'), ((1003, 1056), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (1011, 1056), False, 'from rest_framework.response import Response\n'), ((1269, 1322), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (1277, 1322), False, 'from rest_framework.response import Response\n'), ((1436, 1489), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (1444, 1489), False, 'from rest_framework.response import Response\n'), ((1904, 1957), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (1912, 1957), False, 'from rest_framework.response import Response\n'), ((2100, 2153), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (2108, 2153), False, 'from rest_framework.response import Response\n'), ((2715, 2768), 'api.serializers.LoanInterestConfigSerializer', 'InterestConfigSerializer', (['interest_config'], {'many': '(False)'}), '(interest_config, many=False)\n', (2739, 2768), True, 'from api.serializers import LoanInterestConfigSerializer as InterestConfigSerializer\n'), ((2784, 2809), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2792, 2809), False, 'from rest_framework.response import Response\n'), ((3469, 3522), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (3477, 3522), 
False, 'from rest_framework.response import Response\n'), ((3735, 3788), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (3743, 3788), False, 'from rest_framework.response import Response\n'), ((3902, 3955), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (3910, 3955), False, 'from rest_framework.response import Response\n'), ((4370, 4423), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (4378, 4423), False, 'from rest_framework.response import Response\n'), ((4566, 4619), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (4574, 4619), False, 'from rest_framework.response import Response\n'), ((5848, 5901), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (5856, 5901), False, 'from rest_framework.response import Response\n'), ((6179, 6232), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (6187, 6232), False, 'from rest_framework.response import Response\n'), ((6368, 6421), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (6376, 6421), False, 'from rest_framework.response import Response\n'), ((6634, 6687), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (6642, 6687), False, 'from rest_framework.response import 
Response\n'), ((6801, 6854), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (6809, 6854), False, 'from rest_framework.response import Response\n'), ((7269, 7322), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (7277, 7322), False, 'from rest_framework.response import Response\n'), ((7465, 7518), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (7473, 7518), False, 'from rest_framework.response import Response\n'), ((8122, 8175), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (8130, 8175), False, 'from rest_framework.response import Response\n'), ((8453, 8506), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (8461, 8506), False, 'from rest_framework.response import Response\n'), ((8642, 8695), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (8650, 8695), False, 'from rest_framework.response import Response\n'), ((8908, 8961), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (8916, 8961), False, 'from rest_framework.response import Response\n'), ((9075, 9128), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (9083, 9128), False, 'from rest_framework.response import Response\n'), ((9543, 9596), 
'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (9551, 9596), False, 'from rest_framework.response import Response\n'), ((9739, 9792), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (9747, 9792), False, 'from rest_framework.response import Response\n'), ((10686, 10739), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (10694, 10739), False, 'from rest_framework.response import Response\n'), ((11017, 11070), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (11025, 11070), False, 'from rest_framework.response import Response\n'), ((11206, 11259), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (11214, 11259), False, 'from rest_framework.response import Response\n'), ((11472, 11525), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (11480, 11525), False, 'from rest_framework.response import Response\n'), ((11639, 11692), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (11647, 11692), False, 'from rest_framework.response import Response\n'), ((12107, 12160), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (12115, 12160), False, 'from rest_framework.response import Response\n'), ((12303, 12356), 
'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (12311, 12356), False, 'from rest_framework.response import Response\n'), ((801, 888), 'loans.models.ProductConfig.objects.filter', 'ProductConfig.objects.filter', ([], {'product_config_id': 'product_config_id', 'status': '"""active"""'}), "(product_config_id=product_config_id, status=\n 'active')\n", (829, 888), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((1108, 1170), 'loans.models.Product.objects.filter', 'Product.objects.filter', ([], {'product_id': 'product_id', 'status': '"""active"""'}), "(product_id=product_id, status='active')\n", (1130, 1170), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((2958, 3011), 'rest_framework.response.Response', 'Response', (['message'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(message, status=status.HTTP_400_BAD_REQUEST)\n', (2966, 3011), False, 'from rest_framework.response import Response\n'), ((3267, 3354), 'loans.models.ProductConfig.objects.filter', 'ProductConfig.objects.filter', ([], {'product_config_id': 'product_config_id', 'status': '"""active"""'}), "(product_config_id=product_config_id, status=\n 'active')\n", (3295, 3354), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((3574, 3636), 'loans.models.Product.objects.filter', 'Product.objects.filter', ([], {'product_id': 'product_id', 'status': '"""active"""'}), "(product_id=product_id, status='active')\n", (3596, 3636), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((4710, 4787), 'loans.models.InterestConfig.objects.filter', 'InterestConfig.objects.filter', ([], {'product_config': 'product_config', 'status': '"""active"""'}), "(product_config=product_config, status='active')\n", (4739, 4787), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((5641, 5731), 
'loans.models.InterestConfig.objects.filter', 'InterestConfig.objects.filter', ([], {'interest_config_id': 'interest_config_id', 'status': '"""active"""'}), "(interest_config_id=interest_config_id, status\n ='active')\n", (5670, 5731), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((5977, 6064), 'loans.models.ProductConfig.objects.filter', 'ProductConfig.objects.filter', ([], {'product_config_id': 'product_config_id', 'status': '"""active"""'}), "(product_config_id=product_config_id, status=\n 'active')\n", (6005, 6064), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((6473, 6535), 'loans.models.Product.objects.filter', 'Product.objects.filter', ([], {'product_id': 'product_id', 'status': '"""active"""'}), "(product_id=product_id, status='active')\n", (6495, 6535), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((7915, 8005), 'loans.models.InterestConfig.objects.filter', 'InterestConfig.objects.filter', ([], {'interest_config_id': 'interest_config_id', 'status': '"""active"""'}), "(interest_config_id=interest_config_id, status\n ='active')\n", (7944, 8005), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((8251, 8338), 'loans.models.ProductConfig.objects.filter', 'ProductConfig.objects.filter', ([], {'product_config_id': 'product_config_id', 'status': '"""active"""'}), "(product_config_id=product_config_id, status=\n 'active')\n", (8279, 8338), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((8747, 8809), 'loans.models.Product.objects.filter', 'Product.objects.filter', ([], {'product_id': 'product_id', 'status': '"""active"""'}), "(product_id=product_id, status='active')\n", (8769, 8809), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((10479, 10569), 'loans.models.InterestConfig.objects.filter', 'InterestConfig.objects.filter', ([], {'interest_config_id': 'interest_config_id', 'status': 
'"""active"""'}), "(interest_config_id=interest_config_id, status\n ='active')\n", (10508, 10569), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((10815, 10902), 'loans.models.ProductConfig.objects.filter', 'ProductConfig.objects.filter', ([], {'product_config_id': 'product_config_id', 'status': '"""active"""'}), "(product_config_id=product_config_id, status=\n 'active')\n", (10843, 10902), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((11311, 11373), 'loans.models.Product.objects.filter', 'Product.objects.filter', ([], {'product_id': 'product_id', 'status': '"""active"""'}), "(product_id=product_id, status='active')\n", (11333, 11373), False, 'from loans.models import Product, ProductConfig, InterestConfig\n'), ((2300, 2311), 'core.utils.randomstr', 'randomstr', ([], {}), '()\n', (2309, 2311), False, 'from core.utils import randomstr\n')]
|
import os
from drivers import IPHONE_UA
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def get(driver_path):
    """Create a PhantomJS WebDriver configured with an iPhone user agent.

    Parameters
    ----------
    driver_path : str
        Filesystem path to the phantomjs executable.

    Returns
    -------
    selenium.webdriver.PhantomJS
        Driver with its window sized to 1024x3000.

    Raises
    ------
    FileNotFoundError
        If no executable exists at ``driver_path``.
    """
    if not os.path.exists(driver_path):
        # Bug fix: the original passed driver_path as a second positional
        # exception argument, so the %s placeholder was never substituted.
        raise FileNotFoundError(
            "Could not find phantomjs executable at %s. Download it for "
            "your platform at http://phantomjs.org/download.html" % driver_path)
    # Spoof a mobile browser so sites serve their mobile (iPhone) layout.
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = IPHONE_UA
    driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=driver_path)
    driver.set_window_size(1024, 3000)
    return driver
|
[
"selenium.webdriver.PhantomJS",
"os.path.exists"
] |
[((493, 568), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', ([], {'desired_capabilities': 'dcap', 'executable_path': 'driver_path'}), '(desired_capabilities=dcap, executable_path=driver_path)\n', (512, 568), False, 'from selenium import webdriver\n'), ((185, 212), 'os.path.exists', 'os.path.exists', (['driver_path'], {}), '(driver_path)\n', (199, 212), False, 'import os\n')]
|
from aws_cdk import aws_s3 as s3
def base_bucket(construct, **kwargs):
    """
    Build an S3 Bucket for the given construct.

    :param construct: Custom construct that will use this function. From the
        external construct this is usually 'self'.
    :param kwargs: Bucket settings: ``bucket_name`` and ``public_read_access``
        (required); ``versioned``, ``cors`` and ``website`` (optional).
    :return: S3 Bucket Construct.
    """
    raw_name = f"{construct.prefix}-{kwargs['bucket_name']}-bucket-{construct.environment_}"
    safe_name = raw_name.replace("_", "-")

    public_read = kwargs["public_read_access"]
    website_settings = kwargs.get("website", {})

    cors_rules = None
    cors_config = kwargs.get("cors")
    if cors_config is not None:
        # Keep only the HTTP methods named in the configuration.
        methods = [
            method
            for method in list(s3.HttpMethods)
            if method.value in cors_config["allowed_methods"]
        ]
        cors_rules = [
            s3.CorsRule(
                allowed_methods=methods,
                allowed_origins=cors_config["allowed_origins"],
            )
        ]

    bucket = s3.Bucket(
        construct,
        id=safe_name,
        bucket_name=safe_name,
        cors=cors_rules,
        versioned=kwargs.get("versioned"),
        website_error_document=website_settings.get("error"),
        website_index_document=website_settings.get("index"),
    )
    if public_read is True:
        bucket.grant_public_access()
    return bucket
|
[
"aws_cdk.aws_s3.CorsRule",
"aws_cdk.aws_s3.Bucket"
] |
[((1053, 1272), 'aws_cdk.aws_s3.Bucket', 's3.Bucket', (['construct'], {'id': 'parsed_bucket_name', 'bucket_name': 'parsed_bucket_name', 'cors': 'cors_settings', 'versioned': 'versioned', 'website_error_document': 'website_error_document', 'website_index_document': 'website_index_document'}), '(construct, id=parsed_bucket_name, bucket_name=parsed_bucket_name,\n cors=cors_settings, versioned=versioned, website_error_document=\n website_error_document, website_index_document=website_index_document)\n', (1062, 1272), True, 'from aws_cdk import aws_s3 as s3\n'), ((904, 1003), 'aws_cdk.aws_s3.CorsRule', 's3.CorsRule', ([], {'allowed_methods': 'allowed_methods', 'allowed_origins': "cors_settings['allowed_origins']"}), "(allowed_methods=allowed_methods, allowed_origins=cors_settings[\n 'allowed_origins'])\n", (915, 1003), True, 'from aws_cdk import aws_s3 as s3\n')]
|
"""This code generates interactive HTML file with MCTS Tree Visualized"""
import os
from monte_carlo_tree_search.trees.abstract_tree import TreeNode
# Directory containing this module; the HTML template pieces live beside it.
DATA_DIR = os.path.dirname(os.path.abspath(__file__))
# HTML emitted before and after the generated node/edge JavaScript.
PREAMBLE_FILE = os.path.join(DATA_DIR, 'preamble')
POSTAMBLE_FILE = os.path.join(DATA_DIR, 'postamble')
class TreeVisualizer:
    """Render an MCTS tree as an interactive HTML file.

    Node, edge and level JavaScript snippets are accumulated as strings while
    walking the tree breadth-first, then spliced between the preamble and
    postamble HTML templates.
    """

    def __init__(self, show_unvisited_nodes=False, level_interval=3):
        # When False, nodes with zero visits are omitted from the drawing.
        self.show_unvisited_nodes = show_unvisited_nodes
        self.root_color = ["cyan"]
        self.list_of_colors = ["lime", "red"]
        # Accumulated JavaScript fragments.
        self.nodes_str = ''
        self.edges_str = ''
        self.level_str = ''
        # Maps tree nodes to consecutive integer ids used in the JS arrays.
        self.nodes_to_id = {}
        self.last_nod_id = 0
        self.states_as_dicts = ''
        # Vertical spacing multiplier between tree generations.
        self.level_interval = level_interval

    def add_node_to_dict(self, node):
        """Assign the next consecutive integer id to *node*."""
        self.nodes_to_id[node] = self.last_nod_id
        self.last_nod_id += 1

    def generate_html(self, root, file_name):
        """Walk the tree under *root* and write the visualization to *file_name*."""
        code = self.generate_tree_data(root)
        self.combine_html(code, file_name)
        # Reset accumulated state so the instance can be reused.
        # Bug fix: the reset previously dropped a custom level_interval by
        # implicitly falling back to the default value.
        self.__init__(show_unvisited_nodes=self.show_unvisited_nodes,
                      level_interval=self.level_interval)

    def node_to_string(self, node: "TreeNode"):
        """Return the JS 'nodes.push(...)' line for *node* (id/value/count/confidence)."""
        value_to_show_raw = node.value_acc.get()
        # value accumulator may be empty (None) for unvisited nodes.
        value_to_show = round(value_to_show_raw, 2) if value_to_show_raw is not None else value_to_show_raw
        conf_to_show = round(node.value_acc.get_confidence(), 2)
        return 'nodes.push(' + '{' + ' id: {}, label: \"Id: {} \\n V: {} \\n C: {} \\n CF: {} \"'.format(self.nodes_to_id[node], self.nodes_to_id[node],
                                                                                           value_to_show,
                                                                                           node.value_acc.count(),
                                                                                           conf_to_show) + '}); \n'

    def node_to_level(self, node: "TreeNode"):
        """Return the JS line pinning *node* to its generation's vertical level."""
        return 'nodes[{}]["level"] = {}; \n'.format(self.nodes_to_id[node], self.level_interval*node.generation)

    def generate_tree_data(self, root: "TreeNode"):
        """Breadth-first walk from *root*, accumulating node/edge/level fragments.

        Unvisited nodes are skipped unless show_unvisited_nodes is set.
        Returns None; all output goes into the instance's string accumulators.
        """
        kiu = [root]
        self.add_node_to_dict(root)
        self.states_as_dicts += '<br>' + ' --- P = [{}, {}] ---'.format(root.state.active_players_hand().number_of_my_points(), root.state.previous_players_hand().number_of_my_points()) +\
                                str(self.nodes_to_id[root]) + str(root.observation.observation_dict) + '<br><br><br>'
        while len(kiu) > 0:
            # Take the oldest queued node (FIFO -> breadth-first order).
            node_to_eval = kiu.pop(0)
            if node_to_eval.value_acc.count() > 0 or self.show_unvisited_nodes:
                self.nodes_str += self.node_to_string(node_to_eval)
                self.level_str += self.node_to_level(node_to_eval)
            for child in node_to_eval.children:
                if child.value_acc.count() > 0 or self.show_unvisited_nodes:
                    kiu.append(child)
                    self.add_node_to_dict(child)
                    self.states_as_dicts += str(self.nodes_to_id[child]) + ' --- P = [{}, {}]'.format(child.recreate_state().active_players_hand().number_of_my_points(), child.recreate_state().previous_players_hand().number_of_my_points()) + str(child.observation.observation_dict) + '<br><br><br>'
                    self.edges_str += self.edge_to_string(node_to_eval, child)

    def edge_to_string(self, node1, node2):
        """Return the JS 'edges.push(...)' line for the node1 -> node2 edge."""
        edge_caption = str(node2.parent_action.short_description())
        return 'edges.push({' +'from: {}, to: {}'.format(self.nodes_to_id[node1], self.nodes_to_id[node2])+ ',label: \"' + edge_caption + '\", ' + 'font: { align: "middle" } }); \n'

    def combine_html(self, tree_code, file_name):
        """Splice the accumulated fragments into the HTML templates and write the file.

        ``tree_code`` is unused (generate_tree_data returns None); the
        parameter is kept for interface compatibility.
        """
        with open(PREAMBLE_FILE, 'r') as file:
            preamble = file.read()
        with open(POSTAMBLE_FILE, 'r') as file:
            postamble = file.read()
        combined = preamble + self.nodes_str + self.edges_str + self.level_str + postamble + self.states_as_dicts + '</body></html>'
        # Bug fix: write through a context manager so the handle is closed
        # even if write() raises (the original used open/write/close).
        with open(file_name, "w") as text_file:
            text_file.write(combined)
|
[
"os.path.abspath",
"os.path.join"
] |
[((221, 255), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""preamble"""'], {}), "(DATA_DIR, 'preamble')\n", (233, 255), False, 'import os\n'), ((273, 308), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""postamble"""'], {}), "(DATA_DIR, 'postamble')\n", (285, 308), False, 'import os\n'), ((178, 203), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (193, 203), False, 'import os\n')]
|
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.tl.types import ChannelParticipantsAdmins
from YorForger import client, DEV_USERS
# Check if user has admin rights
async def is_administrator(user_id: int, message):
    """Return True if *user_id* is a chat admin or a listed dev user.

    Checks the developer allow-list first, then scans the chat's admin
    participants.
    """
    # Dev users are always treated as admins. Checking up front avoids
    # iterating participants at all — the original only honoured DEV_USERS
    # inside the loop, so it missed devs whenever the admin list was empty.
    if user_id in DEV_USERS:
        return True
    async for user in client.iter_participants(
        message.chat_id, filter=ChannelParticipantsAdmins
    ):
        if user_id == user.id:
            return True
    return False
@client.on(events.NewMessage(pattern="^/purge"))
async def purge(event):
    """Delete every message between the replied-to message and /purge itself.

    Admin-only. Deletes the command message first, then removes ids in
    descending order in batches, and finally posts a short self-deleting
    summary.
    """
    chat = event.chat_id
    msgs = []
    if not await is_administrator(user_id=event.from_id, message=event):
        await event.reply("You're not an admin!")
        return
    msg = await event.get_reply_message()
    if not msg:
        await event.reply("Reply to a message to select where to start purging from.")
        return
    try:
        msg_id = msg.id
        count = 0
        # Everything strictly before the /purge command, down to the reply.
        to_delete = event.message.id - 1
        # Remove the /purge command message itself first.
        await event.client.delete_messages(chat, event.message.id)
        msgs.append(event.reply_to_msg_id)
        # Walk ids from newest to oldest; flush in batches of 100
        # (presumably the bulk-delete limit — confirm against Telethon docs).
        for m_id in range(to_delete, msg_id - 1, -1):
            msgs.append(m_id)
            count += 1
            if len(msgs) == 100:
                await event.client.delete_messages(chat, msgs)
                msgs = []
        # Flush whatever remains in the final partial batch.
        await event.client.delete_messages(chat, msgs)
        del_res = await event.client.send_message(
            event.chat_id, f"Purged {count} messages."
        )
        # Let the summary be visible briefly, then clean it up too.
        await asyncio.sleep(4)
        await del_res.delete()
    except MessageDeleteForbiddenError:
        text = "Failed to delete messages.\n"
        text += "Messages maybe too old or I'm not admin! or dont have delete rights!"
        del_res = await event.respond(text, parse_mode="md")
        await asyncio.sleep(5)
        await del_res.delete()
await del_res.delete()
@client.on(events.NewMessage(pattern="^/del$"))
async def delete_msg(event):
    """Delete the replied-to message together with the /del command itself.

    Admin-only (or dev user); replies with a hint when not used as a reply.
    """
    if not await is_administrator(user_id=event.from_id, message=event):
        await event.reply("You're not an admin!")
        return
    msg = await event.get_reply_message()
    if not msg:
        await event.reply("Reply to some message to delete it.")
        return
    to_delete = event.message
    # Fix: the original first assigned `chat = event.chat_id`, which was dead
    # code immediately overwritten by this input-chat lookup.
    chat = await event.get_input_chat()
    rm = [msg, to_delete]
    await event.client.delete_messages(chat, rm)
# Plain-text help shown by the bot's module help listing.
__help__ = """
Deleting messages made easy with this command. Bot purges \
messages all together or individually.
*Admin only:*
× /del: Deletes the message you replied to
× /purge: Deletes all messages between this and the replied to message.
"""
# Display name used when listing loaded modules.
__mod_name__ = "Purges"
|
[
"YorForger.client.iter_participants",
"telethon.events.NewMessage",
"asyncio.sleep"
] |
[((337, 412), 'YorForger.client.iter_participants', 'client.iter_participants', (['message.chat_id'], {'filter': 'ChannelParticipantsAdmins'}), '(message.chat_id, filter=ChannelParticipantsAdmins)\n', (361, 412), False, 'from YorForger import client, DEV_USERS\n'), ((556, 592), 'telethon.events.NewMessage', 'events.NewMessage', ([], {'pattern': '"""^/purge"""'}), "(pattern='^/purge')\n", (573, 592), False, 'from telethon import events\n'), ((1934, 1969), 'telethon.events.NewMessage', 'events.NewMessage', ([], {'pattern': '"""^/del$"""'}), "(pattern='^/del$')\n", (1951, 1969), False, 'from telethon import events\n'), ((1576, 1592), 'asyncio.sleep', 'asyncio.sleep', (['(4)'], {}), '(4)\n', (1589, 1592), False, 'import asyncio\n'), ((1873, 1889), 'asyncio.sleep', 'asyncio.sleep', (['(5)'], {}), '(5)\n', (1886, 1889), False, 'import asyncio\n')]
|
'''
Convert finance statistics: From JSON to CSV.
Update log: (date / version / author : comments)
2018-02-19 / 1.0.0 / <NAME> : Creation
Support Yahoo Finance stock
'''
from collections import OrderedDict
import csv
import getopt
import json
import sys
from time import localtime, strftime, time
# Global variables.
# The value can be updated by command line options.
__data_type = None        # Finance data type from -d (0: Yahoo Finance stock).
__json_file_path = None   # Input JSON file path from -i.
__csv_file_path = None    # Output CSV path from -o; None prints results to screen.
__Constants = None        # Bound to a Constants_* class once the data type is known.
class Constants_Base(object):
    '''
    Use a class to keep constant variables.

    These are the key names expected in the input JSON, shared by all
    supported finance data types.
    '''
    RESULT = "Result"
    RESULT_ERROR = "Error"
    INVENTORIES = "Inventories"
    RECORD = "Record"
    INVENTORY_DATA = "Inventory data"
    URL = "URL"
class Constants_YahooStock(Constants_Base):
    '''
    Key names specific to Yahoo Finance stock data (data type 0).
    '''
    STOCK_INFO = "Stock info"
    STOCK_INFO_NAME = "Name"
    STOCK_INFO_EXCHANGE = "Exchange"
    STOCK_INFO_TICKER = "Ticker"
    MARKET_INFO = "Market info"
    # Only the market-info entries named here are copied into the CSV output.
    MARKET_INFO_FILTER = ["200-Day Moving Average",
                          "5 Year Average Dividend Yield",
                          "50-Day Moving Average",
                          "52 Week High",
                          "52 Week Low",
                          "52-Week Change",
                          "Beta",
                          "Diluted EPS (ttm)",
                          "PEG Ratio (5 yr expected)",
                          "Price/Book (mrq)",
                          "Profit Margin",
                          "Return on Assets (ttm)",
                          "Return on Equity (ttm)",
                          "Trailing Annual Dividend Yield",
                          "Trailing P/E"]
def process_inventory_list():
    '''
    Read the inventory JSON at __json_file_path, flatten each successful
    record into an OrderedDict, and write the collected records as CSV to
    __csv_file_path — or print them to screen when no output path is set.

    Relies on the module-level option globals populated by main(); binds
    __Constants to the data-type specific constants class.
    '''
    global __Constants
    print("-" * 100)
    time_str = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
    print("Start time =", time_str)
    field_names = []
    records = []
    try:
        # Open input file.
        with open(__json_file_path) as input_file:
            print('input_file =', input_file)
            # object_pairs_hook keeps JSON key order for the CSV columns.
            inventory_data = json.load(
                input_file, object_pairs_hook = OrderedDict)
            print('inventory_data =', inventory_data)
            print("-" * 80)
            if __data_type == 0:
                __Constants = Constants_YahooStock
            # Field names are collected only from the first successful record.
            add_field_name = True
            for item, record_value in inventory_data[__Constants.INVENTORIES].items():
                print("item =", item)
                # Records whose result marks an error are skipped entirely.
                if record_value[__Constants.RESULT] == __Constants.RESULT_ERROR:
                    print("Result =", record_value[__Constants.RESULT_ERROR])
                    print("-" * 60)
                    continue
                record = OrderedDict()
                if __data_type == 0:
                    record[__Constants.STOCK_INFO_NAME] = record_value[__Constants.INVENTORY_DATA][__Constants.STOCK_INFO][__Constants.STOCK_INFO_NAME]
                    record[__Constants.STOCK_INFO_EXCHANGE] = record_value[__Constants.INVENTORY_DATA][__Constants.STOCK_INFO][__Constants.STOCK_INFO_EXCHANGE]
                    record[__Constants.STOCK_INFO_TICKER] = record_value[__Constants.INVENTORY_DATA][__Constants.STOCK_INFO][__Constants.STOCK_INFO_TICKER]
                    if add_field_name:
                        field_names.append(__Constants.STOCK_INFO_NAME)
                        field_names.append(__Constants.STOCK_INFO_EXCHANGE)
                        field_names.append(__Constants.STOCK_INFO_TICKER)
                # Following iteration must be sorted for adding field names in
                # correct order.
                for item_key, item_value in sorted(record_value[__Constants.INVENTORY_DATA][__Constants.MARKET_INFO].items()):
                    # Only whitelisted market-info keys make it into the CSV.
                    if item_key in __Constants.MARKET_INFO_FILTER:
                        print(item_key, "=", item_value)
                        record[item_key] = item_value
                        if add_field_name:
                            field_names.append(item_key)
                record[__Constants.URL] = record_value[__Constants.URL]
                if add_field_name:
                    field_names.append(__Constants.URL)
                records.append(record)
                if add_field_name:
                    add_field_name = False
                print("field_names =", field_names)
                print("record =", record)
                print("-" * 60)
            print("-" * 80)
        print("Process inventory list: ok.")
    except Exception as e:
        print("Process inventory list: Exception = {0}".format(e))
    time_str = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
    print("Stop time =", time_str)
    print("-" * 100)
    # If given __csv_file_path, output to file; otherwise, output to
    # screen.
    if __csv_file_path:
        try:
            # Open output file.
            with open(__csv_file_path, "wt") as output_file:
                print('output_file =', output_file)
                # Output file as CSV format.
                cout = csv.DictWriter(
                    output_file, fieldnames = field_names, lineterminator = "\n")
                # Write header line.
                cout.writeheader()
                # Write record lines.
                cout.writerows(records)
        except Exception as e:
            print("Output process results: Exception = {0}".format(e))
    else:
        # Output screen as JSON format.
        print("field_names =", field_names)
        print("records =", records)
        print("-" * 100)
def usage():
    """Print command-line usage help to stdout."""
    # Bug fix: the -i/-o descriptions had their formats swapped — the input
    # file is JSON (read with json.load) and the output file is CSV
    # (written with csv.DictWriter).
    print('''
Convert finance statistics: From JSON to CSV.
Usage:
    -h
    -d <DataType> -i <FilePath> [-o <FilePath>]
Options:
    -h            : Show help.
    -d <DataType> : Finance data type. Compulsory, Value [0: Yahoo Finance stock].
    -i <FilePath> : Source data file path (JSON). Compulsory.
    -o <FilePath> : Result output file path (CSV). Optional, output to screen by default.
''')
def main(argv):
    '''
    Pass input arguments from command line to method.

    Parses options with getopt, stores them in the module-level globals, and
    either runs process_inventory_list() or prints usage and exits with a
    negative code on bad input (0 for -h).

    @param argv: A list of arguments
    '''
    global __data_type
    global __json_file_path
    global __csv_file_path
    print("argv =", argv)
    __show_usage = False
    __exit_code = 0
    __error_message = None
    # If no any option.
    if not argv:
        __show_usage = True
    # Parse command line.
    if not __show_usage:
        try:
            opts, args = getopt.getopt(argv, "hd:i:o:")
            print("opts =", opts)
            print("args =", args)
        except Exception as e:
            # There would be getopt.GetoptError.
            print("Parse command line: Exception = {0}".format(e))
            __show_usage, __exit_code, __error_message = True, -1, "Wrong command line option."
    # Check and parse each option.
    if not __show_usage:
        try:
            for opt, arg in opts:
                if opt == "-h":
                    __show_usage, __exit_code = True, 0
                elif opt == "-d":
                    # May raise ValueError on non-integer -d; caught below.
                    __data_type = int(arg)
                elif opt == "-i":
                    __json_file_path = arg
                elif opt == "-o":
                    __csv_file_path = arg
                else:
                    __show_usage, __exit_code, __error_message = True, -\
                        2, "Unknown command line option."
        except Exception as e:
            print("Parse command options: Exception = {0}".format(e))
            __show_usage, __exit_code, __error_message = True, -\
                3, "Wrong value for command line option."
    print("show_usage =", __show_usage)
    print("data_type =", __data_type)
    print("json_file_path =", __json_file_path)
    print("csv_file_path =", __csv_file_path)
    # Check options are valid.
    if not __show_usage:
        if (__data_type is None) or (__json_file_path is None):
            __show_usage, __exit_code, __error_message = True, -\
                4, "Missing compulsory command line option."
        elif (__data_type < 0) or (__data_type > 1):
            __show_usage, __exit_code, __error_message = True, -5, "Wrong value for -d."
    if not __show_usage:
        process_inventory_list()
    else:
        print("__exit_code =", __exit_code)
        if __error_message:
            print("__error_message =", __error_message)
        print("")
        usage()
        sys.exit(__exit_code)
if __name__ == '__main__':
    # Strip the program name; pass only the options to main().
    main(sys.argv[1:])
|
[
"json.load",
"getopt.getopt",
"time.time",
"collections.OrderedDict",
"sys.exit",
"csv.DictWriter"
] |
[((8474, 8495), 'sys.exit', 'sys.exit', (['__exit_code'], {}), '(__exit_code)\n', (8482, 8495), False, 'import sys\n'), ((1869, 1875), 'time.time', 'time', ([], {}), '()\n', (1873, 1875), False, 'from time import localtime, strftime, time\n'), ((2124, 2176), 'json.load', 'json.load', (['input_file'], {'object_pairs_hook': 'OrderedDict'}), '(input_file, object_pairs_hook=OrderedDict)\n', (2133, 2176), False, 'import json\n'), ((2749, 2762), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2760, 2762), False, 'from collections import OrderedDict\n'), ((4656, 4662), 'time.time', 'time', ([], {}), '()\n', (4660, 4662), False, 'from time import localtime, strftime, time\n'), ((6476, 6506), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hd:i:o:"""'], {}), "(argv, 'hd:i:o:')\n", (6489, 6506), False, 'import getopt\n'), ((5069, 5141), 'csv.DictWriter', 'csv.DictWriter', (['output_file'], {'fieldnames': 'field_names', 'lineterminator': '"""\n"""'}), "(output_file, fieldnames=field_names, lineterminator='\\n')\n", (5083, 5141), False, 'import csv\n')]
|
'''
Script originally for doing dp grads using parameter expansions
'''
import numpy as np
import torch
from torch.autograd import Variable
import sys
from utils import generate_proj_matrix_piece
# clip and accumulate clipped gradients
def acc_scaled_grads(model, C, cum_grads, use_cuda=False):
batch_size = model.batch_proc_size
g_norm = Variable(torch.zeros(batch_size),requires_grad=False)
if use_cuda:
g_norm = g_norm.cuda()
for p in filter(lambda p: p.requires_grad, model.parameters() ):
if p.grad is not None:
g_norm += torch.sum( p.grad.view(batch_size,-1)**2, 1)
g_norm = torch.sqrt(g_norm)
# do clipping and accumulate
for p, key in zip( filter(lambda p: p.requires_grad, model.parameters()), cum_grads.keys() ):
if p is not None:
cum_grads[key] += torch.sum( (p.grad/torch.clamp(g_norm.contiguous().view(-1,1,1)/C, min=1)), dim=0 )
# add noise and replace model grads with cumulative grads
def add_noise_with_cum_grads(model, C, neighbour_const, sigma, cum_grads, noise_tensors=None, use_cuda=False):
batch_proc_size = model.batch_proc_size
for p, key in zip( filter(lambda p: p.requires_grad, model.parameters()), cum_grads.keys() ):
if p.grad is not None:
if noise_tensors is None:
noise = Variable( (sigma*neighbour_const*C)*torch.normal(mean=torch.zeros_like(p.grad[0]).data, \
std=1.0).expand(batch_proc_size,-1,-1) )
else:
noise = Variable(noise_tensors[key].expand(batch_proc_size,-1,-1))
p.grad = ((cum_grads[key].expand(batch_proc_size,-1,-1) + noise)/model.batch_size)
if use_cuda:
p.grad = p.grad.cuda()
|
[
"torch.sqrt",
"torch.zeros_like",
"torch.zeros"
] |
[((620, 638), 'torch.sqrt', 'torch.sqrt', (['g_norm'], {}), '(g_norm)\n', (630, 638), False, 'import torch\n'), ((358, 381), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (369, 381), False, 'import torch\n'), ((1368, 1395), 'torch.zeros_like', 'torch.zeros_like', (['p.grad[0]'], {}), '(p.grad[0])\n', (1384, 1395), False, 'import torch\n')]
|
import logging
logger = logging.getLogger(__name__)
class Result:
def __init__(self):
self.ok = True
def ok(self):
return self.ok
class BadResult(Result):
def __init__(self):
super().__init__()
self.ok = False
class Bus:
def __init__(self):
self.handlers = {}
def register(self, action, handler):
self.handlers[action] = handler
def handle(self, action, action_type):
try:
self.handlers[action_type].handle(action)
return Result()
except Exception as e:
logger.exception(e)
return BadResult()
def execute(self, action):
action_type = type(action)
if action_type not in self.handlers:
raise Exception('No handler for action ' + str(action_type) + ' found')
return self.handle(action, action_type)
class QueryDispatcher(Bus):
def handle(self, query, query_type):
try:
return self.handlers[query_type].handle(query)
except Exception as e:
logger.exception(e)
return None
|
[
"logging.getLogger"
] |
[((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n')]
|
import currency_converter
def main():
currency_converter.main()
|
[
"currency_converter.main"
] |
[((44, 69), 'currency_converter.main', 'currency_converter.main', ([], {}), '()\n', (67, 69), False, 'import currency_converter\n')]
|
from django.db import models
from django.contrib.auth.models import User
import datetime
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete = models.CASCADE, default='')
profile_pic = models.ImageField(upload_to = 'media/', default='default.jpg',blank=True)
bio = models.TextField()
updated_on = models.DateTimeField(auto_now_add=True)
goal1 = models.CharField(max_length=50)
goal2 = models.CharField(max_length=50)
goal3 = models.CharField(max_length=50)
goal4 = models.CharField(max_length=50)
goal5 = models.CharField(max_length=50)
def save_profile(self):
self.save()
@classmethod
def get_profile_by_name(cls,name):
profile = cls.objects.filter(user = name)
return name
class Workouts(models.Model):
user = models.ForeignKey(User,on_delete = models.CASCADE)
workout_name = models.CharField(max_length=50)
day = models.CharField(max_length=50)
body_section = models.CharField(max_length=60)
date_created = models.DateTimeField(auto_now_add=True)
number_of_sets = models.IntegerField(default=0)
def save_workout():
self.save()
def delete_workout():
self.delete()
@classmethod
def get_all_workouts(cls):
workouts = cls.objects.all()
return workouts
@classmethod
def get_workout_by_workout_name(cls,name):
workouts = cls.objects.filter(workout_name=workout_name)
return workouts
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((158, 222), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'default': '""""""'}), "(User, on_delete=models.CASCADE, default='')\n", (178, 222), False, 'from django.db import models\n'), ((243, 315), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""media/"""', 'default': '"""default.jpg"""', 'blank': '(True)'}), "(upload_to='media/', default='default.jpg', blank=True)\n", (260, 315), False, 'from django.db import models\n'), ((327, 345), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (343, 345), False, 'from django.db import models\n'), ((363, 402), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (383, 402), False, 'from django.db import models\n'), ((415, 446), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (431, 446), False, 'from django.db import models\n'), ((459, 490), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (475, 490), False, 'from django.db import models\n'), ((503, 534), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (519, 534), False, 'from django.db import models\n'), ((547, 578), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (563, 578), False, 'from django.db import models\n'), ((591, 622), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (607, 622), False, 'from django.db import models\n'), ((875, 924), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (892, 924), False, 'from django.db import models\n'), ((945, 976), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (961, 976), 
False, 'from django.db import models\n'), ((987, 1018), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1003, 1018), False, 'from django.db import models\n'), ((1038, 1069), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (1054, 1069), False, 'from django.db import models\n'), ((1090, 1129), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1110, 1129), False, 'from django.db import models\n'), ((1151, 1181), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1170, 1181), False, 'from django.db import models\n')]
|
# Generated by Django 2.0 on 2018-04-08 08:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('make_queue', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='reservation3d',
options={'permissions': (('can_view_reservation_user', 'Can view reservation user'),)},
),
migrations.AlterModelOptions(
name='reservationsewing',
options={'permissions': (('can_view_reservation_user', 'Can view reservation user'),)},
),
migrations.AddField(
model_name='reservation3d',
name='comment',
field=models.TextField(default='', max_length=2000),
),
migrations.AddField(
model_name='reservationsewing',
name='comment',
field=models.TextField(default='', max_length=2000),
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.AlterModelOptions"
] |
[((225, 367), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""reservation3d"""', 'options': "{'permissions': (('can_view_reservation_user', 'Can view reservation user'),)}"}), "(name='reservation3d', options={'permissions':\n (('can_view_reservation_user', 'Can view reservation user'),)})\n", (253, 367), False, 'from django.db import migrations, models\n'), ((408, 559), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""reservationsewing"""', 'options': "{'permissions': (('can_view_reservation_user', 'Can view reservation user'),)}"}), "(name='reservationsewing', options={\n 'permissions': (('can_view_reservation_user',\n 'Can view reservation user'),)})\n", (436, 559), False, 'from django.db import migrations, models\n'), ((702, 747), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'max_length': '(2000)'}), "(default='', max_length=2000)\n", (718, 747), False, 'from django.db import migrations, models\n'), ((879, 924), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'max_length': '(2000)'}), "(default='', max_length=2000)\n", (895, 924), False, 'from django.db import migrations, models\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import astropy.units as u
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest, assert_quantity_allclose
from ...datasets import gammapy_extra
from ...utils.testing import requires_dependency, requires_data
from ...spectrum import SpectrumObservation, models
@requires_data('gammapy-extra')
@requires_dependency('matplotlib')
@requires_dependency('scipy')
def test_spectrum_observation():
phafile = gammapy_extra.filename("datasets/hess-crab4_pha/pha_obs23523.fits")
obs = SpectrumObservation.read(phafile)
obs.peek()
@requires_dependency('scipy')
@requires_data('gammapy-extra')
def test_observation_stacking():
obs1 = SpectrumObservation.read(
'$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23523.fits')
obs2 = SpectrumObservation.read(
'$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23592.fits')
# Change threshold to make stuff more interesing
obs1.on_vector.lo_threshold = 1.2 * u.TeV
stacked_obs = SpectrumObservation.stack([obs1, obs2])
# Veryfing npred is preserved during the stacking
pwl = models.PowerLaw(index=2 * u.Unit(''),
amplitude=2e-11 * u.Unit('cm-2 s-1 TeV-1'),
reference=1 * u.TeV)
npred1 = obs1.predicted_counts(model=pwl)
npred2 = obs2.predicted_counts(model=pwl)
npred_stacked = stacked_obs.predicted_counts(model=pwl)
# Set npred outside safe range to 0
npred1.data[np.nonzero(obs1.on_vector.quality)] = 0
npred2.data[np.nonzero(obs2.on_vector.quality)] = 0
npred_summed = npred1 + npred2
assert_allclose(npred_stacked.data, npred_summed.data)
|
[
"numpy.nonzero",
"numpy.testing.assert_allclose",
"astropy.units.Unit"
] |
[((1759, 1813), 'numpy.testing.assert_allclose', 'assert_allclose', (['npred_stacked.data', 'npred_summed.data'], {}), '(npred_stacked.data, npred_summed.data)\n', (1774, 1813), False, 'from numpy.testing import assert_allclose\n'), ((1622, 1656), 'numpy.nonzero', 'np.nonzero', (['obs1.on_vector.quality'], {}), '(obs1.on_vector.quality)\n', (1632, 1656), True, 'import numpy as np\n'), ((1678, 1712), 'numpy.nonzero', 'np.nonzero', (['obs2.on_vector.quality'], {}), '(obs2.on_vector.quality)\n', (1688, 1712), True, 'import numpy as np\n'), ((1283, 1293), 'astropy.units.Unit', 'u.Unit', (['""""""'], {}), "('')\n", (1289, 1293), True, 'import astropy.units as u\n'), ((1339, 1363), 'astropy.units.Unit', 'u.Unit', (['"""cm-2 s-1 TeV-1"""'], {}), "('cm-2 s-1 TeV-1')\n", (1345, 1363), True, 'import astropy.units as u\n')]
|
import torch
import pyro.ops.jit
from tests.common import assert_equal
def test_varying_len_args():
def fn(*args):
return sum(args)
jit_fn = pyro.ops.jit.trace(fn)
examples = [
[torch.tensor(1.)],
[torch.tensor(2.), torch.tensor(3.)],
[torch.tensor(4.), torch.tensor(5.), torch.tensor(6.)],
]
for args in examples:
assert_equal(jit_fn(*args), fn(*args))
def test_varying_kwargs():
def fn(x, scale=1.):
return x * scale
jit_fn = pyro.ops.jit.trace(fn)
x = torch.tensor(1.)
for scale in [-1., 0., 1., 10.]:
assert_equal(jit_fn(x, scale=scale), fn(x, scale=scale))
def test_varying_unhashable_kwargs():
def fn(x, config={}):
return x * config.get(scale, 1.)
jit_fn = pyro.ops.jit.trace(fn)
x = torch.tensor(1.)
for scale in [-1., 0., 1., 10.]:
config = {'scale': scale}
assert_equal(jit_fn(x, config=config), fn(x, config=config))
|
[
"torch.tensor"
] |
[((544, 561), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (556, 561), False, 'import torch\n'), ((816, 833), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (828, 833), False, 'import torch\n'), ((211, 228), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (223, 228), False, 'import torch\n'), ((239, 256), 'torch.tensor', 'torch.tensor', (['(2.0)'], {}), '(2.0)\n', (251, 256), False, 'import torch\n'), ((257, 274), 'torch.tensor', 'torch.tensor', (['(3.0)'], {}), '(3.0)\n', (269, 274), False, 'import torch\n'), ((285, 302), 'torch.tensor', 'torch.tensor', (['(4.0)'], {}), '(4.0)\n', (297, 302), False, 'import torch\n'), ((303, 320), 'torch.tensor', 'torch.tensor', (['(5.0)'], {}), '(5.0)\n', (315, 320), False, 'import torch\n'), ((321, 338), 'torch.tensor', 'torch.tensor', (['(6.0)'], {}), '(6.0)\n', (333, 338), False, 'import torch\n')]
|
# Generated by Django 3.0.2 on 2020-03-29 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hitchhikeapp', '0009_user_ride'),
]
operations = [
migrations.AddField(
model_name='userdata',
name='userId',
field=models.IntegerField(null=True),
),
]
|
[
"django.db.models.IntegerField"
] |
[((332, 362), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (351, 362), False, 'from django.db import migrations, models\n')]
|
import socket
seeders = [
'satoshi.BitWin24.io',
'satoshi.litemint.com',
'172.16.17.32',
'172.16.31.10',
'192.168.127.12'
]
for seeder in seeders:
try:
ais = socket.getaddrinfo(seeder, 0)
except socket.gaierror:
ais = []
# Prevent duplicates, need to update to check
# for ports, can have multiple nodes on 1 ip.
addrs = []
for a in ais:
addr = a[4][0]
if addrs.count(addr) == 0:
addrs.append(addr)
print(seeder + ' = ' + str(len(addrs)))
|
[
"socket.getaddrinfo"
] |
[((193, 222), 'socket.getaddrinfo', 'socket.getaddrinfo', (['seeder', '(0)'], {}), '(seeder, 0)\n', (211, 222), False, 'import socket\n')]
|
#!/usr/bin/python3
from getpass import getpass
import json
import traceback
from uuid import UUID, uuid4
from gmusicapi.clients import Mobileclient
class Song:
def __init__(self, song_id, artist, title, album, in_library = True):
self.id = song_id
self.artist = artist
self.title = title
self.album = album
self.in_library = in_library
def __repr__(self):
return "<Song artist:\"%s\" title:\"%s\" album:\"%s\">" % (self.artist, self.title, self.album)
class Playlist:
def __init__(self, song_id, name):
self.id = song_id
self.name = name
self.songs = []
def add_song(self, song):
self.songs.append(song)
def parse_library_to_json(user, passphrase, json_output):
client = Mobileclient()
print("Attempting to authenticate with Google Play Music...")
if not client.login(user, passphrase, Mobileclient.FROM_MAC_ADDRESS):
print("Failed to authenticate!")
exit(-1)
print("Successfully authenticated.")
print("Fetching listing of songs in library...")
api_songs = client.get_all_songs()
# map of songs the script has ingested
local_songs = {}
store_to_uuid = {}
skipped = 0
for api_song in api_songs:
try:
id = UUID(api_song['id'])
song = Song(id, api_song['artist'], api_song['title'], api_song['album'])
local_songs[id] = song
if 'storeId' in api_song:
store_to_uuid[api_song['storeId']] = id
except KeyboardInterrupt as e:
raise e
except:
skipped += 1
traceback.print_exc()
print("Failed to ingest song with ID %s" % api_song['id'])
print("Found %d songs." % len(local_songs))
print("Skipped %d songs." % skipped)
print("Fetching playlist listing...")
api_playlist_entries = client.get_all_user_playlist_contents()
# dict of playlists the script is aware of (they have not necessarily been fully constructed yet)
local_playlists = {}
added = 0
skipped = 0
for entry in api_playlist_entries:
try:
playlist_id = UUID(entry['id'])
playlist = Playlist(id, entry['name'])
local_playlists[playlist_id] = playlist
for track in entry['tracks']:
try:
base_id = track['trackId']
# trackId is different depending on whether the track is from the store or user-uploaded
if track['source'] == '2':
# we need to map the store ID to the track's UUID
# if we aren't aware of the track already, we can create a representation from the entry data
if base_id not in store_to_uuid:
if 'track' not in track:
print("Failed to construct representation for song with store ID %s." % base_id)
skipped += 1
continue
track_info = track['track']
uuid = uuid4()
store_to_uuid[base_id] = uuid
song = Song(uuid, track_info['artist'], track_info['title'], track_info['album'],
in_library=False)
local_songs[uuid] = song
playlist.add_song(song)
track_id = store_to_uuid[base_id]
else:
# we can just use trackId directly
track_id = UUID(base_id)
if track_id not in local_songs.keys():
print("Found non-existent song in playlist with ID %s." % track_id)
skipped += 1
continue
song = local_songs[track_id]
playlist.add_song(song)
added += 1
except KeyboardInterrupt as e:
raise e
except:
skipped += 1
traceback.print_exc()
print("Failed to process playlist track with ID %s." % track['trackId'])
except KeyboardInterrupt as e:
raise e
except:
traceback.print_exc()
print("Failed to process playlist with ID %s." % entry['id'])
print("Found %d entries in %d playlists." % (added, len(local_playlists)))
print("Skipped %d entries." % skipped)
print("Serializing library data to JSON...")
songs_dict = {}
# serialize songs to map
for song in local_songs.values():
songs_dict[str(song.id)] = {
'artist': song.artist,
'title': song.title,
'album': song.album,
'in_library': song.in_library,
}
playlist_list = []
# serialize playlists as unlabeled list
for playlist in local_playlists.values():
songs = []
for song in playlist.songs:
songs.append(str(song.id))
playlist_list.append({
'name': playlist.name,
'songs': songs,
})
# create complete serial of library
serial = {
'songs': songs_dict,
'playlists': playlist_list,
}
print("Writing JSON to disk...")
json.dump(serial, json_output, indent=2)
print("Done!")
if __name__ == "__main__":
print("Google username: ", end='')
user = input()
passphrase = getpass("Google passphrase (or app-specific password for 2FA users): ")
with open('output_library.json', 'w+') as json_file:
parse_library_to_json(user, passphrase, json_file)
|
[
"json.dump",
"traceback.print_exc",
"uuid.uuid4",
"getpass.getpass",
"uuid.UUID",
"gmusicapi.clients.Mobileclient"
] |
[((789, 803), 'gmusicapi.clients.Mobileclient', 'Mobileclient', ([], {}), '()\n', (801, 803), False, 'from gmusicapi.clients import Mobileclient\n'), ((5457, 5497), 'json.dump', 'json.dump', (['serial', 'json_output'], {'indent': '(2)'}), '(serial, json_output, indent=2)\n', (5466, 5497), False, 'import json\n'), ((5623, 5694), 'getpass.getpass', 'getpass', (['"""Google passphrase (or app-specific password for 2FA users): """'], {}), "('Google passphrase (or app-specific password for 2FA users): ')\n", (5630, 5694), False, 'from getpass import getpass\n'), ((1308, 1328), 'uuid.UUID', 'UUID', (["api_song['id']"], {}), "(api_song['id'])\n", (1312, 1328), False, 'from uuid import UUID, uuid4\n'), ((2191, 2208), 'uuid.UUID', 'UUID', (["entry['id']"], {}), "(entry['id'])\n", (2195, 2208), False, 'from uuid import UUID, uuid4\n'), ((1659, 1680), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1678, 1680), False, 'import traceback\n'), ((4416, 4437), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4435, 4437), False, 'import traceback\n'), ((3699, 3712), 'uuid.UUID', 'UUID', (['base_id'], {}), '(base_id)\n', (3703, 3712), False, 'from uuid import UUID, uuid4\n'), ((4214, 4235), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4233, 4235), False, 'import traceback\n'), ((3177, 3184), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3182, 3184), False, 'from uuid import UUID, uuid4\n')]
|
import math
import numpy as np
from parameter import *
if using_salome:
from parameter_salome import *
else:
from parameter_gmsh import *
if workpiece_type_id == 1:
disc_H = 0.01; #same with cutter now
length_scale = disc_R;
#if is_straight_chip:
# mesh_file = meshfolder + "/metal_cut_straight_chip.h5"
else:
#rect work coord, already provided in salome
length_scale = disc_R
if is_straight_chip:
if using_salome:
chip_Y = chip_top_y # chip_length * math.cos(cutter_angle_v*math.pi/180);
else:
chip_Y = length_scale;
# still needed velocity expression
chip_speed = cutting_speed * (feed_thickness/chip_thickness)
frequency = cutting_speed / disc_R / 2 / math.pi
direction = -1
omega = cutting_speed / (disc_R+0.5*feed_thickness)
chip_omega = cutting_speed * (feed_thickness/chip_thickness) / (chip_radius ) # correct
chip_sliding_vel_x = chip_speed*math.sin(cutter_angle_v*math.pi/180)
chip_sliding_vel_y = chip_speed*math.cos(cutter_angle_v*math.pi/180)
#print("chip_speed == chip_omega*(chip_radius+0.5*chip_thickness)?", chip_speed, chip_omega*(chip_radius+0.5*chip_thickness))
#chip_shear_angle = math.atan(-chip_start_y/chip_start_x); # phi, recently renamed in gmsh
chip_friction_end_x = chip_friction_distance*math.sin(cutter_angle_v*math.pi/180);
chip_friction_end_y = chip_friction_distance*math.cos(cutter_angle_v*math.pi/180);
#chip_sliding_end_x = chip_sliding_distance*math.sin(cutter_angle_v*math.pi/180);
#chip_sliding_end_y = chip_sliding_distance*math.cos(cutter_angle_v*math.pi/180);
chip_center_x = chip_friction_end_x - (chip_radius+chip_thickness)*math.cos(cutter_angle_v*math.pi/180);
chip_center_y = chip_friction_end_y + (chip_radius+chip_thickness)*math.sin(cutter_angle_v*math.pi/180);
# need only by python code
p_chip_end_center_x = chip_center_x + (chip_radius + 0.5*chip_thickness) * math.cos(chip_end_angle*math.pi/180);
p_chip_end_center_y = chip_center_y + (chip_radius + 0.5*chip_thickness) * math.sin(chip_end_angle*math.pi/180);
p_chip_end_center_z = 0.5*cutter_thickness;
p_chip_end_xyz = p_chip_end_center_x, p_chip_end_center_y, p_chip_end_center_z
##################################################
if is_straight_chip:
p_chip_end_o_x = chip_Y * math.tan(cutter_angle_v*math.pi/180);
p_chip_end_o_y = chip_Y;
p_chip_end_i_x = chip_Y * math.tan(cutter_angle_v*math.pi/180) - chip_thickness/math.cos(cutter_angle_v*math.pi/180);
p_chip_end_i_y = chip_Y;
def check_distance():
#line p,q
p = np.array([chip_friction_end_x, chip_friction_end_y, 0])
q = np.array([p_chip_end_o_x, p_chip_end_o_y, 0])
r1 = np.array([p_chip_end_i_x, p_chip_end_i_y, 0])
r2 = np.array([chip_start_x, chip_start_y, 0])
def t(p, q, r):
x = p-q
return np.dot(r-q, x)/np.dot(x, x)
def d(p, q, r):
return np.linalg.norm(t(p, q, r)*(p-q)+q-r)
print('check distannce must match, ', d(p, q, r1), d(p, q, r2))
# Prints 1.0
check_distance()
if using_3D:
dim = 3
else:
dim = 2
###########################################
# Code for C++ evaluation of velocity
velocity_code = '''
class Velocity : public Expression
{
public:
// Create expression with any components
Velocity() : Expression(%d) {}
// Function for evaluating expression on each cell
void eval(Array<double>& values, const Array<double>& x, const ufc::cell& cell) const
{
const double x0 = %f;
const double y0 = %f;
const double feed_thickness = %f;
const double omega = %f;
const uint cell_index = cell.index;
const size_t did = (*subdomain_id)[cell_index];
const double cx0 = %f;
const double cy0 = %f;
const double comega = %f;
const double chip_sliding_end_x = %f;
const double chip_sliding_end_y = %f;
const double chip_sliding_vel_x = %f;
const double chip_sliding_vel_y = %f;
const double cutter_angle_v = %f;
const double work_vel_x = y0*omega;
const double turning_center_x = %f;
const double turning_center_y = %f;
const double fillet_r = %f;
const double a_phi = %f;
const double a_start = -pi/2;
double a_delta = pi/2.0 - cutter_angle_v*pi/180;
const int workpiece_type_id = %d;
const int is_straight_chip = %d;
const int using_fillet_shear_zone = %d;
const int using_double_shear_heat_layer = %d;
const int work_subdomain_id = %d;
const int chip_subdomain_id = %d;
const int shear_subdomain_id = %d;
const int friction_subdomain_id = %d;
values[0] = 0.0;
values[1] = 0.0;
//values[2] = 0.0;
if(did == work_subdomain_id) { // workpiece, left and right has diff radius an center
if(workpiece_type_id == 1) { // is disc
double r = sqrt((x[0]-x0)*(x[0]-x0) + (x[1]-y0)*(x[1]-y0));
double v = omega * r;
double a = atan2((x[1]-y0), (x[0]-x0));
if (x[0]<0) {
double y0_1 = y0 + feed_thickness/2.0;
r = sqrt((x[0]-x0)*(x[0]-x0) + (x[1]-y0_1)*(x[1]-y0_1));
v = omega * r + feed_thickness/2.0;
a = atan2((x[1]-y0_1), (x[0]-x0));
}
values[0] = -v * sin(a);
values[1] = v * cos(a);
}
else { // workpiece rectangle
values[0] = work_vel_x; // only x-axis speed
values[1] = 0.0;
}
}
else if(did == chip_subdomain_id) { // chip, consisting of straight and arc sections
if (is_straight_chip == 0) {
double a = atan2((x[1]-cy0), (x[0]-cx0));
//if (x[0] < chip_sliding_end_x && x[1] > chip_sliding_end_y) {
if (a > (-cutter_angle_v*pi/180.0)) {
double r = sqrt((x[0]-cx0)*(x[0]-cx0) + (x[1]-cy0)*(x[1]-cy0));
double v = comega * r;
values[0] = v * sin(a);
values[1] = -v * cos(a);
}
}
else {
values[0] = chip_sliding_vel_x;
values[1] = chip_sliding_vel_y;
}
}
else if(did == shear_subdomain_id) {
if(using_fillet_shear_zone) {// shear zone has the fillet
//double a_intersection = a_t - (pi - a_phi);
double dist = sqrt((x[0]-turning_center_x)*(x[0]-turning_center_x) + (x[1]-turning_center_y)*(x[1]-turning_center_y));
double shift = dist - fillet_r;
double shifted_tcx = turning_center_x + shift * cos(a_phi);
double shifted_tcy = turning_center_y - shift * sin(a_phi);
double shifted_a = atan2((x[1]-shifted_tcy), (x[0]-shifted_tcx));
double v_chip = sqrt(chip_sliding_vel_x*chip_sliding_vel_x + chip_sliding_vel_y*chip_sliding_vel_y);
double v_feed = y0*omega;
double v = v_chip + (1.0 - (shifted_a - a_start)/a_delta) * (v_feed - v_chip);
values[0] = v * sin(-shifted_a);
values[1] = v * cos(-shifted_a);
}
else {
values[0] = chip_sliding_vel_x;
values[1] = chip_sliding_vel_y;
if(using_double_shear_heat_layer) { // double mapping bc
double a_t = atan2((x[1]), (x[0]));
if ((a_t - (pi - a_phi)) > 1e-3) {
values[0] = work_vel_x; // only x-axis speed
values[1] = 0;
}
}
}
}
else if(did == friction_subdomain_id) { // friction thin layer inside chip
values[0] = chip_sliding_vel_x;
values[1] = chip_sliding_vel_y;
}
else {
values[0] = 0.0;
values[1] = 0.0;
}
}
// The data stored in mesh functions
std::shared_ptr<MeshFunction<std::size_t> > subdomain_id;
};
'''%(dim, 0, -disc_R, feed_thickness, omega*direction, chip_center_x, chip_center_y, chip_omega*direction,
chip_friction_end_x, chip_friction_end_y, chip_sliding_vel_x, chip_sliding_vel_y, cutter_angle_v,
turning_center_x, turning_center_y, fillet_r, shear_angle * math.pi/180,
workpiece_type_id, is_straight_chip, int(using_fillet_shear_zone), int(using_double_shear_heat_layer),
work_subdomain_id, chip_subdomain_id, shear_subdomain_id, friction_subdomain_id
)
#print(velocity_code)
|
[
"math.tan",
"math.sin",
"numpy.array",
"math.cos",
"numpy.dot"
] |
[((921, 961), 'math.sin', 'math.sin', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (929, 961), False, 'import math\n'), ((990, 1030), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (998, 1030), False, 'import math\n'), ((1293, 1333), 'math.sin', 'math.sin', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1301, 1333), False, 'import math\n'), ((1376, 1416), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1384, 1416), False, 'import math\n'), ((1646, 1686), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1654, 1686), False, 'import math\n'), ((1751, 1791), 'math.sin', 'math.sin', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1759, 1791), False, 'import math\n'), ((1892, 1932), 'math.cos', 'math.cos', (['(chip_end_angle * math.pi / 180)'], {}), '(chip_end_angle * math.pi / 180)\n', (1900, 1932), False, 'import math\n'), ((2005, 2045), 'math.sin', 'math.sin', (['(chip_end_angle * math.pi / 180)'], {}), '(chip_end_angle * math.pi / 180)\n', (2013, 2045), False, 'import math\n'), ((2269, 2309), 'math.tan', 'math.tan', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (2277, 2309), False, 'import math\n'), ((2543, 2598), 'numpy.array', 'np.array', (['[chip_friction_end_x, chip_friction_end_y, 0]'], {}), '([chip_friction_end_x, chip_friction_end_y, 0])\n', (2551, 2598), True, 'import numpy as np\n'), ((2611, 2656), 'numpy.array', 'np.array', (['[p_chip_end_o_x, p_chip_end_o_y, 0]'], {}), '([p_chip_end_o_x, p_chip_end_o_y, 0])\n', (2619, 2656), True, 'import numpy as np\n'), ((2670, 2715), 'numpy.array', 'np.array', (['[p_chip_end_i_x, p_chip_end_i_y, 0]'], {}), '([p_chip_end_i_x, p_chip_end_i_y, 0])\n', (2678, 2715), True, 'import numpy as np\n'), ((2729, 
2770), 'numpy.array', 'np.array', (['[chip_start_x, chip_start_y, 0]'], {}), '([chip_start_x, chip_start_y, 0])\n', (2737, 2770), True, 'import numpy as np\n'), ((2366, 2406), 'math.tan', 'math.tan', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (2374, 2406), False, 'import math\n'), ((2420, 2460), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (2428, 2460), False, 'import math\n'), ((2835, 2851), 'numpy.dot', 'np.dot', (['(r - q)', 'x'], {}), '(r - q, x)\n', (2841, 2851), True, 'import numpy as np\n'), ((2850, 2862), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (2856, 2862), True, 'import numpy as np\n')]
|
# 16-TaterBot main.py
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import actions as act
import constants as c
from sensors import DEBUG
from servos import moveClaw
def main():
act.init()
#act.disposeOfDirt()
act.goToWestPile()
act.grabWestPile()
act.wiggle()
act.wiggle()
act.wiggle()
moveClaw(c.clawClose, 10)
act.goToTaterBin()
act.depositWestPile()
act.backUpFromBin()
act.goToNorthernPile()
act.grabNorthPile()
act.recollectNorthPile()
act.grabBin()
DEBUG()
act.turnToSouth()
act.grabMiddlePile()
act.grabSouthPile()
'''act.goToHome()
act.deliverBin()
act.releaseBin()
act.goToCenter()
act.grabCube()
act.returnToBase()
act.scoreCube()
act.attempt()
'''
DEBUG()
if __name__ == '__main__':
# set print to unbuffered
import os
import sys
sys.stdout = os.fdopen(sys.stdout.fileno(),'w',0)
main()
|
[
"actions.grabWestPile",
"actions.recollectNorthPile",
"servos.moveClaw",
"actions.goToTaterBin",
"sensors.DEBUG",
"actions.grabBin",
"actions.depositWestPile",
"actions.grabNorthPile",
"actions.grabMiddlePile",
"actions.turnToSouth",
"sys.stdout.fileno",
"actions.backUpFromBin",
"actions.wiggle",
"actions.grabSouthPile",
"actions.goToWestPile",
"actions.goToNorthernPile",
"actions.init"
] |
[((216, 226), 'actions.init', 'act.init', ([], {}), '()\n', (224, 226), True, 'import actions as act\n'), ((258, 276), 'actions.goToWestPile', 'act.goToWestPile', ([], {}), '()\n', (274, 276), True, 'import actions as act\n'), ((282, 300), 'actions.grabWestPile', 'act.grabWestPile', ([], {}), '()\n', (298, 300), True, 'import actions as act\n'), ((306, 318), 'actions.wiggle', 'act.wiggle', ([], {}), '()\n', (316, 318), True, 'import actions as act\n'), ((324, 336), 'actions.wiggle', 'act.wiggle', ([], {}), '()\n', (334, 336), True, 'import actions as act\n'), ((342, 354), 'actions.wiggle', 'act.wiggle', ([], {}), '()\n', (352, 354), True, 'import actions as act\n'), ((360, 385), 'servos.moveClaw', 'moveClaw', (['c.clawClose', '(10)'], {}), '(c.clawClose, 10)\n', (368, 385), False, 'from servos import moveClaw\n'), ((391, 409), 'actions.goToTaterBin', 'act.goToTaterBin', ([], {}), '()\n', (407, 409), True, 'import actions as act\n'), ((416, 437), 'actions.depositWestPile', 'act.depositWestPile', ([], {}), '()\n', (435, 437), True, 'import actions as act\n'), ((443, 462), 'actions.backUpFromBin', 'act.backUpFromBin', ([], {}), '()\n', (460, 462), True, 'import actions as act\n'), ((468, 490), 'actions.goToNorthernPile', 'act.goToNorthernPile', ([], {}), '()\n', (488, 490), True, 'import actions as act\n'), ((496, 515), 'actions.grabNorthPile', 'act.grabNorthPile', ([], {}), '()\n', (513, 515), True, 'import actions as act\n'), ((521, 545), 'actions.recollectNorthPile', 'act.recollectNorthPile', ([], {}), '()\n', (543, 545), True, 'import actions as act\n'), ((551, 564), 'actions.grabBin', 'act.grabBin', ([], {}), '()\n', (562, 564), True, 'import actions as act\n'), ((570, 577), 'sensors.DEBUG', 'DEBUG', ([], {}), '()\n', (575, 577), False, 'from sensors import DEBUG\n'), ((583, 600), 'actions.turnToSouth', 'act.turnToSouth', ([], {}), '()\n', (598, 600), True, 'import actions as act\n'), ((606, 626), 'actions.grabMiddlePile', 'act.grabMiddlePile', ([], {}), '()\n', 
(624, 626), True, 'import actions as act\n'), ((632, 651), 'actions.grabSouthPile', 'act.grabSouthPile', ([], {}), '()\n', (649, 651), True, 'import actions as act\n'), ((842, 849), 'sensors.DEBUG', 'DEBUG', ([], {}), '()\n', (847, 849), False, 'from sensors import DEBUG\n'), ((974, 993), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (991, 993), False, 'import sys\n')]
|
import torch
import torch.nn.functional as F
from torch import nn
from torch import sigmoid, tanh, relu_
class LockedDropout(nn.Module):
    """Variational ("locked") dropout for 3-d sequence tensors.

    A single Bernoulli mask is sampled per (dim-1, dim-2) slot and shared
    across the first dimension, then rescaled so expectations are preserved.
    """

    def __init__(self, dropout):
        self.dropout = dropout
        super().__init__()

    def forward(self, x):
        # Identity at eval time or when the drop probability is zero/None.
        if not self.training or not self.dropout:
            return x
        keep_prob = 1 - self.dropout
        # One mask broadcast over dim 0 (the "locked" part), inverse-scaled.
        noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(keep_prob)
        scaled_mask = (noise / keep_prob).expand_as(x)
        return x * scaled_mask
class WeightDrop(nn.Module):
    """Apply DropConnect to named weight matrices of a wrapped module.

    Each listed weight is re-registered as ``<name>_raw``; before every
    forward pass a dropout mask is applied to the raw tensor and the
    result is re-installed under the original attribute name.
    """
    def __init__(self, module, weights, dropout=0.0, variational=False):
        super(WeightDrop, self).__init__()
        self.module = module            # wrapped module (typically an RNN)
        self.weights = weights          # attribute names of weights to drop
        self.dropout = dropout          # drop probability
        self.variational = variational  # row-wise mask instead of per-element
        self._setup()
    def widget_demagnetizer_y2k_edition(*args, **kwargs):
        # We need to replace flatten_parameters with a nothing function
        # It must be a function rather than a lambda as otherwise pickling explodes
        # We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!
        # (╯°□°)╯︵ ┻━┻
        return
    def _setup(self):
        # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
        if issubclass(type(self.module), torch.nn.RNNBase):
            self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
        for name_w in self.weights:
            print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
            w = getattr(self.module, name_w)
            # Move the actual parameter to '<name>_raw' so that '<name>' can be
            # rebuilt from it with a fresh dropout mask on every forward pass.
            del self.module._parameters[name_w]
            self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))
    def set_weights(self):
        # Recompute every dropped weight from its '<name>_raw' master copy.
        for name_w in self.weights:
            raw_w = getattr(self.module, name_w + '_raw')
            if self.variational:
                # Variational: one mask value per row, broadcast across columns.
                mask = torch.ones(raw_w.size(0), 1)
                if raw_w.is_cuda: mask = mask.cuda()
                mask = F.dropout(mask, p=self.dropout, training=True)
                w = mask.expand_as(raw_w) * raw_w
            else:
                # Standard DropConnect: independent per-element dropout.
                w = F.dropout(raw_w, p=self.dropout, training=self.training)
            w = nn.Parameter(w)
            setattr(self.module, name_w, w)
    def forward(self, *args):
        self.set_weights()
        return self.module.forward(*args)
class NASCell(nn.Module):
    """Recurrent cell from Neural Architecture Search (Zoph & Le).

    One step applies two layers of elementwise gate combinations over
    eight gate slices. State is an ``(m, c)`` pair; an optional
    projection maps the output to ``num_proj`` units.
    """
    def __init__(self, input_size, hidden_size, num_proj=None, use_biases=False):
        super(NASCell, self).__init__()
        self._num_units = hidden_size
        self._num_proj = num_proj
        self._use_biases = use_biases
        self._input_size = input_size
        num_proj = self._num_units if num_proj is None else num_proj
        # Fused weight matrices; split into 8 gate blocks inside forward().
        self.concat_w_m = nn.Parameter(torch.randn(num_proj, 8 * self._num_units))
        self.concat_w_inputs = nn.Parameter(torch.randn(self._input_size, 8 * self._num_units))
        if use_biases:
            self.bias = nn.Parameter(torch.randn(8 * self._num_units))
        if self._num_proj is not None:
            self.concat_w_proj = nn.Parameter(torch.randn(self._num_units, 8 * self._num_proj))
    def forward(self, input, state):
        """Run one cell step; returns ``(output, (new_m, new_c))``."""
        (m_prev, c_prev) = state
        m_matrix = torch.mm(m_prev, self.concat_w_m)
        input_matrix = torch.mm(input, self.concat_w_inputs)
        if self._use_biases:
            # BUG FIX: the bias parameter is registered as ``self.bias``;
            # the original referenced a nonexistent ``self.b`` and crashed
            # whenever use_biases=True.
            m_matrix = torch.add(m_matrix, self.bias)
        # Split the fused projections into the 8 gate slices.
        m_matrix_splits = torch.split(m_matrix, self._num_units, dim=1)
        inputs_matrix_splits = torch.split(input_matrix, self._num_units, dim=1)
        # First layer of the NAS op tree.
        layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
        layer1_1 = relu_(inputs_matrix_splits[1] + m_matrix_splits[1])
        layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
        layer1_3 = relu_(inputs_matrix_splits[3] * m_matrix_splits[3])
        layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
        layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
        layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
        layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
        # Second layer combines first-layer outputs pairwise.
        l2_0 = tanh(layer1_0 * layer1_1)
        l2_1 = tanh(layer1_2 + layer1_3)
        l2_2 = tanh(layer1_4 * layer1_5)
        l2_3 = sigmoid(layer1_6 + layer1_7)
        # Inject the previous cell state into the first branch.
        l2_0 = tanh(l2_0 + c_prev)
        l3_0_pre = l2_0 * l2_1
        new_c = l3_0_pre
        l3_0 = l3_0_pre
        l3_1 = tanh(l2_2 + l2_3)
        new_m = tanh(l3_0 * l3_1)
        if self._num_proj is not None:
            new_m = torch.mm(new_m, self.concat_w_proj)
        return new_m, (new_m, new_c)
class R2N2_VAR(nn.Module):
    """VAR-augmented recurrent model: a weight-dropped bidirectional LSTM
    encoder plus a sum of per-lag linear (VAR) terms over the first P steps.
    """
    def __init__(self, num_inputs, rnn_config):
        super(R2N2_VAR, self).__init__()
        num_hidden, P, num_layer, dropout = rnn_config
        # BUG FIX: self.P was never assigned, so range(self.P) below raised
        # AttributeError; keep the lag count from rnn_config.
        self.P = P
        # NOTE(review): num_layer from rnn_config is unused; the LSTM depth is
        # hard-coded to 2 as in the original — confirm intent before changing.
        self.encoder_rnn = nn.LSTM(num_inputs, num_hidden, 2, batch_first=True, bidirectional=True)
        # BUG FIX: a stray `self.encoder_rnn = nn.GRUCell` overwrote the LSTM
        # instance with a bare class, making WeightDrop's setup crash; removed.
        self.wdec = WeightDrop(self.encoder_rnn, ['weight_hh_l0', 'weight_ih_l0'], dropout=dropout)
        self.A = nn.ModuleList([nn.Linear(num_inputs, num_inputs) for _ in range(self.P)])
        self.lock_drop = LockedDropout(dropout)
        self.output_layer = nn.Linear(num_hidden * 2, num_inputs)
    def forward(self, y, last_state=None):
        """Return predictions for input ``y`` of shape (batch, time, num_inputs)."""
        # VAR component: sum of per-lag linear maps over the first P steps.
        sum_wyb = torch.zeros_like(y[:, 0, :])
        for idx, layer in enumerate(self.A):
            sum_wyb += layer(y[:, idx, :])
        # RNN component: weight-dropped BiLSTM -> locked dropout -> last step.
        encoded_y, hidden_state = self.wdec(y, last_state)
        encoded_y = self.lock_drop(encoded_y)
        outputs = self.output_layer(encoded_y[:, -1, :])
        return outputs + sum_wyb
|
[
"torch.nn.Parameter",
"torch.zeros_like",
"torch.relu_",
"torch.add",
"torch.split",
"torch.nn.functional.dropout",
"torch.mm",
"torch.randn",
"torch.sigmoid",
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.tanh"
] |
[((3292, 3325), 'torch.mm', 'torch.mm', (['m_prev', 'self.concat_w_m'], {}), '(m_prev, self.concat_w_m)\n', (3300, 3325), False, 'import torch\n'), ((3350, 3387), 'torch.mm', 'torch.mm', (['input', 'self.concat_w_inputs'], {}), '(input, self.concat_w_inputs)\n', (3358, 3387), False, 'import torch\n'), ((3499, 3544), 'torch.split', 'torch.split', (['m_matrix', 'self._num_units'], {'dim': '(1)'}), '(m_matrix, self._num_units, dim=1)\n', (3510, 3544), False, 'import torch\n'), ((3577, 3626), 'torch.split', 'torch.split', (['input_matrix', 'self._num_units'], {'dim': '(1)'}), '(input_matrix, self._num_units, dim=1)\n', (3588, 3626), False, 'import torch\n'), ((3649, 3702), 'torch.sigmoid', 'sigmoid', (['(inputs_matrix_splits[0] + m_matrix_splits[0])'], {}), '(inputs_matrix_splits[0] + m_matrix_splits[0])\n', (3656, 3702), False, 'from torch import sigmoid, tanh, relu_\n'), ((3723, 3774), 'torch.relu_', 'relu_', (['(inputs_matrix_splits[1] + m_matrix_splits[1])'], {}), '(inputs_matrix_splits[1] + m_matrix_splits[1])\n', (3728, 3774), False, 'from torch import sigmoid, tanh, relu_\n'), ((3795, 3848), 'torch.sigmoid', 'sigmoid', (['(inputs_matrix_splits[2] + m_matrix_splits[2])'], {}), '(inputs_matrix_splits[2] + m_matrix_splits[2])\n', (3802, 3848), False, 'from torch import sigmoid, tanh, relu_\n'), ((3869, 3920), 'torch.relu_', 'relu_', (['(inputs_matrix_splits[3] * m_matrix_splits[3])'], {}), '(inputs_matrix_splits[3] * m_matrix_splits[3])\n', (3874, 3920), False, 'from torch import sigmoid, tanh, relu_\n'), ((3941, 3991), 'torch.tanh', 'tanh', (['(inputs_matrix_splits[4] + m_matrix_splits[4])'], {}), '(inputs_matrix_splits[4] + m_matrix_splits[4])\n', (3945, 3991), False, 'from torch import sigmoid, tanh, relu_\n'), ((4012, 4065), 'torch.sigmoid', 'sigmoid', (['(inputs_matrix_splits[5] + m_matrix_splits[5])'], {}), '(inputs_matrix_splits[5] + m_matrix_splits[5])\n', (4019, 4065), False, 'from torch import sigmoid, tanh, relu_\n'), ((4086, 4136), 'torch.tanh', 'tanh', 
(['(inputs_matrix_splits[6] + m_matrix_splits[6])'], {}), '(inputs_matrix_splits[6] + m_matrix_splits[6])\n', (4090, 4136), False, 'from torch import sigmoid, tanh, relu_\n'), ((4157, 4210), 'torch.sigmoid', 'sigmoid', (['(inputs_matrix_splits[7] + m_matrix_splits[7])'], {}), '(inputs_matrix_splits[7] + m_matrix_splits[7])\n', (4164, 4210), False, 'from torch import sigmoid, tanh, relu_\n'), ((4229, 4254), 'torch.tanh', 'tanh', (['(layer1_0 * layer1_1)'], {}), '(layer1_0 * layer1_1)\n', (4233, 4254), False, 'from torch import sigmoid, tanh, relu_\n'), ((4271, 4296), 'torch.tanh', 'tanh', (['(layer1_2 + layer1_3)'], {}), '(layer1_2 + layer1_3)\n', (4275, 4296), False, 'from torch import sigmoid, tanh, relu_\n'), ((4313, 4338), 'torch.tanh', 'tanh', (['(layer1_4 * layer1_5)'], {}), '(layer1_4 * layer1_5)\n', (4317, 4338), False, 'from torch import sigmoid, tanh, relu_\n'), ((4355, 4383), 'torch.sigmoid', 'sigmoid', (['(layer1_6 + layer1_7)'], {}), '(layer1_6 + layer1_7)\n', (4362, 4383), False, 'from torch import sigmoid, tanh, relu_\n'), ((4402, 4421), 'torch.tanh', 'tanh', (['(l2_0 + c_prev)'], {}), '(l2_0 + c_prev)\n', (4406, 4421), False, 'from torch import sigmoid, tanh, relu_\n'), ((4527, 4544), 'torch.tanh', 'tanh', (['(l2_2 + l2_3)'], {}), '(l2_2 + l2_3)\n', (4531, 4544), False, 'from torch import sigmoid, tanh, relu_\n'), ((4562, 4579), 'torch.tanh', 'tanh', (['(l3_0 * l3_1)'], {}), '(l3_0 * l3_1)\n', (4566, 4579), False, 'from torch import sigmoid, tanh, relu_\n'), ((4922, 4994), 'torch.nn.LSTM', 'nn.LSTM', (['num_inputs', 'num_hidden', '(2)'], {'batch_first': '(True)', 'bidirectional': '(True)'}), '(num_inputs, num_hidden, 2, batch_first=True, bidirectional=True)\n', (4929, 4994), False, 'from torch import nn\n'), ((5305, 5342), 'torch.nn.Linear', 'nn.Linear', (['(num_hidden * 2)', 'num_inputs'], {}), '(num_hidden * 2, num_inputs)\n', (5314, 5342), False, 'from torch import nn\n'), ((5408, 5436), 'torch.zeros_like', 'torch.zeros_like', (['y[:, 0, :]'], 
{}), '(y[:, 0, :])\n', (5424, 5436), False, 'import torch\n'), ((2238, 2253), 'torch.nn.Parameter', 'nn.Parameter', (['w'], {}), '(w)\n', (2250, 2253), False, 'from torch import nn\n'), ((2824, 2866), 'torch.randn', 'torch.randn', (['num_proj', '(8 * self._num_units)'], {}), '(num_proj, 8 * self._num_units)\n', (2835, 2866), False, 'import torch\n'), ((2913, 2963), 'torch.randn', 'torch.randn', (['self._input_size', '(8 * self._num_units)'], {}), '(self._input_size, 8 * self._num_units)\n', (2924, 2963), False, 'import torch\n'), ((3442, 3469), 'torch.add', 'torch.add', (['m_matrix', 'self.b'], {}), '(m_matrix, self.b)\n', (3451, 3469), False, 'import torch\n'), ((4641, 4676), 'torch.mm', 'torch.mm', (['new_m', 'self.concat_w_proj'], {}), '(new_m, self.concat_w_proj)\n', (4649, 4676), False, 'import torch\n'), ((1713, 1733), 'torch.nn.Parameter', 'nn.Parameter', (['w.data'], {}), '(w.data)\n', (1725, 1733), False, 'from torch import nn\n'), ((2026, 2072), 'torch.nn.functional.dropout', 'F.dropout', (['mask'], {'p': 'self.dropout', 'training': '(True)'}), '(mask, p=self.dropout, training=True)\n', (2035, 2072), True, 'import torch.nn.functional as F\n'), ((2164, 2220), 'torch.nn.functional.dropout', 'F.dropout', (['raw_w'], {'p': 'self.dropout', 'training': 'self.training'}), '(raw_w, p=self.dropout, training=self.training)\n', (2173, 2220), True, 'import torch.nn.functional as F\n'), ((3027, 3059), 'torch.randn', 'torch.randn', (['(8 * self._num_units)'], {}), '(8 * self._num_units)\n', (3038, 3059), False, 'import torch\n'), ((3148, 3196), 'torch.randn', 'torch.randn', (['self._num_units', '(8 * self._num_proj)'], {}), '(self._num_units, 8 * self._num_proj)\n', (3159, 3196), False, 'import torch\n'), ((5168, 5201), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'num_inputs'], {}), '(num_inputs, num_inputs)\n', (5177, 5201), False, 'from torch import nn\n')]
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from tornado.escape import json_decode
from qiita_db.study import Study
from qiita_pet.test.rest.test_base import RESTHandlerTestCase
class StudyHandlerTests(RESTHandlerTestCase):
    """Tests for GET /api/v1/study/<id>."""

    def test_get_valid(self):
        # Expected full metadata payload for the pre-loaded test study (id=1).
        exp = {u'title': u'Identification of the Microbiomes for Cannabis '
                         u'Soils',
               u'contacts': {'principal_investigator': [u'PIDude',
                                                        u'<NAME>',
                                                        u'<EMAIL>'],
                             'lab_person': [u'LabDude',
                                            u'knight lab',
                                            u'<EMAIL>']},
               u'study_abstract':
                   (u'This is a preliminary study to examine the '
                    u'microbiota associated with the Cannabis plant. '
                    u'Soils samples from the bulk soil, soil '
                    u'associated with the roots, and the rhizosphere '
                    u'were extracted and the DNA sequenced. Roots '
                    u'from three independent plants of different '
                    u'strains were examined. These roots were '
                    u'obtained November 11, 2011 from plants that '
                    u'had been harvested in the summer. Future '
                    u'studies will attempt to analyze the soils and '
                    u'rhizospheres from the same location at '
                    u'different time points in the plant lifecycle.'),
               u'study_description': (u'Analysis of the Cannabis Plant '
                                      u'Microbiome'),
               u'study_alias': 'Cannabis Soils'}
        response = self.get('/api/v1/study/1', headers=self.headers)
        self.assertEqual(response.code, 200)
        obs = json_decode(response.body)
        self.assertEqual(obs, exp)

    def test_get_invalid(self):
        # Unknown study id -> 404 with an explanatory message.
        response = self.get('/api/v1/study/0', headers=self.headers)
        self.assertEqual(response.code, 404)
        self.assertEqual(json_decode(response.body),
                         {'message': 'Study not found'})

    def test_get_invalid_negative(self):
        # Negative ids do not match the route regex -> 404 from the router.
        response = self.get('/api/v1/study/-1', headers=self.headers)
        self.assertEqual(response.code, 404)
        # not asserting the body content as this is not a valid URI according
        # to the regex associating the handler to the webserver

    def test_get_invalid_namespace(self):
        # Non-integer ids likewise fail route matching -> 404.
        response = self.get('/api/v1/study/1.11111', headers=self.headers)
        self.assertEqual(response.code, 404)
        # not asserting the body content as this is not a valid URI according
        # to the regex associating the handler to the webserver
class StudyCreatorTests(RESTHandlerTestCase):
    """Tests for POST /api/v1/study (study creation)."""

    def test_post_malformed_study(self):
        # Payload missing every required field -> 400 Bad Request.
        response = self.post('/api/v1/study', data={'foo': 'bar'},
                             headers=self.headers, asjson=True)
        self.assertEqual(response.code, 400)

    def test_post_already_exists(self):
        # Title collides with the pre-loaded test study -> 409 Conflict.
        payload = {'title': 'Identification of the Microbiomes for Cannabis '
                            'Soils',
                   'study_abstract': 'stuff',
                   'study_description': 'asdasd',
                   'owner': '<EMAIL>',
                   'study_alias': 'blah',
                   'notes': '',
                   'contacts': {'principal_investigator': [u'PIDude',
                                                           u'<EMAIL>'],
                                'lab_person': [u'LabDude',
                                               u'<EMAIL>']}}
        response = self.post('/api/v1/study', data=payload, asjson=True,
                             headers=self.headers)
        self.assertEqual(response.code, 409)
        obs = json_decode(response.body)
        self.assertEqual(obs,
                         {'message': 'Study title already exists'})

    def test_post_valid(self):
        payload = {'title': 'foo',
                   'study_abstract': 'stuff',
                   'study_description': 'asdasd',
                   'owner': '<EMAIL>',
                   'study_alias': 'blah',
                   'notes': '',
                   'contacts': {'principal_investigator': [u'PIDude',
                                                           u'Wash U'],
                                'lab_person': [u'LabDude',
                                               u'knight lab']}}
        response = self.post('/api/v1/study', data=payload,
                             headers=self.headers, asjson=True)
        self.assertEqual(response.code, 201)

        # Verify the study was persisted with exactly the submitted metadata.
        study_id = json_decode(response.body)['id']
        study = Study(int(study_id))
        self.assertEqual(study.title, payload['title'])
        self.assertEqual(study.info['study_abstract'],
                         payload['study_abstract'])
        self.assertEqual(study.info['study_description'],
                         payload['study_description'])
        self.assertEqual(study.info['study_alias'], payload['study_alias'])
        self.assertEqual(study.owner.email, payload['owner'])
        self.assertEqual(study.info['principal_investigator'].name,
                         payload['contacts']['principal_investigator'][0])
        self.assertEqual(study.info['principal_investigator'].affiliation,
                         payload['contacts']['principal_investigator'][1])
        self.assertEqual(study.info['lab_person'].name,
                         payload['contacts']['lab_person'][0])
        self.assertEqual(study.info['lab_person'].affiliation,
                         payload['contacts']['lab_person'][1])

    def test_post_invalid_user(self):
        # Owner email does not correspond to a known user -> 403 Forbidden.
        payload = {'title': 'foo',
                   'study_abstract': 'stuff',
                   'study_description': 'asdasd',
                   'owner': '<EMAIL>',
                   'study_alias': 'blah',
                   'notes': '',
                   'contacts': {'principal_investigator': [u'PIDude',
                                                           u'Wash U'],
                                'lab_person': [u'LabDude',
                                               u'knight lab']}}
        response = self.post('/api/v1/study', data=payload,
                             headers=self.headers, asjson=True)
        self.assertEqual(response.code, 403)
        obs = json_decode(response.body)
        self.assertEqual(obs, {'message': 'Unknown user'})
class StudyStatusHandlerTests(RESTHandlerTestCase):
    """Tests for GET /api/v1/study/<id>/status."""

    def test_get_no_study(self):
        # Unknown study id -> 404 with an explanatory message.
        resp = self.get('/api/v1/study/0/status', headers=self.headers)
        self.assertEqual(resp.code, 404)
        self.assertEqual(json_decode(resp.body), {'message': 'Study not found'})

    def test_get_valid(self):
        # Known study (id=1) reports its full status summary.
        resp = self.get('/api/v1/study/1/status', headers=self.headers)
        self.assertEqual(resp.code, 200)
        expected = {
            'is_public': False,
            'has_sample_information': True,
            'sample_information_has_warnings': False,
            'preparations': [{'id': 1, 'has_artifact': True},
                             {'id': 2, 'has_artifact': True}],
        }
        self.assertEqual(json_decode(resp.body), expected)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    main()
|
[
"unittest.main",
"tornado.escape.json_decode"
] |
[((7791, 7797), 'unittest.main', 'main', ([], {}), '()\n', (7795, 7797), False, 'from unittest import main\n'), ((2225, 2251), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (2236, 2251), False, 'from tornado.escape import json_decode\n'), ((4223, 4249), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (4234, 4249), False, 'from tornado.escape import json_decode\n'), ((6829, 6855), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (6840, 6855), False, 'from tornado.escape import json_decode\n'), ((7137, 7163), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (7148, 7163), False, 'from tornado.escape import json_decode\n'), ((7696, 7722), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (7707, 7722), False, 'from tornado.escape import json_decode\n'), ((2459, 2485), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (2470, 2485), False, 'from tornado.escape import json_decode\n'), ((5076, 5102), 'tornado.escape.json_decode', 'json_decode', (['response.body'], {}), '(response.body)\n', (5087, 5102), False, 'from tornado.escape import json_decode\n')]
|
"""
Example script to show how to use Engine Node health
-get virtual engine or Layer3 firewall
-get health data for each node
-retrieve master engine from virtual engine health
"""
# Python Base Import
from smc import session
from smc.core.engines import Layer3VirtualEngine, Layer3Firewall
from smc.core.waiters import NodeStatusWaiter
from smc_info import *
def check_engine_nodes(engine, include_hardware):
    """Wait for every node of *engine* to come Online and print its health.

    include_hardware: also dump filesystem/logging/sandbox/interface stats
    (only meaningful for virtual engines in this example).
    """
    print("Check nodes status for {}...".format(engine))
    for node in engine.nodes:
        # Wait for node to be Online
        waiter = NodeStatusWaiter(node, "Online", max_wait=3)
        # Robustness fix: 'status' is defined even when the waiter is already
        # done on entry (the original raised NameError in that case).
        status = None
        while not waiter.done():
            status = waiter.result(5)
            print("Status after 5 sec wait: {}".format(status))
        print("Node:{} is {}".format(node, status))
        assert status is not None, "Node {} can't be contacted".format(node)

        if include_hardware:
            for stats in node.hardware_status.filesystem:
                print("hardware status.filesystem={}".format(stats))
            for stats in node.hardware_status.logging_subsystem:
                print("hardware status.logging_subsystem={}".format(stats))
            for stats in node.hardware_status.sandbox_subsystem:
                print("hardware status.sandbox_subsystem={}".format(stats))
            for stats in node.interface_status:
                print("interface status={}".format(stats))

        # For a plain (non-virtual) firewall the master node is expected to be None.
        print("health=>Master Node={}".format(node.health.master_node))
        print("health=>Node status={}".format(node.health.engine_node_status))
        print("health=>dyn up={}".format(node.health.dyn_up))
        # print all attributes
        print("health=>{}".format(node.health))


if __name__ == "__main__":
    session.login(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120, api_version=API_VERSION)
    print("session OK")

    try:
        # Same node-health report for both engines; only the virtual engine
        # additionally dumps hardware/interface statistics.
        check_engine_nodes(Layer3VirtualEngine("Dubai Virtual 1"), include_hardware=True)
        check_engine_nodes(Layer3Firewall("Plano"), include_hardware=False)
    except Exception as e:
        print(e)
        exit(-1)
    finally:
        # Always release the SMC session, even on failure.
        session.logout()
|
[
"smc.session.login",
"smc.core.waiters.NodeStatusWaiter",
"smc.core.engines.Layer3VirtualEngine",
"smc.session.logout",
"smc.core.engines.Layer3Firewall"
] |
[((395, 494), 'smc.session.login', 'session.login', ([], {'url': 'SMC_URL', 'api_key': 'API_KEY', 'verify': '(False)', 'timeout': '(120)', 'api_version': 'API_VERSION'}), '(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120,\n api_version=API_VERSION)\n', (408, 494), False, 'from smc import session\n'), ((543, 581), 'smc.core.engines.Layer3VirtualEngine', 'Layer3VirtualEngine', (['"""Dubai Virtual 1"""'], {}), "('Dubai Virtual 1')\n", (562, 581), False, 'from smc.core.engines import Layer3VirtualEngine, Layer3Firewall\n'), ((1845, 1868), 'smc.core.engines.Layer3Firewall', 'Layer3Firewall', (['"""Plano"""'], {}), "('Plano')\n", (1859, 1868), False, 'from smc.core.engines import Layer3VirtualEngine, Layer3Firewall\n'), ((2709, 2725), 'smc.session.logout', 'session.logout', ([], {}), '()\n', (2723, 2725), False, 'from smc import session\n'), ((741, 785), 'smc.core.waiters.NodeStatusWaiter', 'NodeStatusWaiter', (['node', '"""Online"""'], {'max_wait': '(3)'}), "(node, 'Online', max_wait=3)\n", (757, 785), False, 'from smc.core.waiters import NodeStatusWaiter\n'), ((2018, 2062), 'smc.core.waiters.NodeStatusWaiter', 'NodeStatusWaiter', (['node', '"""Online"""'], {'max_wait': '(3)'}), "(node, 'Online', max_wait=3)\n", (2034, 2062), False, 'from smc.core.waiters import NodeStatusWaiter\n')]
|
import requests
import json
class Client:
    """Thin wrapper around the Aletheia financial-data REST API.

    Every method builds a query URL against ``self.base``, issues a GET
    with the account *key*, and returns either the decoded JSON payload
    or — for the version/count/consumption endpoints — the raw text body.
    """

    def __init__(self, key):
        self.key = key  # API key appended to every authenticated request
        self.base = "https://api.aletheiaapi.com/"

    def _get_json(self, url):
        # Shared helper: GET *url* and decode the JSON response body.
        return json.loads(requests.get(url).text)

    def StockData(self, symbol, summary=False, statistics=False):
        """Latest stock data for *symbol*; optional summary/statistics blocks."""
        url = self.base + f"StockData?key={self.key}&symbol={symbol}"
        if summary:
            url += "&summary=true"
        if statistics:
            url += "&statistics=true"
        return self._get_json(url)

    def Crypto(self, symbol):
        """Latest data for the cryptocurrency *symbol*."""
        return self._get_json(self.base + f"Crypto?key={self.key}&symbol={symbol}")

    def SearchEntities(self, term, top=0):
        """Search SEC entities matching *term*; *top* > 0 caps the result count."""
        url = self.base + f"SearchEntities?key={self.key}&term={term}"
        if top > 0:
            url += f"&top={top}"
        return self._get_json(url)

    def GetEntity(self, id):
        """Fetch a single SEC entity by its Aletheia *id*."""
        return self._get_json(self.base + f"GetEntity?key={self.key}&id={id}")

    def GetFiling(self, id="", url=""):
        """Fetch a filing either by Aletheia *id* or by SEC document *url*."""
        call = self.base + f"GetFiling?key={self.key}"  # "call": "url" is a parameter here
        if len(id) == 0 and len(url) == 0:
            # Preserves the original behaviour: warn, then still issue the
            # parameter-less request and let the server reject it.
            print("Please specify either the id or url of the filing.")
        elif len(id) > 0:
            call += f"&id={id}"
        else:
            call += f"&url={url}"
        return self._get_json(call)

    def LatestTransactions(self, issuer="", owner="", top=20, before=None, securitytype=-1, transactiontype=-1, cascade=False):
        """Latest insider transactions, filtered by any combination of arguments."""
        url = self.base + f"LatestTransactions?key={self.key}&top={top}"
        if len(issuer) > 0:
            url += f"&issuer={issuer}"
        if len(owner) > 0:
            url += f"&owner={owner}"
        if before is not None:
            url += f"&before={before}"
        if securitytype >= 0:
            url += f"&securitytype={securitytype}"
        if transactiontype >= 0:
            url += f"&transactiontype={transactiontype}"
        if cascade:
            url += "&cascade"
        return self._get_json(url)

    def AffiliatedOwners(self, id):
        """Owners affiliated with the entity *id*."""
        return self._get_json(self.base + f"AffiliatedOwners?key={self.key}&id={id}")

    def CommonFinancials(self, id, period=None, before=None):
        """Common financials for entity *id*, optionally scoped by period/date."""
        url = self.base + f"CommonFinancials?key={self.key}&id={id}"
        if period is not None:
            url += f"&period={period}"
        if before is not None:
            url += f"&before={before}"
        return self._get_json(url)

    def FinancialFactTrend(self, id, label, period=None, after=None, before=None):
        """Historical trend of financial fact *label* for entity *id*."""
        url = self.base + f"FinancialFactTrend?key={self.key}&id={id}&label={label}"
        if period is not None:
            url += f"&period={period}"
        if after is not None:
            url += f"&after={after}"
        if before is not None:
            url += f"&before={before}"
        return self._get_json(url)

    def SearchEarningsCalls(self, company=None, year=0, quarter=None, top=15):
        """Search earnings calls by company/year/quarter."""
        url = self.base + f"SearchEarningsCalls?key={self.key}&top={top}"
        if company is not None:
            url += f"&company={company}"
        if year > 0:
            url += f"&year={year}"
        if quarter is not None:
            url += f"&quarter={quarter}"
        return self._get_json(url)

    def EarningsCall(self, company, year, quarter, begin=None, end=None):
        """Transcript of one earnings call, optionally sliced by begin/end."""
        url = self.base + f"EarningsCall?key={self.key}&company={company}&year={year}&quarter={quarter}"
        if begin is not None:
            url += f"&begin={begin}"
        if end is not None:
            url += f"&end={end}"
        return self._get_json(url)

    def EarningsCallHighlights(self, company, year, quarter, category=None):
        """Highlights of one earnings call, optionally filtered by *category*."""
        url = self.base + f"EarningsCallHighlights?key={self.key}&company={company}&year={year}&quarter={quarter}"
        if category is not None:
            url += f"&category={category}"
        return self._get_json(url)

    def EntityFilings(self, id, filing=None, before=None):
        """Filings for entity *id*, optionally filtered by type and date."""
        # BUG FIX: the original emitted "&id:={id}" (stray colon), producing a
        # malformed query parameter the server could not interpret.
        url = self.base + f"EntityFilings?key={self.key}&id={id}"
        if filing is not None:
            url += f"&filing={filing}"
        if before is not None:
            url += f"&before={before}"
        return self._get_json(url)

    def OpenForm4(self, filingurl):
        """Parse the SEC Form 4 located at *filingurl*."""
        return self._get_json(self.base + f"OpenForm4?key={self.key}&filingurl={filingurl}")

    def OpenCommonFinancials(self, filingurl):
        """Parse common financials from the filing at *filingurl*."""
        return self._get_json(self.base + f"OpenCommonFinancials?key={self.key}&filingurl={filingurl}")

    def consumption(self, begin=None, end=None, year=None, month=None):
        """API-consumption report for this key. Returns the raw response text.

        BUG FIX: the original concatenated optional parameters with no '&'
        separator (e.g. "...?key=Kbegin=..."), yielding a malformed URL —
        the reason this endpoint "didn't work yet".
        """
        url = self.base + f"consumption?key={self.key}"
        if begin is not None:
            url += f"&begin={begin}"
        if end is not None:
            url += f"&end={end}"
        if year is not None:
            url += f"&year={year}"
        if month is not None:
            url += f"&month={month}"
        return requests.get(url).text

    def mycalls(self, last=None):
        """Recent API calls made with this key."""
        url = self.base + f"mycalls?key={self.key}"
        if last is not None:
            url += f"&last={last}"
        return self._get_json(url)

    def version(self):
        """Current API version string (raw text; no key required)."""
        return requests.get(self.base + "version").text

    def CountSecEntities(self, onlyco=False):
        """Number of SEC entities known to Aletheia; *onlyco* restricts to companies."""
        url = self.base + "CountSecEntities"
        if onlyco:
            url += "?onlyco=true"
        return self._get_json(url)

    def CountSecFilings(self):
        """Number of indexed SEC filings (raw text)."""
        return requests.get(self.base + "CountSecFilings").text

    def CountTransactions(self):
        """Number of indexed insider transactions (raw text)."""
        return requests.get(self.base + "CountTransactions").text

    def CountFactContexts(self):
        """Number of indexed financial fact contexts (raw text)."""
        return requests.get(self.base + "CountFactContexts").text

    def CountFinancialFacts(self, id=""):
        """Number of financial facts, optionally for a single entity (raw text)."""
        url = self.base + "CountFinancialFacts"
        if len(id) > 0:
            url += f"?id={id}"
        return requests.get(url).text
|
[
"requests.get"
] |
[((5129, 5146), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5141, 5146), False, 'import requests\n'), ((5416, 5451), 'requests.get', 'requests.get', (["(self.base + 'version')"], {}), "(self.base + 'version')\n", (5428, 5451), False, 'import requests\n'), ((5724, 5767), 'requests.get', 'requests.get', (["(self.base + 'CountSecFilings')"], {}), "(self.base + 'CountSecFilings')\n", (5736, 5767), False, 'import requests\n'), ((5839, 5884), 'requests.get', 'requests.get', (["(self.base + 'CountTransactions')"], {}), "(self.base + 'CountTransactions')\n", (5851, 5884), False, 'import requests\n'), ((5952, 5997), 'requests.get', 'requests.get', (["(self.base + 'CountFactContexts')"], {}), "(self.base + 'CountFactContexts')\n", (5964, 5997), False, 'import requests\n'), ((6172, 6189), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6184, 6189), False, 'import requests\n'), ((428, 445), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (440, 445), False, 'import requests\n'), ((589, 606), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (601, 606), False, 'import requests\n'), ((815, 832), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (827, 832), False, 'import requests\n'), ((970, 987), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (982, 987), False, 'import requests\n'), ((1399, 1417), 'requests.get', 'requests.get', (['call'], {}), '(call)\n', (1411, 1417), False, 'import requests\n'), ((2051, 2068), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2063, 2068), False, 'import requests\n'), ((2216, 2233), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2228, 2233), False, 'import requests\n'), ((2529, 2546), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2541, 2546), False, 'import requests\n'), ((2945, 2962), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2957, 2962), False, 'import requests\n'), ((3347, 3364), 'requests.get', 'requests.get', (['url'], {}), 
'(url)\n', (3359, 3364), False, 'import requests\n'), ((3704, 3721), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3716, 3721), False, 'import requests\n'), ((4027, 4044), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4039, 4044), False, 'import requests\n'), ((4339, 4356), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4351, 4356), False, 'import requests\n'), ((4515, 4532), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4527, 4532), False, 'import requests\n'), ((4713, 4730), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4725, 4730), False, 'import requests\n'), ((5344, 5361), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5356, 5361), False, 'import requests\n'), ((5636, 5653), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5648, 5653), False, 'import requests\n')]
|
import pandas as pd
import urllib.parse
import requests
# Base URL of the NP Classifier web service (beta host kept for reference).
SERVER_URL = "https://npclassifier.ucsd.edu/"
#SERVER_URL = "http://mingwangbeta.ucsd.edu:6541"
def test_heartbeat():
    """The model-metadata endpoint should answer with an HTTP success code."""
    response = requests.get("{}/model/metadata".format(SERVER_URL))
    response.raise_for_status()
def test():
    """Classify every sufficiently long SMILES in test.tsv against the service."""
    table = pd.read_csv("test.tsv", sep=",")
    for row in table.to_dict(orient="records"):
        smiles = str(row["smiles"])
        if len(smiles) <= 5:
            # Skip blanks/placeholders — same length threshold as before.
            continue
        request_url = "{}/classify?smiles={}".format(SERVER_URL, urllib.parse.quote(smiles))
        response = requests.get(request_url)
        response.raise_for_status()
        classification = response.json()
|
[
"pandas.read_csv",
"requests.get"
] |
[((242, 267), 'requests.get', 'requests.get', (['request_url'], {}), '(request_url)\n', (254, 267), False, 'import requests\n'), ((320, 352), 'pandas.read_csv', 'pd.read_csv', (['"""test.tsv"""'], {'sep': '""","""'}), "('test.tsv', sep=',')\n", (331, 352), True, 'import pandas as pd\n'), ((580, 605), 'requests.get', 'requests.get', (['request_url'], {}), '(request_url)\n', (592, 605), False, 'import requests\n')]
|
import sys
import argparse
import os
import math
import pandas as pd
from matplotlib import pyplot as plt
# Sample command line execution:
# python3.6 Azure-functions-cdf-builder.py --datadir "/home/ubuntu/data/Azure" --figuresdir "/home/ubuntu" --n 12
# Command-line interface: where the trace lives, where figures go, and how
# many daily files to aggregate.
parser = argparse.ArgumentParser(description = 'Building CDF of Azure functions invocations')

parser.add_argument('--datadir', dest = 'datadir',
                    action = 'store', default = None,
                    help = 'directory with the Azure functions invocations files')

parser.add_argument('--figuresdir', dest = 'figuresdir',
                    action = 'store', default = None,
                    help = 'directory to store the plots with resulting CDF')

parser.add_argument('--n', dest = 'number_of_files',
                    action = 'store', default = 14, type = int,
                    help = 'number of files to use from the dataset (default: 14)')

args = parser.parse_args()

# Both directories are mandatory; fail fast with a clear message.
if args.datadir is None:
    sys.exit('No data directory specified.')

if args.figuresdir is None:
    sys.exit('No directory to store figures specified.')
def file_id_to_str(file_id : int) -> str:
return '0' + str(file_id) if file_id < 10 else str(file_id)
filename_pattern_invocations = os.path.join(args.datadir, 'invocations_per_function_md.anon.d{}.csv')
data_collected = pd.DataFrame(columns = ['invocations', 'HashApp', 'HashFunction', 'minute_in_day']).set_index(['HashApp', 'HashFunction', 'minute_in_day'])
start_idx = 1
for file_id in range(start_idx, args.number_of_files + 1):
filename_invocations = filename_pattern_invocations.format(file_id_to_str(file_id))
invocations_data_raw = pd.read_csv(filename_invocations)
invocations_data_http = invocations_data_raw[invocations_data_raw.Trigger == 'http']
invocations_data = pd.melt(invocations_data_http, id_vars = ['HashApp', 'HashFunction'], value_vars = invocations_data_http.columns[4:]).rename(columns = {'variable': 'minute_in_day', 'value': 'invocations'})
invocations_data.set_index(['HashApp', 'HashFunction', 'minute_in_day'], inplace = True)
data_collected = data_collected.add(invocations_data / args.number_of_files, fill_value = 0)
data_collected = data_collected.groupby(['HashApp', 'minute_in_day']).max()
non_zero_invocations = data_collected[data_collected.invocations > 0]
X = [0] + sorted(non_zero_invocations.invocations.unique())
Y = pd.Series({0: 0}).append(non_zero_invocations.groupby('invocations')['invocations'].count().sort_index().cumsum())
Y /= max(Y)
zero_invocations_count = len(data_collected[data_collected.invocations == 0])
fig, ax = plt.subplots(1, 1, figsize = (8, 6))
percentiles = [0.99, 0.95, 0.90, 0.80, 0.50]
font = {'color': 'black', 'weight': 'normal', 'size': 8}
for percentile in percentiles:
ax.axhline(percentile, 0, 1.0, color = 'k', linestyle = 'dashed', lw = 0.5)
load_at_percentile = Y.index[abs(Y - percentile).argmin()]
ax.text(0, percentile + 0.002, f"{(int(percentile * 100))}th percentile (~{int(load_at_percentile)} ipm)", fontdict = font)
ax.plot(X, Y)
ax.set_xlabel(f'Load, invocations per minute (ipm).\nNot included {zero_invocations_count} cases of 0 invocations per minute.')
plt.ylim(0, 1.02)
for max_x in [math.ceil(max(X)), 10000, 5000, 2500, 2000, 1000]:
plt.xlim(-2, max_x)
plt.savefig(os.path.join(args.figuresdir, f'cdf-zoomed-{max_x}.png'), dpi = 600, bbox_inches='tight')
plt.close()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylim",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"pandas.Series",
"pandas.melt",
"os.path.join",
"sys.exit"
] |
[((264, 351), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Building CDF of Azure functions invocations"""'}), "(description=\n 'Building CDF of Azure functions invocations')\n", (287, 351), False, 'import argparse\n'), ((1254, 1324), 'os.path.join', 'os.path.join', (['args.datadir', '"""invocations_per_function_md.anon.d{}.csv"""'], {}), "(args.datadir, 'invocations_per_function_md.anon.d{}.csv')\n", (1266, 1324), False, 'import os\n'), ((2631, 2665), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (2643, 2665), True, 'from matplotlib import pyplot as plt\n'), ((3218, 3235), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.02)'], {}), '(0, 1.02)\n', (3226, 3235), True, 'from matplotlib import pyplot as plt\n'), ((3432, 3443), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3441, 3443), True, 'from matplotlib import pyplot as plt\n'), ((988, 1028), 'sys.exit', 'sys.exit', (['"""No data directory specified."""'], {}), "('No data directory specified.')\n", (996, 1028), False, 'import sys\n'), ((1062, 1114), 'sys.exit', 'sys.exit', (['"""No directory to store figures specified."""'], {}), "('No directory to store figures specified.')\n", (1070, 1114), False, 'import sys\n'), ((1672, 1705), 'pandas.read_csv', 'pd.read_csv', (['filename_invocations'], {}), '(filename_invocations)\n', (1683, 1705), True, 'import pandas as pd\n'), ((3305, 3324), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2)', 'max_x'], {}), '(-2, max_x)\n', (3313, 3324), True, 'from matplotlib import pyplot as plt\n'), ((1343, 1428), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['invocations', 'HashApp', 'HashFunction', 'minute_in_day']"}), "(columns=['invocations', 'HashApp', 'HashFunction',\n 'minute_in_day'])\n", (1355, 1428), True, 'import pandas as pd\n'), ((2414, 2433), 'pandas.Series', 'pd.Series', (['{(0): 0}'], {}), '({(0): 0})\n', (2423, 2433), True, 'import pandas as pd\n'), 
((3341, 3397), 'os.path.join', 'os.path.join', (['args.figuresdir', 'f"""cdf-zoomed-{max_x}.png"""'], {}), "(args.figuresdir, f'cdf-zoomed-{max_x}.png')\n", (3353, 3397), False, 'import os\n'), ((1819, 1936), 'pandas.melt', 'pd.melt', (['invocations_data_http'], {'id_vars': "['HashApp', 'HashFunction']", 'value_vars': 'invocations_data_http.columns[4:]'}), "(invocations_data_http, id_vars=['HashApp', 'HashFunction'],\n value_vars=invocations_data_http.columns[4:])\n", (1826, 1936), True, 'import pandas as pd\n')]
|
#!/usr/bin/python3
from pathlib import Path
import sys
import re
pattern = re.compile(r'takenCorrect: (\d+) takenIncorrect: (\d+) notTakenCorrect: (\d+) notTakenIncorrect: (\d+)')
def read_results_file(result_file):
with open(result_file) as f:
result_text = f.read()
m = pattern.match(result_text)
if not m:
raise ValueError('Invalid results file')
return [int(g) for g in m.groups()]
def print_results(results, title):
total_branches = sum(results)
total_correct = results[0] + results[2]
correct_rate = 100 * (total_correct / total_branches)
print(f'{title}:')
print(f'\tTaken Correct = {results[0]}')
print(f'\tTaken Incorrect = {results[1]}')
print(f'\tNot Taken Correct = {results[2]}')
print(f'\tNot Taken Incorrect = {results[3]}')
print(f'\n\tResults: {total_correct} / {total_branches} ({correct_rate:0.3f}%)\n')
if __name__ == '__main__':
if len(sys.argv) == 1:
results = read_results_file('result.out')
print_results(results, 'result.out')
sys.exit()
result_path = Path(sys.argv[1])
if result_path.is_dir():
total_results = [0, 0, 0, 0]
for result_file in result_path.glob('**/*.out'):
short_name = result_file.name.split('_')[0]
results = read_results_file(result_file)
print_results(results, short_name)
total_results = [t + r for t, r in zip(total_results, results)]
print_results(total_results, 'Total')
else:
short_name = result_path.name.split('_')[0]
results = read_results_file(result_path)
print_results(results, short_name)
|
[
"pathlib.Path",
"sys.exit",
"re.compile"
] |
[((76, 194), 're.compile', 're.compile', (['"""takenCorrect: (\\\\d+) takenIncorrect: (\\\\d+) notTakenCorrect: (\\\\d+) notTakenIncorrect: (\\\\d+)"""'], {}), "(\n 'takenCorrect: (\\\\d+) takenIncorrect: (\\\\d+) notTakenCorrect: (\\\\d+) notTakenIncorrect: (\\\\d+)'\n )\n", (86, 194), False, 'import re\n'), ((1019, 1036), 'pathlib.Path', 'Path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1023, 1036), False, 'from pathlib import Path\n'), ((990, 1000), 'sys.exit', 'sys.exit', ([], {}), '()\n', (998, 1000), False, 'import sys\n')]
|
import torch
from acquisition.acquisition_functions import expected_improvement
from acquisition.acquisition_marginalization import acquisition_expectation
import numpy as np
import cma
import time
import scipy.optimize as spo
from functools import partial
def continuous_acquisition_expectation(x_continuous, discrete_part, inference_samples, partition_samples,
n_vertices, acquisition_func, reference, batch=False):
if batch:
eval_x = torch.from_numpy(np.concatenate((np.tile(discrete_part, (len(x_continuous), 1)), x_continuous), axis = 1)).float()
results = acquisition_expectation(eval_x,inference_samples, partition_samples, n_vertices,acquisition_func, reference)
return np.array(results)
else:
eval_x = torch.from_numpy(np.concatenate((discrete_part, x_continuous))).float()
print(acquisition_expectation(eval_x, inference_samples, partition_samples, n_vertices,
acquisition_func, reference)[0].numpy())
return acquisition_expectation(eval_x, inference_samples, partition_samples, n_vertices,
acquisition_func, reference)[0].numpy()
def cma_es_optimizer(objective, x_init, max_acquisition, inference_samples, partition_samples, n_vertices, acquisition_func=expected_improvement, reference=None):
cont_bounds = [objective.problem.lower_bounds[objective.num_discrete:], objective.problem.upper_bounds[objective.num_discrete:]]
start_time = time.time()
es = cma.CMAEvolutionStrategy(x0=x_init[objective.num_discrete:],sigma0=0.1,inopts={'bounds': cont_bounds, "popsize": 50},)
iter = 1
total_time_in_acq = 0
while not es.stop():
iter += 1
xs = es.ask()
X = torch.tensor(xs).float()
# evaluate the acquisition function (optimizer assumes we're minimizing)
temp_time = time.time()
Y = -1 * continuous_acquisition_expectation(xs, x_init[:objective.num_discrete].numpy(),
inference_samples, partition_samples, n_vertices, acquisition_func, reference, batch=True)
total_time_in_acq += time.time() - temp_time
es.tell(xs, Y) # return the result to the optimizer
if (iter > 10):
break
best_x = torch.from_numpy(es.best.x).float()
if -1*es.best.f > max_acquisition:
return torch.cat((x_init[:objective.num_discrete], best_x), dim=0), -1*es.best.f
else:
return x_init, max_acquisition
|
[
"numpy.concatenate",
"cma.CMAEvolutionStrategy",
"torch.cat",
"time.time",
"numpy.array",
"acquisition.acquisition_marginalization.acquisition_expectation",
"torch.tensor",
"torch.from_numpy"
] |
[((1589, 1600), 'time.time', 'time.time', ([], {}), '()\n', (1598, 1600), False, 'import time\n'), ((1610, 1733), 'cma.CMAEvolutionStrategy', 'cma.CMAEvolutionStrategy', ([], {'x0': 'x_init[objective.num_discrete:]', 'sigma0': '(0.1)', 'inopts': "{'bounds': cont_bounds, 'popsize': 50}"}), "(x0=x_init[objective.num_discrete:], sigma0=0.1,\n inopts={'bounds': cont_bounds, 'popsize': 50})\n", (1634, 1733), False, 'import cma\n'), ((653, 767), 'acquisition.acquisition_marginalization.acquisition_expectation', 'acquisition_expectation', (['eval_x', 'inference_samples', 'partition_samples', 'n_vertices', 'acquisition_func', 'reference'], {}), '(eval_x, inference_samples, partition_samples,\n n_vertices, acquisition_func, reference)\n', (676, 767), False, 'from acquisition.acquisition_marginalization import acquisition_expectation\n'), ((776, 793), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (784, 793), True, 'import numpy as np\n'), ((1973, 1984), 'time.time', 'time.time', ([], {}), '()\n', (1982, 1984), False, 'import time\n'), ((2216, 2227), 'time.time', 'time.time', ([], {}), '()\n', (2225, 2227), False, 'import time\n'), ((2356, 2383), 'torch.from_numpy', 'torch.from_numpy', (['es.best.x'], {}), '(es.best.x)\n', (2372, 2383), False, 'import torch\n'), ((2446, 2505), 'torch.cat', 'torch.cat', (['(x_init[:objective.num_discrete], best_x)'], {'dim': '(0)'}), '((x_init[:objective.num_discrete], best_x), dim=0)\n', (2455, 2505), False, 'import torch\n'), ((1847, 1863), 'torch.tensor', 'torch.tensor', (['xs'], {}), '(xs)\n', (1859, 1863), False, 'import torch\n'), ((838, 883), 'numpy.concatenate', 'np.concatenate', (['(discrete_part, x_continuous)'], {}), '((discrete_part, x_continuous))\n', (852, 883), True, 'import numpy as np\n'), ((1100, 1214), 'acquisition.acquisition_marginalization.acquisition_expectation', 'acquisition_expectation', (['eval_x', 'inference_samples', 'partition_samples', 'n_vertices', 'acquisition_func', 'reference'], {}), '(eval_x, 
inference_samples, partition_samples,\n n_vertices, acquisition_func, reference)\n', (1123, 1214), False, 'from acquisition.acquisition_marginalization import acquisition_expectation\n'), ((907, 1021), 'acquisition.acquisition_marginalization.acquisition_expectation', 'acquisition_expectation', (['eval_x', 'inference_samples', 'partition_samples', 'n_vertices', 'acquisition_func', 'reference'], {}), '(eval_x, inference_samples, partition_samples,\n n_vertices, acquisition_func, reference)\n', (930, 1021), False, 'from acquisition.acquisition_marginalization import acquisition_expectation\n')]
|
from helpers import *
from collection_api import info, add_game, remove_game, lend_game
from cs50 import SQL
from flask import Flask, jsonify, render_template, request, url_for
from flask_jsglue import JSGlue
from flask_session import Session
from passlib.apps import custom_app_context as pwd_context
from tempfile import mkdtemp
import re
import json
from bgg_api import hot_list, info_games
# configure application
app = Flask(__name__)
JSGlue(app)
"""
# ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
"""
# configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# configure CS50 Library to use SQLite database
db = SQL("sqlite:///bgi.db")
@app.route("/")
def index():
return render_template("index.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user."""
# forget any user_id
session.clear()
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
inputEmail = request.form.get("email")
inputPassword = request.form.get("psw")
inputRepassword = request.form.get("pswRepeat")
if not inputEmail:
return apology("must provide email")
elif not re.match("^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$", inputEmail):
return apology("Invalid Email Format")
elif not inputPassword:
return apology("must provide password")
elif inputPassword != inputRepassword:
return apology("password and re-password don't match")
existEmail = db.execute("SELECT id FROM users WHERE email = :email", email = inputEmail)
if len(existEmail) > 0:
return apology("User already exist")
else:
#hash password
hashPassword = pwd_context.hash(inputPassword)
# insert in db
result = db.execute("INSERT INTO users (email, pwd) VALUES (:email, :pwd)", \
email = inputEmail, pwd = hashPassword)
if not result:
return apology("Error 10125 DB")
# remember which user has logged in
user_r = db.execute("SELECT * FROM users WHERE email = :email", email=inputEmail)
session["user_id"] = user_r[0]["id"]
# redirect user to home page
return redirect(url_for("index"))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("index.html")
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in."""
# forget any user_id
session.clear()
inputEmail = request.form.get("email")
inputPassword = request.form.get("<PASSWORD>")
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure username was submitted
if not inputEmail:
return apology("must provide email")
# ensure password was submitted
elif not inputPassword:
return apology("must provide password")
# query database for username
rows = db.execute("SELECT * FROM users WHERE email = :email", email=inputEmail)
# ensure username exists and password is correct
if len(rows) != 1 or not pwd_context.verify(inputPassword, rows[0]["pwd"]):
return apology("invalid username and/or password")
# remember which user has logged in
session["user_id"] = rows[0]["id"]
# redirect user to home page
return redirect(url_for("index"))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("index.html")
@app.route("/logout")
@login_required
def logout():
"""Log user out."""
# forget any user_id
session.clear()
# redirect user to login form
return redirect(url_for("index"))
@app.route("/usercheck", methods=["GET", "POST"])
def usercheck():
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
"""Check if user exist in DB"""
inputEmail = request.form.get("email")
# query database for username
existEmail = db.execute("SELECT id FROM users WHERE email = :email", email = inputEmail)
response = "true";
if len(existEmail) > 0:
response = "true";
return json.dumps({'status':'OK','response':response, 'db':existEmail});
else:
response = "false";
return json.dumps({'status':'OK','response':response, 'db':existEmail});
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("index.html")
@app.route("/search")
@login_required
def search():
query = request.args.get("q")
result = info_games(query)
return (result)
@app.route("/hot")
@login_required
def hot():
result = hot_list()
return (result)
@app.route("/search_boardgame")
@login_required
def search_boardgame():
return render_template("search.html")
@app.route("/collection", methods=["GET"])
@login_required
def collection():
option = request.args.get("option")
if option == "user_collection":
user_id = session.get("user_id")
result = info(user_id)
return result
elif option == "add_game":
game_id = request.args.get("id")
user_id = session.get("user_id")
check = db.execute("SELECT game_name FROM collections WHERE user_id = :user_id AND game_id = :game_id ;", \
user_id = user_id, game_id = int(game_id))
if len(check) == 0:
add_game(game_id, user_id)
return "0"
else:
return "game exist in collection"
elif option == "remove_game":
game_id = request.args.get("id")
user_id = session.get("user_id")
result = remove_game(game_id, user_id)
return result
elif option == "lend_game":
game_id = request.args.get("id")
user_id = session.get("user_id")
result = lend_game(game_id, user_id)
if result > 0:
return "Game Lend/Return"
else:
return "0"
else:
return "bad"
|
[
"flask_jsglue.JSGlue",
"flask.request.form.get",
"json.dumps",
"flask.url_for",
"passlib.apps.custom_app_context.hash",
"collection_api.info",
"flask.request.args.get",
"tempfile.mkdtemp",
"flask.render_template",
"collection_api.lend_game",
"bgg_api.info_games",
"flask_session.Session",
"re.match",
"collection_api.remove_game",
"passlib.apps.custom_app_context.verify",
"bgg_api.hot_list",
"flask.Flask",
"collection_api.add_game",
"cs50.SQL"
] |
[((426, 441), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (431, 441), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((442, 453), 'flask_jsglue.JSGlue', 'JSGlue', (['app'], {}), '(app)\n', (448, 453), False, 'from flask_jsglue import JSGlue\n'), ((822, 831), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (829, 831), False, 'from tempfile import mkdtemp\n'), ((914, 926), 'flask_session.Session', 'Session', (['app'], {}), '(app)\n', (921, 926), False, 'from flask_session import Session\n'), ((981, 1004), 'cs50.SQL', 'SQL', (['"""sqlite:///bgi.db"""'], {}), "('sqlite:///bgi.db')\n", (984, 1004), False, 'from cs50 import SQL\n'), ((1046, 1075), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1061, 1075), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((2985, 3010), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (3001, 3010), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((3031, 3061), 'flask.request.form.get', 'request.form.get', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (3047, 3061), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((5149, 5170), 'flask.request.args.get', 'request.args.get', (['"""q"""'], {}), "('q')\n", (5165, 5170), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((5184, 5201), 'bgg_api.info_games', 'info_games', (['query'], {}), '(query)\n', (5194, 5201), False, 'from bgg_api import hot_list, info_games\n'), ((5283, 5293), 'bgg_api.hot_list', 'hot_list', ([], {}), '()\n', (5291, 5293), False, 'from bgg_api import hot_list, info_games\n'), ((5398, 5428), 'flask.render_template', 'render_template', (['"""search.html"""'], {}), "('search.html')\n", (5413, 5428), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((5520, 5546), 'flask.request.args.get', 
'request.args.get', (['"""option"""'], {}), "('option')\n", (5536, 5546), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((1342, 1367), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (1358, 1367), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((1392, 1415), 'flask.request.form.get', 'request.form.get', (['"""psw"""'], {}), "('psw')\n", (1408, 1415), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((1442, 1471), 'flask.request.form.get', 'request.form.get', (['"""pswRepeat"""'], {}), "('pswRepeat')\n", (1458, 1471), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((2808, 2837), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2823, 2837), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((4017, 4046), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (4032, 4046), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((4226, 4242), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (4233, 4242), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((4479, 4504), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (4495, 4504), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((5054, 5083), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (5069, 5083), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((5641, 5654), 'collection_api.info', 'info', (['user_id'], {}), '(user_id)\n', (5645, 5654), False, 'from collection_api import info, add_game, remove_game, lend_game\n'), ((2138, 2169), 'passlib.apps.custom_app_context.hash', 'pwd_context.hash', (['inputPassword'], {}), 
'(inputPassword)\n', (2154, 2169), True, 'from passlib.apps import custom_app_context as pwd_context\n'), ((3892, 3908), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (3899, 3908), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((4750, 4818), 'json.dumps', 'json.dumps', (["{'status': 'OK', 'response': response, 'db': existEmail}"], {}), "({'status': 'OK', 'response': response, 'db': existEmail})\n", (4760, 4818), False, 'import json\n'), ((4881, 4949), 'json.dumps', 'json.dumps', (["{'status': 'OK', 'response': response, 'db': existEmail}"], {}), "({'status': 'OK', 'response': response, 'db': existEmail})\n", (4891, 4949), False, 'import json\n'), ((5726, 5748), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (5742, 5748), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((1567, 1644), 're.match', 're.match', (['"""^\\\\w+([\\\\.-]?\\\\w+)*@\\\\w+([\\\\.-]?\\\\w+)*(\\\\.\\\\w{2,3})+$"""', 'inputEmail'], {}), "('^\\\\w+([\\\\.-]?\\\\w+)*@\\\\w+([\\\\.-]?\\\\w+)*(\\\\.\\\\w{2,3})+$', inputEmail)\n", (1575, 1644), False, 'import re\n'), ((2683, 2699), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (2690, 2699), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((3628, 3677), 'passlib.apps.custom_app_context.verify', 'pwd_context.verify', (['inputPassword', "rows[0]['pwd']"], {}), "(inputPassword, rows[0]['pwd'])\n", (3646, 3677), True, 'from passlib.apps import custom_app_context as pwd_context\n'), ((5997, 6023), 'collection_api.add_game', 'add_game', (['game_id', 'user_id'], {}), '(game_id, user_id)\n', (6005, 6023), False, 'from collection_api import info, add_game, remove_game, lend_game\n'), ((6159, 6181), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (6175, 6181), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((6240, 6269), 
'collection_api.remove_game', 'remove_game', (['game_id', 'user_id'], {}), '(game_id, user_id)\n', (6251, 6269), False, 'from collection_api import info, add_game, remove_game, lend_game\n'), ((6342, 6364), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (6358, 6364), False, 'from flask import Flask, jsonify, render_template, request, url_for\n'), ((6423, 6450), 'collection_api.lend_game', 'lend_game', (['game_id', 'user_id'], {}), '(game_id, user_id)\n', (6432, 6450), False, 'from collection_api import info, add_game, remove_game, lend_game\n')]
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from transformer.Attention import Attention
class TransformerEncoderLayer(nn.Module):
r"""
Encoder Layer
"""
def __init__(self, d_model, n_heads, dim_feedforward=2048, attention_dropout_rate=0.1, projection_dropout_rate=0.1):
super(TransformerEncoderLayer, self).__init__()
self.pre_norm = nn.LayerNorm(d_model)
self.self_attn = Attention(dim=d_model, num_heads=n_heads,
attn_dropout_rate=attention_dropout_rate,
projection_dropout_rate=projection_dropout_rate)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout1 = nn.Dropout(projection_dropout_rate)
self.norm1 = nn.LayerNorm(d_model)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout2 = nn.Dropout(projection_dropout_rate)
# not using stochastic depth
# self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.activation = F.gelu
def forward(self, src: torch.Tensor, *args, **kwargs) -> torch.Tensor:
skip = src
src = self.pre_norm(src)
src = self.self_attn(src)
src = self.norm1(src)
# src = src + skip
skip = self.linear1(src)
skip = self.activation(skip)
skip = self.dropout1(skip)
skip = self.linear2(skip)
src = src + self.dropout2(skip)
return src
|
[
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"transformer.Attention.Attention",
"torch.nn.Linear"
] |
[((393, 414), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (405, 414), True, 'import torch.nn as nn\n'), ((440, 577), 'transformer.Attention.Attention', 'Attention', ([], {'dim': 'd_model', 'num_heads': 'n_heads', 'attn_dropout_rate': 'attention_dropout_rate', 'projection_dropout_rate': 'projection_dropout_rate'}), '(dim=d_model, num_heads=n_heads, attn_dropout_rate=\n attention_dropout_rate, projection_dropout_rate=projection_dropout_rate)\n', (449, 577), False, 'from transformer.Attention import Attention\n'), ((673, 708), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'dim_feedforward'], {}), '(d_model, dim_feedforward)\n', (682, 708), True, 'import torch.nn as nn\n'), ((733, 768), 'torch.nn.Dropout', 'nn.Dropout', (['projection_dropout_rate'], {}), '(projection_dropout_rate)\n', (743, 768), True, 'import torch.nn as nn\n'), ((790, 811), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (802, 811), True, 'import torch.nn as nn\n'), ((835, 870), 'torch.nn.Linear', 'nn.Linear', (['dim_feedforward', 'd_model'], {}), '(dim_feedforward, d_model)\n', (844, 870), True, 'import torch.nn as nn\n'), ((895, 930), 'torch.nn.Dropout', 'nn.Dropout', (['projection_dropout_rate'], {}), '(projection_dropout_rate)\n', (905, 930), True, 'import torch.nn as nn\n')]
|
""""
This is the main module for the CKAN-WIT.
It first imports the necessary packages from within python and its environs.
"""
import logging
import aiohttp
import asyncio
import requests
from urllib.error import URLError
from ckan_wit.src import uris
from ckan_wit.src import proxies
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(levelname)-5s: \n\t\t\t%(message)s: \n\t\t\t%(pathname)s: \n\t\t\t%(module)s: %(funcName)s\n')
file_handler = logging.FileHandler('wit.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
try:
ext_proxy = {
'http': proxies.ProxySetting().http_proxy,
'https': proxies.ProxySetting().https_proxy
}
except KeyError:
pass
def verify_acquire():
"""
This method handles the pattern check for the uris using some reqex syntax.
It ensures that the CKAN-API standard is a first class citizen, before it can be processed, whilst logging the errors found.
If no errors, it has to return:
:return: "number_of_portals", "verified_portals"
"""
verified_uris = []
ckan_standard_interface = "/api/3/action/package_search"
""" First verify that the URI is a valid URI and it is available"""
for uri in uris.ckan_opendata_portal_uris:
uri += ckan_standard_interface
try:
global res
res = requests.get(uri)
if res.status_code == 200:
verified_uris.append(uri)
except res.status_code != 200:
res = requests.get(uri, proxies=ext_proxy)
if res.status_code == 200:
verified_uris.append(uri)
try:
return {
"number_of_portals": len(verified_uris),
"verified_portals": verified_uris
}
except URLError:
logger.exception("ERROR:: The File (ckan_opendata_portals_urls.txt) is not Found.")
return {
"ERROR Info": "Please check the log file for details"
}
finally:
logger.info("SUCCESS - All referenced OpenData Portals have been successfully loaded for processing")
def ckan_wit_main():
portals_main = verify_acquire()
meta = dict()
async def fetch(session, portal, proxy):
async with session.get(portal, proxy=proxy) as response:
try:
assert response.status == 200, "API Standardization issues"
except AssertionError as err:
logger.exception("ERROR:: PORTAL UNAVAILABLE: {0}:".format(err))
except OSError as err:
logger.exception("ERROR:: Problem with OS: {0}".format(err))
except ValueError:
logger.exception("ERROR:: One or more values has been wrongly configured.")
else:
return await response.json()
async def main():
portals = portals_main["verified_portals"]
proxy = next(iter(ext_proxy.values()))
tasks = list()
async with aiohttp.ClientSession() as session:
for portal in portals:
tasks.append(fetch(session, portal, proxy))
resp = await asyncio.gather(*tasks)
resp_length = len(resp)
counter = 0
for res in resp:
if resp_length > counter:
counter = counter + 1
meta[counter] = res
loop = asyncio.new_event_loop() # this line is necessary for the edms flask app
# loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(main())
# sleep gracefully
loop.run_until_complete(asyncio.sleep(0))
# loop.close()
final_results = aggregate_filter_present(meta=meta)
if final_results:
logger.info("SUCCESS - Metadata successfully processed, aggregated, and formatted.\n\t\t\t See the docs on how to get started")
try:
return {"wit_resources": {"AFRICA": {"total_metadata": sum(wit_resources["africa"]["total_metadata"]), "wit_metadata": wit_resources["africa"]["wit_metadata"]},
"AMERICAS": {"total_metadata": sum(wit_resources["americas"]["total_metadata"]), "wit_metadata": wit_resources["americas"]["wit_metadata"]},
"ASIA": {"total_metadata": sum(wit_resources["asia"]["total_metadata"]), "wit_metadata": wit_resources["asia"]["wit_metadata"]},
"EUROPE": {"total_metadata": sum(wit_resources["europe"]["total_metadata"]), "wit_metadata": wit_resources["europe"]["wit_metadata"]},
"overall_total": sum(wit_resources["africa"]["total_metadata"]) + sum(wit_resources["americas"]["total_metadata"]) + sum(wit_resources[
"asia"][
"total_metadata"]) + sum(wit_resources["europe"]["total_metadata"])
}
}
except KeyError:
logger.exception("WARNING - The processing of one or more metadata was not successful.\n\t\tPlease see the logfile for more information")
def aggregate_filter_present(meta):
    # Aggregate the per-portal CKAN responses in `meta` into the module-global
    # `wit_resources` region summary.  Resources that advertise an 'access_url'
    # are collected into aggregated_results, resources that advertise a plain
    # 'url' into aggregated_results_2; both lists are merged and then bucketed
    # by region.  Keys of `meta` appear to be integer portal ids (1..8) -- see
    # the region dispatch near the bottom; confirm against the caller.
    temp_aggregator = {}
    temp2_aggregator = {}
    aggregated_results = list()
    aggregated_results_2 = list()
    # Only the first 10 result entries of every portal are inspected.
    # NOTE(review): presumably the API page size -- confirm against the prober.
    idx = list(range(10))
    for key, value in meta.items():
        default = 'url'
        link = 'access_url'
        if meta[key]['success']:
            for id_num in idx:
                x_values = list(range(meta[key]['result']['results'][id_num]['num_resources']))
                for x_val in x_values:
                    # Branch 1: the resource exposes an 'access_url' download link.
                    if "access_url" in meta[key]['result']['results'][id_num]['resources'][x_val]:
                        temp_aggregator.update({
                            key: {"total_metadata": meta[key]['result']['count'],
                                  "Metadata": {
                                      'num_resources': meta[key]['result']['results'][id_num]['num_resources'],
                                      'owner_organization': meta[key]['result']['results'][id_num]['organization']['description']['en'],
                                      'resource_group_title': meta[key]['result']['results'][id_num]['organization']['title'],
                                      "wit_resources": {"name": meta[key]['result']['results'][id_num]['resources'][x_val]['id'],
                                                        "file_format": meta[key]['result']['results'][id_num]['resources'][x_val]['format'],
                                                        "download_link": meta[key]['result']['results'][id_num]['resources'][x_val][link]
                                                        }
                                  }, }
                        })

                        # Recursively drop None values from a nested dict.
                        def clearNullNoneValues(d):
                            clearedValues = {}
                            for k, v in d.items():
                                if isinstance(v, dict):
                                    nested = clearNullNoneValues(v)
                                    if len(nested.keys()) > 0:
                                        clearedValues[k] = nested
                                elif v is not None:
                                    clearedValues[k] = v
                            return clearedValues
                        try:
                            aggregated_results.append(temp_aggregator.copy())
                        except AttributeError:
                            clearNullNoneValues(temp_aggregator.copy())
                            aggregated_results.append(temp_aggregator.copy())
                        except TypeError:
                            clearNullNoneValues(temp_aggregator.copy())
                            aggregated_results.append(temp_aggregator.copy())
                    # Branch 2: the resource only exposes a plain 'url' link.
                    if "url" in meta[key]['result']['results'][id_num]['resources'][x_val]:
                        try:
                            temp2_aggregator.update({
                                key: {"total_metadata": meta[key]['result']['count'],
                                      "Metadata": {
                                          'num_resources': meta[key]['result']['results'][id_num]['num_resources'],
                                          'license': meta[key]['result']['results'][id_num]['license_id'],
                                          'resource_group_title': meta[key]['result']['results'][id_num]['title'],
                                          'owner_organization': meta[key]['result']['results'][id_num]['organization']['name'],
                                          'owner_description': meta[key]['result']['results'][id_num]['organization']['description'],
                                          "wit_resources": {"name": meta[key]['result']['results'][id_num]['resources'][x_val]['name'],
                                                            "file_format": meta[key]['result']['results'][id_num]['resources'][x_val]['format'],
                                                            "download_link": meta[key]['result']['results'][id_num]['resources'][x_val][default]
                                                            }
                                      }, }
                            })
                        except TypeError:
                            pass

                        # Recursively drop None values from a nested dict
                        # (duplicate of clearNullNoneValues above).
                        def clearNullNoneValues_2(d):
                            cleared_values_2 = {}
                            for k, v in d.items():
                                if isinstance(v, dict):
                                    nested = clearNullNoneValues_2(v)
                                    if len(nested.keys()) > 0:
                                        cleared_values_2[k] = nested
                                elif v is not None:
                                    cleared_values_2[k] = v
                            return cleared_values_2
                        try:
                            aggregated_results_2.append(temp2_aggregator.copy())
                        except AttributeError:
                            clearNullNoneValues_2(temp2_aggregator.copy())
                            aggregated_results_2.append(temp2_aggregator.copy())
                        except TypeError:
                            # NOTE(review): this handler touches temp_aggregator /
                            # aggregated_results instead of the *_2 pair used by
                            # the two handlers above -- looks like a copy-paste
                            # slip from branch 1; confirm intent.
                            clearNullNoneValues_2(temp_aggregator.copy())
                            aggregated_results.append(temp_aggregator.copy())
        else:
            # Any single failed portal aborts the whole aggregation.
            logger.error("PORTAL ERROR - ONE or More of the Portals is not responding..\n\t\t\tPlease see the troubleshooting guide in the "
                         "documentation for assistance.")
            return "Please see the wit.log file"
    aggregated_results.extend(aggregated_results_2)
    global wit_resources
    wit_resources = dict()
    wit_resources['africa'] = {
        "total_metadata": [],
        "wit_metadata": []
    }
    wit_resources['americas'] = {
        "total_metadata": [],
        "wit_metadata": []
    }
    wit_resources['asia'] = {
        "total_metadata": [],
        "wit_metadata": []
    }
    wit_resources['europe'] = {
        "total_metadata": [],
        "wit_metadata": []
    }
    # Region dispatch: the number of portal keys an aggregate dict has
    # accumulated (1..8) selects the region bucket, and that same number is
    # used as the portal-id lookup key into the dict.
    for y in range(len(aggregated_results)):
        if len(aggregated_results[y]) == 1:
            wit_resources['europe']["total_metadata"].append(aggregated_results[y][1]['Metadata']['num_resources'])
            wit_resources['europe']["wit_metadata"].append(aggregated_results[y][1]['Metadata'])
        elif len(aggregated_results[y]) == 2:
            wit_resources['europe']["total_metadata"].append(aggregated_results[y][2]['Metadata']['num_resources'])
            wit_resources['europe']["wit_metadata"].append(aggregated_results[y][2]['Metadata'])
        elif len(aggregated_results[y]) == 3:
            wit_resources['americas']["total_metadata"].append(aggregated_results[y][3]['Metadata']['num_resources'])
            wit_resources['americas']["wit_metadata"].append(aggregated_results[y][3]['Metadata'])
        elif len(aggregated_results[y]) == 4:
            wit_resources['americas']["total_metadata"].append(aggregated_results[y][4]['Metadata']['num_resources'])
            wit_resources['americas']["wit_metadata"].append(aggregated_results[y][4]['Metadata'])
        elif len(aggregated_results[y]) == 5:
            wit_resources['asia']["total_metadata"].append(aggregated_results[y][5]['Metadata']['num_resources'])
            wit_resources['asia']["wit_metadata"].append(aggregated_results[y][5]['Metadata'])
        elif len(aggregated_results[y]) == 6:
            wit_resources['asia']["total_metadata"].append(aggregated_results[y][6]['Metadata']['num_resources'])
            wit_resources['asia']["wit_metadata"].append(aggregated_results[y][6]['Metadata'])
        elif len(aggregated_results[y]) == 7:
            wit_resources['africa']["total_metadata"].append(aggregated_results[y][7]['Metadata']['num_resources'])
            wit_resources['africa']["wit_metadata"].append(aggregated_results[y][7]['Metadata'])
        elif len(aggregated_results[y]) == 8:
            wit_resources['africa']["total_metadata"].append(aggregated_results[y][8]['Metadata']['num_resources'])
            wit_resources['africa']["wit_metadata"].append(aggregated_results[y][8]['Metadata'])
        else:
            logger.error("ERROR - One or more keys have been wrongly configured.\n\t\t\tPlease see the troubleshooting docs for assistance.")
            break
    try:
        return wit_resources
    except IndexError as err:
        logger.exception(err)
if __name__ == '__main__':
    # Allow running this module directly as a script.
    ckan_wit_main()
|
[
"asyncio.gather",
"ckan_wit.src.proxies.ProxySetting",
"logging.FileHandler",
"asyncio.sleep",
"asyncio.set_event_loop",
"logging.getLogger",
"logging.Formatter",
"aiohttp.ClientSession",
"requests.get",
"asyncio.new_event_loop"
] |
[((305, 332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'import logging\n'), ((375, 504), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(levelname)-5s: \n\t\t\t%(message)s: \n\t\t\t%(pathname)s: \n\t\t\t%(module)s: %(funcName)s\n"""'], {}), '(\n """%(asctime)s: %(levelname)-5s: \n\t\t\t%(message)s: \n\t\t\t%(pathname)s: \n\t\t\t%(module)s: %(funcName)s\n"""\n )\n', (392, 504), False, 'import logging\n'), ((519, 549), 'logging.FileHandler', 'logging.FileHandler', (['"""wit.log"""'], {}), "('wit.log')\n", (538, 549), False, 'import logging\n'), ((3430, 3454), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (3452, 3454), False, 'import asyncio\n'), ((3546, 3574), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (3568, 3574), False, 'import asyncio\n'), ((3663, 3679), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (3676, 3679), False, 'import asyncio\n'), ((659, 681), 'ckan_wit.src.proxies.ProxySetting', 'proxies.ProxySetting', ([], {}), '()\n', (679, 681), False, 'from ckan_wit.src import proxies\n'), ((711, 733), 'ckan_wit.src.proxies.ProxySetting', 'proxies.ProxySetting', ([], {}), '()\n', (731, 733), False, 'from ckan_wit.src import proxies\n'), ((1413, 1430), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (1425, 1430), False, 'import requests\n'), ((3024, 3047), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (3045, 3047), False, 'import aiohttp\n'), ((1569, 1605), 'requests.get', 'requests.get', (['uri'], {'proxies': 'ext_proxy'}), '(uri, proxies=ext_proxy)\n', (1581, 1605), False, 'import requests\n'), ((3180, 3202), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (3194, 3202), False, 'import asyncio\n')]
|
import collections
import logging
from pathlib import Path
import re
from unidecode import unidecode
from itertools import groupby
# Public API of this helper module.
__all__ = [
    "flatten",
    "PROJECT_ROOT",
    "FILE_NAME_CLEANER",
    "DUPE_SPECIAL_CHARS",
    "sanitize_name",
    "all_equal"
]
logger = logging.getLogger(__name__)
# Repository root, resolved two directory levels above this file.
PROJECT_ROOT = (Path(__file__).parent / ".." / "..").resolve()
# Any single character that is not a word character (letter, digit, '_').
FILE_NAME_CLEANER = re.compile(r'[^\w]')
# A run of separator characters ('_', '.', '-'), collapsed to its first char.
DUPE_SPECIAL_CHARS = re.compile(r'([_\.\-])[_\.\-]+')
# Leading / trailing non-alphanumeric characters, stripped by sanitize_name.
LEADING_SPECIAL = re.compile(r'^[^A-Za-z0-9]')
TRAILING_SPECIAL = re.compile(r'[^A-Za-z0-9]+$')
def flatten(d, parent_key='', sep='_'):
    """Flatten a nested mapping into a single-level dict.

    Nested keys are joined with *sep*, e.g. ``{'a': {'b': 1}}`` becomes
    ``{'a_b': 1}``.
    """
    flat = {}
    for key, value in d.items():
        full_key = sep.join((parent_key, key)) if parent_key else key
        if isinstance(value, collections.abc.MutableMapping):
            # Recurse into nested mappings, prefixing with the joined key.
            flat.update(flatten(value, full_key, sep=sep))
        else:
            flat[full_key] = value
    return flat
def sanitize_name(name: str) -> str:
    """Turn *name* into a safe, underscore-separated ASCII file name."""
    # Transliterate to ASCII and neutralise path separators first.
    ascii_name = unidecode(name).replace('/', '_')
    # Replace every non-word character with '_', then collapse runs of
    # separator characters and strip leading/trailing specials.
    cleaned = FILE_NAME_CLEANER.sub('_', ascii_name).replace(" ", "_")
    cleaned = DUPE_SPECIAL_CHARS.sub(r'\1', cleaned)
    cleaned = LEADING_SPECIAL.sub('', cleaned)
    return TRAILING_SPECIAL.sub('', cleaned)
def all_equal(iterable):
    """Return True when every element of *iterable* compares equal.

    An empty iterable counts as all-equal.
    """
    it = iter(iterable)
    try:
        first = next(it)
    except StopIteration:
        return True
    return all(item == first for item in it)
|
[
"unidecode.unidecode",
"pathlib.Path",
"itertools.groupby",
"logging.getLogger",
"re.compile"
] |
[((278, 305), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (295, 305), False, 'import logging\n'), ((391, 411), 're.compile', 're.compile', (['"""[^\\\\w]"""'], {}), "('[^\\\\w]')\n", (401, 411), False, 'import re\n'), ((433, 468), 're.compile', 're.compile', (['"""([_\\\\.\\\\-])[_\\\\.\\\\-]+"""'], {}), "('([_\\\\.\\\\-])[_\\\\.\\\\-]+')\n", (443, 468), False, 'import re\n'), ((484, 511), 're.compile', 're.compile', (['"""^[^A-Za-z0-9]"""'], {}), "('^[^A-Za-z0-9]')\n", (494, 511), False, 'import re\n'), ((532, 560), 're.compile', 're.compile', (['"""[^A-Za-z0-9]+$"""'], {}), "('[^A-Za-z0-9]+$')\n", (542, 560), False, 'import re\n'), ((1216, 1233), 'itertools.groupby', 'groupby', (['iterable'], {}), '(iterable)\n', (1223, 1233), False, 'from itertools import groupby\n'), ((323, 337), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (327, 337), False, 'from pathlib import Path\n'), ((983, 998), 'unidecode.unidecode', 'unidecode', (['name'], {}), '(name)\n', (992, 998), False, 'from unidecode import unidecode\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "<NAME>"
# Fix: __all__ previously listed 'Test_invoice_main_config', which does not
# exist in this module (the class below is Test_invoice_main_patterns), so
# `from <module> import *` would raise AttributeError.
__all__ = [
    'Test_invoice_main_patterns',
]
import datetime
import glob
import os
import tempfile
import unittest
from invoice.log import get_null_logger
from invoice.invoice_collection import InvoiceCollection
from invoice.invoice_main import invoice_main
from invoice.invoice_db import InvoiceDb
from invoice.database.db_types import Path
from invoice.string_printer import StringPrinter
class Test_invoice_main_patterns(unittest.TestCase):
    """Tests for the ``patterns`` subcommand of invoice_main.

    Each test initialises a fresh rc dir from the example documents and then
    exercises adding/removing, clearing, importing/exporting and editing
    patterns, comparing the printed pattern list against the expected text.

    Improvement over the previous version: the repetitive
    reset/run/normalise and rc-dir initialisation boilerplate is factored
    into the private helpers ``_run`` and ``_init_rc_dir``; test behaviour
    is unchanged.
    """

    PATTERNS_CLEAR = """\
patterns:
"""
    PATTERNS_DEFAULT = """\
patterns:
  + Pattern(pattern='<DIRNAME>/*.doc', skip=False)
"""
    PATTERNS_ADD_REMOVE = """\
patterns:
  + Pattern(pattern='<DIRNAME>/*.doc', skip=False)
  + Pattern(pattern='<DIRNAME>/*.Doc', skip=True)
  + Pattern(pattern='<DIRNAME>/*.DOC', skip=False)
"""

    def setUp(self):
        self.dirname = Path.db_to(os.path.join(os.path.dirname(__file__), '..', '..', 'example'))
        self.logger = get_null_logger()
        self.maxDiff = None

    def _run(self, printer, args):
        """Reset *printer*, run invoice_main with *args*, and return the
        printed output with the example dirname normalised to '<DIRNAME>'."""
        printer.reset()
        invoice_main(
            printer=printer,
            logger=self.logger,
            args=args,
        )
        return printer.string().replace(self.dirname, '<DIRNAME>')

    def _init_rc_dir(self, tmpdir, init_args=None):
        """Create an rc dir inside *tmpdir*, initialise it from the example
        documents (or from *init_args*) and return (rc_dir, printer)."""
        rc_dir = os.path.join(tmpdir, 'rc_dir')
        os.makedirs(rc_dir)
        printer = StringPrinter()
        if init_args is None:
            init_args = [os.path.join(self.dirname, '*.doc')]
        # Initialisation is expected to be silent.
        self.assertEqual(self._run(printer, ['init', '-R', rc_dir] + init_args), '')
        return rc_dir, printer

    def test_invoice_main_patterns_add_remove(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            rc_dir, p = self._init_rc_dir(tmpdir)
            out = self._run(p, ['patterns', '-R', rc_dir, '-a', '!example/*.Doc', '-a', 'example/*.DOC'])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE)
            out = self._run(p, ['patterns', '-R', rc_dir, '-x', '!example/*.Doc', '-x', 'example/*.DOC'])
            self.assertEqual(out, self.PATTERNS_DEFAULT)
            # Adding an already-known pattern must not create a duplicate.
            out = self._run(p, ['patterns', '-R', rc_dir, '-a', 'example/*.doc'])
            self.assertEqual(out, self.PATTERNS_DEFAULT)

    def test_invoice_main_patterns_clear(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            rc_dir, p = self._init_rc_dir(tmpdir)
            out = self._run(p, ['patterns', '-R', rc_dir, '--clear'])
            self.assertEqual(out, self.PATTERNS_CLEAR)

    def test_invoice_main_patterns_warning(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            # Initialise from the expanded file list instead of the glob pattern.
            init_args = list(glob.glob(os.path.join(self.dirname, '*.doc')))
            rc_dir, p = self._init_rc_dir(tmpdir, init_args=init_args)
            out = self._run(p, ['patterns', '-R', rc_dir, '--clear'])
            self.assertEqual(out, self.PATTERNS_CLEAR)

    def test_invoice_main_patterns_import_export(self):
        with tempfile.TemporaryDirectory() as tmpdir, tempfile.NamedTemporaryFile() as p_file:
            rc_dir, p = self._init_rc_dir(tmpdir)
            out = self._run(p, ['patterns', '-R', rc_dir, '-a', '!example/*.Doc', '-a', 'example/*.DOC'])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE)
            out = self._run(p, ['patterns', '-R', rc_dir])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE)
            out = self._run(p, ['patterns', '-R', rc_dir, '--export', p_file.name])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE)
            p_file.flush()
            out = self._run(p, ['patterns', '-R', rc_dir, '--clear'])
            self.assertEqual(out, self.PATTERNS_CLEAR)
            # Re-importing restores the exported pattern set.
            out = self._run(p, ['patterns', '-R', rc_dir, '--import', p_file.name])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE)

    def test_invoice_main_patterns_edit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            rc_dir, p = self._init_rc_dir(tmpdir)
            out = self._run(p, ['patterns', '-R', rc_dir, '-a', '!example/*.Doc', '-a', 'example/*.DOC'])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE)
            # The external editor rewrites 'DOC' to 'docx' in the pattern file.
            out = self._run(p, ['patterns', '-R', rc_dir, '--edit', '--editor', 'sed "s/DOC/docx/g" -i'])
            self.assertEqual(out, self.PATTERNS_ADD_REMOVE.replace('DOC', 'docx'))
|
[
"tempfile.NamedTemporaryFile",
"invoice.invoice_main.invoice_main",
"tempfile.TemporaryDirectory",
"os.makedirs",
"invoice.log.get_null_logger",
"os.path.dirname",
"invoice.string_printer.StringPrinter",
"os.path.join"
] |
[((1546, 1563), 'invoice.log.get_null_logger', 'get_null_logger', ([], {}), '()\n', (1561, 1563), False, 'from invoice.log import get_null_logger\n'), ((1659, 1688), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1686, 1688), False, 'import tempfile\n'), ((1721, 1751), 'os.path.join', 'os.path.join', (['tmpdir', '"""rc_dir"""'], {}), "(tmpdir, 'rc_dir')\n", (1733, 1751), False, 'import os\n'), ((1764, 1783), 'os.makedirs', 'os.makedirs', (['rc_dir'], {}), '(rc_dir)\n', (1775, 1783), False, 'import os\n'), ((1801, 1816), 'invoice.string_printer.StringPrinter', 'StringPrinter', ([], {}), '()\n', (1814, 1816), False, 'from invoice.string_printer import StringPrinter\n'), ((2105, 2232), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '-a', '!example/*.Doc', '-a', 'example/*.DOC']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '-a', '!example/*.Doc', '-a', 'example/*.DOC'])\n", (2117, 2232), False, 'from invoice.invoice_main import invoice_main\n'), ((2429, 2556), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '-x', '!example/*.Doc', '-x', 'example/*.DOC']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '-x', '!example/*.Doc', '-x', 'example/*.DOC'])\n", (2441, 2556), False, 'from invoice.invoice_main import invoice_main\n'), ((2799, 2902), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '-a', 'example/*.doc']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '-a', 'example/*.doc'])\n", (2811, 2902), False, 'from invoice.invoice_main import invoice_main\n'), ((3123, 3152), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3150, 3152), False, 'import tempfile\n'), ((3185, 3215), 
'os.path.join', 'os.path.join', (['tmpdir', '"""rc_dir"""'], {}), "(tmpdir, 'rc_dir')\n", (3197, 3215), False, 'import os\n'), ((3228, 3247), 'os.makedirs', 'os.makedirs', (['rc_dir'], {}), '(rc_dir)\n', (3239, 3247), False, 'import os\n'), ((3265, 3280), 'invoice.string_printer.StringPrinter', 'StringPrinter', ([], {}), '()\n', (3278, 3280), False, 'from invoice.string_printer import StringPrinter\n'), ((3569, 3660), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '--clear']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '--clear'])\n", (3581, 3660), False, 'from invoice.invoice_main import invoice_main\n'), ((3881, 3910), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3908, 3910), False, 'import tempfile\n'), ((3943, 3973), 'os.path.join', 'os.path.join', (['tmpdir', '"""rc_dir"""'], {}), "(tmpdir, 'rc_dir')\n", (3955, 3973), False, 'import os\n'), ((3986, 4005), 'os.makedirs', 'os.makedirs', (['rc_dir'], {}), '(rc_dir)\n', (3997, 4005), False, 'import os\n'), ((4023, 4038), 'invoice.string_printer.StringPrinter', 'StringPrinter', ([], {}), '()\n', (4036, 4038), False, 'from invoice.string_printer import StringPrinter\n'), ((4345, 4436), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '--clear']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '--clear'])\n", (4357, 4436), False, 'from invoice.invoice_main import invoice_main\n'), ((4663, 4692), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4690, 4692), False, 'import tempfile\n'), ((4704, 4733), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4731, 4733), False, 'import tempfile\n'), ((4766, 4796), 'os.path.join', 'os.path.join', (['tmpdir', '"""rc_dir"""'], {}), "(tmpdir, 'rc_dir')\n", (4778, 
4796), False, 'import os\n'), ((4809, 4828), 'os.makedirs', 'os.makedirs', (['rc_dir'], {}), '(rc_dir)\n', (4820, 4828), False, 'import os\n'), ((4846, 4861), 'invoice.string_printer.StringPrinter', 'StringPrinter', ([], {}), '()\n', (4859, 4861), False, 'from invoice.string_printer import StringPrinter\n'), ((5154, 5281), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '-a', '!example/*.Doc', '-a', 'example/*.DOC']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '-a', '!example/*.Doc', '-a', 'example/*.DOC'])\n", (5166, 5281), False, 'from invoice.invoice_main import invoice_main\n'), ((5481, 5557), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir]"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir])\n", (5493, 5557), False, 'from invoice.invoice_main import invoice_main\n'), ((5761, 5866), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '--export', p_file.name]"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '--export', p_file.name])\n", (5773, 5866), False, 'from invoice.invoice_main import invoice_main\n'), ((6090, 6181), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '--clear']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '--clear'])\n", (6102, 6181), False, 'from invoice.invoice_main import invoice_main\n'), ((6338, 6443), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '--import', p_file.name]"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '--import', p_file.name])\n", (6350, 6443), False, 'from invoice.invoice_main 
import invoice_main\n'), ((6666, 6695), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6693, 6695), False, 'import tempfile\n'), ((6728, 6758), 'os.path.join', 'os.path.join', (['tmpdir', '"""rc_dir"""'], {}), "(tmpdir, 'rc_dir')\n", (6740, 6758), False, 'import os\n'), ((6771, 6790), 'os.makedirs', 'os.makedirs', (['rc_dir'], {}), '(rc_dir)\n', (6782, 6790), False, 'import os\n'), ((6808, 6823), 'invoice.string_printer.StringPrinter', 'StringPrinter', ([], {}), '()\n', (6821, 6823), False, 'from invoice.string_printer import StringPrinter\n'), ((7116, 7243), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': "['patterns', '-R', rc_dir, '-a', '!example/*.Doc', '-a', 'example/*.DOC']"}), "(printer=p, logger=self.logger, args=['patterns', '-R', rc_dir,\n '-a', '!example/*.Doc', '-a', 'example/*.DOC'])\n", (7128, 7243), False, 'from invoice.invoice_main import invoice_main\n'), ((7443, 7570), 'invoice.invoice_main.invoice_main', 'invoice_main', ([], {'printer': 'p', 'logger': 'self.logger', 'args': '[\'patterns\', \'-R\', rc_dir, \'--edit\', \'--editor\', \'sed "s/DOC/docx/g" -i\']'}), '(printer=p, logger=self.logger, args=[\'patterns\', \'-R\', rc_dir,\n \'--edit\', \'--editor\', \'sed "s/DOC/docx/g" -i\'])\n', (7455, 7570), False, 'from invoice.invoice_main import invoice_main\n'), ((1473, 1498), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1488, 1498), False, 'import os\n'), ((1973, 2008), 'os.path.join', 'os.path.join', (['self.dirname', '"""*.doc"""'], {}), "(self.dirname, '*.doc')\n", (1985, 2008), False, 'import os\n'), ((3437, 3472), 'os.path.join', 'os.path.join', (['self.dirname', '"""*.doc"""'], {}), "(self.dirname, '*.doc')\n", (3449, 3472), False, 'import os\n'), ((5018, 5053), 'os.path.join', 'os.path.join', (['self.dirname', '"""*.doc"""'], {}), "(self.dirname, '*.doc')\n", (5030, 5053), False, 'import os\n'), ((6980, 7015), 
'os.path.join', 'os.path.join', (['self.dirname', '"""*.doc"""'], {}), "(self.dirname, '*.doc')\n", (6992, 7015), False, 'import os\n'), ((4212, 4247), 'os.path.join', 'os.path.join', (['self.dirname', '"""*.doc"""'], {}), "(self.dirname, '*.doc')\n", (4224, 4247), False, 'import os\n')]
|
#!/usr/bin/python3
import logging
import sys
from pathlib import Path
from logging.handlers import RotatingFileHandler
from minerwatch import (
DictConfig, Manager, ManagerConfig,
Dispatcher, DispatcherConfig,
EtherMineAPIProber, ProberConfig
)
class Defaults:
    """Built-in fallback values for configuration and CLI options."""

    config_path = Path.home().joinpath('.config', 'minerwatch.json')
    # data is cached on server for 2min and the max cap is 100req/15min
    timer_interval = 4.0
    abs_stale_treshold = 4.0
    delta_stale_treshold = 1.5
    hashrate_drop_treshold = 3.0

    @staticmethod
    def get_config_defaults():
        """Return a fresh dict holding the default config-file contents."""
        return {
            'address': '',
            'timer_interval': Defaults.timer_interval,
            'no_notify_abs_stale': False,
            'abs_stale_treshold': Defaults.abs_stale_treshold,
            'no_notify_delta_stale': False,
            'delta_stale_treshold': Defaults.delta_stale_treshold,
            'no_notify_hashrate_drop': False,
            'hashrate_drop_treshold': Defaults.hashrate_drop_treshold,
            'use_telegram': True,
            'tg_bot_token': '',
            'tg_chat_id': '',
        }
def parse_args():
    """Build the MinerWatch CLI parser and parse sys.argv into a namespace."""
    from argparse import ArgumentParser

    ap = ArgumentParser('MinerWatch')

    # General options.
    ap.add_argument('--debug', action='store_true',
                    help='Show verbose debug info')
    ap.add_argument('--log-file', type=str,
                    help='File to log into (default: STDOUT)')
    ap.add_argument('-a', '--addr', type=str,
                    help='Address to watch')
    ap.add_argument('--config', type=str, default=Defaults.config_path,
                    help=f'Path to config file (default: {str(Defaults.config_path)})')
    ap.add_argument('--timer-interval', type=float, default=Defaults.timer_interval,
                    help=f'Checks interval in minutes (min: 2.5), (default: {Defaults.timer_interval})')

    # Checks and notification thresholds.
    ap.add_argument('--no-notify-abs-stale', action='store_true',
                    help="Don't notify about absolute high stale shares percentage")
    ap.add_argument('--abs-stale-treshold', type=float, default=Defaults.abs_stale_treshold,
                    help=f'Absolute stale shares percentage to notify (default: {Defaults.abs_stale_treshold})')
    ap.add_argument('--no-notify-delta-stale', action='store_true',
                    help="Don't notify about stale shares high delta percentage")
    ap.add_argument('--delta-stale-treshold', type=float, default=Defaults.delta_stale_treshold,
                    help=f'Delta stale shares percentage to notify (default: {Defaults.delta_stale_treshold})')
    ap.add_argument('--no-notify-hashrate-drop', action='store_true',
                    help="Don't notify about hashrate drop")
    ap.add_argument('--hashrate-drop-treshold', type=float, default=Defaults.hashrate_drop_treshold,
                    help=f'Delta hashrate drop percentage to notify (default: {Defaults.hashrate_drop_treshold})')

    # Dispatcher options.
    ap.add_argument('--no-telegram', action='store_true',
                    help="Don't dispatch notifications to telegram")
    ap.add_argument('--tg-bot-token', type=str, help='Telegram bot token')
    ap.add_argument('--tg-chat-id', type=str, help='Telegram chat ID')

    return ap.parse_args()
def main():
    # Entry point: parse CLI args, merge with the JSON config, set up logging
    # and start the Manager.  Returns -1 on configuration errors, else the
    # Manager runs and the return value is None (exit status 0).
    # Parse args
    args = parse_args()
    # Logging: verbose when --debug; stdout unless --log-file was given.
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    handlers = [
        logging.StreamHandler(sys.stdout)
    ]
    if args.log_file:
        log_file = Path(args.log_file).resolve()
        log_file.parent.mkdir(exist_ok=True)
        # Rotate at 256 KiB, keeping up to 5 backups.
        handlers = [
            RotatingFileHandler(
                filename=log_file, mode='w',
                maxBytes=256 * 1024, backupCount=5, encoding='utf-8')
        ]
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p', level=log_level,
                        handlers=handlers)
    # Parse config
    conf = DictConfig(args.config, Defaults.get_config_defaults())
    # Prober configurations: CLI address overrides the config file.
    addr = conf['address']
    if args.addr:
        addr = args.addr
    if not addr:
        logging.error(
            'No address specified, please edit config file or pass address by argument')
        return -1  # exit
    # Dispatcher configurations
    use_telegram = conf['use_telegram']
    if args.no_telegram:
        use_telegram = False
    tg_bot_token = conf['tg_bot_token']
    if args.tg_bot_token:
        tg_bot_token = args.tg_bot_token
    tg_chat_id = conf['tg_chat_id']
    if args.tg_chat_id:
        tg_chat_id = args.tg_chat_id
    if use_telegram:
        if not tg_bot_token:
            logging.error(
                'No Telegram bot token specified, please edit config file or pass by argument')
            return -1
        if not tg_chat_id:
            logging.error(
                'No Telegram chat ID specified, please edit config file or pass by argument')
            return -1
    # Manager configurations
    # NOTE(review): argparse supplies non-zero defaults for --timer-interval
    # and the *-treshold options, so these truthiness checks are always taken
    # and the config-file values read just below are effectively shadowed by
    # the CLI defaults -- confirm whether that is intended.
    timer_interval = conf['timer_interval']
    if args.timer_interval:
        timer_interval = args.timer_interval
    abs_stale_treshold = conf['abs_stale_treshold']
    if args.abs_stale_treshold:
        abs_stale_treshold = args.abs_stale_treshold
    delta_stale_treshold = conf['delta_stale_treshold']
    if args.delta_stale_treshold:
        delta_stale_treshold = args.delta_stale_treshold
    hashrate_drop_treshold = conf['hashrate_drop_treshold']
    if args.hashrate_drop_treshold:
        hashrate_drop_treshold = args.hashrate_drop_treshold
    # Init Prober & Dispatcher and run Manager
    Manager(ManagerConfig(
        timer_interval,
        abs_stale_treshold,
        delta_stale_treshold,
        hashrate_drop_treshold
    ),
        EtherMineAPIProber(ProberConfig(
            addr
        )),
        Dispatcher(DispatcherConfig(
            use_telegram,
            tg_bot_token,
            tg_chat_id,
        ))
    )
if __name__ == '__main__':
    # Propagate main()'s return code (-1 on configuration errors) to the shell.
    exit(main())
|
[
"logging.error",
"minerwatch.ManagerConfig",
"argparse.ArgumentParser",
"logging.basicConfig",
"pathlib.Path.home",
"minerwatch.ProberConfig",
"logging.StreamHandler",
"pathlib.Path",
"minerwatch.DispatcherConfig",
"logging.handlers.RotatingFileHandler"
] |
[((1211, 1239), 'argparse.ArgumentParser', 'ArgumentParser', (['"""MinerWatch"""'], {}), "('MinerWatch')\n", (1225, 1239), False, 'from argparse import ArgumentParser\n'), ((3933, 4093), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""', 'level': 'log_level', 'handlers': 'handlers'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt=\n '%m/%d/%Y %I:%M:%S %p', level=log_level, handlers=handlers)\n", (3952, 4093), False, 'import logging\n'), ((3592, 3625), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (3613, 3625), False, 'import logging\n'), ((4344, 4444), 'logging.error', 'logging.error', (['"""No address specified, please edit config file or pass address by argument"""'], {}), "(\n 'No address specified, please edit config file or pass address by argument'\n )\n", (4357, 4444), False, 'import logging\n'), ((5819, 5918), 'minerwatch.ManagerConfig', 'ManagerConfig', (['timer_interval', 'abs_stale_treshold', 'delta_stale_treshold', 'hashrate_drop_treshold'], {}), '(timer_interval, abs_stale_treshold, delta_stale_treshold,\n hashrate_drop_treshold)\n', (5832, 5918), False, 'from minerwatch import DictConfig, Manager, ManagerConfig, Dispatcher, DispatcherConfig, EtherMineAPIProber, ProberConfig\n'), ((295, 306), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (304, 306), False, 'from pathlib import Path\n'), ((3782, 3888), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', ([], {'filename': 'log_file', 'mode': '"""w"""', 'maxBytes': '(256 * 1024)', 'backupCount': '(5)', 'encoding': '"""utf-8"""'}), "(filename=log_file, mode='w', maxBytes=256 * 1024,\n backupCount=5, encoding='utf-8')\n", (3801, 3888), False, 'from logging.handlers import RotatingFileHandler\n'), ((4868, 4971), 'logging.error', 'logging.error', (['"""No Telegram bot token specified, please edit config file 
or pass by argument"""'], {}), "(\n 'No Telegram bot token specified, please edit config file or pass by argument'\n )\n", (4881, 4971), False, 'import logging\n'), ((5040, 5141), 'logging.error', 'logging.error', (['"""No Telegram chat ID specified, please edit config file or pass by argument"""'], {}), "(\n 'No Telegram chat ID specified, please edit config file or pass by argument'\n )\n", (5053, 5141), False, 'import logging\n'), ((5981, 5999), 'minerwatch.ProberConfig', 'ProberConfig', (['addr'], {}), '(addr)\n', (5993, 5999), False, 'from minerwatch import DictConfig, Manager, ManagerConfig, Dispatcher, DispatcherConfig, EtherMineAPIProber, ProberConfig\n'), ((6043, 6099), 'minerwatch.DispatcherConfig', 'DispatcherConfig', (['use_telegram', 'tg_bot_token', 'tg_chat_id'], {}), '(use_telegram, tg_bot_token, tg_chat_id)\n', (6059, 6099), False, 'from minerwatch import DictConfig, Manager, ManagerConfig, Dispatcher, DispatcherConfig, EtherMineAPIProber, ProberConfig\n'), ((3673, 3692), 'pathlib.Path', 'Path', (['args.log_file'], {}), '(args.log_file)\n', (3677, 3692), False, 'from pathlib import Path\n')]
|
from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.solve.library.exception as _exception
AlreadyLoadedLibraryException = _exception.AlreadyLoadedLibraryException
LibraryException = _exception.LibraryException
NoSuchALibraryException = _exception.NoSuchALibraryException
logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.library.exception.*")
|
[
"tuprolog.logger.debug"
] |
[((375, 463), 'tuprolog.logger.debug', 'logger.debug', (['"""Loaded JVM classes from it.unibo.tuprolog.solve.library.exception.*"""'], {}), "(\n 'Loaded JVM classes from it.unibo.tuprolog.solve.library.exception.*')\n", (387, 463), False, 'from tuprolog import logger\n')]
|
import os
import math
import numpy as np
from PIL import Image
import skimage.transform as trans
import cv2
import torch
from data import dataset_info
from data.base_dataset import BaseDataset
import util.util as util
dataset_info = dataset_info()
class AllFaceDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--no_pairing_check', action='store_true',
help='If specified, skip sanity check of correct label-image file pairing')
return parser
def cv2_loader(self, img_str):
img_array = np.frombuffer(img_str, dtype=np.uint8)
return cv2.imdecode(img_array, cv2.IMREAD_COLOR)
def fill_list(self, tmp_list):
length = len(tmp_list)
if length % self.opt.batchSize != 0:
end = math.ceil(length / self.opt.batchSize) * self.opt.batchSize
tmp_list = tmp_list + tmp_list[-1 * (end - length) :]
return tmp_list
def initialize(self, opt):
self.opt = opt
dataset_num = dataset_info.get_dataset(opt)
self.prefix = [dataset_info.prefix[num] for num in dataset_num]
file_list = [dataset_info.file_list[num] for num in dataset_num]
land_mark_list = [dataset_info.land_mark_list[num] for num in dataset_num]
self.params_dir = [dataset_info.params_dir[num] for num in dataset_num]
self.folder_level = [dataset_info.folder_level[num] for num in dataset_num]
self.num_datasets = len(file_list)
assert len(land_mark_list) == self.num_datasets, \
'num of landmk dir should be the num of datasets'
assert len(self.params_dir) == self.num_datasets, \
'num of params_dir should be the num of datasets'
self.dataset_lists = []
self.landmark_paths = []
self.sizes = []
for n in range(self.num_datasets):
with open(file_list[n]) as f:
img_lists = f.readlines()
img_lists = self.fill_list(img_lists)
self.sizes.append(len(img_lists))
self.dataset_lists.append(sorted(img_lists))
with open(land_mark_list[n]) as f:
landmarks = f.readlines()
landmarks = self.fill_list(landmarks)
self.landmark_paths.append(sorted(landmarks))
self.dataset_size = min(self.sizes)
self.initialized = False
def get_landmarks(self, landmark, img_list):
landmark_split = landmark.strip().split(' ')
filename1_without_ext = os.path.basename(img_list.strip())
filename2_without_ext = os.path.basename(landmark_split[0])
assert (filename1_without_ext == filename2_without_ext), \
"The image_path %s and params_path %s don't match." % \
(img_list, landmark_split[0])
label = landmark_split[1]
landmarks = landmark_split[2:]
landmarks = list(map(float, landmarks))
landmarks_array = np.array(landmarks).reshape(5, 2)
return landmarks_array, label
def get_param_file(self, img_list, dataset_num):
img_name = os.path.splitext(img_list)[0]
name_split = img_name.split("/")
folder_level = self.folder_level[dataset_num]
param_folder = os.path.join(self.params_dir[dataset_num],
"/".join([name_split[i] for i in range(len(name_split) - folder_level, len(name_split))]) + ".txt")
# params = np.loadtxt(param_folder)
return param_folder
def paths_match(self, path1, path2):
filename1_without_ext = os.path.splitext(os.path.basename(path1)[-10:])[0]
filename2_without_ext = os.path.splitext(os.path.basename(path2)[-10:])[0]
return filename1_without_ext == filename2_without_ext
def affine_align(self, img, landmark=None, **kwargs):
M = None
h, w, c = img.shape
src = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041]], dtype=np.float32)
src = src * 290 / 112
src[:, 0] += 50
src[:, 1] += 60
src = src / 400 * self.opt.crop_size
dst = landmark
# dst = landmark.astype(np.float32)
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]
warped = cv2.warpAffine(img, M, (self.opt.crop_size, self.opt.crop_size), borderValue=0.0)
return warped, M
def __getitem__(self, index):
# Label Image
randnum = np.random.randint(sum(self.sizes))
dataset_num = np.random.randint(self.num_datasets)
image_path = self.dataset_lists[dataset_num][index].strip()
image_path = os.path.join(self.prefix[dataset_num], image_path)
img = cv2.imread(image_path)
if img is None:
raise Exception('None Image')
param_path = self.get_param_file(image_path, dataset_num)
# img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
M = None
landmark_path = self.landmark_paths[dataset_num][index].strip()
landmarks, label = self.get_landmarks(landmark_path, image_path)
wrapped_img, M = self.affine_align(img, landmarks)
M = torch.from_numpy(M).float()
wrapped_img = wrapped_img.transpose(2, 0, 1) / 255.0
wrapped_img = torch.from_numpy(wrapped_img).float()
input_dict = {
'image': wrapped_img,
'param_path': param_path,
'M': M,
'path': image_path
}
# Give subclasses a chance to modify the final output
self.postprocess(input_dict)
return input_dict
def postprocess(self, input_dict):
return input_dict
def __len__(self):
return self.dataset_size
|
[
"data.dataset_info",
"os.path.basename",
"cv2.cvtColor",
"numpy.frombuffer",
"math.ceil",
"cv2.imdecode",
"skimage.transform.SimilarityTransform",
"data.dataset_info.get_dataset",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.array",
"cv2.imread",
"os.path.splitext",
"os.path.join",
"torch.from_numpy"
] |
[((234, 248), 'data.dataset_info', 'dataset_info', ([], {}), '()\n', (246, 248), False, 'from data import dataset_info\n'), ((610, 648), 'numpy.frombuffer', 'np.frombuffer', (['img_str'], {'dtype': 'np.uint8'}), '(img_str, dtype=np.uint8)\n', (623, 648), True, 'import numpy as np\n'), ((664, 705), 'cv2.imdecode', 'cv2.imdecode', (['img_array', 'cv2.IMREAD_COLOR'], {}), '(img_array, cv2.IMREAD_COLOR)\n', (676, 705), False, 'import cv2\n'), ((1063, 1092), 'data.dataset_info.get_dataset', 'dataset_info.get_dataset', (['opt'], {}), '(opt)\n', (1087, 1092), False, 'from data import dataset_info\n'), ((2629, 2664), 'os.path.basename', 'os.path.basename', (['landmark_split[0]'], {}), '(landmark_split[0])\n', (2645, 2664), False, 'import os\n'), ((3927, 4060), 'numpy.array', 'np.array', (['[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, \n 92.3655], [70.7299, 92.2041]]'], {'dtype': 'np.float32'}), '([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [\n 41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)\n', (3935, 4060), True, 'import numpy as np\n'), ((4323, 4350), 'skimage.transform.SimilarityTransform', 'trans.SimilarityTransform', ([], {}), '()\n', (4348, 4350), True, 'import skimage.transform as trans\n'), ((4434, 4519), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(self.opt.crop_size, self.opt.crop_size)'], {'borderValue': '(0.0)'}), '(img, M, (self.opt.crop_size, self.opt.crop_size),\n borderValue=0.0)\n', (4448, 4519), False, 'import cv2\n'), ((4674, 4710), 'numpy.random.randint', 'np.random.randint', (['self.num_datasets'], {}), '(self.num_datasets)\n', (4691, 4710), True, 'import numpy as np\n'), ((4801, 4851), 'os.path.join', 'os.path.join', (['self.prefix[dataset_num]', 'image_path'], {}), '(self.prefix[dataset_num], image_path)\n', (4813, 4851), False, 'import os\n'), ((4867, 4889), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4877, 4889), False, 'import cv2\n'), ((5077, 5113), 
'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (5089, 5113), False, 'import cv2\n'), ((3135, 3161), 'os.path.splitext', 'os.path.splitext', (['img_list'], {}), '(img_list)\n', (3151, 3161), False, 'import os\n'), ((836, 874), 'math.ceil', 'math.ceil', (['(length / self.opt.batchSize)'], {}), '(length / self.opt.batchSize)\n', (845, 874), False, 'import math\n'), ((2990, 3009), 'numpy.array', 'np.array', (['landmarks'], {}), '(landmarks)\n', (2998, 3009), True, 'import numpy as np\n'), ((5347, 5366), 'torch.from_numpy', 'torch.from_numpy', (['M'], {}), '(M)\n', (5363, 5366), False, 'import torch\n'), ((5460, 5489), 'torch.from_numpy', 'torch.from_numpy', (['wrapped_img'], {}), '(wrapped_img)\n', (5476, 5489), False, 'import torch\n'), ((3630, 3653), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (3646, 3653), False, 'import os\n'), ((3713, 3736), 'os.path.basename', 'os.path.basename', (['path2'], {}), '(path2)\n', (3729, 3736), False, 'import os\n')]
|
'''
Created on 25 Jan 2018
@author: Slaporter
'''
import platform
def get_platform_info():
return (platform.platform())
if __name__ == '__main__':
get_platform_info()
|
[
"platform.platform"
] |
[((105, 124), 'platform.platform', 'platform.platform', ([], {}), '()\n', (122, 124), False, 'import platform\n')]
|
from bs4 import BeautifulSoup as bs
import os
import pandas as pd
import re
import csv
import io
result = {}
new = {}
id = 0
'''
result = {id:{'title':' ', 'abstract':' ', 'key_wordsZ':{'a','b','c'}, 'key_wordsE':{'a','b','c'},'authors': {'author1'} }}
'''
p = os.walk('知网html') # html文件夹路径
output_route = '../output/'
with open(output_route + "中文关键词.csv", "w", newline='') as csvfile: # 写csv文件表头
writer = csv.writer(csvfile)
writer.writerow(['title', 'key_words'])
with open(output_route + "英文关键词.csv", "w", newline='') as csvfile: # 写csv文件表头
writer = csv.writer(csvfile)
writer.writerow(['title', 'key_words'])
with open(output_route + "作者.csv", "w", newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['title', 'authors'])
with open(output_route + "标题摘要.csv", "w", newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['title', 'abstract'])
for path, dir_list, file_list in p:
for file_name in file_list:
print(file_name)
if file_name[-5:] == '.html':
try:
htmlfile = io.open('./知网html/' + file_name, 'r', encoding='utf-8')
htmlhandle = htmlfile.read()
soup = bs(htmlhandle, 'lxml')
result[file_name] = new # 第n个文献信息
# result = pd.DataFrame({}, index=[id]) #存储第n个文献的信息
# author = {}
result[file_name]['author'] = [] # {作者:研究机构} 多个作者
result[file_name]['title'] = '' # 文章标题
result[file_name]['abstract'] = '' # 文献摘要
result[file_name]['key_wordsZ'] = [] # 多个关键词
result[file_name]['key_wordsE'] = []
# author['Institute'] = ''
content = soup.find('div', class_='top-title') #解析标题
# print(content)
print(content.h1.text)
result[file_name]['title'] = content.h1.text.replace('"','').replace('\n', '').replace('\r', '')
data = soup.find('div', class_='data') #解析摘要
# print(data)
print(data.p.text)
result[file_name]['abstract'] = data.p.text.replace('\n', '').replace('\r', '')
data = soup.find('div', id='a_keywords') #解析关键词
da = soup.find('div', id="a_keywordsEN")
# print(data)
a = data.p.text
b = da.p.text
print(a)
print(b)
#print(a.splitlines()[1])
for i in a.splitlines():
result[file_name]['key_wordsZ'].append(i)
result[file_name]['key_wordsZ'] = filter(None, result[file_name]['key_wordsZ'])
for i in b.splitlines():
result[file_name]['key_wordsE'].append(i)
result[file_name]['key_wordsE'] = filter(None, result[file_name]['key_wordsE'])
content = soup.find('div', class_='content') #解析作者
print(content.h2.text)
for i in content.h2.text.splitlines():
result[file_name]['author'].append(i)
print(i)
result[file_name]['author'] = filter(None, result[file_name]['author'])
#new['author'] = content.h2.text
with open(output_route + "中文关键词.csv", "a", newline='') as csvfile: #把关键词写入csv文件
writer = csv.writer(csvfile)
for i in result[file_name]['key_wordsZ']:
list = [str(result[file_name]['title']).replace('\n', '').replace('\r', ''), str(i)]
writer.writerow(list)
with open(output_route + "英文关键词.csv", "a", newline='') as csvfile: #把关键词写入csv文件
writer = csv.writer(csvfile)
for i in result[file_name]['key_wordsE']:
list = [str(result[file_name]['title']).replace('\n', '').replace('\r', ''),str(i)]
writer.writerow(list)
with open(output_route + "作者.csv", "a", newline='') as csvfile:
writer = csv.writer(csvfile)
for i in result[file_name]['author']:
list = [str(result[file_name]['title']).replace('\n', '').replace('\r', ''), str(i)]
writer.writerow(list)
with open(output_route + "标题摘要.csv", "a", newline='') as csvfile:
writer = csv.writer(csvfile)
list = [result[file_name]['title'].replace('\n', '').replace('\r', ''), result[file_name]['abstract']]
writer.writerow(list)
print(result)
except:
print('异常')
print(result)
else:
pass
with open(output_route + "中文关键词.csv", "r") as csvfile: #把关键词写入csv文件
content = csvfile.read()
with open(output_route + "中文关键词.csv", "w", newline='') as csvfile:
csvfile.write(content.replace('"', '').replace(';',''))
with open(output_route + "英文关键词.csv", "r") as csvfile: #把关键词写入csv文件
content = csvfile.read()
with open(output_route + "英文关键词.csv", "w", newline='') as csvfile:
csvfile.write(content.replace('"', '').replace(';',''))
with open(output_route + "作者.csv", "r") as csvfile: #把关键词写入csv文件
content = csvfile.read()
with open(output_route + "作者.csv", "w", newline='') as csvfile:
csvfile.write(content.replace('"', '').replace(';',''))
with open(output_route + "标题摘要.csv", "r") as csvfile: #把关键词写入csv文件
content = csvfile.read()
with open(output_route + "标题摘要.csv", "w", newline='') as csvfile:
csvfile.write(content.replace('"', '').replace(';',''))
|
[
"bs4.BeautifulSoup",
"os.walk",
"csv.writer",
"io.open"
] |
[((265, 282), 'os.walk', 'os.walk', (['"""知网html"""'], {}), "('知网html')\n", (272, 282), False, 'import os\n'), ((417, 436), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (427, 436), False, 'import csv\n'), ((574, 593), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (584, 593), False, 'import csv\n'), ((715, 734), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (725, 734), False, 'import csv\n'), ((856, 875), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (866, 875), False, 'import csv\n'), ((1097, 1152), 'io.open', 'io.open', (["('./知网html/' + file_name)", '"""r"""'], {'encoding': '"""utf-8"""'}), "('./知网html/' + file_name, 'r', encoding='utf-8')\n", (1104, 1152), False, 'import io\n'), ((1221, 1243), 'bs4.BeautifulSoup', 'bs', (['htmlhandle', '"""lxml"""'], {}), "(htmlhandle, 'lxml')\n", (1223, 1243), True, 'from bs4 import BeautifulSoup as bs\n'), ((3446, 3465), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3456, 3465), False, 'import csv\n'), ((3810, 3829), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3820, 3829), False, 'import csv\n'), ((4156, 4175), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4166, 4175), False, 'import csv\n'), ((4502, 4521), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4512, 4521), False, 'import csv\n')]
|
from django.conf.urls import url
from djexperience.core.views import home, about
urlpatterns = [
url(r'^$', home, name='home'),
url(r'^about/$', about, name='about'),
]
|
[
"django.conf.urls.url"
] |
[((103, 131), 'django.conf.urls.url', 'url', (['"""^$"""', 'home'], {'name': '"""home"""'}), "('^$', home, name='home')\n", (106, 131), False, 'from django.conf.urls import url\n'), ((138, 174), 'django.conf.urls.url', 'url', (['"""^about/$"""', 'about'], {'name': '"""about"""'}), "('^about/$', about, name='about')\n", (141, 174), False, 'from django.conf.urls import url\n')]
|
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/4/13.
"""
from app import create_app
from tests.utils import get_authorization
__author__ = 'Allen7D'
app = create_app()
def test_create_auth_list():
with app.test_client() as client:
rv = client.post('/cms/auth/append', headers={
'Authorization': get_authorization()
}, json={
'group_id': 5,
'auth_ids': [1, 2, 3]
})
json_data = rv.get_json()
print(json_data)
def test_delete_auth_list():
with app.test_client() as client:
rv = client.post('/cms/auth/remove', headers={
'Authorization': get_authorization()
}, json={
'group_id': 5,
'auth_ids': [1, 2, 3]
})
json_data = rv.get_json()
print(json_data)
test_create_auth_list()
test_delete_auth_list()
|
[
"app.create_app",
"tests.utils.get_authorization"
] |
[((167, 179), 'app.create_app', 'create_app', ([], {}), '()\n', (177, 179), False, 'from app import create_app\n'), ((333, 352), 'tests.utils.get_authorization', 'get_authorization', ([], {}), '()\n', (350, 352), False, 'from tests.utils import get_authorization\n'), ((655, 674), 'tests.utils.get_authorization', 'get_authorization', ([], {}), '()\n', (672, 674), False, 'from tests.utils import get_authorization\n')]
|
"""
Demonstrates title normalization and parsing.
"""
import sys
import os
sys.path.insert(0, os.path.abspath(os.getcwd()))
from mw.api import Session
from mw.lib import title
# Normalize titles
title.normalize("foo bar")
# > "Foo_bar"
# Construct a title parser from the API
api_session = Session("https://en.wikipedia.org/w/api.php")
parser = title.Parser.from_api(api_session)
# Handles normalization
parser.parse("user:epochFail")
# > 2, "EpochFail"
# Handles namespace aliases
parser.parse("WT:foobar")
# > 5, "Foobar"
|
[
"os.getcwd",
"mw.lib.title.normalize",
"mw.api.Session",
"mw.lib.title.Parser.from_api"
] |
[((198, 224), 'mw.lib.title.normalize', 'title.normalize', (['"""foo bar"""'], {}), "('foo bar')\n", (213, 224), False, 'from mw.lib import title\n'), ((294, 339), 'mw.api.Session', 'Session', (['"""https://en.wikipedia.org/w/api.php"""'], {}), "('https://en.wikipedia.org/w/api.php')\n", (301, 339), False, 'from mw.api import Session\n'), ((349, 383), 'mw.lib.title.Parser.from_api', 'title.Parser.from_api', (['api_session'], {}), '(api_session)\n', (370, 383), False, 'from mw.lib import title\n'), ((111, 122), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (120, 122), False, 'import os\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START composer_bashoperator_python2]
import datetime
from airflow import models
from airflow.operators import bash_operator
yesterday = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(1),
datetime.datetime.min.time())
default_dag_args = {
# Setting start date as yesterday starts the DAG immediately when it is
# detected in the Cloud Storage bucket.
'start_date': yesterday,
}
with models.DAG(
'composer_sample_bashoperator_python2',
schedule_interval=datetime.timedelta(days=1),
default_args=default_dag_args) as dag:
run_python2 = bash_operator.BashOperator(
task_id='run_python2',
# This example runs a Python script from the data folder to prevent
# Airflow from attempting to parse the script as a DAG.
bash_command='python2 /home/airflow/gcs/data/python2_script.py',
)
# [END composer_bashoperator_python2]
|
[
"airflow.operators.bash_operator.BashOperator",
"datetime.timedelta",
"datetime.datetime.today",
"datetime.datetime.min.time"
] |
[((827, 855), 'datetime.datetime.min.time', 'datetime.datetime.min.time', ([], {}), '()\n', (853, 855), False, 'import datetime\n'), ((1231, 1350), 'airflow.operators.bash_operator.BashOperator', 'bash_operator.BashOperator', ([], {'task_id': '"""run_python2"""', 'bash_command': '"""python2 /home/airflow/gcs/data/python2_script.py"""'}), "(task_id='run_python2', bash_command=\n 'python2 /home/airflow/gcs/data/python2_script.py')\n", (1257, 1350), False, 'from airflow.operators import bash_operator\n'), ((771, 796), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (794, 796), False, 'import datetime\n'), ((799, 820), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (817, 820), False, 'import datetime\n'), ((1134, 1160), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1152, 1160), False, 'import datetime\n')]
|
import logging
from .field_parser import parse_field_row
from .parse_exception import ExcelParseException
LOGGER = logging.getLogger(__name__)
def is_empty_row(row):
if row[0].value == "":
return True
return False
def is_field_row(row):
"""
row: xlrd row object.
"""
if row[2].value != "":
return True
return False
def validate_register_row_empty_field(row, previous_context):
"""
row can be obtained by xlrd sheet.row() method.
It's a sequence of cell objects.
"""
context = previous_context.copy()
field_map = [
(2, "msb"),
(3, "lsb"),
(4, "field"),
(5, "access"),
(6, "default")
]
for (col, field_name) in field_map:
context.column = col
if row[col].value != "":
msg = "Field '%s' must be emtpy." % field_name
raise ExcelParseException(msg, context)
return
def parse_register_row(row, previous_context):
"""
row: xlrd row object. You can obtain it by sheet.row()
a sequence of cells.
"""
context = previous_context.copy()
validate_register_row_empty_field(row, context)
context.column = 0
try:
offset = int(row[0].value, 16)
except Exception as exc:
msg = "Parse offset error: {}.".format(exc)
raise ExcelParseException(msg, context)
name = row[1].value
description = "%s" % row[7].value
result = {
"offset": offset,
"name": name,
"description": description
}
return result
def validate_field(new_field, parsed_fields, previous_context):
context = previous_context
field_dict = new_field
field_dict_list = parsed_fields
fields = []
inserted = False
for field in field_dict_list:
if inserted:
fields.append(field)
overlapped = False
if field_dict["lsb"] > field["lsb"]:
if field_dict["lsb"] > field["msb"]:
fields.append(field)
else:
overlapped = True
elif field_dict["lsb"] == field["lsb"]:
overlapped = True
else:
if field_dict["msb"] < field["msb"]:
fields.append(field_dict)
fields.append(field)
inserted = True
else:
overlapped = True
if overlapped:
error_msg = "Fields overlap: \n{0}\n{1}".format(
field, field_dict)
raise ExcelParseException(error_msg, context)
return
def parse_register(sheet, start_row, previous_context):
context = previous_context.copy()
rowx = start_row
row = sheet.row(rowx)
context.row = rowx
register_dict = parse_register_row(row, context)
rowx = rowx + 1
row = sheet.row(rowx)
context.row = rowx
field_dict_list = []
while is_field_row(row):
field_dict = parse_field_row(row, context)
validate_field(field_dict, field_dict_list, context)
field_dict_list.append(field_dict)
if rowx < sheet.nrows - 1:
rowx = rowx + 1
row = sheet.row(rowx)
context.row = rowx
else:
break
if is_empty_row(row):
rowx += 1
else:
err_msg = \
"sheet {0} row {1} error: no blank row between registers".format(
sheet.name,
rowx + 1)
LOGGER.debug(err_msg)
msg = "No blank row between registers."
raise ExcelParseException(msg, context)
register_dict["fields"] = field_dict_list
return register_dict, rowx
|
[
"logging.getLogger"
] |
[((118, 145), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'import logging\n')]
|
#
# Copyright (C) 2019 Authlete, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
from authlete.django.handler.authorization_request_base_handler import AuthorizationRequestBaseHandler
from authlete.django.web.response_utility import ResponseUtility
from authlete.dto.authorization_action import AuthorizationAction
class AuthorizationRequestErrorHandler(AuthorizationRequestBaseHandler):
"""Handler for error cases of authorization reuqests.
A response from Authlete's /api/auth/authorization API contains an "action"
response parameter. When the value of the response parameter is neither
"NO_INTERACTION" nor "INTERACTION", the authorization request should be
handled as an error case. This class is a handler for such error cases.
"""
def __init__(self):
"""Constructor"""
super().__init__(None)
def handle(self, response):
"""Handle an error case of an authorization request.
This method returns None when response.action returns
AuthorizationAction.INTERACTION or AuthorizationAction.NO_INTERACTION.
In other cases, an instance of django.http.HttpResponse is returned.
Args:
response (authlete.dto.AuthorizationResponse)
Returns:
django.http.HttpResponse : An error response
Raises:
authlete.api.AuthleteApiException
"""
# 'action' in the response denotes the next action which the
# implementation of the authorization endpoint should take.
action = response.action
# The content of the response which should be returned to the
# user agent. The format varies depending on the action.
content = response.responseContent
if action == AuthorizationAction.INTERNAL_SERVER_ERROR:
# 500 Internal Server Error
return ResponseUtility.internalServerError(content)
elif action == AuthorizationAction.BAD_REQUEST:
# 400 Bad Request
return ResponseUtility.badRequest(content)
elif action == AuthorizationAction.LOCATION:
# 302 Found
return ResponseUtility.location(content)
elif action == AuthorizationAction.FORM:
# 200 OK
return ResponseUtility.okHtml(content)
elif action == AuthorizationAction.INTERACTION:
# This is not an error case. The implementation of the
# authorization endpoint should show an authorization
# page to the user.
return None
elif action == AuthorizationAction.NO_INTERACTION:
# This is not an error case. The implementation of the
# authorization endpoint should handle the authorization
# request without user interaction.
return None
else:
# 500 Internal Server Error
# Authlete's /api/auth/authorization API returned an unknown action.
return self.unknownAction('/api/auth/authorization')
|
[
"authlete.django.web.response_utility.ResponseUtility.location",
"authlete.django.web.response_utility.ResponseUtility.badRequest",
"authlete.django.web.response_utility.ResponseUtility.internalServerError",
"authlete.django.web.response_utility.ResponseUtility.okHtml"
] |
[((2413, 2457), 'authlete.django.web.response_utility.ResponseUtility.internalServerError', 'ResponseUtility.internalServerError', (['content'], {}), '(content)\n', (2448, 2457), False, 'from authlete.django.web.response_utility import ResponseUtility\n'), ((2563, 2598), 'authlete.django.web.response_utility.ResponseUtility.badRequest', 'ResponseUtility.badRequest', (['content'], {}), '(content)\n', (2589, 2598), False, 'from authlete.django.web.response_utility import ResponseUtility\n'), ((2695, 2728), 'authlete.django.web.response_utility.ResponseUtility.location', 'ResponseUtility.location', (['content'], {}), '(content)\n', (2719, 2728), False, 'from authlete.django.web.response_utility import ResponseUtility\n'), ((2818, 2849), 'authlete.django.web.response_utility.ResponseUtility.okHtml', 'ResponseUtility.okHtml', (['content'], {}), '(content)\n', (2840, 2849), False, 'from authlete.django.web.response_utility import ResponseUtility\n')]
|
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""An undefined type object."""
from __future__ import annotations
import typing
_T = typing.TypeVar("_T", covariant=True)
class UndefinedType:
"""An `UNDEFINED` type."""
__instance: typing.ClassVar[UndefinedType]
def __bool__(self) -> typing.Literal[False]:
return False
def __repr__(self) -> str:
return "UNDEFINED"
def __str__(self) -> str:
return "UNDEFINED"
def __new__(cls) -> UndefinedType:
try:
return cls.__instance
except AttributeError:
o = super().__new__(cls)
cls.__instance = o
return cls.__instance
Undefined: typing.Final[UndefinedType] = UndefinedType()
"""An undefined type for attribs that may be undefined and not None."""
UndefinedOr = typing.Union[UndefinedType, _T]
"""A union version of the Undefined type which can be undefined or any other type."""
|
[
"typing.TypeVar"
] |
[((1200, 1236), 'typing.TypeVar', 'typing.TypeVar', (['"""_T"""'], {'covariant': '(True)'}), "('_T', covariant=True)\n", (1214, 1236), False, 'import typing\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.autograd import Variable
from .lazy_variable import LazyVariable
from .non_lazy_variable import NonLazyVariable
def _inner_repeat(tensor, amt):
return tensor.unsqueeze(-1).repeat(amt, 1).squeeze(-1)
def _outer_repeat(tensor, amt):
return tensor.unsqueeze(-1).repeat(1, amt).view(-1)
class MatmulLazyVariable(LazyVariable):
    """Lazy representation of the matrix product ``lhs @ rhs``.

    The product is never materialized (except in :meth:`evaluate`): matmuls,
    transposes, index reads, and quad-form derivatives are all routed through
    the two factors, which are themselves LazyVariables.
    """
    def __init__(self, lhs, rhs):
        # Promote raw tensors to NonLazyVariable so both factors share the
        # LazyVariable interface.
        if not isinstance(lhs, LazyVariable):
            lhs = NonLazyVariable(lhs)
        if not isinstance(rhs, LazyVariable):
            rhs = NonLazyVariable(rhs)
        super(MatmulLazyVariable, self).__init__(lhs, rhs)
        self.lhs = lhs
        self.rhs = rhs
    def _matmul(self, rhs):
        # (L R) v == L (R v)
        return self.lhs._matmul(self.rhs._matmul(rhs))
    def _t_matmul(self, rhs):
        # (L R)^T v == R^T (L^T v)
        return self.rhs._t_matmul(self.lhs._t_matmul(rhs))
    def _quad_form_derivative(self, left_vecs, right_vecs):
        # Derivative of left_vecs^T (L R) right_vecs w.r.t. each factor,
        # delegated to the factors' own quad-form derivatives.
        if left_vecs.ndimension() == 1:
            left_vecs = left_vecs.unsqueeze(1)
            right_vecs = right_vecs.unsqueeze(1)
        right_vecs_times_rhs = self.rhs._matmul(right_vecs)
        left_vecs_times_lhs_t = self.lhs._t_matmul(left_vecs)
        left_grad, = self.lhs._quad_form_derivative(left_vecs, right_vecs_times_rhs)
        right_grad, = self.rhs._quad_form_derivative(left_vecs_times_lhs_t, right_vecs)
        return left_grad, right_grad
    def _size(self):
        # Product shape: rows of lhs x columns of rhs (with optional leading batch dim).
        if self.lhs.ndimension() > 2:
            return torch.Size((self.lhs.size(0), self.lhs.size(1), self.rhs.size(2)))
        else:
            return torch.Size((self.lhs.size(0), self.rhs.size(1)))
    def _transpose_nonbatch(self, *args):
        # (L R)^T == R^T L^T
        return self.__class__(
            self.rhs._transpose_nonbatch(), self.lhs._transpose_nonbatch()
        )
    def _batch_get_indices(self, batch_indices, left_indices, right_indices):
        # Entry (b, i, j) = sum_k L[b, i, k] * R[b, k, j]: gather the required
        # row of L and column of R for every requested entry, then reduce over
        # the shared inner dimension.
        outer_size = batch_indices.size(0)
        inner_size = self.lhs.size(-1)
        inner_indices = Variable(right_indices.data.new(inner_size))
        torch.arange(0, inner_size, out=inner_indices.data)
        left_vals = self.lhs._batch_get_indices(
            _outer_repeat(batch_indices, inner_size),
            _outer_repeat(left_indices, inner_size),
            _inner_repeat(inner_indices, outer_size),
        )
        right_vals = self.rhs._batch_get_indices(
            _outer_repeat(batch_indices, inner_size),
            _inner_repeat(inner_indices, outer_size),
            _outer_repeat(right_indices, inner_size),
        )
        return (left_vals.view(-1, inner_size) * right_vals.view(-1, inner_size)).sum(
            -1
        )
    def _get_indices(self, left_indices, right_indices):
        # Non-batch variant of _batch_get_indices.
        outer_size = left_indices.size(0)
        inner_size = self.lhs.size(-1)
        inner_indices = Variable(right_indices.data.new(inner_size))
        torch.arange(0, inner_size, out=inner_indices.data)
        left_vals = self.lhs._get_indices(
            _outer_repeat(left_indices, inner_size),
            _inner_repeat(inner_indices, outer_size),
        )
        right_vals = self.rhs._get_indices(
            _inner_repeat(inner_indices, outer_size),
            _outer_repeat(right_indices, inner_size),
        )
        return (left_vals.view(-1, inner_size) * right_vals.view(-1, inner_size)).sum(
            -1
        )
    def diag(self):
        # Fast path for two dense factors: diag(L R)[i] = sum_k L[i, k] * R[k, i].
        if (
            isinstance(self.lhs, NonLazyVariable)
            and isinstance(self.rhs, NonLazyVariable)
        ):
            return (self.lhs.tensor * self.rhs.tensor.transpose(-1, -2)).sum(-1)
        else:
            return super(MatmulLazyVariable, self).diag()
    def evaluate(self):
        # Materialize the full product as a dense tensor.
        return torch.matmul(self.lhs.evaluate(), self.rhs.evaluate())
|
[
"torch.arange"
] |
[((2150, 2201), 'torch.arange', 'torch.arange', (['(0)', 'inner_size'], {'out': 'inner_indices.data'}), '(0, inner_size, out=inner_indices.data)\n', (2162, 2201), False, 'import torch\n'), ((2974, 3025), 'torch.arange', 'torch.arange', (['(0)', 'inner_size'], {'out': 'inner_indices.data'}), '(0, inner_size, out=inner_indices.data)\n', (2986, 3025), False, 'import torch\n')]
|
"""
We are given two sentences A and B. (A sentence is a string of space separated words.
Each word consists only of lowercase letters.) A word is uncommon if it appears exactly
once in one of the sentences, and does not appear in the other sentence. Return a list
of all uncommon words. You may return the list in any order.
"""
from typing import List
from collections import Counter
class Solution:
    def uncommonFromSentences(self, A: str, B: str) -> List[str]:
        """Return every word that occurs exactly once across both sentences combined.

        A word appearing once in each sentence has a combined count of 2 and is
        therefore (correctly) excluded.
        """
        counts = Counter(A.split() + B.split())
        return [word for word, freq in counts.items() if freq == 1]
if __name__ == "__main__":
    # Smoke tests from the problem statement. Comparison is order-insensitive
    # (set equality plus length check) because the answer may come in any order.
    A = "this apple is sweet"
    B = "this apple is sour"
    out = ["sweet","sour"]
    res = Solution().uncommonFromSentences(A, B)
    assert set(res) == set(out) and len(res) == len(out)
    A = "apple apple"
    B = "banana"
    out = ["banana"]
    res = Solution().uncommonFromSentences(A, B)
    assert set(res) == set(out) and len(res) == len(out)
|
[
"collections.Counter"
] |
[((538, 550), 'collections.Counter', 'Counter', (['tmp'], {}), '(tmp)\n', (545, 550), False, 'from collections import Counter\n')]
|
#!/bin/python3
import os
import sys
from collections import deque
LOCAL_INPUT = "ON"
class CitiesAndRoads:
    """Undirected-graph helper for the 'Roads and Libraries' problem.

    Nodes are city ids; edges are stored as an adjacency mapping
    node id -> set of neighbour ids. Callers add both edge directions.
    """
    def __init__(self):
        self.nodesToEdges = {}

    def addNode(self, nodeId):
        """Register a city with no roads yet."""
        self.nodesToEdges[nodeId] = set()

    def addEdge(self, startNodeId, endNodeId):
        """Record a single directed road (callers insert both directions)."""
        self.nodesToEdges[startNodeId].add(endNodeId)

    def visitConnectableNodes(self, startNodeId):
        """Breadth-first search: return the set of nodes reachable from ``startNodeId``."""
        seen = {startNodeId}
        frontier = deque([startNodeId])
        while frontier:
            node = frontier.pop()
            for neighbour in self.nodesToEdges[node]:
                if neighbour not in seen:
                    seen.add(neighbour)
                    frontier.appendleft(neighbour)
        return seen

    def countGroupsOfConnectableNodes(self, startNodeId):
        """Count connected components (``startNodeId`` is unused; kept for signature parity)."""
        remaining = set(self.nodesToEdges)
        components = 0
        while remaining:
            # Peel one whole component off per iteration.
            remaining -= self.visitConnectableNodes(next(iter(remaining)))
            components += 1
        return components

    def getMinimumCostBuildingRoads(self, citiesNum, libraryCost, roadCost, startCity):
        """One library per component, plus a spanning tree of roads within each component."""
        communities = self.countGroupsOfConnectableNodes(startCity)
        return (citiesNum - communities) * roadCost + communities * libraryCost
def createPossibleRoadsMap(roadsNum, citiesNum):
    """Read ``roadsNum`` edge lines from stdin and build the undirected city graph.

    Cities are numbered 1..citiesNum; every road is inserted in both directions.
    """
    edges = [list(map(int, input().rstrip().split())) for _ in range(roadsNum)]
    graph = CitiesAndRoads()
    for city in range(1, citiesNum + 1):
        graph.addNode(city)
    for road in edges:
        graph.addEdge(road[0], road[1])
        graph.addEdge(road[1], road[0])
    return graph
def parseInputAndCompareCost():
    """Parse one query (header line + edge lines) from stdin and return its minimal cost.

    If a library is no more expensive than a road, build one everywhere;
    otherwise use one library plus spanning roads per connected component.
    """
    header = input().split()
    citiesNum = int(header[0])
    roadsNum = int(header[1])
    libraryCost = int(header[2])
    roadCost = int(header[3])
    # The edge lines must be consumed even when the cheap-library shortcut applies.
    graph = createPossibleRoadsMap(roadsNum, citiesNum)
    if libraryCost <= roadCost:
        return citiesNum * libraryCost
    return graph.getMinimumCostBuildingRoads(citiesNum, libraryCost, roadCost, 1)
def main():
    """Entry point: read the query count, then answer each query.

    The input source is controlled by the module-level LOCAL_INPUT flag:
    'ON' reads a local file and prints to stdout; 'OFF' reads real stdin and
    writes to the OUTPUT_PATH file (HackerRank harness convention).
    """
    if LOCAL_INPUT == "ON":
        # NOTE(review): this file object intentionally stays open for the
        # process lifetime — it replaces sys.stdin, so it cannot be closed here.
        sys.stdin = open('RoadsAndLibraries_input.txt')
        q = int(input())
        for q_itr in range(q):
            minimalCost = parseInputAndCompareCost()
            print(minimalCost)
    elif LOCAL_INPUT == "OFF":
        with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
            q = int(input())
            for q_itr in range(q):
                minimalCost = parseInputAndCompareCost()
                fptr.write(str(minimalCost) + '\n')
    else:
        print("Please set LOCAL_INPUT to 'ON' or 'OFF'.")
if __name__ == '__main__':
    main()
|
[
"collections.deque"
] |
[((417, 424), 'collections.deque', 'deque', ([], {}), '()\n', (422, 424), False, 'from collections import deque\n')]
|
# coding: utf-8
# @Author: oliver
# @Date: 2019-07-29 19:14:22
import re
import math
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from copy import deepcopy
from adaptive_avgmax_pool import SelectAdaptivePool2d
from mixed_conv2d import select_conv2d
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
#**kwargs
}
# Canonical pretrained-weight configs (TF-trained MixNet ports), keyed by model name.
default_cfgs = {
    'mixnet_s': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'),
    'mixnet_m': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'),
    'mixnet_l': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'),
}
# Enables per-block construction logging in _BlockBuilder when True.
_DEBUG = False
# Default args for PyTorch BN impl
_BN_MOMENTUM_PT_DEFAULT = 0.1
_BN_EPS_PT_DEFAULT = 1e-5
_BN_ARGS_PT = dict(momentum=_BN_MOMENTUM_PT_DEFAULT, eps=_BN_EPS_PT_DEFAULT)
# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
_BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
_BN_EPS_TF_DEFAULT = 1e-3
_BN_ARGS_TF = dict(momentum=_BN_MOMENTUM_TF_DEFAULT, eps=_BN_EPS_TF_DEFAULT)
def _resolve_bn_args(kwargs):
bn_args = _BN_ARGS_TF.copy() if kwargs.pop('bn_tf', False) else _BN_ARGS_PT.copy()
bn_momentum = kwargs.pop('bn_momentum', None)
if bn_momentum is not None:
bn_args['momentum'] = bn_momentum
bn_eps = kwargs.pop('bn_eps', None)
if bn_eps is not None:
bn_args['eps'] = bn_eps
return bn_args
def _round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
"""Round number of filters based on depth multiplier."""
if not multiplier:
return channels
channels *= multiplier
channel_min = channel_min or divisor
new_channels = max(
int(channels + divisor / 2) // divisor * divisor,
channel_min)
# Make sure that round down does not go down by more than 10%.
if new_channels < 0.9 * channels:
new_channels += divisor
return new_channels
def _parse_ksize(ss):
if ss.isdigit():
return int(ss)
else:
return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str, depth_multiplier=1.0):
    """ Decode block definition string
    Gets a list of block arg (dicts) through a string notation of arguments.
    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
    All args can exist in any order with the exception of the leading string which
    is assumed to indicate the block type.
    leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct)
    r - number of repeat blocks,
    k - kernel size,
    s - strides (1-9),
    e - expansion ratio,
    c - output channels,
    se - squeeze/excitation ratio
    n - activation fn ('re', 'r6', 'hs', or 'sw')
    Args:
        block_str: a string representation of block arguments.
    Returns:
        A list of block args (dicts)
    Raises:
        ValueError: if the string def not properly specified (TODO)
    """
    assert isinstance(block_str, str)
    ops = block_str.split('_')
    block_type = ops[0]  # take the block type off the front
    ops = ops[1:]
    options = {}
    noskip = False
    for op in ops:
        # string options being checked on individual basis, combine if they grow
        if op == 'noskip':
            noskip = True
        elif op.startswith('n'):
            # activation fn
            key = op[0]
            v = op[1:]
            if v == 're':
                value = F.relu
            elif v == 'r6':
                value = F.relu6
            elif v == 'hs':
                value = hard_swish
            elif v == 'sw':
                value = swish
            else:
                # unknown activation token -> leave act unset, model default applies
                continue
            options[key] = value
        else:
            # all numeric options: split leading key letter(s) from the number/list
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
    # if act_fn is None, the model default (passed to model init) will be used
    act_fn = options['n'] if 'n' in options else None
    # 'a'/'p' select expansion / pointwise kernel sizes (may be mixed-kernel lists)
    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
    num_repeat = int(options['r'])
    # each type of block has different valid arguments, fill accordingly
    if block_type == 'ir':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_fn=act_fn,
            noskip=noskip,
        )
    elif block_type == 'ds' or block_type == 'dsa':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_fn=act_fn,
            pw_act=block_type == 'dsa',
            noskip=block_type == 'dsa' or noskip,
        )
    elif block_type == 'cn':
        block_args = dict(
            block_type=block_type,
            kernel_size=int(options['k']),
            out_chs=int(options['c']),
            stride=int(options['s']),
            act_fn=act_fn,
        )
    else:
        assert False, 'Unknown block type (%s)' % block_type
    # return a list of block args expanded by num_repeat and
    # scaled by depth_multiplier
    num_repeat = int(math.ceil(num_repeat * depth_multiplier))
    return [deepcopy(block_args) for _ in range(num_repeat)]
def _decode_arch_args(string_list):
    """Decode a flat list of block-definition strings into per-string block-arg lists."""
    return [_decode_block_str(block_str) for block_str in string_list]
def _decode_arch_def(arch_def, depth_multiplier=1.0):
    """Decode a per-stage architecture definition (list of lists of strings).

    Each stage's strings expand to one flat list of block-arg dicts, with
    repeat counts scaled by ``depth_multiplier``.
    """
    arch_args = []
    for block_strings in arch_def:
        assert isinstance(block_strings, list)
        stage_args = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            stage_args.extend(_decode_block_str(block_str, depth_multiplier))
        arch_args.append(stage_args)
    return arch_args
def swish(x, inplace=False):
    """Swish activation: x * sigmoid(x). Mutates ``x`` when ``inplace`` is True."""
    return x.mul_(x.sigmoid()) if inplace else x * x.sigmoid()
def sigmoid(x, inplace=False):
    """Logistic sigmoid; performed in place when ``inplace`` is True."""
    if inplace:
        return x.sigmoid_()
    return x.sigmoid()
def hard_swish(x, inplace=False):
    """Hard swish activation (MobileNetV3): x * relu6(x + 3) / 6."""
    if inplace:
        # Mutate x with the precomputed gate relu6(x + 3) / 6.
        return x.mul_(F.relu6(x + 3.) / 6.)
    return x * F.relu6(x + 3.) / 6.
def hard_sigmoid(x, inplace=False):
    """Piecewise-linear sigmoid approximation: relu6(x + 3) / 6."""
    if inplace:
        return x.add_(3.).clamp_(0., 6.).div_(6.)
    return F.relu6(x + 3.) / 6.
class _BlockBuilder(object):
    """ Build Trunk Blocks
    This ended up being somewhat of a cross between
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
    and
    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py

    Stateful: ``in_chs``/``block_idx``/``block_count`` are updated while
    building, so one builder instance builds one trunk per __call__.
    """
    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 pad_type='', act_fn=None, se_gate_fn=sigmoid, se_reduce_mid=False,
                 bn_args=_BN_ARGS_PT, drop_connect_rate=0., verbose=False):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.pad_type = pad_type
        self.act_fn = act_fn
        self.se_gate_fn = se_gate_fn
        self.se_reduce_mid = se_reduce_mid
        self.bn_args = bn_args
        self.drop_connect_rate = drop_connect_rate
        self.verbose = verbose
        # updated during build
        self.in_chs = None
        self.block_idx = 0
        self.block_count = 0
    def _round_channels(self, chs):
        # Apply the builder-wide width multiplier / divisor rounding policy.
        return _round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)
    def _make_block(self, ba):
        # Instantiate one block module from its arg dict; ``ba`` is mutated in place.
        bt = ba.pop('block_type')
        ba['in_chs'] = self.in_chs
        ba['out_chs'] = self._round_channels(ba['out_chs'])
        ba['bn_args'] = self.bn_args
        ba['pad_type'] = self.pad_type
        # block act fn overrides the model default
        ba['act_fn'] = ba['act_fn'] if ba['act_fn'] is not None else self.act_fn
        assert ba['act_fn'] is not None
        if bt == 'ir':
            # drop-connect rate scales linearly with block depth
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_gate_fn'] = self.se_gate_fn
            ba['se_reduce_mid'] = self.se_reduce_mid
            if self.verbose:
                logging.info(' InvertedResidual {}, Args: {}'.format(self.block_idx, str(ba)))
            block = InvertedResidual(**ba)
        elif bt == 'ds' or bt == 'dsa':
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            if self.verbose:
                logging.info(' DepthwiseSeparable {}, Args: {}'.format(self.block_idx, str(ba)))
            block = DepthwiseSeparableConv(**ba)
        elif bt == 'cn':
            if self.verbose:
                logging.info(' ConvBnAct {}, Args: {}'.format(self.block_idx, str(ba)))
            block = ConvBnAct(**ba)
        else:
            assert False, 'Uknkown block type (%s) while building model.' % bt
        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
        return block
    def _make_stack(self, stack_args):
        blocks = []
        # each stack (stage) contains a list of block arguments
        for i, ba in enumerate(stack_args):
            if self.verbose:
                logging.info(' Block: {}'.format(i))
            if i >= 1:
                # only the first block in any stack can have a stride > 1
                ba['stride'] = 1
            block = self._make_block(ba)
            blocks.append(block)
            self.block_idx += 1  # incr global idx (across all stacks)
        return nn.Sequential(*blocks)
    def __call__(self, in_chs, block_args):
        """ Build the blocks
        Args:
            in_chs: Number of input-channels passed to first block
            block_args: A list of lists, outer list defines stages, inner
                list contains strings defining block configuration(s)
        Return:
             List of block stacks (each stack wrapped in nn.Sequential)
        """
        if self.verbose:
            logging.info('Building model trunk with %d stages...' % len(block_args))
        self.in_chs = in_chs
        self.block_count = sum([len(x) for x in block_args])
        self.block_idx = 0
        blocks = []
        # outer list of block_args defines the stacks ('stages' by some conventions)
        for stack_idx, stack in enumerate(block_args):
            if self.verbose:
                logging.info('Stack: {}'.format(stack_idx))
            assert isinstance(stack, list)
            stack = self._make_stack(stack)
            blocks.append(stack)
        return blocks
def _initialize_weight_goog(m):
# weight init as per Tensorflow Official impl
# https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # fan-out
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(0) # fan-out
init_range = 1.0 / math.sqrt(n)
m.weight.data.uniform_(-init_range, init_range)
m.bias.data.zero_()
def _initialize_weight_default(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear')
def drop_connect(inputs, training=False, drop_connect_rate=0.):
    """Stochastic depth: randomly zero whole examples during training.

    Surviving examples are rescaled by 1/keep_prob so the expectation is
    unchanged; at eval time this is the identity.
    """
    if not training:
        return inputs
    keep_prob = 1 - drop_connect_rate
    # One Bernoulli(keep_prob) draw per example, broadcast over C/H/W.
    binary_mask = keep_prob + torch.rand(
        (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device)
    binary_mask.floor_()
    return inputs.div(keep_prob) * binary_mask
class ChannelShuffle(nn.Module):
    """ShuffleNet-style channel shuffle: interleave channels across ``groups``."""
    # FIXME haven't used yet
    def __init__(self, groups):
        super(ChannelShuffle, self).__init__()
        self.groups = groups
    def forward(self, x):
        """[N,C,H,W] -> group, transpose, flatten: channels are interleaved."""
        batch, channels, height, width = x.size()
        groups = self.groups
        assert channels % groups == 0, "Incompatible group size {} for input channel {}".format(
            groups, channels
        )
        grouped = x.view(batch, groups, int(channels / groups), height, width)
        shuffled = grouped.permute(0, 2, 1, 3, 4).contiguous()
        return shuffled.view(batch, channels, height, width)
class SqueezeExcite(nn.Module):
    """Squeeze-and-excitation channel gating: global pool -> reduce -> expand -> gate."""
    def __init__(self, in_chs, reduce_chs=None, act_fn=F.relu, gate_fn=sigmoid):
        super(SqueezeExcite, self).__init__()
        self.act_fn = act_fn
        self.gate_fn = gate_fn
        bottleneck_chs = reduce_chs or in_chs
        self.conv_reduce = nn.Conv2d(in_chs, bottleneck_chs, 1, bias=True)
        self.conv_expand = nn.Conv2d(bottleneck_chs, in_chs, 1, bias=True)
    def forward(self, x):
        # Global average pool via mean over flattened spatial dims
        # (NOTE adaptiveavgpool can be used here, but seems to cause issues with NVIDIA AMP performance).
        squeezed = x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
        squeezed = self.conv_reduce(squeezed)
        squeezed = self.act_fn(squeezed, inplace=True)
        squeezed = self.conv_expand(squeezed)
        return x * self.gate_fn(squeezed)
class ConvBnAct(nn.Module):
    """Plain conv -> batch-norm -> activation block ('cn' block type)."""
    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, pad_type='', act_fn=F.relu, bn_args=_BN_ARGS_PT):
        super(ConvBnAct, self).__init__()
        assert stride in [1, 2]
        self.act_fn = act_fn
        self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type)
        self.bn1 = nn.BatchNorm2d(out_chs, **bn_args)
    def forward(self, x):
        x = self.conv(x)
        x = self.bn1(x)
        # act_fn is called functionally with inplace=True (not a module).
        x = self.act_fn(x, inplace=True)
        return x
class DepthwiseSeparableConv(nn.Module):
    """ DepthwiseSeparable block
    Used for DS convs in MobileNet-V1 and in the place of IR blocks with an expansion
    factor of 1.0. This is an alternative to having a IR with optional first pw conv.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_fn=F.relu, noskip=False,
                 pw_kernel_size=1, pw_act=False,
                 se_ratio=0., se_gate_fn=sigmoid,
                 bn_args=_BN_ARGS_PT, drop_connect_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        assert stride in [1, 2]
        self.has_se = se_ratio is not None and se_ratio > 0.
        # Residual add only when the shape is preserved (stride 1, same channels).
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.act_fn = act_fn
        self.drop_connect_rate = drop_connect_rate
        self.conv_dw = select_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn1 = nn.BatchNorm2d(in_chs, **bn_args)
        # Squeeze-and-excitation
        if self.has_se:
            self.se = SqueezeExcite(
                in_chs, reduce_chs=max(1, int(in_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn)
        self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = nn.BatchNorm2d(out_chs, **bn_args)
    def forward(self, x):
        residual = x
        # Depth-wise conv -> BN -> act
        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act_fn(x, inplace=True)
        if self.has_se:
            x = self.se(x)
        # Point-wise conv -> BN (activation only when pw_act was requested)
        x = self.conv_pw(x)
        x = self.bn2(x)
        if self.has_pw_act:
            x = self.act_fn(x, inplace=True)
        if self.has_residual:
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE

    MobileNetV2-style: point-wise expansion -> depth-wise conv -> (SE) ->
    point-wise linear projection, with an optional skip connection.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_fn=F.relu, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_reduce_mid=False, se_gate_fn=sigmoid,
                 shuffle_type=None, bn_args=_BN_ARGS_PT, drop_connect_rate=0.):
        super(InvertedResidual, self).__init__()
        mid_chs = int(in_chs * exp_ratio)
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.act_fn = act_fn
        self.drop_connect_rate = drop_connect_rate
        # Point-wise expansion
        self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type)
        self.bn1 = nn.BatchNorm2d(mid_chs, **bn_args)
        self.shuffle_type = shuffle_type
        # NOTE(review): self.shuffle only exists when shuffle_type is set AND the
        # expansion kernel is a mixed-kernel list; forward's "mid" path assumes it.
        if shuffle_type is not None and isinstance(exp_kernel_size, list):
            self.shuffle = ChannelShuffle(len(exp_kernel_size))
        # Depth-wise convolution
        self.conv_dw = select_conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn2 = nn.BatchNorm2d(mid_chs, **bn_args)
        # Squeeze-and-excitation
        if self.has_se:
            se_base_chs = mid_chs if se_reduce_mid else in_chs
            self.se = SqueezeExcite(
                mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn)
        # Point-wise linear projection
        self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn3 = nn.BatchNorm2d(out_chs, **bn_args)
    def forward(self, x):
        residual = x
        # Point-wise expansion
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act_fn(x, inplace=True)
        # FIXME haven't tried this yet
        # for channel shuffle when using groups with pointwise convs as per FBNet variants
        if self.shuffle_type == "mid":
            x = self.shuffle(x)
        # Depth-wise convolution
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act_fn(x, inplace=True)
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Point-wise linear projection (no activation: linear bottleneck)
        x = self.conv_pwl(x)
        x = self.bn3(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        # NOTE maskrcnn_benchmark building blocks have an SE module defined here for some variants
        return x
def fill_up_weights(up):
    """Fill a ConvTranspose2d with bilinear-upsampling weights.

    The first output channel gets the separable bilinear kernel; every
    remaining output channel receives an identical copy.
    """
    weight = up.weight.data
    half = math.ceil(weight.size(2) / 2)
    center = (2 * half - 1 - half % 2) / (2. * half)
    for row in range(weight.size(2)):
        for col in range(weight.size(3)):
            weight[0, 0, row, col] = (
                (1 - math.fabs(row / half - center)) * (1 - math.fabs(col / half - center))
            )
    # Replicate the bilinear kernel into every remaining output channel.
    for chan in range(1, weight.size(0)):
        weight[chan, 0, :, :] = weight[0, 0, :, :]
def fill_fc_weights(layers):
    """Initialize every Conv2d in ``layers``: weight ~ N(0, 0.001), bias = 0."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
class MixNet(nn.Module):
    """
    * MixNet S, M, L

    Trunk built by _BlockBuilder from decoded ``block_args``, followed by a
    CenterNet-style neck (three deconv stages down to 64 channels) and one
    conv head per entry in ``heads`` (head name -> number of output classes).
    forward() returns ``[dict]`` mapping head name to its output map.
    """
    def __init__(self, block_args, heads={}, head_conv=256, num_classes=1000, in_chans=3, scale=1, stem_size=32, num_features=1280,
                 channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 pad_type='', act_fn=F.relu, drop_rate=0., drop_connect_rate=0.,
                 se_gate_fn=sigmoid, se_reduce_mid=False, bn_args=_BN_ARGS_PT, weight_init='goog'):
        # NOTE(review): ``heads={}`` is a mutable default; it is only read here,
        # never mutated, so it is benign — but callers should pass their own dict.
        super(MixNet, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        self.act_fn = act_fn
        self.num_features = num_features
        self.heads = heads
        stem_size = _round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)
        self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        self.bn1 = nn.BatchNorm2d(stem_size, **bn_args)
        in_chs = stem_size
        builder = _BlockBuilder(
            channel_multiplier, channel_divisor, channel_min,
            pad_type, act_fn, se_gate_fn, se_reduce_mid,
            bn_args, drop_connect_rate, verbose=_DEBUG)
        self.blocks = nn.Sequential(*builder(in_chs, block_args))
        self.blocks = self.blocks  # NOTE(review): no-op self-assignment; safe to delete
        in_chs = builder.in_chs
        self.inplanes = builder.in_chs
        # Weight init must run before the deconv layers are created, because
        # _make_deconv_layer applies its own bilinear/normal initialization.
        for m in self.modules():
            if weight_init == 'goog':
                _initialize_weight_goog(m)
            else:
                _initialize_weight_default(m)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 128, 64],
            [4, 4, 4],
        )
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(64, head_conv,
                      kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, classes,
                      kernel_size=1, stride=1,
                      padding=0, bias=True))
                if 'hm' in head:
                    # Heatmap head bias init of -2.19 (CenterNet focal-loss convention).
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            else:
                fc = nn.Conv2d(64, classes,
                  kernel_size=1, stride=1,
                  padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)
    def _get_deconv_cfg(self, deconv_kernel, index):
        # NOTE(review): kernels other than 2/3/4 leave ``padding`` unset and
        # would raise UnboundLocalError — presumably only 2/3/4 are ever used.
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        return deconv_kernel, padding, output_padding
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build conv -> BN -> ReLU -> deconv -> BN -> ReLU upsampling stages."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            #fc = DCN(self.inplanes, planes,
            #        kernel_size=(3,3), stride=1,
            #        padding=1, dilation=1, deformable_groups=1)
            fc = nn.Conv2d(self.inplanes, planes,
                    kernel_size=3, stride=1,
                    padding=1, dilation=1, bias=False)
            fill_fc_weights(fc)
            # Stride-2 deconv initialized with bilinear upsampling weights.
            up = nn.ConvTranspose2d(
                    in_channels=planes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=False)
            fill_up_weights(up)
            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=_BN_MOMENTUM_PT_DEFAULT))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=_BN_MOMENTUM_PT_DEFAULT))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)
    def forward(self, inputs):
        x = self.conv_stem(inputs)
        x = self.bn1(x)
        x = self.act_fn(x, inplace=True)
        x = self.blocks(x)
        x = self.deconv_layers(x)
        # One output map per configured head; wrapped in a one-element list
        # (CenterNet-style multi-stage output convention).
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]
def _gen_mixnet_s(channel_multiplier=1.0, num_classes=1000, **kwargs):
    """Creates a MixNet Small model.
    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595
    """
    # Stage definitions in the block-string notation decoded by _decode_block_str;
    # trailing comments note the per-stage activation.
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_e1_c16'],  # relu
        # stage 1, 112x112 in
        ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'],  # relu
        # stage 2, 56x56 in
        ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],  # swish
        # stage 3, 28x28 in
        ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'],  # swish
        # stage 4, 14x14in
        ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],  # swish
        # stage 5, 14x14in
        ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],  # swish
        # 7x7
    ]
    model = MixNet(
        _decode_arch_def(arch_def),
        num_classes=num_classes,
        stem_size=16,
        num_features=1536,
        channel_multiplier=channel_multiplier,
        channel_divisor=8,
        channel_min=None,
        bn_args=_resolve_bn_args(kwargs),
        act_fn=F.relu,
        **kwargs
    )
    return model
def _gen_mixnet_m(channel_multiplier=1.0, num_classes=1000, **kwargs):
    """Creates a MixNet Medium-Large model.
    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595

    MixNet-L is this definition with channel_multiplier=1.3.
    """
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_e1_c24'],  # relu
        # stage 1, 112x112 in
        ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'],  # relu
        # stage 2, 56x56 in
        ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],  # swish
        # stage 3, 28x28 in
        ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'],  # swish
        # stage 4, 14x14in
        ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],  # swish
        # stage 5, 14x14in
        ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],  # swish
        # 7x7
    ]
    model = MixNet(
        _decode_arch_def(arch_def),
        num_classes=num_classes,
        stem_size=24,
        num_features=1536,
        channel_multiplier=channel_multiplier,
        channel_divisor=8,
        channel_min=None,
        bn_args=_resolve_bn_args(kwargs),
        act_fn=F.relu,
        **kwargs
    )
    return model
def mixnet_s(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build MixNet-Small (TF-compatible: 'same' padding and TF BN epsilon)."""
    cfg = default_cfgs['mixnet_s']
    kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_mixnet_s(
        channel_multiplier=1.0, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, in_chans)
    return model
def mixnet_m(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build MixNet-Medium (TF-compatible: 'same' padding and TF BN epsilon)."""
    cfg = default_cfgs['mixnet_m']
    kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_mixnet_m(
        channel_multiplier=1.0, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, in_chans)
    return model
def mixnet_l(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build MixNet-Large: the Medium definition widened by 1.3x (TF-compatible)."""
    cfg = default_cfgs['mixnet_l']
    kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_mixnet_m(
        channel_multiplier=1.3, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, in_chans)
    return model
def load_pretrained(model, default_cfg, in_chans=3):
    """Load pretrained weights from default_cfg['url'] into ``model``.

    Shape-mismatched or missing parameters fall back to the model's current
    (randomly initialized) tensors so load_state_dict cannot fail on shape.
    NOTE(review): the invalid-URL path returns None while the normal path
    returns ``model`` — callers should not rely on the return value.
    """
    if 'url' not in default_cfg or not default_cfg['url']:
        logging.warning("Pretrained model URL is invalid, using random initialization.")
        return
    pretrained_state_dict = model_zoo.load_url(default_cfg['url'])
    model_state_dict = model.state_dict()
    if in_chans == 1:
        # Collapse the RGB stem conv to one channel by averaging over the input dim.
        conv1_name = default_cfg['first_conv']
        logging.info('Converting first conv (%s) from 3 to 1 channel' % conv1_name)
        conv1_weight = pretrained_state_dict[conv1_name + '.weight']
        pretrained_state_dict[conv1_name + '.weight'] = conv1_weight.mean(dim=1, keepdim=True)
    elif in_chans != 3:
        assert False, "Invalid in_chans for pretrained weights"
    # Replace shape-mismatched pretrained tensors with the model's own; report
    # pretrained keys the model doesn't have.
    for k in pretrained_state_dict.keys():
        if k in model_state_dict.keys():
            if pretrained_state_dict[k].shape != model_state_dict[k].shape:
                pretrained_state_dict[k] = model_state_dict[k]
        else:
            print('Drop parameter {}.'.format(k))
    # Backfill model keys absent from the checkpoint with the current tensors.
    for k in model_state_dict.keys():
        if not (k in pretrained_state_dict.keys()):
            print('No param {}.'.format(k))
            pretrained_state_dict[k] = model_state_dict[k]
    model.load_state_dict(pretrained_state_dict, strict=False)
    return model
if __name__ == '__main__':
    # Smoke test: build MixNet-Small with random weights (pretrained=False default).
    model = mixnet_s()
|
[
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.utils.model_zoo.load_url",
"logging.warning",
"mixed_conv2d.select_conv2d",
"copy.deepcopy",
"re.split",
"math.sqrt",
"math.ceil",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_uniform_",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"math.fabs",
"torch.nn.Sequential",
"logging.info",
"torch.nn.functional.relu6",
"torch.nn.init.normal_"
] |
[((30192, 30230), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (["default_cfg['url']"], {}), "(default_cfg['url'])\n", (30210, 30230), True, 'import torch.utils.model_zoo as model_zoo\n'), ((6521, 6561), 'math.ceil', 'math.ceil', (['(num_repeat * depth_multiplier)'], {}), '(num_repeat * depth_multiplier)\n', (6530, 6561), False, 'import math\n'), ((6575, 6595), 'copy.deepcopy', 'deepcopy', (['block_args'], {}), '(block_args)\n', (6583, 6595), False, 'from copy import deepcopy\n'), ((10976, 10998), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (10989, 10998), True, 'import torch.nn as nn\n'), ((12796, 12866), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (12819, 12866), True, 'import torch.nn as nn\n'), ((14399, 14443), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_chs', 'reduced_chs', '(1)'], {'bias': '(True)'}), '(in_chs, reduced_chs, 1, bias=True)\n', (14408, 14443), True, 'import torch.nn as nn\n'), ((14471, 14515), 'torch.nn.Conv2d', 'nn.Conv2d', (['reduced_chs', 'in_chs', '(1)'], {'bias': '(True)'}), '(reduced_chs, in_chs, 1, bias=True)\n', (14480, 14515), True, 'import torch.nn as nn\n'), ((15194, 15270), 'mixed_conv2d.select_conv2d', 'select_conv2d', (['in_chs', 'out_chs', 'kernel_size'], {'stride': 'stride', 'padding': 'pad_type'}), '(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type)\n', (15207, 15270), False, 'from mixed_conv2d import select_conv2d\n'), ((15290, 15324), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_chs'], {}), '(out_chs, **bn_args)\n', (15304, 15324), True, 'import torch.nn as nn\n'), ((16402, 16501), 'mixed_conv2d.select_conv2d', 'select_conv2d', (['in_chs', 'in_chs', 'dw_kernel_size'], {'stride': 'stride', 'padding': 'pad_type', 'depthwise': '(True)'}), '(in_chs, in_chs, dw_kernel_size, stride=stride, padding=\n pad_type, depthwise=True)\n', (16415, 
16501), False, 'from mixed_conv2d import select_conv2d\n'), ((16529, 16562), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_chs'], {}), '(in_chs, **bn_args)\n', (16543, 16562), True, 'import torch.nn as nn\n'), ((16784, 16848), 'mixed_conv2d.select_conv2d', 'select_conv2d', (['in_chs', 'out_chs', 'pw_kernel_size'], {'padding': 'pad_type'}), '(in_chs, out_chs, pw_kernel_size, padding=pad_type)\n', (16797, 16848), False, 'from mixed_conv2d import select_conv2d\n'), ((16868, 16902), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_chs'], {}), '(out_chs, **bn_args)\n', (16882, 16902), True, 'import torch.nn as nn\n'), ((18217, 18282), 'mixed_conv2d.select_conv2d', 'select_conv2d', (['in_chs', 'mid_chs', 'exp_kernel_size'], {'padding': 'pad_type'}), '(in_chs, mid_chs, exp_kernel_size, padding=pad_type)\n', (18230, 18282), False, 'from mixed_conv2d import select_conv2d\n'), ((18302, 18336), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mid_chs'], {}), '(mid_chs, **bn_args)\n', (18316, 18336), True, 'import torch.nn as nn\n'), ((18575, 18676), 'mixed_conv2d.select_conv2d', 'select_conv2d', (['mid_chs', 'mid_chs', 'dw_kernel_size'], {'stride': 'stride', 'padding': 'pad_type', 'depthwise': '(True)'}), '(mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=\n pad_type, depthwise=True)\n', (18588, 18676), False, 'from mixed_conv2d import select_conv2d\n'), ((18704, 18738), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mid_chs'], {}), '(mid_chs, **bn_args)\n', (18718, 18738), True, 'import torch.nn as nn\n'), ((19069, 19134), 'mixed_conv2d.select_conv2d', 'select_conv2d', (['mid_chs', 'out_chs', 'pw_kernel_size'], {'padding': 'pad_type'}), '(mid_chs, out_chs, pw_kernel_size, padding=pad_type)\n', (19082, 19134), False, 'from mixed_conv2d import select_conv2d\n'), ((19154, 19188), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_chs'], {}), '(out_chs, **bn_args)\n', (19168, 19188), True, 'import torch.nn as nn\n'), ((21681, 21746), 'mixed_conv2d.select_conv2d', 
'select_conv2d', (['in_chans', 'stem_size', '(3)'], {'stride': '(2)', 'padding': 'pad_type'}), '(in_chans, stem_size, 3, stride=2, padding=pad_type)\n', (21694, 21746), False, 'from mixed_conv2d import select_conv2d\n'), ((21766, 21802), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['stem_size'], {}), '(stem_size, **bn_args)\n', (21780, 21802), True, 'import torch.nn as nn\n'), ((25406, 25428), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (25419, 25428), True, 'import torch.nn as nn\n'), ((30067, 30152), 'logging.warning', 'logging.warning', (['"""Pretrained model URL is invalid, using random initialization."""'], {}), "('Pretrained model URL is invalid, using random initialization.'\n )\n", (30082, 30152), False, 'import logging\n'), ((30351, 30426), 'logging.info', 'logging.info', (["('Converting first conv (%s) from 3 to 1 channel' % conv1_name)"], {}), "('Converting first conv (%s) from 3 to 1 channel' % conv1_name)\n", (30363, 30426), False, 'import logging\n'), ((7702, 7718), 'torch.nn.functional.relu6', 'F.relu6', (['(x + 3.0)'], {}), '(x + 3.0)\n', (7709, 7718), True, 'import torch.nn.functional as F\n'), ((12335, 12353), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (12344, 12353), False, 'import math\n'), ((20635, 20671), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (20650, 20671), True, 'import torch.nn as nn\n'), ((24522, 24618), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', 'planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'bias': '(False)'}), '(self.inplanes, planes, kernel_size=3, stride=1, padding=1,\n dilation=1, bias=False)\n', (24531, 24618), True, 'import torch.nn as nn\n'), ((24705, 24864), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'planes', 'out_channels': 'planes', 'kernel_size': 'kernel', 'stride': '(2)', 'padding': 'padding', 'output_padding': 'output_padding', 
'bias': '(False)'}), '(in_channels=planes, out_channels=planes, kernel_size=\n kernel, stride=2, padding=padding, output_padding=output_padding, bias=\n False)\n', (24723, 24864), True, 'import torch.nn as nn\n'), ((4586, 4609), 're.split', 're.split', (['"""(\\\\d.*)"""', 'op'], {}), "('(\\\\d.*)', op)\n", (4594, 4609), False, 'import re\n'), ((7501, 7517), 'torch.nn.functional.relu6', 'F.relu6', (['(x + 3.0)'], {}), '(x + 3.0)\n', (7508, 7517), True, 'import torch.nn.functional as F\n'), ((7552, 7568), 'torch.nn.functional.relu6', 'F.relu6', (['(x + 3.0)'], {}), '(x + 3.0)\n', (7559, 7568), True, 'import torch.nn.functional as F\n'), ((13011, 13083), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""linear"""'}), "(m.weight, mode='fan_in', nonlinearity='linear')\n", (13035, 13083), True, 'import torch.nn as nn\n'), ((20861, 20889), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (20878, 20889), True, 'import torch.nn as nn\n'), ((23163, 23232), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(64, classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (23172, 23232), True, 'import torch.nn as nn\n'), ((25085, 25141), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': '_BN_MOMENTUM_PT_DEFAULT'}), '(planes, momentum=_BN_MOMENTUM_PT_DEFAULT)\n', (25099, 25141), True, 'import torch.nn as nn\n'), ((25169, 25190), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (25176, 25190), True, 'import torch.nn as nn\n'), ((25248, 25304), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': '_BN_MOMENTUM_PT_DEFAULT'}), '(planes, momentum=_BN_MOMENTUM_PT_DEFAULT)\n', (25262, 25304), True, 'import torch.nn as nn\n'), ((25332, 25353), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (25339, 
25353), True, 'import torch.nn as nn\n'), ((12621, 12633), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (12630, 12633), False, 'import math\n'), ((20401, 20421), 'math.fabs', 'math.fabs', (['(i / f - c)'], {}), '(i / f - c)\n', (20410, 20421), False, 'import math\n'), ((20430, 20450), 'math.fabs', 'math.fabs', (['(j / f - c)'], {}), '(j / f - c)\n', (20439, 20450), False, 'import math\n'), ((22717, 22778), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'head_conv'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(64, head_conv, kernel_size=3, padding=1, bias=True)\n', (22726, 22778), True, 'import torch.nn as nn\n'), ((22818, 22839), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (22825, 22839), True, 'import torch.nn as nn\n'), ((22859, 22935), 'torch.nn.Conv2d', 'nn.Conv2d', (['head_conv', 'classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(head_conv, classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (22868, 22935), True, 'import torch.nn as nn\n')]
|
# -*- coding: utf-8 -*-
"""
Read gslib file format
Created on Wed Sep 5th 2018
"""
from __future__ import absolute_import, division, print_function
__author__ = "yuhao"
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from mpl_toolkits.mplot3d import Axes3D
class SpatialData(object):
    """A 2D/3D point data set read from a GSLIB (Geo-EAS) text file.

    Attributes
    ----------
    datafl : str
        Path of the input GSLIB file.
    df : pandas.DataFrame
        The data table; a ``z`` column of zeros is appended for 2D files.
    vr : numpy.recarray
        Record-array view of ``df`` with float64 fields.
    property_name : list of str
        Column names other than the coordinates ``x``, ``y`` and ``z``.
        Statistics below are computed on the *first* of these.
    """
    def __init__(self, file_path):
        self.datafl = file_path
        self.vr = None
        self.property_name = None
        self._2d = False
        self._read_data()

    def _read_data(self):
        """Read the GSLIB header (title, ncols, column names) and data table."""
        column_name = []
        with open(self.datafl, 'r') as fin:
            _ = fin.readline().strip()  # title line, unused
            ncols = int(fin.readline().strip())
            for _ in range(ncols):
                column_name.append(fin.readline().strip())
        self.property_name = [item for item in column_name
                              if item not in ['x', 'y', 'z']]
        df = pd.read_csv(self.datafl, sep='\t', header=None, names=column_name,
                         skiprows=ncols+2)
        if 'z' not in column_name:
            # 2D data set: add a dummy z coordinate so downstream code can
            # always rely on x/y/z columns being present.
            self._2d = True
            column_name.append('z')
            df['z'] = 0
        self.df = df
        data_dtype = np.dtype({
            'names': column_name,
            'formats': ['f8'] * len(column_name)})
        # np.rec.fromarrays is the public, stable spelling of
        # np.core.records.fromarrays (np.core is deprecated in NumPy 2.x).
        self.vr = np.rec.fromarrays(
            df.values.transpose(), dtype=data_dtype)

    def preview(self):
        """Return the first 20 rows of the data table.

        Fixed: ``self.vr`` is a numpy record array, which has no ``head``
        method; the preview must come from the DataFrame instead.
        """
        return self.df.head(20)

    def pdf(self, ax, bins=15):
        """Draw a histogram of the first property on matplotlib axis *ax*."""
        hist, bin_edges = np.histogram(self.vr[self.property_name[0]],
                                       bins=bins)
        ax.set_title("pdf")
        ax.bar(bin_edges[:-1], hist, width=bin_edges[1]-bin_edges[0],
               color='red', alpha=0.5)

    def cdf(self, ax):
        """Draw the empirical CDF of the first property on matplotlib axis *ax*."""
        data = np.sort(self.vr[self.property_name[0]])
        cdf = np.arange(1, len(data) + 1) / len(data)
        ax.set_title("cdf")
        ax.plot(data, cdf)

    @property
    def maximum(self):
        """Maximum of the first property."""
        return self.df[self.property_name[0]].max()

    @property
    def minimum(self):
        """Minimum of the first property."""
        return self.df[self.property_name[0]].min()

    @property
    def mean(self):
        """Arithmetic mean of the first property."""
        return self.df[self.property_name[0]].mean()

    @property
    def variance(self):
        """Sample variance of the first property."""
        return self.df[self.property_name[0]].var()

    @property
    def median(self):
        """Median of the first property."""
        return np.median(self.vr[self.property_name[0]])

    # Backward-compatible alias for the historical misspelling.
    meadian = median

    @property
    def upper_quartile(self):
        """75th percentile of the first property."""
        return self.df[self.property_name[0]].quantile(0.75)

    @property
    def lower_quartile(self):
        """25th percentile of the first property."""
        return self.df[self.property_name[0]].quantile(0.25)

    @property
    def num(self):
        """Number of data points."""
        return self.vr.shape[0]

    def distance(self):
        """Return condensed pairwise x-y distances (scipy.spatial.distance.pdist)."""
        num = self.vr.shape[0]
        return pdist(np.concatenate((self.vr['x'].reshape((num, 1)),
                               self.vr['y'].reshape((num, 1))), axis=1))

    @property
    def summary(self):
        """Human-readable statistics summary of the first property."""
        return (
            "Summary\n"
            "-------\n"
            "Number of Points: {}\n"
            "Mean: {}\n"
            "Variance: {}\n"
            "Minimum: {}\n"
            "Lower Quartile: {}\n"
            "Median: {}\n"
            "Upper Quartile: {}\n"
            "Maximum: {}\n").format(
                self.num,
                self.mean,
                self.variance,
                self.minimum,
                self.lower_quartile,
                self.meadian,
                self.upper_quartile,
                self.maximum)

    def scatter(self, ax, prop=None):
        """
        Plot scatter of data points on given axis

        Parameters
        ----------
        ax : AxesSubplot or Axes3DSubplot
            axis on which the scatter plot is drawn
        prop : str
            property to display with colormap (defaults to the first one)
        """
        prop = self.property_name[0] if prop is None else prop
        # Fixed: pass the property *values* to the colormap; the original
        # passed the column name string, which matplotlib cannot interpret.
        values = self.vr[prop]
        if not self._2d and isinstance(ax, Axes3D):
            sc = ax.scatter(
                self.vr['x'], self.vr['y'], self.vr['z'],
                c=values)
        else:
            sc = ax.scatter(
                self.vr['x'], self.vr['y'], c=values)
        return sc
|
[
"pandas.read_csv",
"numpy.sort",
"numpy.histogram",
"numpy.median"
] |
[((955, 1046), 'pandas.read_csv', 'pd.read_csv', (['self.datafl'], {'sep': '"""\t"""', 'header': 'None', 'names': 'column_name', 'skiprows': '(ncols + 2)'}), "(self.datafl, sep='\\t', header=None, names=column_name, skiprows\n =ncols + 2)\n", (966, 1046), True, 'import pandas as pd\n'), ((1542, 1597), 'numpy.histogram', 'np.histogram', (['self.vr[self.property_name[0]]'], {'bins': 'bins'}), '(self.vr[self.property_name[0]], bins=bins)\n', (1554, 1597), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (1866, 1872), True, 'import numpy as np\n'), ((2394, 2435), 'numpy.median', 'np.median', (['self.vr[self.property_name[0]]'], {}), '(self.vr[self.property_name[0]])\n', (2403, 2435), True, 'import numpy as np\n')]
|
import urllib3
import json
def ETRI_POS_Tagging(text):
    """POST *text* to the ETRI WiseNLU morphological analyzer and return
    the common/proper nouns found in it (see Pos_extract)."""
    payload = {
        "access_key": "14af2341-2fde-40f3-a0b9-b724fa029380",
        "argument": {
            "text": text,
            "analysis_code": "morp",
        },
    }
    pool = urllib3.PoolManager()
    reply = pool.request(
        "POST",
        "http://aiopen.etri.re.kr:8000/WiseNLU",
        headers={"Content-Type": "application/json; charset=UTF-8"},
        body=json.dumps(payload),
    )
    return Pos_extract(reply)
def Pos_extract(Data):
    """Extract noun lemmas from an ETRI WiseNLU "morp" analysis response.

    Parameters
    ----------
    Data : response-like object
        Object whose ``.data`` attribute holds the UTF-8 encoded JSON
        reply of the WiseNLU API (e.g. an urllib3 HTTPResponse).

    Returns
    -------
    list of str
        Lemmas of every common noun (NNG) and proper noun (NNP), in the
        order they appear.
    """
    # Fixed: the encoding name was the typo "utf=8", which only worked by
    # accident of Python's codec-name normalization.
    sentences = json.loads(str(Data.data, "utf-8"))['return_object']['sentence']
    nouns = []
    for sentence in sentences:
        for morp in sentence['morp']:
            if morp['type'] in ('NNG', 'NNP'):
                nouns.append(morp['lemma'])
    return nouns
|
[
"urllib3.PoolManager",
"json.dumps"
] |
[((365, 386), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (384, 386), False, 'import urllib3\n'), ((534, 557), 'json.dumps', 'json.dumps', (['requestJson'], {}), '(requestJson)\n', (544, 557), False, 'import json\n')]
|
import random
from tqdm import tqdm
from Crypto.Util.number import *
# Brute-force search for a PRNG seed (CTF challenge): for each candidate
# seed the challenge's exact sequence of random calls is replayed, filtered
# against one known output and confirmed against a second one.  The call
# order below must not be changed -- every random.random()/random.seed()
# advances the generator state.
for seed in tqdm(range(10000000)):
    random.seed(seed)
    toBreak = False
    # Presumably mirrors the challenge's seed-scrambling: re-seed 19 times
    # from the generator's own output, then capture the derived seed.
    for i in range(19):
        random.seed(random.random())
    seedtosave = random.random()
    # Cheap filter: with the derived seed, the very first draw must equal
    # the known value 0.5327486342598738, otherwise reject this candidate.
    # (The loops only ever break on add == 0, i == 0; the extra iterations
    # reproduce the challenge's draw count.)
    for add in range(0, 1000):
        random.seed(seedtosave+add)
        for i in range(0, 100):
            temp = random.random()
            if add == 0 and i == 0 and temp != 0.5327486342598738:
                toBreak = True
                break
        if toBreak:
            break
    if toBreak:
        continue
    # Burn through the same number of draws the challenge performs before
    # the final check, keeping the generator state in sync.
    for add in range(0, 1000):
        random.seed(seedtosave-add)
        for i in range(0, 1000):
            random.random()
    # Confirmation: one of the next 100 scaled draws must equal the second
    # known value; if so, print the flag and stop.
    random.seed(seedtosave)
    for i in range(0, 100):
        t = random.random()*100
        if t == 83.74981977975804:
            print("idek{", seed, "}", sep="")
            exit()
# Known solution: idek{103123}
|
[
"random.random",
"random.seed"
] |
[((110, 127), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (121, 127), False, 'import random\n'), ((228, 243), 'random.random', 'random.random', ([], {}), '()\n', (241, 243), False, 'import random\n'), ((706, 729), 'random.seed', 'random.seed', (['seedtosave'], {}), '(seedtosave)\n', (717, 729), False, 'import random\n'), ((284, 313), 'random.seed', 'random.seed', (['(seedtosave + add)'], {}), '(seedtosave + add)\n', (295, 313), False, 'import random\n'), ((612, 641), 'random.seed', 'random.seed', (['(seedtosave - add)'], {}), '(seedtosave - add)\n', (623, 641), False, 'import random\n'), ((193, 208), 'random.random', 'random.random', ([], {}), '()\n', (206, 208), False, 'import random\n'), ((363, 378), 'random.random', 'random.random', ([], {}), '()\n', (376, 378), False, 'import random\n'), ((685, 700), 'random.random', 'random.random', ([], {}), '()\n', (698, 700), False, 'import random\n'), ((770, 785), 'random.random', 'random.random', ([], {}), '()\n', (783, 785), False, 'import random\n')]
|
#twitterclient
import twitter
from configuration import configuration
class twitterclient:
    """Thin wrapper around the python-twitter API, configured from config.ini."""

    def __init__(self):
        cfg = configuration("config.ini")
        self.api = twitter.Api(
            consumer_key=cfg.getTwitterConsumerKey(),
            consumer_secret=cfg.getTwitterConsumerSecret(),
            access_token_key=cfg.getTwitterAccessToken(),
            access_token_secret=cfg.getTwitterAccessTokenSecret(),
        )

    def tweet(self, message):
        """Post *message* as a status update on the configured account."""
        self.api.PostUpdate(message)
|
[
"configuration.configuration"
] |
[((135, 162), 'configuration.configuration', 'configuration', (['"""config.ini"""'], {}), "('config.ini')\n", (148, 162), False, 'from configuration import configuration\n')]
|
from enrichmentmanager.models import EnrichmentSignup, EnrichmentOption
from enrichmentmanager.lib import canEditSignup
from io import StringIO
from datetime import date
from django import template
register = template.Library()
@register.assignment_tag(takes_context=True)
def select_for(context, slot, student):
    """Render the signup widget for *student* in *slot*.

    Returns an HTML ``<select>`` (plus an admin-lock checkbox for users
    with the ``can_set_admin_lock`` permission) when the requesting user
    may edit the signup, a read-only paragraph otherwise.  Expects
    ``slotChoices`` and ``relatedSignups`` in the template context.
    """
    # A locked-out student only ever sees the lockout text.
    if student.lockout:
        return '<p class="readOnlySignup">{lockout}</p>'.format(lockout=student.lockout)
    canEdit = canEditSignup(context.request.user, slot, student)
    # Key format must match the one used when building context['relatedSignups'].
    key = "slot_{slot_id}_{student_id}".format(slot_id=slot.id, student_id=student.id)
    if canEdit:
        out = StringIO()
        choices = context['slotChoices'][slot]
        out.write('<select name="{key}_option" class="slotSelector saveTrack">'.format(key=key))
        out.write('<option value="">--</option>')
        selected = context['relatedSignups'].get(key)
        # Options taught by one of the student's own teachers are listed
        # first, separated from the remaining options by a '--' divider.
        preferredChoices = StringIO()
        otherChoices = StringIO()
        associatedTeachers = student.associated_teachers.all()
        for choice in choices:
            if choice.teacher in associatedTeachers:
                if selected == choice.id:
                    preferredChoices.write('<option value="{choice_id}" selected="selected">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
                else:
                    preferredChoices.write('<option value="{choice_id}">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
            else:
                if selected == choice.id:
                    otherChoices.write('<option value="{choice_id}" selected="selected">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
                else:
                    otherChoices.write('<option value="{choice_id}">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
        preferredChoices = preferredChoices.getvalue()
        otherChoices = otherChoices.getvalue()
        if preferredChoices and otherChoices:
            out.write(preferredChoices)
            out.write('<option value="">--</option>')
            out.write(otherChoices)
        else:
            # At most one group is non-empty here, so no divider is needed.
            out.write(preferredChoices)
            out.write(otherChoices)
        out.write("</select>")
        if context.request.user.has_perm("enrichmentmanager.can_set_admin_lock"):
            # TODO: Horribly inefficient -- one query per slot/student pair.
            try:
                signup = EnrichmentSignup.objects.get(slot=slot, student=student)
            except EnrichmentSignup.DoesNotExist:
                signup = None
            if signup and signup.admin_lock:
                out.write('<input type="checkbox" title="Admin Lockout" name="{key}_adminlock" class="saveTrack adminLock" checked />'.format(key=key))
            else:
                out.write('<input type="checkbox" title="Admin Lockout" name="{key}_adminlock" class="saveTrack adminLock" />'.format(key=key))
        return out.getvalue()
    else:
        selectedID = context['relatedSignups'].get(key)
        if selectedID:
            # Inefficient: generates one query per row when viewing in
            # read-only mode.
            selectedChoice = EnrichmentOption.objects.get(pk=selectedID)
            return '<p class="readOnlySignup">{option}</p>'.format(option=selectedChoice)
        return ""
@register.assignment_tag(takes_context=True)
def display_for(context, slot, student):
    """Render a read-only summary of *student*'s signup in *slot*."""
    if student.lockout:
        return "<em title='Lockout assigned'>{}</em>".format(student.lockout)
    signup_key = "slot_{slot_id}_{student_id}".format(slot_id=slot.id, student_id=student.id)
    option_id = context['relatedSignups'].get(signup_key, "")
    if not option_id:
        return "<strong>Unassigned</strong>"
    return str(EnrichmentOption.objects.get(pk=option_id))
|
[
"django.template.Library",
"io.StringIO",
"enrichmentmanager.models.EnrichmentSignup.objects.get",
"enrichmentmanager.lib.canEditSignup",
"enrichmentmanager.models.EnrichmentOption.objects.get"
] |
[((212, 230), 'django.template.Library', 'template.Library', ([], {}), '()\n', (228, 230), False, 'from django import template\n'), ((454, 504), 'enrichmentmanager.lib.canEditSignup', 'canEditSignup', (['context.request.user', 'slot', 'student'], {}), '(context.request.user, slot, student)\n', (467, 504), False, 'from enrichmentmanager.lib import canEditSignup\n'), ((632, 642), 'io.StringIO', 'StringIO', ([], {}), '()\n', (640, 642), False, 'from io import StringIO\n'), ((933, 943), 'io.StringIO', 'StringIO', ([], {}), '()\n', (941, 943), False, 'from io import StringIO\n'), ((967, 977), 'io.StringIO', 'StringIO', ([], {}), '()\n', (975, 977), False, 'from io import StringIO\n'), ((3290, 3333), 'enrichmentmanager.models.EnrichmentOption.objects.get', 'EnrichmentOption.objects.get', ([], {'pk': 'selectedID'}), '(pk=selectedID)\n', (3318, 3333), False, 'from enrichmentmanager.models import EnrichmentSignup, EnrichmentOption\n'), ((3841, 3882), 'enrichmentmanager.models.EnrichmentOption.objects.get', 'EnrichmentOption.objects.get', ([], {'pk': 'selected'}), '(pk=selected)\n', (3869, 3882), False, 'from enrichmentmanager.models import EnrichmentSignup, EnrichmentOption\n'), ((2528, 2584), 'enrichmentmanager.models.EnrichmentSignup.objects.get', 'EnrichmentSignup.objects.get', ([], {'slot': 'slot', 'student': 'student'}), '(slot=slot, student=student)\n', (2556, 2584), False, 'from enrichmentmanager.models import EnrichmentSignup, EnrichmentOption\n')]
|
"""
testing module knmi_rain from acequia
"""
import acequia as aq
def hdr(msg):
    """Print *msg* framed by '#'-prefixed separator lines and blank lines."""
    rule = '-' * 50
    print()
    print('#', rule)
    print(msg)
    print('#', rule)
    print()
if __name__ == '__main__':
    # Smoke test 1: read an existing KNMI precipitation file.
    hdr('# read valid file')
    srcpath = r'.\testdata\knmi\neerslaggeg_EENRUM_154.txt'
    prc = aq.KnmiRain(srcpath)
    n = len(prc.rawdata)
    print(f'Number of data rows is {n}')
    # Smoke test 2: a bad path is apparently expected to yield an object
    # with empty rawdata rather than raise -- TODO confirm in acequia docs.
    hdr('# try to read invalid filepath')
    prc2 = aq.KnmiRain('dummy')
    n = len(prc2.rawdata)
    print(f'Number of data rows is {n}')
    # Smoke test 3: variable definitions/units table.
    hdr('# get table with definitions and units')
    tbl = prc.units()
    print(tbl)
    # Smoke test 4: one valid variable ('prc') and one unknown ('dummy')
    # to exercise the missing-name path of timeseries().
    hdr('# get all possible variables and one not possible')
    for name in ['prc','dummy']:
        n = len(prc.timeseries(name))
        print(f'Number of {name}: {n}')
|
[
"acequia.KnmiRain"
] |
[((297, 317), 'acequia.KnmiRain', 'aq.KnmiRain', (['srcpath'], {}), '(srcpath)\n', (308, 317), True, 'import acequia as aq\n'), ((438, 458), 'acequia.KnmiRain', 'aq.KnmiRain', (['"""dummy"""'], {}), "('dummy')\n", (449, 458), True, 'import acequia as aq\n')]
|
from django.conf.urls import url, include
from rest_framework_jwt.views import obtain_jwt_token
from accounts.views import (
UserCreateView,
)
# URL namespace used when reversing, e.g. reverse('accounts:accounts').
app_name = 'accounts'
urlpatterns = [
    # POST endpoint creating a new user account.
    url(r'^register/$',UserCreateView.as_view(),name='accounts'),
    # JWT login endpoint: POST credentials, receive a signed token.
    url(r'^home/login/token/$',obtain_jwt_token),
]
|
[
"accounts.views.UserCreateView.as_view",
"django.conf.urls.url"
] |
[((249, 293), 'django.conf.urls.url', 'url', (['"""^home/login/token/$"""', 'obtain_jwt_token'], {}), "('^home/login/token/$', obtain_jwt_token)\n", (252, 293), False, 'from django.conf.urls import url, include\n'), ((205, 229), 'accounts.views.UserCreateView.as_view', 'UserCreateView.as_view', ([], {}), '()\n', (227, 229), False, 'from accounts.views import UserCreateView\n')]
|
import os
import shutil
import send2trash
import tkinter
import tkinter.filedialog
# Archive extensions whose extracted folders are candidates for deletion.
definitions=['.zip','.tar','.rar']
# Default scan directory; immediately overridden by the directory picker below.
cur_dir='C:\\Users\\Zombie\\Downloads'
# Accumulators filled during the scan: base names of archives found, and
# paths of folders whose name matches one of those base names.
compressedlist=list()
extractedfolders=list()
cur_dir = tkinter.filedialog.askdirectory(initialdir="/",title='Please select a directory')
print(cur_dir)
def ValidateFile(fileItem):
    """If *fileItem* has a known archive extension (see ``definitions``),
    record its base name (file name without extension) in ``compressedlist``.
    """
    # os.path.splitext replaces the original manual split('.')/join dance
    # and yields the same root for multi-dot names (e.g. 'a.b.zip' -> 'a.b').
    root, ext = os.path.splitext(os.path.basename(fileItem))
    if ext.lower() in definitions:
        compressedlist.append(root)
def ValidateFolder(folderName):
    """Return True if *folderName* matches a previously recorded archive
    base name (module-level ``compressedlist``)."""
    # Direct membership test replaces the if/else returning True/False.
    return folderName in compressedlist
def IterateChildZip(folder):
    """Recursively walk *folder*, recording the base name of every archive
    file found (via ``ValidateFile``)."""
    for entry in os.listdir(folder):
        # os.path.join instead of folder+'\\'+entry: identical on Windows,
        # and makes the script work on other platforms too.
        path = os.path.join(folder, entry)
        if os.path.isdir(path):
            IterateChildZip(path)
        else:
            ValidateFile(path)
def IterateChildFolder(folder):
    """Recursively walk *folder*, collecting into ``extractedfolders`` every
    directory whose name matches a recorded archive base name; other
    directories are descended into."""
    for entry in os.listdir(folder):
        # os.path.join instead of folder+'\\'+entry for portability.
        path = os.path.join(folder, entry)
        if os.path.isdir(path):
            if ValidateFolder(entry):
                extractedfolders.append(path)
            else:
                IterateChildFolder(path)
# Scan the chosen directory: first record every archive's base name, then
# find extracted folders matching those names.
IterateChildZip(cur_dir)
IterateChildFolder(cur_dir)
print(compressedlist)
print(extractedfolders)
# Ask for confirmation before sending anything to the recycle bin.
print('Do you want to proceed with delete ? Y/N')
proceed=input()
if proceed.lower() == 'y':
    for folder in extractedfolders:
        try:
            # send2trash moves to the recycle bin (recoverable), not a hard delete.
            send2trash.send2trash(folder)
            print('Moved to recycle bin-'+folder)
        except Exception as e:
            # Best-effort: report the failure and continue with the rest.
            print(e)
print('done enter any key to exit')
input()
|
[
"os.path.basename",
"os.path.isdir",
"tkinter.filedialog.askdirectory",
"send2trash.send2trash",
"os.path.splitext",
"os.listdir"
] |
[((247, 334), 'tkinter.filedialog.askdirectory', 'tkinter.filedialog.askdirectory', ([], {'initialdir': '"""/"""', 'title': '"""Please select a directory"""'}), "(initialdir='/', title=\n 'Please select a directory')\n", (278, 334), False, 'import tkinter\n'), ((721, 739), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (731, 739), False, 'import os\n'), ((946, 964), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (956, 964), False, 'import os\n'), ((753, 789), 'os.path.isdir', 'os.path.isdir', (["(folder + '\\\\' + fitem)"], {}), "(folder + '\\\\' + fitem)\n", (766, 789), False, 'import os\n'), ((978, 1014), 'os.path.isdir', 'os.path.isdir', (["(folder + '\\\\' + fitem)"], {}), "(folder + '\\\\' + fitem)\n", (991, 1014), False, 'import os\n'), ((1483, 1512), 'send2trash.send2trash', 'send2trash.send2trash', (['folder'], {}), '(folder)\n', (1504, 1512), False, 'import send2trash\n'), ((386, 412), 'os.path.splitext', 'os.path.splitext', (['fileItem'], {}), '(fileItem)\n', (402, 412), False, 'import os\n'), ((465, 491), 'os.path.basename', 'os.path.basename', (['fileItem'], {}), '(fileItem)\n', (481, 491), False, 'import os\n')]
|
import logging
import traceback
from collections import namedtuple
from copy import deepcopy
from datetime import datetime, timedelta
from functools import lru_cache, partial
import pytz
import requests
from django.db import transaction
from django.utils.dateparse import parse_time
from django.utils.timezone import now
from django_orghierarchy.models import Organization
from events.importer.sync import ModelSyncher
from events.importer.util import clean_text
from events.importer.yso import KEYWORDS_TO_ADD_TO_AUDIENCE
from events.keywords import KeywordMatcher
from events.models import DataSource, Event, Keyword, Place
from .base import Importer, register_importer
# Per module logger
logger = logging.getLogger(__name__)
HARRASTUSHAKU_API_BASE_URL = 'http://nk.hel.fi/harrastushaku/api/'
TIMEZONE = pytz.timezone('Europe/Helsinki')
MAX_RECURRING_EVENT_LENGTH = 366  # days
# Hardcoded YSO keyword ids per Harrastushaku main category id (see the
# import_courses docstring in HarrastushakuImporter).
MAIN_CATEGORY_KEYWORDS = {
    '1': {'yso:p3466'},
    '2': {'yso:p916', 'yso:p6062'},
    '3': {'yso:p13084', 'yso:p2023'},
    '4': {'yso:p2445', 'yso:p20405'},
    '5': {'yso:p1808'},
    '7': {'yso:p2851'},
    '8': {'yso:p1278'},
    '9': {'yso:p6940'},
    '11': {'yso:p143', 'yso:p9270'},
}
# ((min_age, max_age), audience keyword ids) used to derive course audiences.
AUDIENCE_BY_AGE_RANGE = (
    ((0, 6), {'yso:p4354'}),
    ((7, 16), {'yso:p16485'}),
    ((10, 18), {'yso:p11617'}),
)
# Start/end pair of a single recurring-course occurrence.
SubEventTimeRange = namedtuple('SubEventTimeRange', ['start', 'end'])
class HarrastushakuException(Exception):
    """Raised when a fetched activity or location payload cannot be imported."""
    pass
@register_importer
class HarrastushakuImporter(Importer):
name = 'harrastushaku'
supported_languages = ['fi']
    def setup(self):
        """Initialize data sources, publisher organization and lookup caches."""
        logger.debug('Running Harrastushaku importer setup...')
        self.data_source, _ = DataSource.objects.get_or_create(id=self.name, defaults={'name': 'Harrastushaku'})
        self.tprek_data_source = DataSource.objects.get(id='tprek')
        self.ahjo_data_source, _ = DataSource.objects.get_or_create(id='ahjo', defaults={'name': 'Ahjo'})
        # Publisher for imported events; origin id u48040030 is presumably
        # the responsible city organization in Ahjo -- TODO confirm.
        self.organization, _ = Organization.objects.get_or_create(origin_id='u48040030',
                                                           data_source=self.ahjo_data_source)
        # Caches to avoid repeated database lookups per activity.
        self.tprek_ids = {place.origin_id for place in Place.objects.filter(data_source=self.tprek_data_source)}
        self.keywords = {keyword.id: keyword for keyword in Keyword.objects.all()}
        self.keyword_matcher = KeywordMatcher()
    def import_places(self):
        """Import Harrastushaku locations as Places
        - If we can find a close-enough match for the location object coming from Harrastushaku in Toimipisterekisteri,
          we do not import that location object, as this will cause duplicate location issue due to
          Harrastushaku data being of low quality.
        - If, however, we cannot find a match, location object will be imported with data source "harrastushaku".
        """
        logger.info('Importing places...')
        locations = self.fetch_locations()
        logger.debug('Handling {} locations...'.format(len(locations)))
        self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)
        for location in locations:
            try:
                self.handle_location(location)
            except Exception as e:  # noqa
                # Known import problems (HarrastushakuException) are logged
                # briefly, anything unexpected with a full traceback; either
                # way, continue with the remaining locations.
                message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
                logger.error('Error handling location {}: {}'.format(location.get('id'), message))
    def map_harrastushaku_location_ids_to_tprek_ids(self, harrastushaku_locations):
        '''
        Map every Harrastushaku location id to a Place id: a tprek id when a
        matching Toimipisterekisteri place is found, otherwise a
        harrastushaku-namespaced id.

        Example mapped dictionary result:
        {
            '95': 'harrastushaku:95',
            '953': 'harrastushaku:953',
            '968': 'tprek:20479',
            '97': 'tprek:8062',
            '972': 'tprek:9079',
            '987': 'harrastushaku:987',
            '99': 'tprek:8064',
        }
        '''
        result = dict()
        for harrastushaku_location in harrastushaku_locations:
            harrastushaku_location_id = harrastushaku_location['id']
            # First try an exact match on name and full address...
            strict_filters = {
                'id__startswith': self.tprek_data_source,
                'name': harrastushaku_location['name'],
                'address_locality': harrastushaku_location['city'],
                'postal_code': harrastushaku_location['zip'],
                'street_address': harrastushaku_location['address'],
            }
            # ...then fall back to matching the address only (any name).
            flexible_filters = {
                'id__startswith': self.tprek_data_source,
                'address_locality': harrastushaku_location['city'],
                'postal_code': harrastushaku_location['zip'],
                'street_address': harrastushaku_location['address'],
            }
            tprek_place = (Place.objects.filter(**strict_filters).first() or
                           Place.objects.filter(**flexible_filters).first())
            if tprek_place:
                result[harrastushaku_location_id] = tprek_place.id
            else:
                # No tprek match: the location will live under our own
                # "harrastushaku" data source namespace.
                result[harrastushaku_location_id] = '{}:{}'.format(self.data_source.id, harrastushaku_location_id)
        return result
    def import_courses(self):
        """Import Harrastushaku activities as Courses
        Activities having "active" anything else than "1" or "K" will be
        ignored.
        When importing and an existing course isn't present in imported data:
        - If the course's end time is in the past, the course will be left as
          it is.
        - If the course's end time is not in the past, the course will be soft
          deleted alongside its sub events.
        If an activity has something in field "timetables", it will be imported
        as a recurring event, otherwise as a one-time event.
        A recurring course will have a super event which includes the course's
        whole time period, and sub events which will represent individual course
        occurrences. Other than start and end times, a super event and its sub
        events will all contain the same data.
        A recurring course's sub event start and end datetimes will be built using
        the activity's "timetables". The time tables contain info about weekday,
        times, and repetition which means number of days there is between
        occurrences (basically a multiple of 7).
        A recurring course's sub events will be given an ID that has the
        activity's ID and start and end times of the sub event in a compressed
        form. This also means that between imports only sub events that are
        happening exactly at the same time are considered to be the same instance,
        so if a sub event's begin or end time changes at all, a new sub event will
        be created instead of updating an old one (because there is no unambiguous
        way to determine which old sub event the new one corresponds to).
        A course's keywords will come from both of the following:
        - The activity's main category. There are hardcoded keywords for every
          main category.
        - The activity's sub category's "searchwords". Those are manually
          entered words, which are mapped to keywords using KeywordMatcher
          (from events.keywords).
        A course's audience will come from both of the following:
        - The activity's "audience_max_age" and "audience_min_age" using
          hardcoded keywords for certain age ranges.
        - The course's keywords, adding the ones that are present in
          KEYWORDS_TO_ADD_TO_AUDIENCE (from events.importer.yso).
        """
        logger.info('Importing courses...')
        locations = self.fetch_locations()
        if not locations:
            logger.warning('No location data fetched, aborting course import.')
            return
        self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)
        activities = self.fetch_courses()
        if not activities:
            logger.info('No activity data fetched.')
            return
        def event_delete(event):
            # Syncher delete callback: past events are left untouched,
            # current/future ones are soft deleted with their sub events.
            if event.end_time < now():
                return
            event.soft_delete()
            for sub_event in event.sub_events.all():
                sub_event.soft_delete()
        # Only top-level (super_event=None) events of our data source are
        # synched; sub events are handled through their super event.
        self.event_syncher = ModelSyncher(
            Event.objects.filter(data_source=self.data_source, super_event=None),
            lambda event: event.id,
            event_delete,
        )
        num_of_activities = len(activities)
        logger.debug('Handling {} activities...'.format(num_of_activities))
        for i, activity in enumerate(activities, 1):
            try:
                self.handle_activity(activity)
            except Exception as e:  # noqa
                # Log and continue; a single bad activity must not abort
                # the whole import run.
                message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
                logger.error('Error handling activity {}: {}'.format(activity.get('id'), message))
            if not i % 10:
                logger.debug('{} / {} activities handled.'.format(i, num_of_activities))
        self.event_syncher.finish(force=True)
        logger.info('Course import finished.')
def fetch_locations(self):
logger.debug('Fetching locations...')
try:
url = '{}location/'.format(HARRASTUSHAKU_API_BASE_URL)
response = requests.get(url)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
logger.error('Cannot fetch locations: {}'.format(e))
return []
def fetch_courses(self):
logger.debug('Fetching courses...')
try:
url = '{}activity/'.format(HARRASTUSHAKU_API_BASE_URL)
response = requests.get(url)
response.raise_for_status()
return response.json()['data']
except requests.RequestException as e:
logger.error('Cannot fetch courses: {}'.format(e))
return []
@transaction.atomic
def handle_location(self, location_data):
harrastushaku_location_id = location_data.get('id')
harrastushaku_location_mapped_id = self.location_id_to_place_id.get(harrastushaku_location_id)
if harrastushaku_location_mapped_id.startswith(self.tprek_data_source.id):
return
else:
self.handle_non_tprek_location(location_data)
def handle_non_tprek_location(self, location_data):
get_string = bind_data_getters(location_data)[0]
place_data = {
'name': get_string('name', localized=True),
'info_url': get_string('url', localized=True),
'street_address': get_string('address', localized=True),
'address_locality': get_string('city', localized=True),
'postal_code': get_string('zip'),
'data_source': self.data_source,
'origin_id': location_data['id'],
'publisher': self.organization,
}
self.save_place(place_data)
@transaction.atomic
def handle_activity(self, activity_data):
if activity_data.get('active') not in ('1', 'K'):
logger.debug('Skipping inactive activity {}'.format(activity_data.get('id')))
return
event_data = self.get_event_data(activity_data)
if event_data['start_time'] > event_data['end_time']:
raise HarrastushakuException('Start time after end time')
time_tables = activity_data.get('timetables', [])
if time_tables:
self.handle_recurring_event(event_data, time_tables)
else:
self.handle_one_time_event(event_data)
def create_registration_links(self, activity_data):
# Harrastushaku has own registration links which should be created in the imported events as well
if activity_data.get('regavailable', 0) and '1' in activity_data['regavailable']:
# regstart and regend sometimes take "false" value which seem to mean in the cases regavailable=='1' that
# the registration is going on indefinitely
reg_start = activity_data['regstartdate'] if isinstance(activity_data['regstartdate'], int) else 0
reg_end = activity_data['regenddate'] if isinstance(activity_data['regenddate'], int) else 9999999999
if datetime.utcfromtimestamp(reg_start) <= datetime.utcnow() <= datetime.utcfromtimestamp(reg_end):
return {'fi': {'registration': f"https://harrastushaku.fi/register/{activity_data['id']}"}}
return ''
def get_event_data(self, activity_data):
get_string, get_int, get_datetime = bind_data_getters(activity_data)
keywords = self.get_event_keywords(activity_data)
audience = self.get_event_audiences_from_ages(activity_data) | self.get_event_audiences_from_keywords(keywords)
keywords |= audience
event_data = {
'name': get_string('name', localized=True),
'description': get_string('description', localized=True),
'audience_max_age': get_int('agemax'),
'audience_min_age': get_int('agemin'),
'start_time': get_datetime('startdate'),
'end_time': get_datetime('enddate'),
'date_published': get_datetime('publishdate'),
'external_links': self.create_registration_links(activity_data),
'organizer_info': self.get_organizer_info(activity_data),
'extension_course': {
'enrolment_start_date': get_datetime('regstartdate'),
'enrolment_end_date': get_datetime('regenddate'),
'maximum_attendee_capacity': get_int('maxentries'),
'remaining_attendee_capacity': get_int('regavailable'),
},
'data_source': self.data_source,
'origin_id': activity_data['id'],
'publisher': self.organization,
'location': self.get_event_location(activity_data),
'keywords': keywords,
'in_language': self.get_event_languages(activity_data),
'images': self.get_event_images(activity_data),
'offers': self.get_event_offers(activity_data),
'audience': audience,
}
return event_data
def handle_recurring_event(self, event_data, time_tables):
start_date, end_date = self.get_event_start_and_end_dates(event_data)
if not start_date:
raise HarrastushakuException('No start time')
if not end_date:
raise HarrastushakuException('No end time')
if end_date - start_date > timedelta(days=MAX_RECURRING_EVENT_LENGTH):
raise HarrastushakuException('Too long recurring activity')
sub_event_time_ranges = self.build_sub_event_time_ranges(start_date, end_date, time_tables)
if not sub_event_time_ranges:
raise HarrastushakuException('Erroneous time tables: {}'.format(time_tables))
super_event = self.save_super_event(event_data)
self.save_sub_events(event_data, sub_event_time_ranges, super_event)
def handle_one_time_event(self, event_data):
event_data['has_start_time'] = False
event_data['has_end_time'] = False
event = self.save_event(event_data)
self.event_syncher.mark(event)
def get_event_keywords(self, activity_data):
keywords = (self.get_event_keywords_from_main_categories(activity_data) |
self.get_event_keywords_from_search_words(activity_data))
return keywords
def get_event_keywords_from_main_categories(self, activity_data):
main_category_ids = {c.get('maincategory_id') for c in activity_data.get('categories', [])}
keyword_ids = set()
for main_category_id in main_category_ids:
keyword_ids |= MAIN_CATEGORY_KEYWORDS.get(main_category_id, set())
return {self.keywords.get(kw_id) for kw_id in keyword_ids if kw_id in self.keywords}
def get_event_keywords_from_search_words(self, activity_data):
keywords = set()
search_words = activity_data.get('searchwords', [])
cleaned_search_words = [s.strip().lower() for s in search_words.split(',') if s.strip()]
for kw in cleaned_search_words:
matches = self.match_keyword(kw)
if matches:
keywords |= set(matches)
return keywords
def get_event_languages(self, activity_data):
language_text = activity_data.get('languages', '').lower()
languages = {obj for code, obj in self.languages.items() if obj.name_fi and obj.name_fi in language_text}
return languages
def get_event_start_and_end_dates(self, event_data):
start_datetime = event_data.get('start_time')
start_date = start_datetime.date() if start_datetime else None
end_datetime = event_data.get('end_time')
end_date = end_datetime.date() if end_datetime else None
return start_date, end_date
def get_organizer_info(self, activity_data):
org_details = clean_text(activity_data.get('organiserdetails', ''), strip_newlines=True, parse_html=True)
reg_details = clean_text(activity_data.get('regdetails', ''), strip_newlines=True, parse_html=True)
return {'fi': f'{reg_details} {org_details}'.strip()} if org_details or reg_details else ''
def build_sub_event_time_ranges(self, start_date, end_date, time_tables):
sub_event_time_ranges = []
for time_table in time_tables:
current_date = start_date
weekday = int(time_table.get('weekday'))
start_time = parse_time(time_table.get('starttime'))
end_time = parse_time(time_table.get('endtime'))
repetition = int(time_table.get('repetition'))
if repetition == 0:
repetition = 7 # assume repetition 0 and 7 mean the same thing
if not (weekday and repetition) or start_time >= end_time:
continue
while current_date.isoweekday() != weekday:
current_date += timedelta(days=1)
while current_date <= end_date:
sub_event_time_ranges.append(SubEventTimeRange(
datetime.combine(current_date, start_time).astimezone(TIMEZONE),
datetime.combine(current_date, end_time).astimezone(TIMEZONE),
))
current_date += timedelta(days=repetition)
return sub_event_time_ranges
def save_super_event(self, event_data):
super_event_data = deepcopy(event_data)
super_event_data['super_event_type'] = Event.SuperEventType.RECURRING
event = self.save_event(super_event_data)
self.event_syncher.mark(event)
return event
def save_sub_events(self, event_data, sub_event_time_ranges, super_event):
super_event._changed = False
def delete_sub_event(obj):
logger.debug('{} deleted'.format(obj))
obj.deleted = True
obj.save()
sub_event_syncher = ModelSyncher(
super_event.sub_events.filter(deleted=False), lambda o: o.id, delete_func=delete_sub_event)
sub_event_data = deepcopy(event_data)
sub_event_data['super_event'] = super_event
for sub_event_time_range in sub_event_time_ranges:
sub_event_data['start_time'] = sub_event_time_range.start
sub_event_data['end_time'] = sub_event_time_range.end
sub_event_data['origin_id'] = (
event_data['origin_id'] + self.create_sub_event_origin_id_suffix(sub_event_time_range))
sub_event = self.save_event(sub_event_data)
if sub_event._changed:
super_event._changed = True
sub_event_syncher.mark(sub_event)
old_sub_event_count = super_event.sub_events.count()
sub_event_syncher.finish(force=True)
if super_event.sub_events.count() != old_sub_event_count:
super_event._changed = True
if super_event._changed:
super_event.save()
def create_sub_event_origin_id_suffix(self, sub_event_time_range):
start, end = sub_event_time_range
assert start.date() == end.date()
date = start.date().strftime('%Y%m%d')
times = '{}{}'.format(*(time.time().strftime('%H%M') for time in (start, end)))
return '_{}{}'.format(date, times)
def get_event_images(self, activity_data):
image_data = activity_data.get('images')
if not isinstance(image_data, dict):
return []
event_image_data = [{
'name': image_datum.get('name', ''),
'url': image_datum.get('filename', ''),
} for image_datum in image_data.values()]
return event_image_data
def get_event_location(self, activity_data):
location_id = activity_data.get('location_id')
if not location_id:
return None
return {'id': self.location_id_to_place_id.get(location_id)}
def get_event_offers(self, activity_data):
offers = []
for price_data in activity_data.get('prices', ()):
get_string = bind_data_getters(price_data)[0]
price = get_string('price', localized=False)
description = get_string('description', localized=True)
is_free = price is not None and price == '0'
if not description and len(activity_data['prices']) == 1:
description = get_string('pricedetails', localized=True)
offers.append({
'price': price if not is_free else None,
'is_free': is_free,
'description': description,
})
return offers
def get_event_audiences_from_ages(self, activity_data):
audience_keyword_ids = set()
age_min = get_int_from_data(activity_data, 'agemin') or 0
age_max = get_int_from_data(activity_data, 'agemax') or 200
for age_range, keyword_ids in AUDIENCE_BY_AGE_RANGE:
if ranges_overlap(age_min, age_max, age_range[0], age_range[1]):
audience_keyword_ids |= keyword_ids
return {self.keywords.get(k_id) for k_id in audience_keyword_ids if k_id in self.keywords}
def get_event_audiences_from_keywords(self, keywords):
return {kw for kw in keywords if kw.id in KEYWORDS_TO_ADD_TO_AUDIENCE}
@lru_cache()
def match_keyword(self, text):
return self.keyword_matcher.match(text)
def get_string_from_data(data, field, localized=False):
value = data.get(field)
if not isinstance(value, str):
return None
value = clean_text(value)
if not value:
return None
return {'fi': value} if localized else value
def get_int_from_data(data, field):
value = data.get(field)
if value in (None, False, ''):
return None
return int(value)
def get_datetime_from_data(data, field):
value = data.get(field)
if value in (None, False, ''):
return None
return datetime.utcfromtimestamp(int(value)).replace(tzinfo=pytz.utc).astimezone(TIMEZONE)
def bind_data_getters(data):
get_string = partial(get_string_from_data, data)
get_int = partial(get_int_from_data, data)
get_datetime = partial(get_datetime_from_data, data)
return get_string, get_int, get_datetime
def ranges_overlap(x1, x2, y1, y2):
return x1 <= y2 and y1 <= x2
|
[
"datetime.datetime.utcnow",
"events.models.DataSource.objects.get",
"django.utils.timezone.now",
"events.models.DataSource.objects.get_or_create",
"datetime.datetime.utcfromtimestamp",
"datetime.timedelta",
"traceback.format_exc",
"requests.get",
"events.importer.util.clean_text",
"functools.partial",
"copy.deepcopy",
"events.keywords.KeywordMatcher",
"events.models.Event.objects.filter",
"django_orghierarchy.models.Organization.objects.get_or_create",
"events.models.Place.objects.filter",
"datetime.datetime.combine",
"events.models.Keyword.objects.all",
"pytz.timezone",
"collections.namedtuple",
"functools.lru_cache",
"logging.getLogger"
] |
[((705, 732), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (722, 732), False, 'import logging\n'), ((813, 845), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Helsinki"""'], {}), "('Europe/Helsinki')\n", (826, 845), False, 'import pytz\n'), ((1329, 1378), 'collections.namedtuple', 'namedtuple', (['"""SubEventTimeRange"""', "['start', 'end']"], {}), "('SubEventTimeRange', ['start', 'end'])\n", (1339, 1378), False, 'from collections import namedtuple\n'), ((22334, 22345), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (22343, 22345), False, 'from functools import lru_cache, partial\n'), ((22582, 22599), 'events.importer.util.clean_text', 'clean_text', (['value'], {}), '(value)\n', (22592, 22599), False, 'from events.importer.util import clean_text\n'), ((23099, 23134), 'functools.partial', 'partial', (['get_string_from_data', 'data'], {}), '(get_string_from_data, data)\n', (23106, 23134), False, 'from functools import lru_cache, partial\n'), ((23149, 23181), 'functools.partial', 'partial', (['get_int_from_data', 'data'], {}), '(get_int_from_data, data)\n', (23156, 23181), False, 'from functools import lru_cache, partial\n'), ((23201, 23238), 'functools.partial', 'partial', (['get_datetime_from_data', 'data'], {}), '(get_datetime_from_data, data)\n', (23208, 23238), False, 'from functools import lru_cache, partial\n'), ((1667, 1753), 'events.models.DataSource.objects.get_or_create', 'DataSource.objects.get_or_create', ([], {'id': 'self.name', 'defaults': "{'name': 'Harrastushaku'}"}), "(id=self.name, defaults={'name':\n 'Harrastushaku'})\n", (1699, 1753), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((1783, 1817), 'events.models.DataSource.objects.get', 'DataSource.objects.get', ([], {'id': '"""tprek"""'}), "(id='tprek')\n", (1805, 1817), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((1853, 1923), 'events.models.DataSource.objects.get_or_create', 
'DataSource.objects.get_or_create', ([], {'id': '"""ahjo"""', 'defaults': "{'name': 'Ahjo'}"}), "(id='ahjo', defaults={'name': 'Ahjo'})\n", (1885, 1923), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((1955, 2052), 'django_orghierarchy.models.Organization.objects.get_or_create', 'Organization.objects.get_or_create', ([], {'origin_id': '"""u48040030"""', 'data_source': 'self.ahjo_data_source'}), "(origin_id='u48040030', data_source=self.\n ahjo_data_source)\n", (1989, 2052), False, 'from django_orghierarchy.models import Organization\n'), ((2341, 2357), 'events.keywords.KeywordMatcher', 'KeywordMatcher', ([], {}), '()\n', (2355, 2357), False, 'from events.keywords import KeywordMatcher\n'), ((18488, 18508), 'copy.deepcopy', 'deepcopy', (['event_data'], {}), '(event_data)\n', (18496, 18508), False, 'from copy import deepcopy\n'), ((19128, 19148), 'copy.deepcopy', 'deepcopy', (['event_data'], {}), '(event_data)\n', (19136, 19148), False, 'from copy import deepcopy\n'), ((8280, 8348), 'events.models.Event.objects.filter', 'Event.objects.filter', ([], {'data_source': 'self.data_source', 'super_event': 'None'}), '(data_source=self.data_source, super_event=None)\n', (8300, 8348), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((9292, 9309), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (9304, 9309), False, 'import requests\n'), ((9692, 9709), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (9704, 9709), False, 'import requests\n'), ((14530, 14572), 'datetime.timedelta', 'timedelta', ([], {'days': 'MAX_RECURRING_EVENT_LENGTH'}), '(days=MAX_RECURRING_EVENT_LENGTH)\n', (14539, 14572), False, 'from datetime import datetime, timedelta\n'), ((2169, 2225), 'events.models.Place.objects.filter', 'Place.objects.filter', ([], {'data_source': 'self.tprek_data_source'}), '(data_source=self.tprek_data_source)\n', (2189, 2225), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((2287, 
2308), 'events.models.Keyword.objects.all', 'Keyword.objects.all', ([], {}), '()\n', (2306, 2308), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((8069, 8074), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (8072, 8074), False, 'from django.utils.timezone import now\n'), ((12255, 12291), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['reg_start'], {}), '(reg_start)\n', (12280, 12291), False, 'from datetime import datetime, timedelta\n'), ((12295, 12312), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (12310, 12312), False, 'from datetime import datetime, timedelta\n'), ((12316, 12350), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['reg_end'], {}), '(reg_end)\n', (12341, 12350), False, 'from datetime import datetime, timedelta\n'), ((18005, 18022), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (18014, 18022), False, 'from datetime import datetime, timedelta\n'), ((18351, 18377), 'datetime.timedelta', 'timedelta', ([], {'days': 'repetition'}), '(days=repetition)\n', (18360, 18377), False, 'from datetime import datetime, timedelta\n'), ((3319, 3341), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3339, 3341), False, 'import traceback\n'), ((4709, 4747), 'events.models.Place.objects.filter', 'Place.objects.filter', ([], {}), '(**strict_filters)\n', (4729, 4747), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((4786, 4826), 'events.models.Place.objects.filter', 'Place.objects.filter', ([], {}), '(**flexible_filters)\n', (4806, 4826), False, 'from events.models import DataSource, Event, Keyword, Place\n'), ((8778, 8800), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8798, 8800), False, 'import traceback\n'), ((18152, 18194), 'datetime.datetime.combine', 'datetime.combine', (['current_date', 'start_time'], {}), '(current_date, start_time)\n', (18168, 18194), False, 'from datetime 
import datetime, timedelta\n'), ((18237, 18277), 'datetime.datetime.combine', 'datetime.combine', (['current_date', 'end_time'], {}), '(current_date, end_time)\n', (18253, 18277), False, 'from datetime import datetime, timedelta\n')]
|
import tweepy
from textblob import TextBlob
# Twitter API variables
con_key = ""
con_secret = ""
access_token = ""
access_token_secret = ""
auth = tweepy.OAuthHandler(con_key, con_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
search_term = input("Enter term to analyse:\n")
tweets = api.search(search_term)
polarity = 0.0
subjectivity = 0.0
for tweet in tweets:
analysis = TextBlob(tweet.text)
polarity+= analysis.sentiment.polarity
subjectivity+= analysis.sentiment.subjectivity
subjectivity/= len(tweets)
polarity/= len(tweets)
print("\n"+search_term+"'s public perception is "+str(round(polarity,2))+" on a scale of -1 to 1")
print("Tweets on "+search_term+" are "+str(round(subjectivity*100,2))+" percent subjective.")
|
[
"tweepy.OAuthHandler",
"tweepy.API",
"textblob.TextBlob"
] |
[((149, 189), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['con_key', 'con_secret'], {}), '(con_key, con_secret)\n', (168, 189), False, 'import tweepy\n'), ((254, 270), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (264, 270), False, 'import tweepy\n'), ((425, 445), 'textblob.TextBlob', 'TextBlob', (['tweet.text'], {}), '(tweet.text)\n', (433, 445), False, 'from textblob import TextBlob\n')]
|
from flask import render_template
from urllib import error
# @app.errorhandler(404)
from app.main import main
@main.app_errorhandler(404)
def page_not_found(error):
return render_template('404_page.html'), 404
@main.app_errorhandler(error.HTTPError)
def http_error(error):
return render_template('404_page.html'), 404
|
[
"flask.render_template",
"app.main.main.app_errorhandler"
] |
[((115, 141), 'app.main.main.app_errorhandler', 'main.app_errorhandler', (['(404)'], {}), '(404)\n', (136, 141), False, 'from app.main import main\n'), ((221, 259), 'app.main.main.app_errorhandler', 'main.app_errorhandler', (['error.HTTPError'], {}), '(error.HTTPError)\n', (242, 259), False, 'from app.main import main\n'), ((180, 212), 'flask.render_template', 'render_template', (['"""404_page.html"""'], {}), "('404_page.html')\n", (195, 212), False, 'from flask import render_template\n'), ((294, 326), 'flask.render_template', 'render_template', (['"""404_page.html"""'], {}), "('404_page.html')\n", (309, 326), False, 'from flask import render_template\n')]
|
from UdonPie import GameObject
from UdonPie import Transform
this_trans = Transform()
this_gameObj = GameObject()
Void = None
def instantiate(arg1):
'''
:param arg1: GameObject
:type arg1: GameObject
'''
pass
|
[
"UdonPie.Transform",
"UdonPie.GameObject"
] |
[((75, 86), 'UdonPie.Transform', 'Transform', ([], {}), '()\n', (84, 86), False, 'from UdonPie import Transform\n'), ((102, 114), 'UdonPie.GameObject', 'GameObject', ([], {}), '()\n', (112, 114), False, 'from UdonPie import GameObject\n')]
|
# 数据处理
# pickle是一个将任意复杂的对象转成对象的文本或二进制表示的过程
# 也可以将这些字符串、文件或任何类似于文件的对象 unpickle 成原来的对象
import pickle
import os
import random
import numpy as np
# 标签字典
tag2label = {"O": 0,
"B-PER": 1, "I-PER": 2,
"B-LOC": 3, "I-LOC": 4,
"B-ORG": 5, "I-ORG": 6
}
def read_corpus(corpus_path): # 输入train_data文件的路径,读取训练集的语料,输出train_data
data = []
with open(corpus_path, encoding='utf-8') as fr:
lines = fr.readlines() # 返回的是一个列表,一行数据一个元素
sent_, tag_ = [], []
for line in lines:
if line != '\n':
[char, label] = line.strip().split()
sent_.append(char) # 字放进sent_
tag_.append(label) # tag放进tag_
else:
data.append((sent_, tag_))
sent_, tag_ = [], []
return data
# 由train_data来构造一个(统计非重复字)字典{'第一个字':[对应的id,该字出现的次数],'第二个字':[对应的id,该字出现的次数], , ,}
# 去除低频词,生成一个word_id的字典并保存在输入的vocab_path的路径下,
# 保存的方法是pickle模块自带的dump方法,保存后的文件格式是word2id.pkl文件
def vocab_build(vocab_path, corpus_path, min_count): # min_count设为3
data = read_corpus(corpus_path)
word2id = {}
for sent_, tag_ in data:
for word in sent_:
if word.isdigit(): # 字符是数字
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'): # 字符是字母
word = '<ENG>'
if word not in word2id: # 如果不在字典中,就加入到字典中
word2id[word] = [len(word2id)+1, 1]
else: # 在字典中就次数+1
word2id[word][1] += 1
low_freq_words = [] # 低频词
for word, [word_id, word_freq] in word2id.items():
if word_freq < min_count and word != '<NUM>' and word != '<ENG>': # 统计低频词
low_freq_words.append(word)
for word in low_freq_words:
del word2id[word] # 从字典中删除低频词
new_id = 1 # 重构字典
for word in word2id.keys():
word2id[word] = new_id
new_id += 1
word2id['<UNK>'] = new_id
word2id['<PAD>'] = 0
print(len(word2id))
with open(vocab_path, 'wb') as fw:
pickle.dump(word2id, fw) # 序列化到名字为word2id.pkl文件中
def sentence2id(sent, word2id): # 输入一句话,生成一个 sentence_id
sentence_id = []
for word in sent:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in word2id: # 在字典中找不到就用<UNK>表示
word = '<UNK>'
sentence_id.append(word2id[word])
return sentence_id
def read_dictionary(vocab_path): # 通过pickle模块自带的load方法(反序列化方法)加载输出word2id.pkl文件
vocab_path = os.path.join(vocab_path)
with open(vocab_path, 'rb') as fr:
word2id = pickle.load(fr)
print('vocab_size:', len(word2id))
return word2id
def random_embedding(vocab, embedding_dim): # 输入vocab,vocab就是前面得到的word2id,embedding_dim=300
embedding_mat = np.random.uniform(-0.25, 0.25, (len(vocab), embedding_dim))
embedding_mat = np.float32(embedding_mat)
# 返回一个len(vocab)*embedding_dim=3905*300的矩阵(每个字投射到300维)作为初始值
return embedding_mat
# padding,输入一句话,不够标准的样本用pad_mark来补齐
"""输入:seqs的形状为二维矩阵,形状为[[33,12,17,88,50]-第一句话
[52,19,14,48,66,31,89]-第二句话]
输出:seq_list为seqs经过padding后的序列
seq_len_list保留了padding之前每条样本的真实长度
seq_list和seq_len_list用来喂给feed_dict"""
def pad_sequences(sequences, pad_mark=0):
max_len = max(map(lambda x: len(x), sequences)) # 返回一个序列中长度最长的那条样本的长度
seq_list, seq_len_list = [], []
for seq in sequences:
seq = list(seq)
# 不够最大长度的样本用0补上放到列表seq_list
seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)
seq_list.append(seq_)
seq_len_list.append(min(len(seq), max_len))
return seq_list, seq_len_list
''' seqs的形状为二维矩阵,形状为[[33,12,17,88,50....]...第一句话
[52,19,14,48,66....]...第二句话
]
labels的形状为二维矩阵,形状为[[0, 0, 3, 4]....第一句话
[0, 0, 3, 4]...第二句话
]
'''
def batch_yield(data, batch_size, vocab, tag2label, shuffle=False): # 生成batch
if shuffle: # 乱序数据
random.shuffle(data)
seqs, labels = [], []
for (sent_, tag_) in data:
sent_ = sentence2id(sent_, vocab) # 返回在字典中的编号
label_ = [tag2label[tag] for tag in tag_] # 返回tag的value值
if len(seqs) == batch_size:
yield seqs, labels # yield 是一个类似 return 的关键字,只是这个函数返回的是个生成器
seqs, labels = [], []
seqs.append(sent_)
labels.append(label_)
if len(seqs) != 0:
yield seqs, labels
|
[
"pickle.dump",
"random.shuffle",
"numpy.float32",
"pickle.load",
"os.path.join"
] |
[((2661, 2685), 'os.path.join', 'os.path.join', (['vocab_path'], {}), '(vocab_path)\n', (2673, 2685), False, 'import os\n'), ((3015, 3040), 'numpy.float32', 'np.float32', (['embedding_mat'], {}), '(embedding_mat)\n', (3025, 3040), True, 'import numpy as np\n'), ((2085, 2109), 'pickle.dump', 'pickle.dump', (['word2id', 'fw'], {}), '(word2id, fw)\n', (2096, 2109), False, 'import pickle\n'), ((2743, 2758), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (2754, 2758), False, 'import pickle\n'), ((4251, 4271), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (4265, 4271), False, 'import random\n')]
|
import pytorch_lightning as pl
import torch
from torch.utils.data import random_split
from torch_geometric import datasets
from torch_geometric.data import DataLoader
from src.settings.paths import CLEANED_DATA_PATH, NOT_CLEANED_DATA_PATH
class MUTANGDataModule(pl.LightningDataModule):
def __init__(
self,
batch_size: int = 32,
cleaned: bool = False,
split=None,
num_workers: int = 0,
seed: int = 0,
):
super().__init__()
if split is None:
split = [0.6, 0.2, 0.2]
self.split = split
self.batch_size = batch_size
self.train_set = None
self.test_set = None
self.val_set = None
self.cleaned = cleaned
self.num_workers = num_workers
self.seed = seed
if sum(self.split) != 1:
raise ValueError("Expected split list to sum to 1")
def prepare_data(self):
return datasets.TUDataset(
root=CLEANED_DATA_PATH if self.cleaned else NOT_CLEANED_DATA_PATH,
name="MUTAG",
cleaned=self.cleaned,
pre_transform=None,
)
def setup(self, stage: str = None):
torch.manual_seed(self.seed)
self.full_set = datasets.TUDataset(
root=CLEANED_DATA_PATH if self.cleaned else NOT_CLEANED_DATA_PATH,
name="MUTAG",
cleaned=self.cleaned,
pre_transform=None,
)
self.train_set, self.val_set, self.test_set = random_split(
self.full_set,
[
round(len(self.full_set) * self.split[0]),
round(len(self.full_set) * self.split[1]),
len(self.full_set)
- round(len(self.full_set) * self.split[0])
- round(len(self.full_set) * self.split[1]),
],
)
def train_dataloader(self):
return DataLoader(
self.train_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
def val_dataloader(self):
return DataLoader(
self.val_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
def test_dataloader(self):
return DataLoader(
self.test_set,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
|
[
"torch_geometric.datasets.TUDataset",
"torch.manual_seed",
"torch_geometric.data.DataLoader"
] |
[((940, 1089), 'torch_geometric.datasets.TUDataset', 'datasets.TUDataset', ([], {'root': '(CLEANED_DATA_PATH if self.cleaned else NOT_CLEANED_DATA_PATH)', 'name': '"""MUTAG"""', 'cleaned': 'self.cleaned', 'pre_transform': 'None'}), "(root=CLEANED_DATA_PATH if self.cleaned else\n NOT_CLEANED_DATA_PATH, name='MUTAG', cleaned=self.cleaned,\n pre_transform=None)\n", (958, 1089), False, 'from torch_geometric import datasets\n'), ((1190, 1218), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (1207, 1218), False, 'import torch\n'), ((1243, 1392), 'torch_geometric.datasets.TUDataset', 'datasets.TUDataset', ([], {'root': '(CLEANED_DATA_PATH if self.cleaned else NOT_CLEANED_DATA_PATH)', 'name': '"""MUTAG"""', 'cleaned': 'self.cleaned', 'pre_transform': 'None'}), "(root=CLEANED_DATA_PATH if self.cleaned else\n NOT_CLEANED_DATA_PATH, name='MUTAG', cleaned=self.cleaned,\n pre_transform=None)\n", (1261, 1392), False, 'from torch_geometric import datasets\n'), ((1901, 2003), 'torch_geometric.data.DataLoader', 'DataLoader', (['self.train_set'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.num_workers'}), '(self.train_set, batch_size=self.batch_size, shuffle=True,\n num_workers=self.num_workers)\n', (1911, 2003), False, 'from torch_geometric.data import DataLoader\n'), ((2105, 2205), 'torch_geometric.data.DataLoader', 'DataLoader', (['self.val_set'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.num_workers'}), '(self.val_set, batch_size=self.batch_size, shuffle=True,\n num_workers=self.num_workers)\n', (2115, 2205), False, 'from torch_geometric.data import DataLoader\n'), ((2308, 2410), 'torch_geometric.data.DataLoader', 'DataLoader', (['self.test_set'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'num_workers': 'self.num_workers'}), '(self.test_set, batch_size=self.batch_size, shuffle=False,\n num_workers=self.num_workers)\n', (2318, 2410), False, 'from torch_geometric.data 
import DataLoader\n')]
|
from os import getcwd
from typing import Tuple
from prompt_toolkit import prompt
from figcli.commands.config_context import ConfigContext
from figcli.commands.types.config import ConfigCommand
from figcli.io.input import Input
from figcli.svcs.observability.anonymous_usage_tracker import AnonymousUsageTracker
from figcli.svcs.observability.version_tracker import VersionTracker
from figcli.utils.utils import *
log = logging.getLogger(__name__)
class Generate(ConfigCommand):
def __init__(self, colors_enabled: bool, config_context: ConfigContext):
super().__init__(generate, colors_enabled, config_context)
self._from_path = config_context.ci_config_path if config_context.ci_config_path else Utils.find_figgy_json()
self._utils = Utils(colors_enabled)
self._errors_detected = False
self.example = f"{self.c.fg_bl}{CLI_NAME} config {self.command_printable} " \
f"--env dev --config /path/to/config{self.c.rs}"
@staticmethod
def _get_service_name_and_version(service_name: str) -> Tuple[str, int]:
base_matcher = re.compile(r"^([A-Za-z0-9_-]+)([0-9]+)$")
result = base_matcher.match(service_name)
base_name = result.group(1) if result else service_name
version = int(result.group(2)) if result else 1
return base_name, version
def _generate(self):
from_config = self._utils.get_ci_config(self._from_path)
service_name = self._utils.get_namespace(from_config).split('/')[2]
current_ns = self._utils.get_namespace(from_config)
base_name, version = self._get_service_name_and_version(service_name)
base_name = base_name if not base_name.endswith('-') else base_name[:-1]
new_service_name = f'{base_name}-{version + 1}'
new_name = Input.input(f'Please select a new service name, it CANNOT be: {service_name}: ',
default=new_service_name)
self._utils.validate(new_name != service_name, f"You must select a new service name that differs from the one"
f"designated in your source figgy.json file. "
f"(NOT {service_name})")
new_ns = f'{self.context.defaults.service_ns}/{new_name}/'
# Update all configs destinations to leverage new namespace. Easiest to search/replace across everything.
output_string = json.dumps(from_config)
output_string = output_string.replace(current_ns[:-1], new_ns[:-1])
new_config = json.loads(output_string)
# Remove existing configs that will be replicated
new_config[CONFIG_KEY] = []
# Configure replicate_from block
new_config[REPL_FROM_KEY] = {
SOURCE_NS_KEY: from_config.get(REPL_FROM_KEY, {}).get(SOURCE_NS_KEY, current_ns),
PARAMETERS_KEY: from_config.get(REPL_FROM_KEY, {}).get(PARAMETERS_KEY, [])
}
for param in from_config.get(CONFIG_KEY, []):
new_config[REPL_FROM_KEY][PARAMETERS_KEY].append(self._utils.get_parameter_only(param))
formatted_config = self._utils.format_config(new_config)
current_dir = getcwd()
output_file = prompt(f'Write new config here?: ', default=f'{current_dir}/{new_name}-config.json')
self._utils.is_valid_input(output_file, "output_file", True)
with open(output_file, "w") as file:
file.write(json.dumps(formatted_config, sort_keys=False, indent=4))
print(f'{self.c.fg_gr}New config successfully generated at location: {output_file}{self.c.rs}')
    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        """Command entry point: run the interactive config generation."""
        self._generate()
|
[
"os.getcwd",
"prompt_toolkit.prompt",
"figcli.io.input.Input.input"
] |
[((1804, 1920), 'figcli.io.input.Input.input', 'Input.input', (['f"""Please select a new service name, it CANNOT be: {service_name}: """'], {'default': 'new_service_name'}), "(\n f'Please select a new service name, it CANNOT be: {service_name}: ',\n default=new_service_name)\n", (1815, 1920), False, 'from figcli.io.input import Input\n'), ((3107, 3115), 'os.getcwd', 'getcwd', ([], {}), '()\n', (3113, 3115), False, 'from os import getcwd\n'), ((3138, 3227), 'prompt_toolkit.prompt', 'prompt', (['f"""Write new config here?: """'], {'default': 'f"""{current_dir}/{new_name}-config.json"""'}), "(f'Write new config here?: ', default=\n f'{current_dir}/{new_name}-config.json')\n", (3144, 3227), False, 'from prompt_toolkit import prompt\n')]
|
""" Used for training hyperparameters and running multiple simulations """
import time
from threading import Thread
from ai import simulate, show
# # Tuning parameters and weights
# MAX_DEPTH = 4
# EMPTY_TILE_POINTS = 12
# SMOOTHNESS_WEIGHT = 30
# EDGE_WEIGHT = 30
# LOSS_PENALTY = -200000
# MONOTONICITY_POWER = 3.0
# MONOTONICITY_WEIGHT = 27.0
# SUM_POWER = 3.5
# SUM_WEIGHT = 11.0
# EMPTY_WEIGHT = 270.0
# Number of independent simulations to run.
test_cases = 10
# Best result observed across all worker threads.
best_board = 0
best_score = 0
best_moves = 0
# Running totals used to compute per-simulation averages.
avg_score = 0
avg_moves = 0
# Count of completed simulations, used by the progress indicator.
prog = 0
def progress_bar():
    """Advance the shared progress counter and print percent complete.

    NOTE(review): the increment is unsynchronized; callers invoke this from
    worker threads, so it relies on the GIL for atomicity — confirm that is
    acceptable.
    """
    global prog
    prog = prog + 1
    percent = (prog / test_cases) * 100
    print(f"{percent}%")
def worker():
    """Run one simulation on a separate thread and record statistics.

    NOTE(review): the global updates below are not lock-protected; concurrent
    workers may race on best_* and avg_* — confirm this imprecision is
    acceptable for these statistics.
    """
    global avg_moves, avg_score, best_board, best_score, best_moves
    # simulate() returns the final board, its score, and the move count.
    board, s, m = simulate()
    avg_score += s
    avg_moves += m
    # Track the single best run seen so far.
    if s > best_score:
        best_board = board
        best_score = s
        best_moves = m
    progress_bar()
def multi_simulate():
    """Run multiple simulation worker threads and report results.

    Spawns one thread per test case, waits for all of them, then prints the
    best board plus best/average score and move statistics collected by the
    workers, and the total elapsed time.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for measuring elapsed time.
    start_time = time.perf_counter()
    workers = []
    print("0%")
    for _ in range(0, test_cases):
        t = Thread(target=worker, args=())
        t.start()
        workers.append(t)
    # Block until all threads finished
    for w in workers:
        w.join()
    print("\nBest Score:", best_score, "Best Moves:", best_moves)
    show(best_board, show_best_tile=True)
    print("\nAvg Score:", (avg_score/test_cases), "Avg Moves:", (avg_moves/test_cases))
    print("Time:", (time.perf_counter()-start_time), "seconds")
multi_simulate()
|
[
"threading.Thread",
"ai.simulate",
"ai.show",
"time.clock"
] |
[((823, 833), 'ai.simulate', 'simulate', ([], {}), '()\n', (831, 833), False, 'from ai import simulate, show\n'), ((1100, 1112), 'time.clock', 'time.clock', ([], {}), '()\n', (1110, 1112), False, 'import time\n'), ((1424, 1461), 'ai.show', 'show', (['best_board'], {'show_best_tile': '(True)'}), '(best_board, show_best_tile=True)\n', (1428, 1461), False, 'from ai import simulate, show\n'), ((1193, 1223), 'threading.Thread', 'Thread', ([], {'target': 'worker', 'args': '()'}), '(target=worker, args=())\n', (1199, 1223), False, 'from threading import Thread\n'), ((1570, 1582), 'time.clock', 'time.clock', ([], {}), '()\n', (1580, 1582), False, 'import time\n')]
|
"""
Custom metric for mxnet
"""
__author__ = 'bshang'
from sklearn.metrics import f1_score
from sklearn import preprocessing
def f1(label, pred):
    """Micro-averaged F1 score between true labels and raw predictions.

    Predictions are binarized at a 0.5 threshold before scoring.
    """
    binarized = preprocessing.binarize(pred, threshold=0.5)
    return f1_score(label, binarized, average='micro')
|
[
"sklearn.metrics.f1_score",
"sklearn.preprocessing.binarize"
] |
[((212, 255), 'sklearn.preprocessing.binarize', 'preprocessing.binarize', (['pred'], {'threshold': '(0.5)'}), '(pred, threshold=0.5)\n', (234, 255), False, 'from sklearn import preprocessing\n'), ((268, 310), 'sklearn.metrics.f1_score', 'f1_score', (['label', 'pred_bin'], {'average': '"""micro"""'}), "(label, pred_bin, average='micro')\n", (276, 310), False, 'from sklearn.metrics import f1_score\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mandatory coinbase feature"""
from binascii import b2a_hex
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut
from test_framework.script import CScript, OP_RETURN
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
mandatory_privkey = "<KEY>"
mandatory_address = "XP3bwB9jSxt58frSa3cJismgGL3F57ukUy"
#mandatory_pubkey = "<KEY>"
mandatory_script = "a914804b9fd9d6939c2e960b7aa31124a5d532f4e59c87"
def b2x(b):
    """Return the lowercase ASCII hex encoding of bytes-like *b*.

    Uses the stdlib bytes.hex() method, which is equivalent to the
    older b2a_hex(b).decode('ascii') idiom.
    """
    return bytes(b).hex()
def assert_template(node, block, expect, rehash=True):
    """Submit *block* to the node as a getblocktemplate proposal and assert
    that the node's verdict equals *expect* (None means accepted)."""
    if rehash:
        block.hashMerkleRoot = block.calc_merkle_root()
    proposal = {
        'data': b2x(block.serialize()),
        'mode': 'proposal',
        'rules': 'segwit',
    }
    rsp = node.getblocktemplate(proposal)
    assert_equal(rsp, expect)
class MandatoryCoinbaseTest(BitcoinTestFramework):
    """Functional test for the -con_mandatorycoinbase rule.

    Node 0 requires every non-zero coinbase output to pay to
    mandatory_script; node 1 runs with default (permissive) settings.
    """

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Non-zero coinbase outputs *must* match this. Not setting it means anything is allowed
        self.extra_args = [["-con_mandatorycoinbase="+mandatory_script], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def _make_block(self, tmpl, coinbase_tx):
        """Assemble a CBlock from a getblocktemplate result and a coinbase tx."""
        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.proof = CProof(bytearray.fromhex('51'))
        block.vtx = [coinbase_tx]
        block.block_height = int(tmpl["height"])
        block.hashMerkleRoot = block.calc_merkle_root()
        return block

    def run_test(self):
        node0 = self.nodes[0]
        node1 = self.nodes[1]
        node0.importprivkey(mandatory_privkey)
        self.log.info("generatetoaddress: Making blocks of various kinds, checking for rejection")
        # Create valid blocks to get out of IBD and get some funds (subsidy goes to permitted addr)
        node0.generatetoaddress(101, mandatory_address)
        # Generating for another address will not work
        assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed: bad-coinbase-txos", node0.generatetoaddress, 1, node0.getnewaddress())
        # Have non-mandatory node make a template
        self.sync_all()
        tmpl = node1.getblocktemplate({'rules': ['segwit']})
        # We make a block with OP_TRUE coinbase output that will fail on node0
        coinbase_tx = create_coinbase(height=int(tmpl["height"]))
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()
        block = self._make_block(tmpl, coinbase_tx)
        self.log.info("getblocktemplate: Test block on both nodes")
        assert_template(node1, block, None)
        assert_template(node0, block, 'bad-coinbase-txos')
        self.log.info("getblocktemplate: Test non-subsidy block on both nodes")
        # Without block reward anything goes, this allows commitment outputs like segwit
        coinbase_tx.vout[0].nValue = CTxOutValue(0)
        coinbase_tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
        coinbase_tx.rehash()
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        assert_template(node0, block, None)
        assert_template(node1, block, None)
        #
        # Also test that coinbases can't have fees.
        self.sync_all()
        tmpl = node1.getblocktemplate({'rules': ['segwit']})
        coinbase_tx = create_coinbase(height=int(tmpl["height"]))
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        # Add fee output.
        coinbase_tx.vout[0].nValue.setToAmount(coinbase_tx.vout[0].nValue.getAmount() - 1)
        coinbase_tx.vout.append(CTxOut(1))
        coinbase_tx.rehash()
        block = self._make_block(tmpl, coinbase_tx)
        # should not be accepted
        assert_template(node0, block, "bad-cb-fee")
        assert_template(node1, block, "bad-cb-fee")
if __name__ == '__main__':
MandatoryCoinbaseTest().main()
|
[
"test_framework.messages.CBlock",
"binascii.b2a_hex",
"test_framework.script.CScript",
"test_framework.messages.CTxOut",
"test_framework.messages.CTxOutValue",
"test_framework.util.assert_equal"
] |
[((1064, 1089), 'test_framework.util.assert_equal', 'assert_equal', (['rsp', 'expect'], {}), '(rsp, expect)\n', (1076, 1089), False, 'from test_framework.util import assert_equal, assert_raises_rpc_error\n'), ((2534, 2542), 'test_framework.messages.CBlock', 'CBlock', ([], {}), '()\n', (2540, 2542), False, 'from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut\n'), ((3328, 3342), 'test_framework.messages.CTxOutValue', 'CTxOutValue', (['(0)'], {}), '(0)\n', (3339, 3342), False, 'from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut\n'), ((3386, 3415), 'test_framework.script.CScript', 'CScript', (["[OP_RETURN, b'\\xff']"], {}), "([OP_RETURN, b'\\xff'])\n", (3393, 3415), False, 'from test_framework.script import CScript, OP_RETURN\n'), ((4109, 4117), 'test_framework.messages.CBlock', 'CBlock', ([], {}), '()\n', (4115, 4117), False, 'from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut\n'), ((801, 811), 'binascii.b2a_hex', 'b2a_hex', (['b'], {}), '(b)\n', (808, 811), False, 'from binascii import b2a_hex\n'), ((4053, 4062), 'test_framework.messages.CTxOut', 'CTxOut', (['(1)'], {}), '(1)\n', (4059, 4062), False, 'from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut\n')]
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sanity checking for grd_helper.py. Run manually before uploading a CL."""
import io
import os
import subprocess
import sys
# Add the parent dir so that we can import from "helper".
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from helper import grd_helper
from helper import translation_helper
# Pick the git executable appropriate for the host platform.
if sys.platform.startswith('win'):
  # Use the |git.bat| in the depot_tools/ on Windows.
  GIT = 'git.bat'
else:
  GIT = 'git'

# Directory containing this script, and the repository root three
# directory levels above it.
here = os.path.dirname(os.path.realpath(__file__))
repo_root = os.path.normpath(os.path.join(here, '..', '..', '..'))
def list_files_in_repository(repo_path, pattern):
  """Lists all files matching given pattern in the given git repository"""
  # git performs its own glob expansion, so the pattern is passed through
  # verbatim without a shell.
  cmd = [GIT, 'ls-files', '--', pattern]
  raw = subprocess.check_output(cmd, cwd=repo_path)
  return raw.decode('utf-8').strip().splitlines()
def read_file_as_text(path):
  """Returns the entire contents of the file at |path| decoded as UTF-8."""
  with io.open(path, 'r', encoding='utf-8') as handle:
    return handle.read()
# Sanity checks to ensure that we can parse all grd and grdp files in the repo.
# Must not fail.
def Run():
  """Parses every .grd/.grdp file in the repo; raises if any fails to parse."""
  grds = list_files_in_repository(repo_root, '*.grd')
  grdps = list_files_in_repository(repo_root, '*.grdp')
  print('Found %d grds, %d grdps in the repo.' % (len(grds), len(grdps)))
  # Make sure we can parse all .grd files in the source tree. Grd files are
  # parsed via the file path.
  for grd in grds:
    # This file is intentionally missing an include, skip it.
    if grd == os.path.join('tools', 'translation', 'testdata', 'internal.grd'):
      continue
    path = os.path.join(repo_root, grd)
    grd_helper.GetGrdMessages(path, os.path.dirname(path))
  # Make sure we can parse all .grdp files in the source tree.
  # Grdp files are parsed using file contents instead of path.
  for grdp in grdps:
    path = os.path.join(repo_root, grdp)
    # Parse grdp files using file contents.
    contents = read_file_as_text(path)
    grd_helper.GetGrdpMessagesFromString(contents)
  print('Successfully parsed all .grd and .grdp files in the repo.')
  # Additional check for translateable grds. Translateable grds are a subset
  # of all grds so this checks some files twice, but it exercises the
  # get_translatable_grds() path and also doesn't need to skip internal.grd.
  TRANSLATION_EXPECTATIONS_PATH = os.path.join(repo_root, 'tools',
                                             'gritsettings',
                                             'translation_expectations.pyl')
  translateable_grds = translation_helper.get_translatable_grds(
      repo_root, grds, TRANSLATION_EXPECTATIONS_PATH)
  print('Found %d translateable .grd files in translation expectations.' %
        len(translateable_grds))
  # Translateable entries carry a .path attribute rather than being raw paths.
  for grd in translateable_grds:
    path = os.path.join(repo_root, grd.path)
    grd_helper.GetGrdMessages(path, os.path.dirname(path))
  print('Successfully parsed all translateable_grds .grd files in translation '
        'expectations.')
  print('DONE')
if __name__ == '__main__':
Run()
|
[
"sys.platform.startswith",
"os.path.abspath",
"os.path.join",
"os.path.realpath",
"helper.grd_helper.GetGrdpMessagesFromString",
"subprocess.check_output",
"os.path.dirname",
"io.open",
"helper.translation_helper.get_translatable_grds"
] |
[((500, 530), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (523, 530), False, 'import sys\n'), ((648, 674), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (664, 674), False, 'import os\n'), ((705, 741), 'os.path.join', 'os.path.join', (['here', '""".."""', '""".."""', '""".."""'], {}), "(here, '..', '..', '..')\n", (717, 741), False, 'import os\n'), ((2575, 2660), 'os.path.join', 'os.path.join', (['repo_root', '"""tools"""', '"""gritsettings"""', '"""translation_expectations.pyl"""'], {}), "(repo_root, 'tools', 'gritsettings', 'translation_expectations.pyl'\n )\n", (2587, 2660), False, 'import os\n'), ((2773, 2865), 'helper.translation_helper.get_translatable_grds', 'translation_helper.get_translatable_grds', (['repo_root', 'grds', 'TRANSLATION_EXPECTATIONS_PATH'], {}), '(repo_root, grds,\n TRANSLATION_EXPECTATIONS_PATH)\n', (2813, 2865), False, 'from helper import translation_helper\n'), ((1179, 1220), 'io.open', 'io.open', (['path'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(path, mode='r', encoding='utf-8')\n", (1186, 1220), False, 'import io\n'), ((1835, 1863), 'os.path.join', 'os.path.join', (['repo_root', 'grd'], {}), '(repo_root, grd)\n', (1847, 1863), False, 'import os\n'), ((2082, 2111), 'os.path.join', 'os.path.join', (['repo_root', 'grdp'], {}), '(repo_root, grdp)\n', (2094, 2111), False, 'import os\n'), ((2199, 2245), 'helper.grd_helper.GetGrdpMessagesFromString', 'grd_helper.GetGrdpMessagesFromString', (['contents'], {}), '(contents)\n', (2235, 2245), False, 'from helper import grd_helper\n'), ((3021, 3054), 'os.path.join', 'os.path.join', (['repo_root', 'grd.path'], {}), '(repo_root, grd.path)\n', (3033, 3054), False, 'import os\n'), ((399, 424), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (414, 424), False, 'import os\n'), ((980, 1052), 'subprocess.check_output', 'subprocess.check_output', (["[GIT, 'ls-files', '--', pattern]"], {'cwd': 
'repo_path'}), "([GIT, 'ls-files', '--', pattern], cwd=repo_path)\n", (1003, 1052), False, 'import subprocess\n'), ((1743, 1807), 'os.path.join', 'os.path.join', (['"""tools"""', '"""translation"""', '"""testdata"""', '"""internal.grd"""'], {}), "('tools', 'translation', 'testdata', 'internal.grd')\n", (1755, 1807), False, 'import os\n'), ((1900, 1921), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1915, 1921), False, 'import os\n'), ((3091, 3112), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (3106, 3112), False, 'import os\n')]
|
# Author: <NAME> <<EMAIL>>
# License: MIT
from copy import deepcopy
from collections import defaultdict
import numpy as np
import pandas as pd
from wittgenstein.base_functions import truncstr
from wittgenstein.utils import rnd
class BinTransformer:
    """Discretize continuous DataFrame features into labeled interval bins.

    fit() learns up to ``n_discretize_bins`` quantile-based bins per
    continuous feature; transform() replaces numeric values with string bin
    labels such as ``"<2.5"``, ``"2.5-7.1"``, ``">7.1"``. Fitting on training
    data and reusing the same bins on test data avoids information leakage.
    """

    def __init__(self, n_discretize_bins=10, names_precision=2, verbosity=0):
        # Maximum number of bins per feature; a falsy value disables binning.
        self.n_discretize_bins = n_discretize_bins
        # Decimal precision used when naming bin boundaries.
        self.names_precision = names_precision
        self.verbosity = verbosity
        # Mapping feature -> ordered list of bin-name strings; None until fit.
        self.bins_ = None

    def __str__(self):
        return str(self.bins_)

    __repr__ = __str__

    def __bool__(self):
        return not not self.bins_

    def isempty(self):
        """True only when the transformer has been fit but produced no bins."""
        return self.bins_ is not None and not self.bins_

    def fit_or_fittransform_(self, df, ignore_feats=None):
        """Transform df using pre-fit bins, or, if unfit, fit self and transform df."""
        ignore_feats = ignore_feats if ignore_feats is not None else []
        # Binning has already been fit
        if self.bins_:
            return self.transform(df)
        # Binning disabled
        elif not self.n_discretize_bins:
            return df
        # Binning enabled, and binner needs to be fit.
        # BUG FIX: the original called self.transform(df, ignore_feats=...)
        # (transform takes no such argument) and unpacked its single return
        # value into two names; fit first, then transform.
        else:
            self.fit(df, ignore_feats=ignore_feats)
            return self.transform(df)

    def fit_transform(self, df, ignore_feats=None):
        """Fit bins on df, then return the transformed df."""
        self.fit(df, ignore_feats=ignore_feats)
        return self.transform(df)

    def fit(self, df, output=False, ignore_feats=None):
        """
        Returns a dict defining fits for numerical features.

        A fit is an ordered list of strings defining each bin's range (min is
        exclusive; max is inclusive). The returned dict allows fitting to
        training data and applying the same fit to test data to avoid
        information leak.
        """
        ignore_feats = ignore_feats if ignore_feats is not None else []

        def _fit_feat(df, feat):
            """Return the list of bin-name strings for one numerical feature."""
            if len(df) == 0:
                return []
            # Collect equal-frequency intervals.
            bins = pd.qcut(
                df[feat],
                q=self.n_discretize_bins,
                precision=self.names_precision,
                duplicates="drop",
            )
            if len(bins.unique()) < 2:
                # qcut can behave weirdly in heavily-skewed distributions;
                # fall back to equal-width bins.
                bins = pd.cut(
                    df[feat],
                    bins=self.n_discretize_bins,
                    precision=self.names_precision,
                    duplicates="drop",
                )
            # Drop empty bins and duplicate intervals to create bins
            bin_counts = bins.value_counts()
            bins = bin_counts[bin_counts > 0].index
            bins = sorted(bins.unique())
            # Extend min/max to -inf, +inf to capture any ranges not present
            # in the training set.
            bins[0] = pd.Interval(float("-inf"), bins[0].right)
            bins[-1] = pd.Interval(bins[-1].left, float("inf"))
            bins = self._intervals_to_strs(bins)
            if self.verbosity >= 3:
                print(
                    f"{feat}: fit {len(df[feat].unique())} unique vals into {len(bins)} bins"
                )
            return bins

        # Begin fitting
        feats_to_fit = self.find_continuous_feats(df, ignore_feats=ignore_feats)
        if feats_to_fit:
            if self.verbosity == 1:
                print(f"discretizing {len(feats_to_fit)} features")
            elif self.verbosity == 2:
                print(f"discretizing {len(feats_to_fit)} features: {feats_to_fit}\n")
        self.bins_ = {}
        for feat in feats_to_fit:
            self.bins_[feat] = _fit_feat(df, feat)
        return self.bins_

    def transform(self, df):
        """Transform DataFrame using fit bins."""

        def _transform_feat(df, feat):
            if self.bins_ is None:
                return df
            bins = self._strs_to_intervals(self.bins_[feat])
            res = pd.cut(df[feat], bins=pd.IntervalIndex(bins))
            # Replace each Interval with its fitted string name.
            res = res.map(
                lambda x: {i: s for i, s in zip(bins, self.bins_[feat])}.get(x)
            )
            return res

        # Exclude any feats already transformed into valid intervals
        already_transformed_feats = self._find_transformed(df, raise_invalid=True)
        res = df.copy()
        for feat in self.bins_.keys():
            if feat in res.columns and feat not in already_transformed_feats:
                res[feat] = _transform_feat(res, feat)
        return res

    def find_continuous_feats(self, df, ignore_feats=None):
        """Return names of df features that seem to be continuous."""
        if not self.n_discretize_bins:
            return []
        ignore_feats = ignore_feats if ignore_feats is not None else []
        # Numeric features with more unique values than bins are treated as
        # continuous; everything else is assumed to already be discrete.
        cont_feats = df.select_dtypes(np.number).columns
        cont_feats = [
            f
            for f in cont_feats
            if len(df[f].unique()) > self.n_discretize_bins and f not in ignore_feats
        ]
        return cont_feats

    def _strs_to_intervals(self, strs):
        """Parse a list of bin-name strings back into pd.Interval objects."""
        return [self._str_to_interval(s) for s in strs]

    def _str_to_interval(self, s):
        floor, ceil = self._str_to_floor_ceil(s)
        return pd.Interval(floor, ceil)

    def _intervals_to_strs(self, intervals):
        """Replace a list of intervals with their string representation."""
        return [self._interval_to_str(interval) for interval in intervals]

    def _interval_to_str(self, interval):
        # Open-ended bins render as "<max" / ">min"; bounded bins as "min-max".
        if interval.left == float("-inf"):
            return f"<{interval.right}"
        elif interval.right == float("inf"):
            return f">{interval.left}"
        else:
            return f"{interval.left}-{interval.right}"

    def _str_to_floor_ceil(self, value):
        """Parse a bin-name string into a (floor, ceil) float pair."""
        if "<" in value:
            floor, ceil = "-inf", value.replace("<", "")
        elif ">" in value:
            floor, ceil = value.replace(">", ""), "inf"
        else:
            split_idx = 0
            for i, char in enumerate(value):
                # Found a possible split and it's not the first number's minus sign
                if char == "-" and i != 0:
                    if split_idx is not None and not split_idx:
                        split_idx = i
                    # Found a - after the split, and it's not the minus of a negative number
                    elif i > split_idx + 1:
                        return None
            floor = value[:split_idx]
            ceil = value[split_idx + 1 :]
        return float(floor), float(ceil)

    def construct_from_ruleset(self, ruleset):
        """Build a BinTransformer whose bins mirror ranges already present in a ruleset."""
        MIN_N_DISCRETIZED_BINS = 10
        bt = BinTransformer()
        bt.bins_ = self._bin_prediscretized_features(ruleset)
        bt.n_discretize_bins = (
            max(
                (MIN_N_DISCRETIZED_BINS, max(len(bins) for bins in bt.bins_.values()))
            )
            if bt.bins_
            else MIN_N_DISCRETIZED_BINS
        )
        bt.names_precision = self._max_dec_precision(bt.bins_)
        return bt

    def _bin_prediscretized_features(self, ruleset):
        """Collect {feature: sorted (floor, ceil) ranges} from conds whose
        values look like pre-discretized "min-max" strings."""

        def is_valid_decimal(s):
            try:
                float(s)
            except (TypeError, ValueError):
                return False
            return True

        def find_floor_ceil(value):
            """Split a "min-max" string; return (floor, ceil) or None if invalid."""
            split_idx = 0
            for i, char in enumerate(value):
                # Found a possible split and it's not the first number's minus sign
                if char == "-" and i != 0:
                    if split_idx is not None and not split_idx:
                        split_idx = i
                    # Found a - after the split, and it's not the minus of a negative number
                    elif i > split_idx + 1:
                        return None
            floor = value[:split_idx]
            ceil = value[split_idx + 1 :]
            if is_valid_decimal(floor) and is_valid_decimal(ceil):
                return (floor, ceil)
            return None

        discrete = defaultdict(list)
        for cond in ruleset.get_conds():
            # BUG FIX: the original called self.find_floor_ceil, which does
            # not exist on the class; the local helper above was intended.
            floor_ceil = find_floor_ceil(cond.val)
            if floor_ceil:
                discrete[cond.feature].append(floor_ceil)
        for feat, ranges in discrete.items():
            ranges.sort(key=lambda x: float(x[0]))
        return dict(discrete)

    def _max_dec_precision(self, bins_dict):
        """Max number of decimal places appearing in any bin-boundary string."""

        def dec_precision(value):
            try:
                return len(value) - value.index(".") - 1
            except ValueError:
                # No "." present: integer-valued boundary.
                return 0

        max_prec = 0
        for bins in bins_dict.values():
            for bin_ in bins:
                for value in bin_:
                    max_prec = max(max_prec, dec_precision(value))
        return max_prec

    def _find_transformed(self, df, raise_invalid=True):
        """Find columns that appear to have already been transformed. Raise
        if a column holds a range string that doesn't match a fit bin."""
        check_feats = df.select_dtypes(include=["category", "object"]).columns.tolist()
        invalid_feats = {}
        transformed_feats = []
        for feat, bins in self.bins_.items():
            if feat in check_feats:
                transformed_feats.append(feat)
                invalid_values = set(df[feat].tolist()) - set(bins)
                if invalid_values:
                    invalid_feats[feat] = invalid_values
        if invalid_feats and raise_invalid:
            raise ValueError(
                f"The following input values seem to be transformed but ranges don't match fit bins: {invalid_feats}"
            )
        return transformed_feats
|
[
"copy.deepcopy",
"pandas.Interval",
"collections.defaultdict",
"pandas.cut",
"pandas.IntervalIndex",
"pandas.qcut"
] |
[((5448, 5472), 'pandas.Interval', 'pd.Interval', (['floor', 'ceil'], {}), '(floor, ceil)\n', (5459, 5472), True, 'import pandas as pd\n'), ((8422, 8439), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8433, 8439), False, 'from collections import defaultdict\n'), ((2136, 2234), 'pandas.qcut', 'pd.qcut', (['df[feat]'], {'q': 'self.n_discretize_bins', 'precision': 'self.names_precision', 'duplicates': '"""drop"""'}), "(df[feat], q=self.n_discretize_bins, precision=self.names_precision,\n duplicates='drop')\n", (2143, 2234), True, 'import pandas as pd\n'), ((4031, 4049), 'copy.deepcopy', 'deepcopy', (['df[feat]'], {}), '(df[feat])\n', (4039, 4049), False, 'from copy import deepcopy\n'), ((2463, 2564), 'pandas.cut', 'pd.cut', (['df[feat]'], {'bins': 'self.n_discretize_bins', 'precision': 'self.names_precision', 'duplicates': '"""drop"""'}), "(df[feat], bins=self.n_discretize_bins, precision=self.\n names_precision, duplicates='drop')\n", (2469, 2564), True, 'import pandas as pd\n'), ((4151, 4173), 'pandas.IntervalIndex', 'pd.IntervalIndex', (['bins'], {}), '(bins)\n', (4167, 4173), True, 'import pandas as pd\n')]
|
from collections import defaultdict
from aoc.util import load_example, load_input
def prepare_map(lines):
    """Build a sparse grid from the input rows: a defaultdict mapping
    (x, y) -> "#" for infected cells (everything else defaults to ".").
    Also returns the coordinate of the grid's center."""
    grid = defaultdict(lambda: ".")
    grid.update(
        {(x, y): "#"
         for y, row in enumerate(lines)
         for x, ch in enumerate(row)
         if ch == "#"}
    )
    return grid, (len(lines) - 1) // 2
MOVEMENTS = {
0: (0, -1),
1: (1, 0),
2: (0, 1),
3: (-1, 0),
}
CLEAN = "."
WEAKENED = "W"
INFECTED = "#"
FLAGGED = "F"
def part1(lines, n=10000):
    """
    >>> part1(load_example(__file__, "22"), 7)
    5
    >>> part1(load_example(__file__, "22"), 70)
    41
    >>> part1(load_example(__file__, "22"))
    5587
    """
    grid, start = prepare_map(lines)
    x = y = start
    facing = 0
    infections = 0
    for _ in range(n):
        if grid[x, y] == "#":
            # Infected node: turn right, then clean it.
            facing = (facing + 1) % 4
            grid[x, y] = "."
        else:
            # Clean node: turn left, then infect it.
            facing = (facing - 1) % 4
            grid[x, y] = "#"
            infections += 1
        dx, dy = MOVEMENTS[facing]
        x, y = x + dx, y + dy
    return infections
def part2(lines, n=10000000):
    """
    >>> part2(load_example(__file__, "22"), 100)
    26
    >>> part2(load_example(__file__, "22"))
    2511944
    """
    # state -> (next state, clockwise quarter-turns to apply)
    transitions = {
        CLEAN: (WEAKENED, 3),     # turn left
        WEAKENED: (INFECTED, 0),  # keep going straight
        INFECTED: (FLAGGED, 1),   # turn right
        FLAGGED: (CLEAN, 2),      # reverse direction
    }
    grid, start = prepare_map(lines)
    x = y = start
    facing = 0
    infections = 0
    for _ in range(n):
        new_state, turn = transitions[grid[x, y]]
        grid[x, y] = new_state
        if new_state == INFECTED:
            infections += 1
        facing = (facing + turn) % 4
        dx, dy = MOVEMENTS[facing]
        x, y = x + dx, y + dy
    return infections
if __name__ == "__main__":
data = load_input(__file__, 2017, "22")
print(part1(data))
print(part2(data))
|
[
"collections.defaultdict",
"aoc.util.load_input"
] |
[((122, 147), 'collections.defaultdict', 'defaultdict', (["(lambda : '.')"], {}), "(lambda : '.')\n", (133, 147), False, 'from collections import defaultdict\n'), ((2038, 2070), 'aoc.util.load_input', 'load_input', (['__file__', '(2017)', '"""22"""'], {}), "(__file__, 2017, '22')\n", (2048, 2070), False, 'from aoc.util import load_example, load_input\n')]
|
#!/usr/bin/env python3
from aws_cdk import App
from lambda_sqs_cdk.lambda_sqs_cdk_stack import LambdaSqsCdkStack
app = App()
LambdaSqsCdkStack(app, "LambdaSqsCdkStack")
app.synth()
|
[
"aws_cdk.App",
"lambda_sqs_cdk.lambda_sqs_cdk_stack.LambdaSqsCdkStack"
] |
[((122, 127), 'aws_cdk.App', 'App', ([], {}), '()\n', (125, 127), False, 'from aws_cdk import App\n'), ((128, 171), 'lambda_sqs_cdk.lambda_sqs_cdk_stack.LambdaSqsCdkStack', 'LambdaSqsCdkStack', (['app', '"""LambdaSqsCdkStack"""'], {}), "(app, 'LambdaSqsCdkStack')\n", (145, 171), False, 'from lambda_sqs_cdk.lambda_sqs_cdk_stack import LambdaSqsCdkStack\n')]
|
# Generated by Django 3.2 on 2021-04-22 17:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('info', '0015_attendancerange'),
]
operations = [
migrations.AlterModelOptions(
name='attendanceclass',
options={'verbose_name': 'Attendance', 'verbose_name_plural': 'Attendance'},
),
]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((219, 353), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""attendanceclass"""', 'options': "{'verbose_name': 'Attendance', 'verbose_name_plural': 'Attendance'}"}), "(name='attendanceclass', options={\n 'verbose_name': 'Attendance', 'verbose_name_plural': 'Attendance'})\n", (247, 353), False, 'from django.db import migrations\n')]
|
import cv2
import numpy as np
import argparse
# we are not going to bother with objects less than 30% probability
THRESHOLD = 0.3
# the lower the value: the fewer bounding boxes will remain
SUPPRESSION_THRESHOLD = 0.3
YOLO_IMAGE_SIZE = 320
DATA_FOLDER = './data/'
CFG_FOLDER = './cfg/'
MODEL_FOLDER = './models/'
def find_objects(model_outputs):
    """
    Extract the values from the prediction vectors produced by YOLOv3.

    Returns:
        keep: indexes of bounding boxes surviving non-max suppression
        boxes: (x, y, w, h) for every candidate bounding box
        ids: predicted COCO class index for each bounding box
        confidences: probability that each predicted class is correct
    """
    boxes = []
    ids = []
    confidences = []
    # YOLOv3 produces three output layers, each a list of detections.
    for layer_output in model_outputs:
        for detection in layer_output:
            scores = detection[5:]
            best_class = np.argmax(scores)
            best_score = scores[best_class]
            # Discard weak detections.
            if best_score <= THRESHOLD:
                continue
            # Coordinates come out normalized to [0, 1]; rescale to the
            # 320x320 network input and shift from center to top-left.
            w = int(detection[2] * YOLO_IMAGE_SIZE)
            h = int(detection[3] * YOLO_IMAGE_SIZE)
            x = int(detection[0] * YOLO_IMAGE_SIZE - w / 2)
            y = int(detection[1] * YOLO_IMAGE_SIZE - h / 2)
            boxes.append([x, y, w, h])
            ids.append(best_class)
            confidences.append(float(best_score))
    # Non-max suppression prunes overlapping candidates.
    keep = cv2.dnn.NMSBoxes(boxes, confidences, THRESHOLD, SUPPRESSION_THRESHOLD)
    return keep, boxes, ids, confidences
def show_detected_images(img, bounding_box_ids, all_bounding_boxes, classes, class_ids,
                         confidence_values, width_ratio, height_ratio, colors):
    """
    Drawing the bounding boxes on the original images
    Args:
        img: Original image
        bounding_box_ids: Idx of predicted bounding boxes after applying "Non-max suppression"
        all_bounding_boxes: all vec (x, y, w, h) of each chosen bounding box
        classes: list of all classes in COCO dataset
        class_ids: idx for each predicted class of each bounding box based on COCO dataset's classes
        confidence_values: Probability that the predicted class is correct
        width_ratio: = original_width / YOLO_IMAGE_SIZE
        height_ratio: = original_height / YOLO_IMAGE_SIZE
        colors: per-class color table, indexed by class id
    """
    # Iterate each bounding box's idx which is kept after 'non-max suppression'
    for idx in bounding_box_ids.flatten():
        bounding_box = all_bounding_boxes[idx]
        x, y, w, h = int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]), int(bounding_box[3])
        # Transform (x,y,w,h) from resized image (320*320) to original image size
        x = int(x * width_ratio)
        y = int(y * height_ratio)
        w = int(w * width_ratio)
        h = int(h * height_ratio)
        # Color for each detected box
        color_box_current = colors[class_ids[idx]].tolist()
        # Draw bounding box for each detected object
        cv2.rectangle(img, (x, y), (x + w, y + h), color_box_current, 2)
        # Title for each box: "<class name> <confidence>%"
        text_box = classes[int(class_ids[idx])] + ' ' + str(int(confidence_values[idx] * 100)) + '%'
        cv2.putText(img, text_box, (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, color_box_current, 1)
def parse_opt(known=False):
    """Define and parse the command-line options for this script.

    Args:
        known: when True, ignore unrecognized arguments instead of erroring.
    Returns:
        argparse.Namespace holding the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_path', type=str, default='',
                        help='initial image path')
    parser.add_argument('--class_path', type=str, default=DATA_FOLDER + 'coco.names',
                        help='initial class file path')
    parser.add_argument('--cfg_path', type=str, default=CFG_FOLDER + 'yolov3.cfg',
                        help='initial cfg file path')
    parser.add_argument('--weights_path', type=str, default=MODEL_FOLDER + 'yolov3.weights',
                        help='initial pre-trained weights file path')
    if known:
        return parser.parse_known_args()[0]
    return parser.parse_args()
def main(opt):
# Label objects for prediction (totally 80)
with open(opt.class_path) as f:
labels = list(line.strip() for line in f)
# Setting colors for each label
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
# Read the configuration file & initialize the weight of yolov3 model
neural_network = cv2.dnn.readNetFromDarknet(opt.cfg_path, opt.weights_path)
# define whether we run the algorithm with CPU or with GPU
# WE ARE GOING TO USE CPU !!!
neural_network.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
neural_network.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
# VIDEO PROCESSING
video_capture = cv2.VideoCapture(opt.video_path)
while video_capture.isOpened():
# Read each frame of video
is_grab, frame = video_capture.read()
original_width, original_height = frame.shape[1], frame.shape[0]
# Preprocess frame before inputting into model
blob = cv2.dnn.blobFromImage(frame, 1 / 255, (YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE), True, crop=False)
neural_network.setInput(blob)
# Taking the last 3 layers from pretrained models for processing the image
layer_names = neural_network.getLayerNames()
output_names = [layer_names[idx[0] - 1] for idx in neural_network.getUnconnectedOutLayers()]
# Apply "Forward propagation" with input for last 3 layers
outputs = neural_network.forward(output_names)
# Extract values from prediction vector
predicted_objects_idx, bbox_locations, class_label_ids, conf_values = find_objects(outputs)
# Show bounding boxes on the original image
show_detected_images(frame, predicted_objects_idx, bbox_locations, labels, class_label_ids, conf_values,
original_width / YOLO_IMAGE_SIZE, original_height / YOLO_IMAGE_SIZE, colors)
cv2.imshow('YOLO Algorithm', frame)
# Press "ESC" to quit the video
key = cv2.waitKey(1) & 0xff
if (key == 27) | (not is_grab): # 27 represents key "ESC"
break
# Destroy & Release the camera
video_capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
[
"cv2.putText",
"cv2.dnn.NMSBoxes",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.imshow",
"cv2.dnn.readNetFromDarknet",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.destroyAllWindows"
] |
[((2191, 2288), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bounding_box_locations', 'confidence_values', 'THRESHOLD', 'SUPPRESSION_THRESHOLD'], {}), '(bounding_box_locations, confidence_values, THRESHOLD,\n SUPPRESSION_THRESHOLD)\n', (2207, 2288), False, 'import cv2\n'), ((4244, 4269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4267, 4269), False, 'import argparse\n'), ((5392, 5450), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['opt.cfg_path', 'opt.weights_path'], {}), '(opt.cfg_path, opt.weights_path)\n', (5418, 5450), False, 'import cv2\n'), ((5732, 5764), 'cv2.VideoCapture', 'cv2.VideoCapture', (['opt.video_path'], {}), '(opt.video_path)\n', (5748, 5764), False, 'import cv2\n'), ((7246, 7269), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7267, 7269), False, 'import cv2\n'), ((3892, 3956), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'color_box_current', '(2)'], {}), '(img, (x, y), (x + w, y + h), color_box_current, 2)\n', (3905, 3956), False, 'import cv2\n'), ((4098, 4200), 'cv2.putText', 'cv2.putText', (['img', 'text_box', '(x, y - 10)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(0.5)', 'color_box_current', '(1)'], {}), '(img, text_box, (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5,\n color_box_current, 1)\n', (4109, 4200), False, 'import cv2\n'), ((6035, 6131), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255)', '(YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE)', '(True)'], {'crop': '(False)'}), '(frame, 1 / 255, (YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE), \n True, crop=False)\n', (6056, 6131), False, 'import cv2\n'), ((6971, 7006), 'cv2.imshow', 'cv2.imshow', (['"""YOLO Algorithm"""', 'frame'], {}), "('YOLO Algorithm', frame)\n", (6981, 7006), False, 'import cv2\n'), ((1297, 1327), 'numpy.argmax', 'np.argmax', (['class_probabilities'], {}), '(class_probabilities)\n', (1306, 1327), True, 'import numpy as np\n'), ((7065, 7079), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], 
{}), '(1)\n', (7076, 7079), False, 'import cv2\n')]
|
import torch
import pykitti
from torch.utils.data import Dataset
from torchvision.utils import make_grid
import torchvision.transforms.functional as TF
import matplotlib.pyplot as plt
def transform_stereo_lidar(samples):
for k in samples:
samples[k] = TF.to_tensor(samples[k])
return samples
class KittiDenseDrive(Dataset):
"""Dataset for prediction of dense (images) from the Kitti Dataset."""
def __init__(self, basedir, date, drive, transform=None):
"""
"""
self.basedir = basedir
self.date = date
self.drive = drive
self.kitti = pykitti.raw(basedir, date, drive)
self.transform = transform
def __len__(self):
return len(self.kitti)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# rgb - left, right
cam2, cam3 = self.kitti.get_rgb(idx)
# velodyne scan
velo = self.kitti.get_velo(idx)
samples = {"left_rgb": cam2, "right_rgb": cam3, "velo": velo}
samples = self.transform(samples)
return samples
|
[
"torch.is_tensor",
"torchvision.transforms.functional.to_tensor",
"pykitti.raw"
] |
[((266, 290), 'torchvision.transforms.functional.to_tensor', 'TF.to_tensor', (['samples[k]'], {}), '(samples[k])\n', (278, 290), True, 'import torchvision.transforms.functional as TF\n'), ((610, 643), 'pykitti.raw', 'pykitti.raw', (['basedir', 'date', 'drive'], {}), '(basedir, date, drive)\n', (621, 643), False, 'import pykitti\n'), ((778, 798), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (793, 798), False, 'import torch\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For generating Beam pipeline graph in DOT representation.
This module is experimental. No backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import pydot
class PipelineGraph(object):
"""Creates a DOT representation of the pipeline. Thread-safe."""
def __init__(self,
pipeline_proto,
default_vertex_attrs=None,
default_edge_attrs=None):
"""Constructor of PipelineGraph.
Args:
pipeline_proto: (Pipeline proto)
default_vertex_attrs: (Dict[str, str]) a dict of default vertex attributes
default_edge_attrs: (Dict[str, str]) a dict of default edge attributes
"""
self._lock = threading.Lock()
self._graph = None
# A dict from PCollection ID to a list of its consuming Transform IDs
self._consumers = collections.defaultdict(list)
# A dict from PCollection ID to its producing Transform ID
self._producers = {}
transforms = pipeline_proto.components.transforms
for transform_id, transform in transforms.items():
if not self._is_top_level_transform(transform):
continue
for pcoll_id in transform.inputs.values():
self._consumers[pcoll_id].append(transform_id)
for pcoll_id in transform.outputs.values():
self._producers[pcoll_id] = transform_id
vertex_dict, edge_dict = self._generate_graph_dicts()
self._construct_graph(vertex_dict,
edge_dict,
default_vertex_attrs,
default_edge_attrs)
def get_dot(self):
return str(self._get_graph())
def _is_top_level_transform(self, transform):
return transform.unique_name and '/' not in transform.unique_name \
and not transform.unique_name.startswith('ref_')
def _generate_graph_dicts(self):
"""From pipeline_proto and other info, generate the graph.
Returns:
vertex_dict: (Dict[str, Dict[str, str]]) vertex mapped to attributes.
edge_dict: (Dict[(str, str), Dict[str, str]]) vertex pair mapped to the
edge's attribute.
"""
transforms = self._pipeline_proto.components.transforms
# A dict from vertex name (i.e. PCollection ID) to its attributes.
vertex_dict = collections.defaultdict(dict)
# A dict from vertex name pairs defining the edge (i.e. a pair of PTransform
# IDs defining the PCollection) to its attributes.
edge_dict = collections.defaultdict(dict)
self._edge_to_vertex_pairs = collections.defaultdict(list)
for _, transform in transforms.items():
if not self._is_top_level_transform(transform):
continue
vertex_dict[transform.unique_name] = {}
for pcoll_id in transform.outputs.values():
# For PCollections without consuming PTransforms, we add an invisible
# PTransform node as the consumer.
if pcoll_id not in self._consumers:
invisible_leaf = 'leaf%s' % (hash(pcoll_id) % 10000)
vertex_dict[invisible_leaf] = {'style': 'invis'}
self._edge_to_vertex_pairs[pcoll_id].append(
(transform.unique_name, invisible_leaf))
edge_dict[(transform.unique_name, invisible_leaf)] = {}
else:
for consumer in self._consumers[pcoll_id]:
producer_name = transform.unique_name
consumer_name = transforms[consumer].unique_name
self._edge_to_vertex_pairs[pcoll_id].append(
(producer_name, consumer_name))
edge_dict[(producer_name, consumer_name)] = {}
return vertex_dict, edge_dict
def _get_graph(self):
"""Returns pydot.Dot object for the pipeline graph.
The purpose of this method is to avoid accessing the graph while it is
updated. No one except for this method should be accessing _graph directly.
Returns:
(pydot.Dot)
"""
with self._lock:
return self._graph
def _construct_graph(self, vertex_dict, edge_dict,
default_vertex_attrs, default_edge_attrs):
"""Constructs the pydot.Dot object for the pipeline graph.
Args:
vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes
edge_dict: (Dict[(str, str), Dict[str, str]]) maps vertex name pairs to
attributes
default_vertex_attrs: (Dict[str, str]) a dict of attributes
default_edge_attrs: (Dict[str, str]) a dict of attributes
"""
with self._lock:
self._graph = pydot.Dot()
if default_vertex_attrs:
self._graph.set_node_defaults(**default_vertex_attrs)
if default_edge_attrs:
self._graph.set_edge_defaults(**default_edge_attrs)
self._vertex_refs = {} # Maps vertex name to pydot.Node
self._edge_refs = {} # Maps vertex name pairs to pydot.Edge
for vertex, vertex_attrs in vertex_dict.items():
vertex_ref = pydot.Node(vertex, **vertex_attrs)
self._vertex_refs[vertex] = vertex_ref
self._graph.add_node(vertex_ref)
for edge, edge_attrs in edge_dict.items():
edge_ref = pydot.Edge(edge[0], edge[1], **edge_attrs)
self._edge_refs[edge] = edge_ref
self._graph.add_edge(edge_ref)
def _update_graph(self, vertex_dict=None, edge_dict=None):
"""Updates the pydot.Dot object with the given attribute update
Args:
vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes
edge_dict: This should be
Either (Dict[str, Dict[str, str]]) which maps edge names to attributes
Or (Dict[(str, str), Dict[str, str]]) which maps vertex pairs to edge
attributes
"""
def set_attrs(ref, attrs):
for attr_name, attr_val in attrs.items():
ref.set(attr_name, attr_val)
with self._lock:
if vertex_dict:
for vertex, vertex_attrs in vertex_dict.items():
set_attrs(self._vertex_refs[vertex], vertex_attrs)
if edge_dict:
for edge, edge_attrs in edge_dict.items():
if isinstance(edge, tuple):
set_attrs(self._edge_refs[edge], edge_attrs)
else:
for vertex_pair in self._edge_to_vertex_pairs[edge]:
set_attrs(self._edge_refs[vertex_pair], edge_attrs)
|
[
"pydot.Node",
"pydot.Dot",
"threading.Lock",
"collections.defaultdict",
"pydot.Edge"
] |
[((1584, 1600), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1598, 1600), False, 'import threading\n'), ((1721, 1750), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1744, 1750), False, 'import collections\n'), ((3139, 3168), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (3162, 3168), False, 'import collections\n'), ((3321, 3350), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (3344, 3350), False, 'import collections\n'), ((3385, 3414), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3408, 3414), False, 'import collections\n'), ((5343, 5354), 'pydot.Dot', 'pydot.Dot', ([], {}), '()\n', (5352, 5354), False, 'import pydot\n'), ((5746, 5780), 'pydot.Node', 'pydot.Node', (['vertex'], {}), '(vertex, **vertex_attrs)\n', (5756, 5780), False, 'import pydot\n'), ((5937, 5979), 'pydot.Edge', 'pydot.Edge', (['edge[0]', 'edge[1]'], {}), '(edge[0], edge[1], **edge_attrs)\n', (5947, 5979), False, 'import pydot\n')]
|
"""Flask service to predict the adaptive card json from the card design"""
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_cors import CORS
from flask_restplus import Api
from mystique.utils import load_od_instance
from . import resources as res
from mystique import config
logger = logging.getLogger("mysitque")
logger.setLevel(logging.DEBUG)
# Suppress the tf warnings.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
file_handler = RotatingFileHandler(
'mystique_app.log', maxBytes=1024 * 1024 * 100, backupCount=20)
formatter = logging.Formatter(
"%(asctime)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] - \
%(levelname)s - %(message)s")
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
app = Flask(__name__)
CORS(app)
api = Api(app, title="Mystique", version="1.0",
default="Jobs", default_label="",
description="Mysique App For Adaptive card Json Prediction from \
UI Design")
api.add_resource(res.GetCardTemplates, '/get_card_templates',
methods=['GET'])
# Conditional loading helps to reduce the bundle size, as we don't need to
# package the tensorflow.
# TODO: Experimental API
if config.ENABLE_TF_SERVING:
api.add_resource(res.TfPredictJson, '/tf_predict_json',
methods=['POST'])
else:
api.add_resource(res.PredictJson, '/predict_json', methods=['POST'])
# Load the models and cache it for request handling.
app.od_model = load_od_instance()
# Include more debug points along with /predict_json api.
api.add_resource(res.DebugEndpoint, "/predict_json_debug", methods=["POST"])
api.add_resource(res.GetVersion, "/version", methods=["GET"])
|
[
"flask_restplus.Api",
"flask_cors.CORS",
"flask.Flask",
"logging.Formatter",
"mystique.utils.load_od_instance",
"logging.handlers.RotatingFileHandler",
"logging.getLogger"
] |
[((346, 375), 'logging.getLogger', 'logging.getLogger', (['"""mysitque"""'], {}), "('mysitque')\n", (363, 375), False, 'import logging\n'), ((493, 580), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['"""mystique_app.log"""'], {'maxBytes': '(1024 * 1024 * 100)', 'backupCount': '(20)'}), "('mystique_app.log', maxBytes=1024 * 1024 * 100,\n backupCount=20)\n", (512, 580), False, 'from logging.handlers import RotatingFileHandler\n'), ((594, 718), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] - %(levelname)s - %(message)s"""'], {}), "(\n '%(asctime)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] - %(levelname)s - %(message)s'\n )\n", (611, 718), False, 'import logging\n'), ((830, 845), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (835, 845), False, 'from flask import Flask\n'), ((846, 855), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (850, 855), False, 'from flask_cors import CORS\n'), ((863, 1051), 'flask_restplus.Api', 'Api', (['app'], {'title': '"""Mystique"""', 'version': '"""1.0"""', 'default': '"""Jobs"""', 'default_label': '""""""', 'description': '"""Mysique App For Adaptive card Json Prediction from UI Design"""'}), "(app, title='Mystique', version='1.0', default='Jobs', default_label='',\n description=\n 'Mysique App For Adaptive card Json Prediction from UI Design'\n )\n", (866, 1051), False, 'from flask_restplus import Api\n'), ((1559, 1577), 'mystique.utils.load_od_instance', 'load_od_instance', ([], {}), '()\n', (1575, 1577), False, 'from mystique.utils import load_od_instance\n')]
|
"""
This module contains updates used with the `hic2cool update` command.
See usage in hic2cool.hic2cool_utils.hic2cool_update
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import h5py
from .hic2cool_config import *
def prepare_hic2cool_updates(version_nums):
"""
Find what must be done when actually running `hic2cool update`
Determines what updates are necessary based off of version numbers
Version numbers is a list of ints in form: [major, minor, release]
"""
updates = []
# normalization vectors were inverted before version 0.5.0
if version_nums[0] == 0 and version_nums[1] < 5:
updates.append(
{
'title': 'Invert weights',
'effect': 'Invert cooler weights so that they match original hic normalization values',
'detail': 'cooler uses multiplicative weights and hic uses divisive weights. Before version 0.5.0, hic2cool inverted normalization vectors for consistency with cooler behavior, but now that is no longer done for consistency with 4DN analysis pipelines.',
'function': update_invert_weights
}
)
# import cooler attributes added in version 0.6.0
if version_nums[0] == 0 and version_nums[1] < 6:
updates.append(
{
'title': 'Add cooler schema version',
'effect': 'Add a couple important cooler schema attributes',
'detail': 'Adds format-version and storage-mode attributes to hdf5 for compatibility with cooler schema v3.',
'function': update_cooler_schema_v3
}
)
# import mcool attributes added in version 0.7.1
if version_nums[0] == 0 and ((version_nums[1] == 7 and version_nums[2] < 1) or version_nums[1] < 7):
updates.append(
{
'title': 'Add mcool schema attributes',
'effect': 'Adds missing schema attributes if this is a multi-resolution cooler',
'detail': 'Adds format and format-version attributes to the "/" hdf5 collection for mcool schema v2.',
'function': update_mcool_schema_v2
}
)
return updates
def norm_convert(val):
"""
hic2cool now just uses hic normalization vectors as-is,
without attempting to invert them to match cooler convention. This function
is now only used with `hic2cool update` to revert cooler weights to their
original hic values.
Simply invert norm vectors, since hic norms are divisive and cooler
weights are multiplicative.
"""
if val != 0.0:
return 1 / val
else:
return np.nan
def update_invert_weights(writefile):
"""
Invert all the weights from each resolution (if a mult-res file) or the
top level (if a single-res file)
"""
# helper fxn
def update_invert_weight_for_resolution(h5_data, res=None):
"""
Access the bins table, find the weights, and invert
"""
found_weights = [val for val in h5_data if val not in ['chrom', 'start', 'end']]
for weight in found_weights:
h5_weight = h5_data[weight][:]
h5_data[weight][:] = list(map(norm_convert, h5_weight))
if res:
print('... For resolution %s, inverted following weights: %s' % (res, found_weights))
else:
print('... Inverted following weights: %s' % found_weights)
with h5py.File(writefile) as h5_file:
if 'resolutions' in h5_file:
for res in h5_file['resolutions']:
update_invert_weight_for_resolution(h5_file['resolutions'][res]['bins'], res=res)
else:
update_invert_weight_for_resolution(h5_file['bins'])
def update_cooler_schema_v3(writefile):
"""
Add format-version and storage-mode attributes to given cooler
"""
# helper fxn
def add_v3_attrs(h5_data, res=None):
info = {
'format-version': COOLER_FORMAT_VERSION,
'storage-mode': 'symmetric-upper'
}
h5_data.attrs.update(info)
if res:
print('... For resolution %s, added format-version and storage-mode attributes' % res)
else:
print('... Added format-version and storage-mode attributes')
with h5py.File(writefile) as h5_file:
if 'resolutions' in h5_file:
for res in h5_file['resolutions']:
add_v3_attrs(h5_file['resolutions'][res], res=res)
else:
add_v3_attrs(h5_file)
def update_mcool_schema_v2(writefile):
"""
Add format and format-version attributes to the base level of an mcool
"""
with h5py.File(writefile) as h5_file:
# only run if it's an mcool and 'resolutions' exist
if 'resolutions' in h5_file:
mcool_info = {
'format': MCOOL_FORMAT,
'format-version': MCOOL_FORMAT_VERSION
}
h5_file.attrs.update(mcool_info)
print('... Added format and format-version attributes for the mcool')
else:
print('... Not a multi-res file, so will not add mcool schema attributes')
|
[
"h5py.File"
] |
[((3495, 3515), 'h5py.File', 'h5py.File', (['writefile'], {}), '(writefile)\n', (3504, 3515), False, 'import h5py\n'), ((4346, 4366), 'h5py.File', 'h5py.File', (['writefile'], {}), '(writefile)\n', (4355, 4366), False, 'import h5py\n'), ((4719, 4739), 'h5py.File', 'h5py.File', (['writefile'], {}), '(writefile)\n', (4728, 4739), False, 'import h5py\n')]
|
# ----------------------------------------------------
# Generate a random correlations
# ----------------------------------------------------
import numpy as np
def randCorr(size, lower=-1, upper=1):
"""
Create a random matrix T from uniform distribution of dimensions size x m (assumed to be 10000)
normalize the rows of T to lie in the unit sphere r = r / sqrt(r'r)
RandCorr = TT'
@param size: size of the matrix
@param lower: lower limit of the uniform distribution used to create the corr matrix
@param upper: upper limit of the uniform distribution used to create the corr matrix
@return: numpy ndarray, correlation matrix
"""
m = 1000
randomMatrix = np.random.uniform(lower, upper, (size, m))
norms = np.sum(randomMatrix**2, axis=1)
T = np.divide(randomMatrix, np.sqrt(norms).reshape(size,1))
c = np.dot(T, T.T)
c[np.diag_indices(size)] = 1.
return c
|
[
"numpy.random.uniform",
"numpy.sum",
"numpy.diag_indices",
"numpy.dot",
"numpy.sqrt"
] |
[((704, 746), 'numpy.random.uniform', 'np.random.uniform', (['lower', 'upper', '(size, m)'], {}), '(lower, upper, (size, m))\n', (721, 746), True, 'import numpy as np\n'), ((759, 792), 'numpy.sum', 'np.sum', (['(randomMatrix ** 2)'], {'axis': '(1)'}), '(randomMatrix ** 2, axis=1)\n', (765, 792), True, 'import numpy as np\n'), ((863, 877), 'numpy.dot', 'np.dot', (['T', 'T.T'], {}), '(T, T.T)\n', (869, 877), True, 'import numpy as np\n'), ((884, 905), 'numpy.diag_indices', 'np.diag_indices', (['size'], {}), '(size)\n', (899, 905), True, 'import numpy as np\n'), ((823, 837), 'numpy.sqrt', 'np.sqrt', (['norms'], {}), '(norms)\n', (830, 837), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class Test_Bitwise_Not(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def generate_bool_data(self, shape):
input1 = np.random.randint(0, 2, shape).astype(np.bool_)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def cpu_op_exec(self, input1):
output = torch.bitwise_not(input1)
if output.dtype not in [torch.int32, torch.int8, torch.bool]:
output = output.to(torch.int32)
output = output.numpy()
return output
def npu_op_exec(self, input1):
input1 = input1.to("npu")
output = torch.bitwise_not(input1)
output = output.to("cpu")
if output.dtype not in [torch.int32, torch.int8, torch.bool]:
output = output.to(torch.int32)
output = output.numpy()
return output
def npu_op_exec_out(self, input1, input2):
input1 = input1.to("npu")
input2 = input2.to("npu")
torch.bitwise_not(input1, out = input2)
output = input2.to("cpu")
if output.dtype not in [torch.int32, torch.int8, torch.bool]:
output = output.to(torch.int32)
output = output.numpy()
return output
def test_bitwise_not_bool(self, device):
npu_input1 = self.generate_bool_data((2, 3))
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_int16(self, device):
npu_input1 = self.generate_data(0, 2342, (2, 3), np.int16)
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_int32(self, device):
npu_input1 = self.generate_data(0, 34222, (2, 3), np.int32)
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_int64(self, device):
npu_input1 = self.generate_data(0, 355553, (2, 3), np.int64)
cpu_output = self.cpu_op_exec(npu_input1)
npu_output = self.npu_op_exec(npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test_bitwise_not_out(self, device):
shape_format = [
[[0, 2342, [2, 3], np.int16], [0, 2342, [10, 20], np.int16]],
[[0, 34222, [2, 3], np.int32], [0, 34222, [10, 20], np.int32]],
[[0, 355553, [2, 3], np.int64], [0, 355553, [1, 1], np.int64]],
]
for item in shape_format:
npu_input1 = self.generate_data(item[0][0], item[0][1], item[0][2], item[0][3])
npu_input2 = self.generate_data(item[1][0], item[1][1], item[1][2], item[1][3])
cpu_output = self.cpu_op_exec(npu_input1)
npu_output1 = self.npu_op_exec_out(npu_input1, npu_input1)
npu_output2 = self.npu_op_exec_out(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output1)
self.assertRtolEqual(cpu_output, npu_output1)
instantiate_device_type_tests(Test_Bitwise_Not, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
|
[
"numpy.random.uniform",
"torch.bitwise_not",
"numpy.random.randint",
"common_utils.run_tests",
"torch.from_numpy"
] |
[((4174, 4185), 'common_utils.run_tests', 'run_tests', ([], {}), '()\n', (4183, 4185), False, 'from common_utils import TestCase, run_tests\n'), ((998, 1022), 'torch.from_numpy', 'torch.from_numpy', (['input1'], {}), '(input1)\n', (1014, 1022), False, 'import torch\n'), ((1178, 1202), 'torch.from_numpy', 'torch.from_numpy', (['input1'], {}), '(input1)\n', (1194, 1202), False, 'import torch\n'), ((1282, 1307), 'torch.bitwise_not', 'torch.bitwise_not', (['input1'], {}), '(input1)\n', (1299, 1307), False, 'import torch\n'), ((1564, 1589), 'torch.bitwise_not', 'torch.bitwise_not', (['input1'], {}), '(input1)\n', (1581, 1589), False, 'import torch\n'), ((1917, 1954), 'torch.bitwise_not', 'torch.bitwise_not', (['input1'], {'out': 'input2'}), '(input1, out=input2)\n', (1934, 1954), False, 'import torch\n'), ((924, 962), 'numpy.random.uniform', 'np.random.uniform', (['min_d', 'max_d', 'shape'], {}), '(min_d, max_d, shape)\n', (941, 962), True, 'import numpy as np\n'), ((1109, 1139), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'shape'], {}), '(0, 2, shape)\n', (1126, 1139), True, 'import numpy as np\n')]
|
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import shiboken2
from PySide2.QtCore import *
from PySide2.QtGui import *
class LayerRenameDialog(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self._widgets = dict()
self.create_widgets()
self.create_layout()
self.create_connections()
def create_widgets(self):
self._widgets['old_name'] = QLineEdit(self)
self._widgets['new_name'] = QLineEdit(self)
self._widgets['ok'] = QPushButton("OK", self)
self._widgets['cancel'] = QPushButton("Cancel", self)
def create_layout(self):
main_layout = QGridLayout(self)
form_layout = QFormLayout(self)
form_layout.addRow("Old name:", self._widgets['old_name'])
form_layout.addRow("New name:", self._widgets['new_name'])
main_layout.addLayout(form_layout, 0, 0, 3, 3)
main_layout.addWidget(self._widgets['ok'], 3, 1)
main_layout.addWidget(self._widgets['cancel'], 3, 2)
self.setLayout(main_layout)
def create_connections(self):
self._widgets['ok'].clicked.connect(self.do_rename)
self._widgets['cancel'].clicked.connect(self.reject)
def do_rename(self):
proj = get_current_project()
if not proj:
self.reject()
old_name = self._widgets['old_name'].text()
new_name = self._widgets['new_name'].text()
layers = proj.find_layers(old_name)
if not layers:
msg = QMessageBox(self)
msg.setText("No layers with name %s" % old_name)
msg.exec_()
self.reject()
map(lambda layer: setattr(layer, 'name', new_name), layers)
self.accept()
if __name__ == "__main__":
from mocha import ui
mw = ui.get_widgets()['MainWindow']
rename = LayerRenameDialog(parent=mw)
rename.show()
|
[
"mocha.ui.get_widgets"
] |
[((3401, 3417), 'mocha.ui.get_widgets', 'ui.get_widgets', ([], {}), '()\n', (3415, 3417), False, 'from mocha import ui\n')]
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from rest_framework.documentation import include_docs_urls
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('apps.user.urls'), name='user'),
path('api/recipe/', include('apps.recipe.urls'), name='recipe'),
path('docs/', include_docs_urls(title='Recipe App API')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rest_framework.documentation.include_docs_urls",
"django.conf.urls.static.static",
"django.urls.path",
"django.urls.include"
] |
[((458, 519), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (464, 519), False, 'from django.conf.urls.static import static\n'), ((227, 258), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (231, 258), False, 'from django.urls import path, include\n'), ((282, 307), 'django.urls.include', 'include', (['"""apps.user.urls"""'], {}), "('apps.user.urls')\n", (289, 307), False, 'from django.urls import path, include\n'), ((347, 374), 'django.urls.include', 'include', (['"""apps.recipe.urls"""'], {}), "('apps.recipe.urls')\n", (354, 374), False, 'from django.urls import path, include\n'), ((410, 451), 'rest_framework.documentation.include_docs_urls', 'include_docs_urls', ([], {'title': '"""Recipe App API"""'}), "(title='Recipe App API')\n", (427, 451), False, 'from rest_framework.documentation import include_docs_urls\n')]
|
import numpy as np
from cyvcf2 import VCF, Variant, Writer
import os.path
HERE = os.path.dirname(__file__)
HEM_PATH = os.path.join(HERE, "test-hemi.vcf")
VCF_PATH = os.path.join(HERE, "test.vcf.gz")
def check_var(v):
s = [x.split(":")[0] for x in str(v).split("\t")[9:]]
lookup = {'0/0': 0, '0/1': 1, './1': 1, '1/.': 1, '0/.': 0, './0': 0, '1/1': 3, '.': 2, './.': 2}
expected = np.array([lookup[ss] for ss in s])
obs = v.gt_types
assert np.all(expected == obs), zip(expected, obs)
def test_hemi():
"""
make sure that we are getting the correct gt_types
for hemizygous variants
"""
for p in (HEM_PATH, VCF_PATH):
vcf = VCF(p)
for v in vcf:
check_var(v)
|
[
"cyvcf2.VCF",
"numpy.array",
"numpy.all"
] |
[((396, 430), 'numpy.array', 'np.array', (['[lookup[ss] for ss in s]'], {}), '([lookup[ss] for ss in s])\n', (404, 430), True, 'import numpy as np\n'), ((463, 486), 'numpy.all', 'np.all', (['(expected == obs)'], {}), '(expected == obs)\n', (469, 486), True, 'import numpy as np\n'), ((675, 681), 'cyvcf2.VCF', 'VCF', (['p'], {}), '(p)\n', (678, 681), False, 'from cyvcf2 import VCF, Variant, Writer\n')]
|
import pandas as pd
import haziris as hz
df = pd.DataFrame([
['President' , '<NAME>', '1789-04-30 00:00:00', '1797-03-04 00:00:00' ],
['President' , '<NAME>' , '1797-03-04 00:00:00', '1801-03-04 00:00:00' ],
['President' , '<NAME>' , '1801-03-04 00:00:00', '1809-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1789-04-21 00:00:00', '1797-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1797-03-04 00:00:00', '1801-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1801-03-04 00:00:00', '1805-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1805-03-04 00:00:00', '1812-04-20 00:00:00' ],
['Secretary of State', '<NAME>' , '1789-09-26 00:00:00', '1790-03-22 00:00:00' ],
['Secretary of State', '<NAME>' , '1790-03-22 00:00:00', '1793-12-31 00:00:00' ],
['Secretary of State', '<NAME>' , '1794-01-02 00:00:00', '1795-08-20 00:00:00' ],
['Secretary of State', '<NAME>', '1795-08-20 00:00:00', '1800-05-12 00:00:00' ],
['Secretary of State', '<NAME>' , '1800-05-13 00:00:00', '1800-06-05 00:00:00' ],
['Secretary of State', '<NAME>' , '1800-06-13 00:00:00', '1801-03-04 00:00:00' ],
['Secretary of State', '<NAME>' , '1801-03-05 00:00:00', '1801-05-01 00:00:00' ],
['Secretary of State', '<NAME>' , '1801-05-02 00:00:00', '1809-03-03 00:00:00' ]
],
columns = ['Position', 'Name', 'Start', 'End']
)
options = {
'timeline': {
'showRowLabels': False
}
}
hz.google_timeline_chart( df, "google_timeline_grouping.html", options )
|
[
"pandas.DataFrame",
"haziris.google_timeline_chart"
] |
[((47, 1344), 'pandas.DataFrame', 'pd.DataFrame', (["[['President', '<NAME>', '1789-04-30 00:00:00', '1797-03-04 00:00:00'], [\n 'President', '<NAME>', '1797-03-04 00:00:00', '1801-03-04 00:00:00'], [\n 'President', '<NAME>', '1801-03-04 00:00:00', '1809-03-04 00:00:00'], [\n 'Vice President', '<NAME>', '1789-04-21 00:00:00',\n '1797-03-04 00:00:00'], ['Vice President', '<NAME>',\n '1797-03-04 00:00:00', '1801-03-04 00:00:00'], ['Vice President',\n '<NAME>', '1801-03-04 00:00:00', '1805-03-04 00:00:00'], [\n 'Vice President', '<NAME>', '1805-03-04 00:00:00',\n '1812-04-20 00:00:00'], ['Secretary of State', '<NAME>',\n '1789-09-26 00:00:00', '1790-03-22 00:00:00'], ['Secretary of State',\n '<NAME>', '1790-03-22 00:00:00', '1793-12-31 00:00:00'], [\n 'Secretary of State', '<NAME>', '1794-01-02 00:00:00',\n '1795-08-20 00:00:00'], ['Secretary of State', '<NAME>',\n '1795-08-20 00:00:00', '1800-05-12 00:00:00'], ['Secretary of State',\n '<NAME>', '1800-05-13 00:00:00', '1800-06-05 00:00:00'], [\n 'Secretary of State', '<NAME>', '1800-06-13 00:00:00',\n '1801-03-04 00:00:00'], ['Secretary of State', '<NAME>',\n '1801-03-05 00:00:00', '1801-05-01 00:00:00'], ['Secretary of State',\n '<NAME>', '1801-05-02 00:00:00', '1809-03-03 00:00:00']]"], {'columns': "['Position', 'Name', 'Start', 'End']"}), "([['President', '<NAME>', '1789-04-30 00:00:00',\n '1797-03-04 00:00:00'], ['President', '<NAME>', '1797-03-04 00:00:00',\n '1801-03-04 00:00:00'], ['President', '<NAME>', '1801-03-04 00:00:00',\n '1809-03-04 00:00:00'], ['Vice President', '<NAME>',\n '1789-04-21 00:00:00', '1797-03-04 00:00:00'], ['Vice President',\n '<NAME>', '1797-03-04 00:00:00', '1801-03-04 00:00:00'], [\n 'Vice President', '<NAME>', '1801-03-04 00:00:00',\n '1805-03-04 00:00:00'], ['Vice President', '<NAME>',\n '1805-03-04 00:00:00', '1812-04-20 00:00:00'], ['Secretary of State',\n '<NAME>', '1789-09-26 00:00:00', '1790-03-22 00:00:00'], [\n 'Secretary of State', '<NAME>', '1790-03-22 00:00:00',\n 
'1793-12-31 00:00:00'], ['Secretary of State', '<NAME>',\n '1794-01-02 00:00:00', '1795-08-20 00:00:00'], ['Secretary of State',\n '<NAME>', '1795-08-20 00:00:00', '1800-05-12 00:00:00'], [\n 'Secretary of State', '<NAME>', '1800-05-13 00:00:00',\n '1800-06-05 00:00:00'], ['Secretary of State', '<NAME>',\n '1800-06-13 00:00:00', '1801-03-04 00:00:00'], ['Secretary of State',\n '<NAME>', '1801-03-05 00:00:00', '1801-05-01 00:00:00'], [\n 'Secretary of State', '<NAME>', '1801-05-02 00:00:00',\n '1809-03-03 00:00:00']], columns=['Position', 'Name', 'Start', 'End'])\n", (59, 1344), True, 'import pandas as pd\n'), ((1512, 1582), 'haziris.google_timeline_chart', 'hz.google_timeline_chart', (['df', '"""google_timeline_grouping.html"""', 'options'], {}), "(df, 'google_timeline_grouping.html', options)\n", (1536, 1582), True, 'import haziris as hz\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin
"""Operators used in TIR expression."""
import tvm._ffi
from tvm.runtime import convert, const
from tvm.schedule import Buffer
from .expr import Call
from . import _ffi_api
def _pack_buffer(buf):
"""Build intrinsics that packs the buffer.
"""
assert buf.shape
shape = Call("handle", "tvm_stack_make_shape", buf.shape,
Call.Intrinsic, None, 0)
strides = Call("handle", "tvm_stack_make_shape", buf.strides,
Call.Intrinsic, None, 0) if buf.strides else 0
pack_args = [buf.data,
shape,
strides,
len(buf.shape),
const(0, dtype=buf.dtype),
buf.elem_offset]
return Call("handle", "tvm_stack_make_array",
pack_args, Call.Intrinsic, None, 0)
def call_packed(*args):
"""Build expression by call an external packed function.
The argument to packed function can be Expr or Buffer.
The argument is the corresponding POD type when Expr is presented.
When the argument is Buffer, the corresponding PackedFunc
will recieve an TVMArrayHandle whose content is valid during the callback period.
If the PackedFunc is a python callback, then the corresponding argument is NDArray.
Parameters
----------
args : list of Expr or Buffer.
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
tvm.extern : Create tensor with extern function call.
"""
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
return Call(
"int32", "tvm_call_packed", call_args, Call.Intrinsic, None, 0)
def call_pure_intrin(dtype, func_name, *args):
"""Build expression by calling a pure intrinsic function.
Intrinsics can be overloaded with multiple data types via
the intrinsic translation rule.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The intrinsic function name.
args : list
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
args = convert(args)
return Call(
dtype, func_name, convert(args), Call.PureIntrinsic, None, 0)
def call_intrin(dtype, func_name, *args):
"""Build expression by calling an intrinsic function.
Intrinsics can be overloaded with multiple data types via
the intrinsic translation rule.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The intrinsic function name.
args : list
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
args = convert(args)
return Call(
dtype, func_name, convert(args), Call.Intrinsic, None, 0)
def call_pure_extern(dtype, func_name, *args):
"""Build expression by calling a pure extern function.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The extern function name.
args : list
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(
dtype, func_name, convert(args), Call.PureExtern, None, 0)
def call_extern(dtype, func_name, *args):
"""Build expression by calling a extern function.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The extern function name.
args : list
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(
dtype, func_name, convert(args), Call.Extern, None, 0)
def call_llvm_intrin(dtype, name, *args):
"""Build expression by calling an llvm intrinsic function
Parameters
----------
dtype : str
The data type of the result.
name : str
The name of the llvm intrinsic function.
args : list
Poistional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
# pylint: disable=import-outside-toplevel
from tvm.target import codegen
llvm_id = codegen.llvm_lookup_intrinsic_id(name)
assert llvm_id != 0, "%s is not an LLVM intrinsic" % name
return call_pure_intrin(dtype, 'llvm_intrin', tvm.const(llvm_id, 'uint32'), *args)
@tvm._ffi.register_func("tvm.default_trace_action")
def _tvm_default_trace_action(*args):
print(list(args))
def trace(args, trace_action="tvm.default_trace_action"):
"""Trace tensor data at the runtime.
The trace function allows to trace specific tensor at the
runtime. The tracing value should come as last argument.
The trace action should be specified, by default
tvm.default_trace_action is used.
Parameters
----------
args : list of Expr or Buffers.
Positional arguments.
trace_action : str.
The name of the trace action.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
tvm.tir.call_packed : Creates packed function.
"""
if not isinstance(args, list):
raise Exception("tvm.trace consumes the args as list type")
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
call_args.insert(0, trace_action)
return tvm.tir.Call(
args[-1].dtype, "tvm_call_trace_packed", call_args, tvm.tir.Call.Intrinsic, None, 0)
def min_value(dtype):
"""minimum value of dtype
Parameters
----------
dtype : str
The data type.
Returns
-------
value : tvm.Expr
The minimum value of dtype.
"""
return _ffi_api.min_value(dtype)
def max_value(dtype):
"""maximum value of dtype
Parameters
----------
dtype : str
The data type.
Returns
-------
value : tvm.Expr
The maximum value of dtype.
"""
return _ffi_api.max_value(dtype)
def exp(x):
"""Take exponetial of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "exp", x)
def erf(x):
"""Take gauss error function of the input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "erf", x)
def tanh(x):
"""Take hyperbolic tanh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "tanh", x)
def sigmoid(x):
"""Quick function to get sigmoid
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "sigmoid", x)
def log(x):
"""Take log of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "log", x)
def cos(x):
"""Take cos of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "cos", x)
def sin(x):
"""Take sin of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "sin", x)
def atan(x):
"""Take atan of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "atan", x)
def sqrt(x):
"""Take square root of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "sqrt", x)
def rsqrt(x):
"""Take reciprocal of square root of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "rsqrt", x)
def floor(x):
"""Take floor of float input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.floor(x)
def ceil(x):
"""Take ceil of float input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.ceil(x)
def trunc(x):
"""Get truncated value of the input.
The truncated value of the scalar x is the
nearest integer i which is closer to zero than x is.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.trunc(x)
def abs(x):
"""Get absolute value of the input element-wise.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.abs(x)
def round(x):
"""Round elements of the array to the nearest integer.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.round(x)
def nearbyint(x):
"""Round elements of the array to the nearest integer.
This intrinsic uses llvm.nearbyint instead of llvm.round
which is faster but will results different from tvm.round.
Notably nearbyint rounds according to the rounding mode,
whereas tvm.round (llvm.round) ignores that.
For differences between the two see:
https://en.cppreference.com/w/cpp/numeric/math/round
https://en.cppreference.com/w/cpp/numeric/math/nearbyint
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.nearbyint(x)
def isnan(x):
"""Check if input value is Nan.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.isnan(x)
def power(x, y):
"""x power y
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
The exponent
Returns
-------
z : PrimExpr
The result.
"""
return _ffi_api._OpPow(convert(x), convert(y))
def popcount(x):
"""Count the number of set bits in input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "popcount", x)
def fmod(x, y):
"""Return the remainder of x divided by y with the same sign as x.
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
Input argument.
Returns
-------
z : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "fmod", x, y)
def if_then_else(cond, t, f):
"""Conditional selection expression.
Parameters
----------
cond : PrimExpr
The condition
t : PrimExpr
The result expression if cond is true.
f : PrimExpr
The result expression if cond is false.
Returns
-------
result : Node
The result of conditional expression.
Note
----
Unlike Select, if_then_else will not execute
the branch that does not satisfy the condition.
You can use it to guard against out of bound access.
Unlike Select, if_then_else cannot be vectorized
if some lanes in the vector have different conditions.
"""
return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f))
def div(a, b):
"""Compute a / b as in C/C++ semantics.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
Returns
-------
res : PrimExpr
The result expression.
Note
----
When operands are integers, returns truncdiv(a, b).
"""
return _ffi_api._OpDiv(a, b)
def indexdiv(a, b):
"""Compute floor(a / b) where a and b are non-negative.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
Returns
-------
res : PrimExpr
The result expression.
Note
----
Use this function to split non-negative indices.
This function may take advantage of operands'
non-negativeness.
"""
return _ffi_api._OpIndexDiv(a, b)
def indexmod(a, b):
"""Compute the remainder of indexdiv. a and b are non-negative.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
Returns
-------
res : PrimExpr
The result expression.
Note
----
Use this function to split non-negative indices.
This function may take advantage of operands'
non-negativeness.
"""
return _ffi_api._OpIndexMod(a, b)
def truncdiv(a, b):
"""Compute the truncdiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
Note
----
This is the default integer division behavior in C.
"""
return _ffi_api._OpTruncDiv(a, b)
def truncmod(a, b):
"""Compute the truncmod of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
Note
----
This is the default integer division behavior in C.
"""
return _ffi_api._OpTruncMod(a, b)
def floordiv(a, b):
"""Compute the floordiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorDiv(a, b)
def floormod(a, b):
"""Compute the floormod of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorMod(a, b)
|
[
"tvm.runtime.const",
"tvm.runtime.convert",
"tvm.target.codegen.llvm_lookup_intrinsic_id"
] |
[((2999, 3012), 'tvm.runtime.convert', 'convert', (['args'], {}), '(args)\n', (3006, 3012), False, 'from tvm.runtime import convert, const\n'), ((3582, 3595), 'tvm.runtime.convert', 'convert', (['args'], {}), '(args)\n', (3589, 3595), False, 'from tvm.runtime import convert, const\n'), ((5059, 5097), 'tvm.target.codegen.llvm_lookup_intrinsic_id', 'codegen.llvm_lookup_intrinsic_id', (['name'], {}), '(name)\n', (5091, 5097), False, 'from tvm.target import codegen\n'), ((1459, 1484), 'tvm.runtime.const', 'const', (['(0)'], {'dtype': 'buf.dtype'}), '(0, dtype=buf.dtype)\n', (1464, 1484), False, 'from tvm.runtime import convert, const\n'), ((3056, 3069), 'tvm.runtime.convert', 'convert', (['args'], {}), '(args)\n', (3063, 3069), False, 'from tvm.runtime import convert, const\n'), ((3639, 3652), 'tvm.runtime.convert', 'convert', (['args'], {}), '(args)\n', (3646, 3652), False, 'from tvm.runtime import convert, const\n'), ((4097, 4110), 'tvm.runtime.convert', 'convert', (['args'], {}), '(args)\n', (4104, 4110), False, 'from tvm.runtime import convert, const\n'), ((4546, 4559), 'tvm.runtime.convert', 'convert', (['args'], {}), '(args)\n', (4553, 4559), False, 'from tvm.runtime import convert, const\n'), ((11614, 11624), 'tvm.runtime.convert', 'convert', (['x'], {}), '(x)\n', (11621, 11624), False, 'from tvm.runtime import convert, const\n'), ((11626, 11636), 'tvm.runtime.convert', 'convert', (['y'], {}), '(y)\n', (11633, 11636), False, 'from tvm.runtime import convert, const\n'), ((12918, 12931), 'tvm.runtime.convert', 'convert', (['cond'], {}), '(cond)\n', (12925, 12931), False, 'from tvm.runtime import convert, const\n'), ((12933, 12943), 'tvm.runtime.convert', 'convert', (['t'], {}), '(t)\n', (12940, 12943), False, 'from tvm.runtime import convert, const\n'), ((12945, 12955), 'tvm.runtime.convert', 'convert', (['f'], {}), '(f)\n', (12952, 12955), False, 'from tvm.runtime import convert, const\n')]
|
# Authors: <NAME> <<EMAIL>>
#
# License: BSD Style.
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
data_name = 'mtrf'
has_mtrf_data = partial(has_dataset, name=data_name)
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name=data_name,
download=download)
data_path.__doc__ = _data_path_doc.format(name=data_name,
conf='MNE_DATASETS_MTRF_PATH')
def get_version(): # noqa: D103
return _get_version(data_name)
get_version.__doc__ = _version_doc.format(name=data_name)
|
[
"functools.partial"
] |
[((261, 297), 'functools.partial', 'partial', (['has_dataset'], {'name': 'data_name'}), '(has_dataset, name=data_name)\n', (268, 297), False, 'from functools import partial\n')]
|
import logging
from typing import List
import math
import itertools
logger = logging.getLogger(__name__)
class RoundStats:
def __init__(self):
self._diff_history = []
self._q_history = []
def push_histories(self, diff=None, q=None):
if diff: self._diff_history.append(diff)
if q: self._q_history.append(q)
@property
def diff_history(self):
return self._diff_history or [-1]
@property
def q_history(self):
return self._q_history or [-1]
def get_stats(self):
max_diff = 0.0
min_diff = 1000.0
maxq = 100.0
minq = -1.0
dh = self._diff_history
qh = self._q_history
if dh:
max_diff = max(dh)
min_diff = min(dh)
if qh:
maxq = max(qh)
minq = min(qh)
return max_diff, min_diff, minq, maxq
@property
def min_diff(self):
if self._diff_history:
return min(self._diff_history)
return 1000
@property
def max_q(self):
if self._q_history:
return max(self._q_history)
@property
def min_q(self):
if self._q_history:
return min(self._q_history)
@staticmethod
def print_check_stats():
print('')
def __str__(self):
return " ".join([str(m) for m in self.matches])
def __repr__(self):
return str(self)
|
[
"logging.getLogger"
] |
[((78, 105), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (95, 105), False, 'import logging\n')]
|
import random
import numpy as np
import torch
from torch.utils import data
from torch.utils.data.dataset import Dataset
"""
Example of how to make your own dataset
"""
class ToyDataSet(Dataset):
"""
class that defines what a data-sample looks like
In the __init__ you could for example load in the data from file
and then return specific items in __getitem__
and return the length in __len__
"""
def __init__(self, length: int):
""" loads all stuff relevant for dataset """
# save the length, usually depends on data-file but here data is generated instead
self.length = length
# generate random binary labels
self.classes = [random.choice([0, 1]) for _ in range(length)]
# generate data from those labels
self.data = [np.random.normal(self.classes[i], 0.2, 2) for i in range(length)]
def __getitem__(self, item_index):
""" defines how to get one sample """
class_ = torch.tensor(self.classes[item_index]) # python scalar to torch tensor
tensor = torch.from_numpy(self.data[item_index]) # numpy array/tensor to torch array/tensor
return tensor, class_
def __len__(self):
""" defines how many samples in an epoch, independently of batch size"""
return self.length
def get_toy_loaders(length: int, batch_size: int):
""" converts a dataset to a batched dataloader """
train_loader = torch.utils.data.DataLoader(
ToyDataSet(int(length * 0.8)),
batch_size=batch_size,
shuffle=True,
pin_memory=True,
)
test_loader = torch.utils.data.DataLoader(
ToyDataSet(int(length * 0.2)),
batch_size=batch_size,
shuffle=True,
pin_memory=True,
)
return train_loader, test_loader
|
[
"numpy.random.normal",
"random.choice",
"torch.tensor",
"torch.from_numpy"
] |
[((981, 1019), 'torch.tensor', 'torch.tensor', (['self.classes[item_index]'], {}), '(self.classes[item_index])\n', (993, 1019), False, 'import torch\n'), ((1070, 1109), 'torch.from_numpy', 'torch.from_numpy', (['self.data[item_index]'], {}), '(self.data[item_index])\n', (1086, 1109), False, 'import torch\n'), ((701, 722), 'random.choice', 'random.choice', (['[0, 1]'], {}), '([0, 1])\n', (714, 722), False, 'import random\n'), ((811, 852), 'numpy.random.normal', 'np.random.normal', (['self.classes[i]', '(0.2)', '(2)'], {}), '(self.classes[i], 0.2, 2)\n', (827, 852), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: CountingSort.py
# @Author: olenji - <EMAIL>
# @Description: 适用于K比较少的情况
# @Create: 2019-06-13 20:47
# @Last Modified: 2019-06-13 20:47
import array
import random
class CountingSort:
def counting_sort(self, data, n):
c = array.array('l', [0] * n)
for d in data:
c[d] += 1
temp = 0
for i in range(n):
c[i] += temp
temp = c[i]
output = array.array('l', [20] * len(data))
for d in data:
index = c[d] - 1
c[d] -= 1
output[index] = d
return output
def test():
max = 20
a = [random.randint(0, max - 1) for _ in range(100)]
data = array.array('l', a)
print(data)
bs = CountingSort()
data = bs.counting_sort(data, n=max)
print(data)
if __name__ == "__main__":
test()
|
[
"random.randint",
"array.array"
] |
[((731, 750), 'array.array', 'array.array', (['"""l"""', 'a'], {}), "('l', a)\n", (742, 750), False, 'import array\n'), ((294, 319), 'array.array', 'array.array', (['"""l"""', '([0] * n)'], {}), "('l', [0] * n)\n", (305, 319), False, 'import array\n'), ((672, 698), 'random.randint', 'random.randint', (['(0)', '(max - 1)'], {}), '(0, max - 1)\n', (686, 698), False, 'import random\n')]
|
'''DenseNet-BC-100 k=12 adopted from https://github.com/hysts/pytorch_image_classification'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def initialize_weights(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
class BasicBlock(nn.Module):
def __init__(self, in_channels, out_channels, drop_rate):
super(BasicBlock, self).__init__()
self.drop_rate = drop_rate
self.bn = nn.BatchNorm2d(in_channels)
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
def forward(self, x):
y = self.conv(F.relu(self.bn(x), inplace=True))
if self.drop_rate > 0:
y = F.dropout(
y, p=self.drop_rate, training=self.training, inplace=False)
return torch.cat([x, y], dim=1)
class BottleneckBlock(nn.Module):
def __init__(self, in_channels, out_channels, drop_rate):
super(BottleneckBlock, self).__init__()
self.drop_rate = drop_rate
bottleneck_channels = out_channels * 4
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv1 = nn.Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.bn2 = nn.BatchNorm2d(bottleneck_channels)
self.conv2 = nn.Conv2d(
bottleneck_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
def forward(self, x):
y = self.conv1(F.relu(self.bn1(x), inplace=True))
if self.drop_rate > 0:
y = F.dropout(
y, p=self.drop_rate, training=self.training, inplace=False)
y = self.conv2(F.relu(self.bn2(y), inplace=True))
if self.drop_rate > 0:
y = F.dropout(
y, p=self.drop_rate, training=self.training, inplace=False)
return torch.cat([x, y], dim=1)
class TransitionBlock(nn.Module):
def __init__(self, in_channels, out_channels, drop_rate):
super(TransitionBlock, self).__init__()
self.drop_rate = drop_rate
self.bn = nn.BatchNorm2d(in_channels)
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False)
def forward(self, x):
x = self.conv(F.relu(self.bn(x), inplace=True))
if self.drop_rate > 0:
x = F.dropout(
x, p=self.drop_rate, training=self.training, inplace=False)
x = F.avg_pool2d(x, kernel_size=2, stride=2)
return x
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
input_shape = (1, 3, 32, 32)
n_classes = 10
block_type = 'bottleneck'
depth = 100
self.growth_rate = 12
self.drop_rate = 0
self.compression_rate = 0.5
assert block_type in ['basic', 'bottleneck']
if block_type == 'basic':
block = BasicBlock
n_blocks_per_stage = (depth - 4) // 3
assert n_blocks_per_stage * 3 + 4 == depth
else:
block = BottleneckBlock
n_blocks_per_stage = (depth - 4) // 6
assert n_blocks_per_stage * 6 + 4 == depth
in_channels = [2 * self.growth_rate]
for index in range(3):
denseblock_out_channels = int(
in_channels[-1] + n_blocks_per_stage * self.growth_rate)
if index < 2:
transitionblock_out_channels = int(
denseblock_out_channels * self.compression_rate)
else:
transitionblock_out_channels = denseblock_out_channels
in_channels.append(transitionblock_out_channels)
self.conv = nn.Conv2d(
input_shape[1],
in_channels[0],
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.stage1 = self._make_stage(in_channels[0], n_blocks_per_stage,
block, True)
self.stage2 = self._make_stage(in_channels[1], n_blocks_per_stage,
block, True)
self.stage3 = self._make_stage(in_channels[2], n_blocks_per_stage,
block, False)
self.bn = nn.BatchNorm2d(in_channels[3])
# compute conv feature size
with torch.no_grad():
self.feature_size = self._forward_conv(
torch.zeros(*input_shape)).view(-1).shape[0]
self.fc = nn.Linear(self.feature_size, n_classes)
# initialize weights
self.apply(initialize_weights)
def _make_stage(self, in_channels, n_blocks, block, add_transition_block):
stage = nn.Sequential()
for index in range(n_blocks):
stage.add_module(
'block{}'.format(index + 1),
block(in_channels + index * self.growth_rate, self.growth_rate,
self.drop_rate))
if add_transition_block:
in_channels = int(in_channels + n_blocks * self.growth_rate)
out_channels = int(in_channels * self.compression_rate)
stage.add_module(
'transition',
TransitionBlock(in_channels, out_channels, self.drop_rate))
return stage
def _forward_conv(self, x):
x = self.conv(x)
x = self.stage1(x)
x = self.stage2(x)
x = self.stage3(x)
x = F.relu(self.bn(x), inplace=True)
x = F.adaptive_avg_pool2d(x, output_size=1)
return x
def forward(self, x):
x = self._forward_conv(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
[
"torch.nn.init.kaiming_normal_",
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.zeros",
"torch.no_grad"
] |
[((232, 286), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight.data'], {'mode': '"""fan_out"""'}), "(m.weight.data, mode='fan_out')\n", (255, 286), True, 'import torch.nn as nn\n'), ((640, 667), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', (654, 667), True, 'import torch.nn as nn\n'), ((688, 776), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1,\n bias=False)\n', (697, 776), True, 'import torch.nn as nn\n'), ((1078, 1102), 'torch.cat', 'torch.cat', (['[x, y]'], {'dim': '(1)'}), '([x, y], dim=1)\n', (1087, 1102), False, 'import torch\n'), ((1353, 1380), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', (1367, 1380), True, 'import torch.nn as nn\n'), ((1402, 1497), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'bottleneck_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(in_channels, bottleneck_channels, kernel_size=1, stride=1,\n padding=0, bias=False)\n', (1411, 1497), True, 'import torch.nn as nn\n'), ((1587, 1622), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['bottleneck_channels'], {}), '(bottleneck_channels)\n', (1601, 1622), True, 'import torch.nn as nn\n'), ((1644, 1740), 'torch.nn.Conv2d', 'nn.Conv2d', (['bottleneck_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(bottleneck_channels, out_channels, kernel_size=3, stride=1,\n padding=1, bias=False)\n', (1653, 1740), True, 'import torch.nn as nn\n'), ((2236, 2260), 'torch.cat', 'torch.cat', (['[x, y]'], {'dim': '(1)'}), '([x, y], dim=1)\n', (2245, 2260), False, 'import torch\n'), ((2462, 2489), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', (2476, 2489), True, 'import torch.nn as nn\n'), ((2510, 2598), 
'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=1, stride=1, padding=0,\n bias=False)\n', (2519, 2598), True, 'import torch.nn as nn\n'), ((2897, 2937), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': '(2)', 'stride': '(2)'}), '(x, kernel_size=2, stride=2)\n', (2909, 2937), True, 'import torch.nn.functional as F\n'), ((4146, 4240), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_shape[1]', 'in_channels[0]'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(input_shape[1], in_channels[0], kernel_size=3, stride=1, padding=\n 1, bias=False)\n', (4155, 4240), True, 'import torch.nn as nn\n'), ((4709, 4739), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels[3]'], {}), '(in_channels[3])\n', (4723, 4739), True, 'import torch.nn as nn\n'), ((4939, 4978), 'torch.nn.Linear', 'nn.Linear', (['self.feature_size', 'n_classes'], {}), '(self.feature_size, n_classes)\n', (4948, 4978), True, 'import torch.nn as nn\n'), ((5144, 5159), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (5157, 5159), True, 'import torch.nn as nn\n'), ((5919, 5958), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['x'], {'output_size': '(1)'}), '(x, output_size=1)\n', (5940, 5958), True, 'import torch.nn.functional as F\n'), ((976, 1045), 'torch.nn.functional.dropout', 'F.dropout', (['y'], {'p': 'self.drop_rate', 'training': 'self.training', 'inplace': '(False)'}), '(y, p=self.drop_rate, training=self.training, inplace=False)\n', (985, 1045), True, 'import torch.nn.functional as F\n'), ((1942, 2011), 'torch.nn.functional.dropout', 'F.dropout', (['y'], {'p': 'self.drop_rate', 'training': 'self.training', 'inplace': '(False)'}), '(y, p=self.drop_rate, training=self.training, inplace=False)\n', (1951, 2011), True, 'import torch.nn.functional as F\n'), ((2134, 2203), 
'torch.nn.functional.dropout', 'F.dropout', (['y'], {'p': 'self.drop_rate', 'training': 'self.training', 'inplace': '(False)'}), '(y, p=self.drop_rate, training=self.training, inplace=False)\n', (2143, 2203), True, 'import torch.nn.functional as F\n'), ((2798, 2867), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.drop_rate', 'training': 'self.training', 'inplace': '(False)'}), '(x, p=self.drop_rate, training=self.training, inplace=False)\n', (2807, 2867), True, 'import torch.nn.functional as F\n'), ((4790, 4805), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4803, 4805), False, 'import torch\n'), ((4875, 4900), 'torch.zeros', 'torch.zeros', (['*input_shape'], {}), '(*input_shape)\n', (4886, 4900), False, 'import torch\n')]
|
"""Train the mini_vgg model on CIFAR-10 for a single epoch using snpx."""
from snpx.snpx_mxnet import SNPXClassifier
import os

# Log and model output directories, resolved relative to this script.
_HERE = os.path.dirname(__file__)
LOGS = os.path.join(_HERE, "..", "log")
MODEL = os.path.join(_HERE, "..", "model")

classifier = SNPXClassifier(
    "mini_vgg",
    "CIFAR-10",
    devices=['GPU'],
    logs_root=LOGS,
    model_bin_root=MODEL,
)
classifier.train(1)
|
[
"snpx.snpx_mxnet.SNPXClassifier",
"os.path.dirname"
] |
[((189, 286), 'snpx.snpx_mxnet.SNPXClassifier', 'SNPXClassifier', (['"""mini_vgg"""', '"""CIFAR-10"""'], {'devices': "['GPU']", 'logs_root': 'LOGS', 'model_bin_root': 'MODEL'}), "('mini_vgg', 'CIFAR-10', devices=['GPU'], logs_root=LOGS,\n model_bin_root=MODEL)\n", (203, 286), False, 'from snpx.snpx_mxnet import SNPXClassifier\n'), ((75, 100), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (90, 100), False, 'import os\n'), ((136, 161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n')]
|
from PyQt5 import QtCore, QtWidgets, QtGui, uic
from utils import configs, Connection
import socket
from view import HomePage
class aboutPage(QtWidgets.QWidget):
    """About window: shows contributors and rules, with a Back button.

    Note: class name kept lowercase for backward compatibility with
    existing imports.
    """

    def __init__(self, user, connection, x, y):
        """Build the About window at screen position (x, y).

        :param user: logged-in user object (must expose ``username``)
        :param connection: server connection used for the LOGOUT request
        :param x, y: desired top-left window coordinates
        """
        super().__init__()
        uic.loadUi('./ui/about.ui', self)
        self.user = user
        self.connection = connection
        self.back_button.clicked.connect(self.back)
        # True while closing the window should mean "quit the app";
        # set to False when we close programmatically to navigate back.
        self.close_on_purpose = True
        self.contributors.setText(configs.contributors)
        self.rules.setText(configs.rules)
        self.setWindowTitle('About')
        self.setFixedSize(800, 600)
        self.setGeometry(x, y, 800, 600)

    def closeEvent(self, event):
        """Confirm quit (and log out) unless the close is navigation back."""
        # Idiomatic truthiness test instead of '== False'.
        if not self.close_on_purpose:
            event.accept()
            return
        reply = QtWidgets.QMessageBox.question(
            self, 'Quit', 'Are you sure you want to quit?',
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            request = 'LOGOUT ' + self.user.username
            self.connection.send(request)
            event.accept()
        else:
            event.ignore()

    def back(self):
        """Return to the home page without triggering the quit confirmation."""
        self.home_page = HomePage.homePage(self.user, self.connection, self.pos().x(), self.pos().y() + 30)
        self.close_on_purpose = False
        self.close()
        self.home_page.show()
|
[
"PyQt5.QtWidgets.QMessageBox.question",
"PyQt5.uic.loadUi"
] |
[((246, 279), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""./ui/about.ui"""', 'self'], {}), "('./ui/about.ui', self)\n", (256, 279), False, 'from PyQt5 import QtCore, QtWidgets, QtGui, uic\n'), ((782, 922), 'PyQt5.QtWidgets.QMessageBox.question', 'QtWidgets.QMessageBox.question', (['self', '"""Quit"""', '"""Are you sure you want to quit?"""', 'QtWidgets.QMessageBox.Yes', 'QtWidgets.QMessageBox.No'], {}), "(self, 'Quit',\n 'Are you sure you want to quit?', QtWidgets.QMessageBox.Yes, QtWidgets.\n QMessageBox.No)\n", (812, 922), False, 'from PyQt5 import QtCore, QtWidgets, QtGui, uic\n')]
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import odoo.tests
from odoo.tools import mute_logger
def break_view(view, fr='<p>placeholder</p>', to='<p t-field="not.exist"/>'):
    """Corrupt *view* in place by substituting *fr* with *to* inside its arch."""
    broken_arch = view.arch.replace(fr, to)
    view.arch = broken_arch
@odoo.tests.common.tagged('post_install', '-at_install')
class TestWebsiteResetViews(odoo.tests.HttpCase):
    """HTTP tests for the website "reset broken template" flow.

    Each test deliberately corrupts a (COW-copied) QWeb view, verifies the
    page 500s with the reset button, then resets the template via the
    ``/website/reset_template`` controller and checks the page is fixed.
    """
    def fix_it(self, page, mode='soft'):
        """Open *page*, expect a 500 with the reset button, then reset it.

        *mode* is forwarded to the controller ('soft' restores the previous
        arch, 'hard' restores the arch from the source file).
        """
        self.authenticate("admin", "admin")
        resp = self.url_open(page)
        self.assertEqual(resp.status_code, 500, "Waiting 500")
        self.assertTrue('<button data-mode="soft" class="reset_templates_button' in resp.text)
        data = {'view_id': self.find_template(resp), 'redirect': page, 'mode': mode}
        resp = self.url_open('/website/reset_template', data)
        self.assertEqual(resp.status_code, 200, "Waiting 200")
    def find_template(self, response):
        """Extract the broken view's id from the 500 page's hidden input."""
        find = re.search(r'<input.*type="hidden".*name="view_id".*value="([0-9]+)?"', response.text)
        return find and find.group(1)
    def setUp(self):
        super(TestWebsiteResetViews, self).setUp()
        self.Website = self.env['website']
        self.View = self.env['ir.ui.view']
        self.test_view = self.Website.viewref('test_website.test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_01_reset_specific_page_view(self):
        """Reset a broken page view (COW copy on website 1)."""
        self.test_page_view = self.Website.viewref('test_website.test_page_view')
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_page_view.with_context(website_id=1))
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        self.fix_it('/test_page_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_02_reset_specific_view_controller(self):
        """Reset a broken controller-rendered view (with branding in the error page)."""
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        # `t-att-data="not.exist"` will test the case where exception.html contains branding
        break_view(self.test_view.with_context(website_id=1), to='<p t-att-data="not.exist" />')
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        self.fix_it('/test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_03_reset_specific_view_controller_t_called(self):
        """Reset a broken view that is t-called from the rendered view."""
        self.test_view_to_be_t_called = self.Website.viewref('test_website.test_view_to_be_t_called')
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_view_to_be_t_called.with_context(website_id=1))
        break_view(self.test_view, to='<t t-call="test_website.test_view_to_be_t_called"/>')
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        self.fix_it('/test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_04_reset_specific_view_controller_inherit(self):
        """Reset a broken *inherited* view of the rendered view."""
        self.test_view_child_broken = self.Website.viewref('test_website.test_view_child_broken')
        # Activate and break the inherited view
        self.test_view_child_broken.active = True
        break_view(self.test_view_child_broken.with_context(website_id=1, load_all_views=True))
        self.fix_it('/test_view')
    # This test work in real life, but not in test mode since we cannot rollback savepoint.
    # @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.addons.website.models.ir_ui_view')
    # def test_05_reset_specific_view_controller_broken_request(self):
    #     total_views = self.View.search_count([('type', '=', 'qweb')])
    #     # Trigger COW then break the QWEB XML on it
    #     break_view(self.test_view.with_context(website_id=1), to='<t t-esc="request.env[\'website\'].browse(\'a\').name" />')
    #     self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view (1)")
    #     self.fix_it('/test_view')
    # also mute ir.ui.view as `get_view_id()` will raise "Could not find view object with xml_id 'not.exist'""
    @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.addons.website.models.ir_ui_view')
    def test_06_reset_specific_view_controller_inexisting_template(self):
        """Reset a view whose corruption is a t-call to a template that does not exist."""
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_view.with_context(website_id=1), to='<t t-call="not.exist"/>')
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view (2)")
        self.fix_it('/test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_07_reset_page_view_complete_flow(self):
        """End-to-end tour: break and reset a page view twice via the UI."""
        self.start_tour("/", 'test_reset_page_view_complete_flow_part1', login="admin")
        self.fix_it('/test_page_view')
        self.start_tour("/", 'test_reset_page_view_complete_flow_part2', login="admin")
        self.fix_it('/test_page_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_08_reset_specific_page_view_hard_mode(self):
        """Soft reset must fail when the previous arch is also broken; hard reset succeeds."""
        self.test_page_view = self.Website.viewref('test_website.test_page_view')
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_page_view.with_context(website_id=1))
        # Break it again to have a previous arch different than file arch
        break_view(self.test_page_view.with_context(website_id=1))
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        with self.assertRaises(AssertionError):
            # soft reset should not be able to reset the view as previous
            # version is also broken
            self.fix_it('/test_page_view')
        self.fix_it('/test_page_view', 'hard')
|
[
"odoo.tools.mute_logger",
"re.search"
] |
[((1275, 1329), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""'], {}), "('odoo.addons.http_routing.models.ir_http')\n", (1286, 1329), False, 'from odoo.tools import mute_logger\n'), ((1805, 1859), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""'], {}), "('odoo.addons.http_routing.models.ir_http')\n", (1816, 1859), False, 'from odoo.tools import mute_logger\n'), ((2377, 2431), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""'], {}), "('odoo.addons.http_routing.models.ir_http')\n", (2388, 2431), False, 'from odoo.tools import mute_logger\n'), ((3041, 3095), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""'], {}), "('odoo.addons.http_routing.models.ir_http')\n", (3052, 3095), False, 'from odoo.tools import mute_logger\n'), ((4277, 4376), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""', '"""odoo.addons.website.models.ir_ui_view"""'], {}), "('odoo.addons.http_routing.models.ir_http',\n 'odoo.addons.website.models.ir_ui_view')\n", (4288, 4376), False, 'from odoo.tools import mute_logger\n'), ((4816, 4870), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""'], {}), "('odoo.addons.http_routing.models.ir_http')\n", (4827, 4870), False, 'from odoo.tools import mute_logger\n'), ((5184, 5238), 'odoo.tools.mute_logger', 'mute_logger', (['"""odoo.addons.http_routing.models.ir_http"""'], {}), "('odoo.addons.http_routing.models.ir_http')\n", (5195, 5238), False, 'from odoo.tools import mute_logger\n'), ((914, 1002), 're.search', 're.search', (['"""<input.*type="hidden".*name="view_id".*value="([0-9]+)?\\""""', 'response.text'], {}), '(\'<input.*type="hidden".*name="view_id".*value="([0-9]+)?"\',\n response.text)\n', (923, 1002), False, 'import re\n')]
|
import numpy as np
from molsysmt import puw
from ..exceptions import *
def digest_box(box):
    """Pass-through digestion of *box*; currently returns the input unchanged."""
    return box
def digest_box_lengths_value(box_lengths):
    """Normalize a unitless box-lengths array to shape (n_frames, 3).

    Parameters
    ----------
    box_lengths : array-like
        Either a single (3,) vector of box edge lengths or an
        (n_frames, 3) array with one vector per frame.

    Returns
    -------
    numpy.ndarray
        Array with shape (n_frames, 3).

    Raises
    ------
    ValueError
        If the input shape is neither (3,) nor (n, 3).
    """
    # asarray avoids a copy when the input is already an ndarray.
    box_lengths = np.asarray(box_lengths)
    if box_lengths.ndim == 1 and box_lengths.shape[0] == 3:
        # Promote a single frame to a (1, 3) array.
        return np.expand_dims(box_lengths, axis=0)
    if box_lengths.ndim == 2 and box_lengths.shape[1] == 3:
        return box_lengths
    # Fixed the garbled original message ("array with has not the correct shape").
    raise ValueError('box_lengths array has not the correct shape.')
def digest_box_lengths(box_lengths):
    """Digest a box-lengths quantity: normalize its value array, keep its unit."""
    unit = puw.get_unit(box_lengths)
    values = digest_box_lengths_value(puw.get_value(box_lengths))
    return values * unit
def digest_box_angles_value(box_angles):
    """Normalize a unitless box-angles array to shape (n_frames, 3).

    Parameters
    ----------
    box_angles : array-like
        Either a single (3,) vector of box angles or an
        (n_frames, 3) array with one vector per frame.

    Returns
    -------
    numpy.ndarray
        Array with shape (n_frames, 3).

    Raises
    ------
    ValueError
        If the input shape is neither (3,) nor (n, 3).
    """
    # asarray avoids a copy when the input is already an ndarray.
    box_angles = np.asarray(box_angles)
    if box_angles.ndim == 1 and box_angles.shape[0] == 3:
        # Promote a single frame to a (1, 3) array.
        return np.expand_dims(box_angles, axis=0)
    if box_angles.ndim == 2 and box_angles.shape[1] == 3:
        return box_angles
    # Fixed the garbled original message ("array with has not the correct shape").
    raise ValueError('box_angles array has not the correct shape.')
def digest_box_angles(box_angles):
    """Digest a box-angles quantity: normalize its value array, keep its unit."""
    unit = puw.get_unit(box_angles)
    values = digest_box_angles_value(puw.get_value(box_angles))
    return values * unit
|
[
"numpy.array",
"numpy.expand_dims",
"molsysmt.puw.get_value",
"molsysmt.puw.get_unit"
] |
[((844, 869), 'molsysmt.puw.get_unit', 'puw.get_unit', (['box_lengths'], {}), '(box_lengths)\n', (856, 869), False, 'from molsysmt import puw\n'), ((894, 920), 'molsysmt.puw.get_value', 'puw.get_value', (['box_lengths'], {}), '(box_lengths)\n', (907, 920), False, 'from molsysmt import puw\n'), ((1766, 1790), 'molsysmt.puw.get_unit', 'puw.get_unit', (['box_angles'], {}), '(box_angles)\n', (1778, 1790), False, 'from molsysmt import puw\n'), ((1814, 1839), 'molsysmt.puw.get_value', 'puw.get_value', (['box_angles'], {}), '(box_angles)\n', (1827, 1839), False, 'from molsysmt import puw\n'), ((239, 260), 'numpy.array', 'np.array', (['box_lengths'], {}), '(box_lengths)\n', (247, 260), True, 'import numpy as np\n'), ((1170, 1190), 'numpy.array', 'np.array', (['box_angles'], {}), '(box_angles)\n', (1178, 1190), True, 'import numpy as np\n'), ((360, 395), 'numpy.expand_dims', 'np.expand_dims', (['box_lengths'], {'axis': '(0)'}), '(box_lengths, axis=0)\n', (374, 395), True, 'import numpy as np\n'), ((1289, 1323), 'numpy.expand_dims', 'np.expand_dims', (['box_angles'], {'axis': '(0)'}), '(box_angles, axis=0)\n', (1303, 1323), True, 'import numpy as np\n')]
|
"""
Process REDCap DETs that are specific to the
Seattle Flu Study - Swab and Send - Asymptomatic Enrollments
"""
import re
import click
import json
import logging
from uuid import uuid4
from typing import Any, Callable, Dict, List, Mapping, Match, Optional, Union, Tuple
from datetime import datetime
from cachetools import TTLCache
from id3c.db.session import DatabaseSession
from id3c.cli.redcap import Record as REDCapRecord
from id3c.cli.command.etl import redcap_det
from id3c.cli.command.geocode import get_geocoded_address
from id3c.cli.command.location import location_lookup
from seattleflu.id3c.cli.command import age_ceiling
from .redcap_map import *
from .fhir import *
from . import race, first_record_instance, required_instruments
LOG = logging.getLogger(__name__)
# Bump to re-process DETs when the ETL logic changes.
REVISION = 2
REDCAP_URL = 'https://redcap.iths.org/'
INTERNAL_SYSTEM = "https://seattleflu.org"
UW_CENSUS_TRACT = '53033005302'
# REDCap project id for the asymptomatic swab-n-send study.
PROJECT_ID = 20190
# A record must have all of these instruments complete before it is processed.
REQUIRED_INSTRUMENTS = [
    'consent_form',
    'shipping_information',
    'back_end_mail_scans',
    'day_0_enrollment_questionnaire',
    'post_collection_data_entry_qc'
]
@redcap_det.command_for_project(
    "asymptomatic-swab-n-send",
    redcap_url = REDCAP_URL,
    project_id = PROJECT_ID,
    revision = REVISION,
    help = __doc__)
@first_record_instance
@required_instruments(REQUIRED_INSTRUMENTS)
def redcap_det_asymptomatic_swab_n_send(*, db: DatabaseSession, cache: TTLCache, det: dict, redcap_record: REDCapRecord) -> Optional[dict]:
    """
    Transform one REDCap record into a FHIR Bundle of Location, Patient,
    Encounter, QuestionnaireResponse, Specimen, and Observation entries.

    Returns None (skipping the record) when the patient, encounter, or
    specimen cannot be constructed from the available data.
    """
    location_resource_entries = locations(db, cache, redcap_record)
    patient_entry, patient_reference = create_patient(redcap_record)
    if not patient_entry:
        LOG.warning("Skipping enrollment with insufficient information to construct patient")
        return None
    encounter_entry, encounter_reference = create_encounter(redcap_record, patient_reference, location_resource_entries)
    if not encounter_entry:
        LOG.warning("Skipping enrollment with insufficient information to construct an encounter")
        return None
    questionnaire_entry = create_questionnaire_response(redcap_record, patient_reference, encounter_reference)
    specimen_entry, specimen_reference = create_specimen(redcap_record, patient_reference)
    if not specimen_entry:
        # NOTE(review): "insufficent" [sic] — typo lives in the log message itself.
        LOG.warning("Skipping enrollment with insufficent information to construct a specimen")
        return None
    specimen_observation_entry = create_specimen_observation_entry(specimen_reference, patient_reference, encounter_reference)
    resource_entries = [
        patient_entry,
        encounter_entry,
        questionnaire_entry,
        specimen_entry,
        *location_resource_entries,
        specimen_observation_entry
    ]
    # The bundle source ties the FHIR document back to the originating REDCap record.
    return create_bundle_resource(
        bundle_id = str(uuid4()),
        timestamp = datetime.now().astimezone().isoformat(),
        source = f"{REDCAP_URL}{PROJECT_ID}/{redcap_record['record_id']}",
        entries = list(filter(None, resource_entries))
    )
def locations(db: DatabaseSession, cache: TTLCache, record: dict) -> list:
    """Build FHIR Location resource entries (census tract + home address) for a REDCap record."""
    housing_type = 'residence'
    home_address = {
        'street': record['home_street'],
        'secondary': None,
        'city': record['homecity_other'],
        'state': record['home_state'],
        'zipcode': record['home_zipcode_2'],
    }
    lat, lng, canonical = get_geocoded_address(home_address, cache)
    if not canonical:
        # Address could not be geocoded/canonicalized.  TODO
        return []
    tract = residence_census_tract(db, (lat, lng), housing_type)
    # TODO what if tract is null?
    tract_url = generate_full_url_uuid()
    tract_entry = create_resource_entry(tract, tract_url)
    # The address Location is keyed by a hash (no PII) and points at its tract.
    address_location = create_location(
        f"{INTERNAL_SYSTEM}/location/address",
        generate_hash(canonical),
        housing_type,
        tract_url
    )
    address_entry = create_resource_entry(address_location, generate_full_url_uuid())
    return [tract_entry, address_entry]
def create_location(system: str, value: str, location_type: str, parent: str=None) -> dict:
    """Return a FHIR Location resource for the given identifier and location type."""
    role_code_system = "http://terminology.hl7.org/CodeSystem/v3-RoleCode"
    # HL7 v3 RoleCode codes for the location types this ETL uses.
    role_codes = {
        "residence": "PTRES",
        "school": "SCHOOL",
        "work": "WORK",
        "site": "HUSCS",
        "lodging": "PTLDG",
    }
    type_concept = create_codeable_concept(role_code_system, role_codes[location_type])
    identifier = create_identifier(system, value)
    part_of = create_reference(reference_type="Location", reference=parent) if parent else None
    return create_location_resource([type_concept], [identifier], part_of)
def create_patient(record: dict) -> tuple:
    """ Returns a FHIR Patient resource entry and reference.

    The patient identifier is a hash of name, gender, birth date, and postal
    code so the same person enrolls under one identifier across encounters.
    """
    gender = map_sex(record["sex"])
    patient_id = generate_patient_hash(
        names = (record['participant_first_name'], record['participant_last_name']),
        gender = gender,
        birth_date = record['birthday'],
        postal_code = record['home_zipcode_2'])
    if not patient_id:
        # Some piece of information was missing, so we couldn't generate a
        # hash.  Fallback to treating this individual as always unique by using
        # the REDCap record id.
        patient_id = generate_hash(f"{REDCAP_URL}{PROJECT_ID}/{record['record_id']}")
    LOG.debug(f"Generated individual identifier {patient_id}")
    patient_identifier = create_identifier(f"{INTERNAL_SYSTEM}/individual", patient_id)
    patient_resource = create_patient_resource([patient_identifier], gender)
    return create_entry_and_reference(patient_resource, "Patient")
def create_encounter(record: REDCapRecord, patient_reference: dict, locations: list) -> tuple:
    """ Returns a FHIR Encounter resource entry and reference.

    Returns (None, None) when the record has no enrollment date.  Symptoms
    become contained Condition resources plus diagnosis references; the
    encounter is linked to all non-tract locations and the 'swabNSend' site.
    """
    def grab_symptom_keys(key: str) -> Optional[Match[str]]:
        # Only checkbox keys like 'symptoms___12' that carry a value count.
        if record[key] != '':
            return re.match('symptoms___[0-9]{1,3}$', key)
        else:
            return None
    def build_conditions_list(symptom_key: str) -> dict:
        return create_resource_condition(record[symptom_key], patient_reference)
    def build_diagnosis_list(symptom_key: str) -> Optional[dict]:
        mapped_symptom = map_symptom(record[symptom_key])
        if not mapped_symptom:
            return None
        # '#' prefix references the contained Condition with that id.
        return { "condition": { "reference": f"#{mapped_symptom}" } }
    def build_locations_list(location: dict) -> dict:
        return {
            "location": create_reference(
                reference_type = "Location",
                reference = location["fullUrl"]
            )
        }
    def non_tract_locations(resource: dict):
        return bool(resource) \
            and resource['resource']['identifier'][0]['system'] != f"{INTERNAL_SYSTEM}/location/tract"
    symptom_keys = list(filter(grab_symptom_keys, record))
    contained = list(filter(None, map(build_conditions_list, symptom_keys)))
    diagnosis = list(filter(None, map(build_diagnosis_list, symptom_keys)))
    encounter_identifier = create_identifier(
        system = f"{INTERNAL_SYSTEM}/encounter",
        value = f"{REDCAP_URL}{PROJECT_ID}/{record['record_id']}"
    )
    # 'HH' = home health encounter (HL7 v3 ActCode).
    encounter_class_coding = create_coding(
        system = "http://terminology.hl7.org/CodeSystem/v3-ActCode",
        code = "HH"
    )
    encounter_date = record['enrollment_date']
    if not encounter_date:
        return None, None
    non_tracts = list(filter(non_tract_locations, locations))
    non_tract_references = list(map(build_locations_list, non_tracts))
    # Site for all swab 'n send Encounters is 'swabNSend'
    site_reference = {
        "location": create_reference(
            reference_type = "Location",
            identifier = create_identifier(f"{INTERNAL_SYSTEM}/site", 'swabNSend')
        )
    }
    non_tract_references.append(site_reference)
    encounter_resource = create_encounter_resource(
        encounter_source = create_redcap_uri(record),
        encounter_identifier = [encounter_identifier],
        encounter_class = encounter_class_coding,
        encounter_date = encounter_date,
        patient_reference = patient_reference,
        location_references = non_tract_references,
        diagnosis = diagnosis,
        contained = contained
    )
    return create_entry_and_reference(encounter_resource, "Encounter")
def create_resource_condition(symptom_name: str, patient_reference: dict) -> Optional[dict]:
    """Return a contained FHIR Condition resource for *symptom_name*, or None if unmapped."""
    mapped = map_symptom(symptom_name)
    if not mapped:
        return None
    # XXX TODO: Define this as a TypedDict when we upgrade from Python 3.6 to
    # 3.8. Until then, there's no reasonable way to type this data structure
    # better than Any.
    #   -trs, 24 Oct 2019
    symptom_coding = {
        "system": f"{INTERNAL_SYSTEM}/symptom",
        "code": mapped
    }
    condition: Any = {
        "resourceType": "Condition",
        "id": mapped,
        "code": {
            "coding": [symptom_coding]
        },
        "subject": patient_reference
    }
    return condition
def create_specimen(record: dict, patient_reference: dict) -> tuple:
    """ Returns a FHIR Specimen resource entry and reference.

    Returns (None, None) when no barcode can be determined for the record.
    """
    def specimen_barcode(record: Any) -> str:
        """
        Given a REDCap *record*, returns the barcode or corrected barcode if it
        exists.
        """
        # Precedence: scanned return barcode, then pre-scan, then the
        # participant-entered tube barcode; QC correction overrides all.
        barcode = record['return_utm_barcode'] or record['pre_scan_barcode']
        if not barcode:
            barcode = record['utm_tube_barcode']
            reentered_barcode = record['reenter_barcode']
        if record['check_barcodes'] == "No":
            #TODO: Figure out why 'corrected_barcode' doesn't always exist?
            barcode = record.get('corrected_barcode')
        return barcode
    barcode = specimen_barcode(record)
    if not barcode:
        LOG.warning("Could not create Specimen Resource due to lack of barcode.")
        return None, None
    specimen_identifier = create_identifier(
        system = f"{INTERNAL_SYSTEM}/sample",
        value = barcode
    )
    # YYYY-MM-DD in REDCap
    collected_time = record['collection_date'] or None
    # YYYY-MM-DD
    received_time = record['samp_process_date'] or None
    specimen_type = 'NSECR' # Nasal swab.  TODO we may want shared mapping function
    specimen_resource = create_specimen_resource(
        [specimen_identifier], patient_reference, specimen_type, received_time, collected_time
    )
    return create_entry_and_reference(specimen_resource, "Specimen")
def create_questionnaire_response(record: dict, patient_reference: dict,
                                  encounter_reference: dict) -> Optional[dict]:
    """ Returns a FHIR Questionnaire Response resource entry.

    Builds one answer item per known question, grouped by FHIR value type,
    plus special-cased 'race' (multi-select) and 'vaccine' items.  Returns
    None when no question produced an answer.
    """
    def create_custom_coding_key(coded_question: str, record: dict) -> Optional[List]:
        """
        Handles the 'race' edge case by combining "select all that apply"-type
        responses into one list.
        """
        coded_keys = list(filter(lambda r: grab_coding_keys(coded_question, r), record))
        coded_names = list(map(lambda k: record[k], coded_keys))
        if coded_question == 'race':
            return race(coded_names)
        return None
    def grab_coding_keys(coded_question: str, key: str) -> Optional[Match[str]]:
        # REDCap checkbox fields are exported as '<question>___<code>' keys.
        if record[key] == '':
            return None
        return re.match(f'{coded_question}___[0-9]+$', key)
    def build_questionnaire_items(question: str) -> Optional[dict]:
        # NOTE: closes over the loop variable `category` below; evaluated
        # inside the loop so it always sees the current category.
        return questionnaire_item(record, question, category)
    coding_questions = [
        'race'
    ]
    boolean_questions = [
        'hispanic',
        'travel_states',
        'travel_countries',
    ]
    integer_questions = [
        'age',
        'age_months',
    ]
    string_questions = [
        'education',
        'samp_process_date',
    ]
    question_categories = {
        'valueCoding': coding_questions,
        'valueBoolean': boolean_questions,
        'valueInteger': integer_questions,
        'valueString': string_questions,
    }
    # Do some pre-processing
    record['race'] = create_custom_coding_key('race', record)
    record['age'] = age_ceiling(int(record['age']))
    # age_months is capped via age_ceiling in years, then converted back.
    record['age_months'] = age_ceiling(int(record['age_months']) / 12) * 12
    items: List[dict] = []
    for category in question_categories:
        category_items = list(map(build_questionnaire_items, question_categories[category]))
        for item in category_items:
            if item:
                items.append(item)
    # Handle edge cases
    vaccine_item = vaccine(record)
    if vaccine_item:
        items.append(vaccine_item)
    if items:
        questionnaire_reseponse_resource = create_questionnaire_response_resource(
            patient_reference, encounter_reference, items
        )
        full_url = generate_full_url_uuid()
        return create_resource_entry(questionnaire_reseponse_resource, full_url)
    return None
def questionnaire_item(record: dict, question_id: str, response_type: str) -> Optional[dict]:
    """ Creates a QuestionnaireResponse internal item from a REDCap record.

    *response_type* selects the FHIR value type ('valueCoding',
    'valueInteger', 'valueBoolean', or 'valueString') and therefore which
    cast is applied to the raw answer.  Returns None when the answer cannot
    be cast.
    """
    response = record[question_id]
    def cast_to_coding(string: str):
        """ Currently the only QuestionnaireItem we code is race. """
        return create_coding(
            system = f"{INTERNAL_SYSTEM}/race",
            code = string,
        )
    def cast_to_string(string: str) -> Optional[str]:
        if string != '':
            return string
        return None
    def cast_to_integer(string: str) -> Optional[int]:
        try:
            return int(string)
        except ValueError:
            return None
    def cast_to_boolean(string: str) -> Optional[bool]:
        if string == 'Yes':
            return True
        elif re.match(r'^No($|,[\w\s\'\.]*)$', string): # Starts with "No", has optional comma and text
            return False
        return None
    def build_response_answers(response: Union[str, List]) -> List:
        answers = []
        if not isinstance(response, list):
            response = [response]
        for item in response:
            type_casted_item = casting_functions[response_type](item)
            # cast_to_boolean can return False, so must be `is not None`
            if type_casted_item is not None:
                answers.append({ response_type: type_casted_item })
        return answers
    casting_functions: Mapping[str, Callable[[str], Any]] = {
        'valueCoding': cast_to_coding,
        'valueInteger': cast_to_integer,
        'valueBoolean': cast_to_boolean,
        'valueString': cast_to_string,
    }
    answers = build_response_answers(response)
    if answers:
        return create_questionnaire_response_item(question_id, answers)
    return None
def vaccine(record: Any) -> Optional[dict]:
    """Build the 'vaccine' questionnaire-response item for *record*.

    Returns None when vaccination status cannot be determined; otherwise a
    boolean answer, plus the vaccination date when vaccinated and known.
    """
    status = map_vaccine(record["vaccine"])
    if status is None:
        return None
    answers: List[Dict[str, Any]] = [{'valueBoolean': status}]
    when = vaccine_date(record)
    if status and when:
        answers.append({'valueDate': when})
    return create_questionnaire_response_item('vaccine', answers)
def vaccine_date(record: dict) -> Optional[str]:
    """ Converts a vaccination date to 'YYYY' or 'YYYY-MM' format.

    Reads record['vaccine_year'] (e.g. '2019' or 'Do not know') and
    record['vaccine_1'] (a month name such as 'May', 'Do not know', or
    blank).  Returns None when the year is unknown, 'YYYY' when only the
    year is known, otherwise 'YYYY-MM'.
    """
    year = record['vaccine_year']
    month = record['vaccine_1']
    if year in ('', 'Do not know'):
        return None
    # Fix: a blank month used to crash strptime('' + year); treat it like
    # an unknown month and fall back to the year-only format.
    if month in ('', 'Do not know'):
        return datetime.strptime(year, '%Y').strftime('%Y')
    return datetime.strptime(f'{month} {year}', '%B %Y').strftime('%Y-%m')
def residence_census_tract(db: DatabaseSession, lat_lng: Tuple[float, float],
                           housing_type: str) -> Optional[dict]:
    """
    Creates a new Location Resource for the census tract containing the given
    *lat_lng* coordintes and associates it with the given *housing_type*.

    Returns None (with a warning) when no tract contains the coordinates.
    """
    location = location_lookup(db, lat_lng, 'tract')
    if location and location.identifier:
        return create_location(
            f"{INTERNAL_SYSTEM}/location/tract", location.identifier, housing_type
        )
    else:
        LOG.warning("No census tract found for given location.")
        return None
|
[
"uuid.uuid4",
"id3c.cli.command.location.location_lookup",
"re.match",
"datetime.datetime.strptime",
"id3c.cli.command.etl.redcap_det.command_for_project",
"id3c.cli.command.geocode.get_geocoded_address",
"datetime.datetime.now",
"logging.getLogger"
] |
[((755, 782), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (772, 782), False, 'import logging\n'), ((1113, 1255), 'id3c.cli.command.etl.redcap_det.command_for_project', 'redcap_det.command_for_project', (['"""asymptomatic-swab-n-send"""'], {'redcap_url': 'REDCAP_URL', 'project_id': 'PROJECT_ID', 'revision': 'REVISION', 'help': '__doc__'}), "('asymptomatic-swab-n-send', redcap_url=\n REDCAP_URL, project_id=PROJECT_ID, revision=REVISION, help=__doc__)\n", (1143, 1255), False, 'from id3c.cli.command.etl import redcap_det\n'), ((3424, 3460), 'id3c.cli.command.geocode.get_geocoded_address', 'get_geocoded_address', (['address', 'cache'], {}), '(address, cache)\n', (3444, 3460), False, 'from id3c.cli.command.geocode import get_geocoded_address\n'), ((16375, 16412), 'id3c.cli.command.location.location_lookup', 'location_lookup', (['db', 'lat_lng', '"""tract"""'], {}), "(db, lat_lng, 'tract')\n", (16390, 16412), False, 'from id3c.cli.command.location import location_lookup\n'), ((11713, 11757), 're.match', 're.match', (['f"""{coded_question}___[0-9]+$"""', 'key'], {}), "(f'{coded_question}___[0-9]+$', key)\n", (11721, 11757), False, 'import re\n'), ((6164, 6203), 're.match', 're.match', (['"""symptoms___[0-9]{1,3}$"""', 'key'], {}), "('symptoms___[0-9]{1,3}$', key)\n", (6172, 6203), False, 'import re\n'), ((14129, 14173), 're.match', 're.match', (['"""^No($|,[\\\\w\\\\s\\\\\'\\\\.]*)$"""', 'string'], {}), '("^No($|,[\\\\w\\\\s\\\\\'\\\\.]*)$", string)\n', (14137, 14173), False, 'import re\n'), ((16006, 16051), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{month} {year}"""', '"""%B %Y"""'], {}), "(f'{month} {year}', '%B %Y')\n", (16023, 16051), False, 'from datetime import datetime\n'), ((2775, 2782), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2780, 2782), False, 'from uuid import uuid4\n'), ((15949, 15978), 'datetime.datetime.strptime', 'datetime.strptime', (['year', '"""%Y"""'], {}), "(year, '%Y')\n", (15966, 15978), False, 
'from datetime import datetime\n'), ((2805, 2819), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2817, 2819), False, 'from datetime import datetime\n')]
|
import nltk
import re
import string
from collections import defaultdict
# Fetch the tokenizer and POS-tagger models at import time so the
# functions below can run without a manual setup step.
nltk.download("punkt")
nltk.download('averaged_perceptron_tagger')
def tag_pos(text):
    """Tokenize *text*, POS-tag it, strip punctuation, and group words by tag.

    Returns a mapping (defaultdict) from POS tag to the list of cleaned,
    lowercased words carrying that tag.
    """
    tokens = nltk.word_tokenize(text)
    tagged = nltk.pos_tag(tokens)
    cleaned = remove_punctuation(tagged)
    # Renamed local from 'sorted' to avoid shadowing the builtin.
    grouped = sort_by_pos(cleaned)
    return grouped
def remove_punctuation(tagged_text):
    """Strip punctuation from each tagged word and lowercase it.

    Accepts an iterable of (word, pos) pairs; returns a new list of pairs
    with the same POS tags and cleaned words.
    """
    # Translation table deleting every ASCII punctuation character.
    strip_punct = str.maketrans('', '', string.punctuation)
    return [(word.translate(strip_punct).lower(), pos) for word, pos in tagged_text]
def sort_by_pos(tokens):
    """Group words longer than two characters by their POS tag.

    Parameters
    ----------
    tokens : iterable of (word, tag) pairs

    Returns
    -------
    collections.defaultdict
        Mapping of POS tag -> list of words, in input order.
    """
    parts_of_speech = defaultdict(list)
    # (Removed dead local 'punct = string.punctuation' — it was never used.)
    for word, tag in tokens:
        # Skip very short tokens (articles, residue from punctuation stripping).
        if len(word) > 2:
            parts_of_speech[tag].append(word)
    return parts_of_speech
|
[
"nltk.download",
"collections.defaultdict",
"nltk.pos_tag",
"nltk.word_tokenize",
"re.compile"
] |
[((74, 96), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (87, 96), False, 'import nltk\n'), ((97, 140), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (110, 140), False, 'import nltk\n'), ((175, 199), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (193, 199), False, 'import nltk\n'), ((213, 233), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (225, 233), False, 'import nltk\n'), ((392, 431), 're.compile', 're.compile', (["('[%s]' % string.punctuation)"], {}), "('[%s]' % string.punctuation)\n", (402, 431), False, 'import re\n'), ((617, 634), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (628, 634), False, 'from collections import defaultdict\n')]
|
"""
"""
from django.contrib import admin
from bestmoments.models import BestImage
from bestmoments.models import WebmVideo
class BestImageAdmin(admin.ModelAdmin):
    """
    Admin configuration for BestImage: list view shows only the image field.
    """
    list_display = ["image"]
class WebmVideoAdmin(admin.ModelAdmin):
    """
    Admin configuration for WebmVideo: list view shows only the video field.
    """
    list_display = ["video"]
# Expose both models in the Django admin using their custom admin classes.
admin.site.register(BestImage, BestImageAdmin)
admin.site.register(WebmVideo, WebmVideoAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((297, 343), 'django.contrib.admin.site.register', 'admin.site.register', (['BestImage', 'BestImageAdmin'], {}), '(BestImage, BestImageAdmin)\n', (316, 343), False, 'from django.contrib import admin\n'), ((344, 390), 'django.contrib.admin.site.register', 'admin.site.register', (['WebmVideo', 'WebmVideoAdmin'], {}), '(WebmVideo, WebmVideoAdmin)\n', (363, 390), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
class PythonParseAPI(BaseObject):
""" API (Orchestrator) for Python Dependency Parsing
"""
def __init__(self,
is_debug: bool = False):
"""
Created:
6-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1535
Updated:
24-Dec-2019
<EMAIL>
* refactored in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1637#issuecomment-16802191
* also refactored in pursuit of
https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1642#issuecomment-16802836
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._base_path = os.environ["CODE_BASE"]
def _to_file(self,
df: DataFrame,
collection_type: str) -> None:
from dataingest.grammar.dmo import CollectionNameGenerator
from dataingest.grammar.svc import PerformPythonTransformation
gen_name = CollectionNameGenerator(is_debug=self._is_debug)
collection_name = gen_name.process(collection_type)
PerformPythonTransformation(df_parse=df,
is_debug=self._is_debug,
collection_name=collection_name).process()
def process(self):
from dataingest.grammar.svc import ParsePythonFiles
from dataingest.grammar.svc import ParsePythonImports
from dataingest.grammar.dmo import PythonDirectoryLoader
directory_path = f"{self._base_path}/workspace"
files = PythonDirectoryLoader(is_debug=self._is_debug,
directory_path=directory_path).process()
df_files = ParsePythonFiles(files=files,
is_debug=self._is_debug).process()
if df_files is None:
self.logger.warning("No Results Found")
return
d_imports = ParsePythonImports(files=files,
df_files=df_files,
is_debug=self._is_debug).process()
self._to_file(df_files, "src")
self._to_file(d_imports["internal"], "int-import")
self._to_file(d_imports["external"], "ext-import")
def load(self,
date: str):
def _input_path(file_name: str) -> str:
return os.path.join(os.environ["CODE_BASE"],
"resources/output/transform",
file_name)
def _to_dataframe(collection_type: str,
input_path: str) -> DataFrame:
df = pd.read_csv(input_path, sep='\t', encoding='utf-8')
if self._is_debug:
self.logger.debug('\n'.join([
f"Imported {collection_type} Collection",
f"\tTotal Records: {len(df)}",
f"\tInput Path: {input_path}",
tabulate(df.sample(3), tablefmt='psql', headers='keys')]))
return df
class Facade(object):
@staticmethod
def imports():
class ImportsFacade(object):
@staticmethod
def internal() -> DataFrame:
input_path = _input_path(f"parse_unstrut-int-import_{date}.csv")
return _to_dataframe("Internal Imports", input_path)
@staticmethod
def external() -> DataFrame:
input_path = _input_path(f"parse_unstrut-ext-import_{date}.csv")
return _to_dataframe("External Imports", input_path)
return ImportsFacade()
@staticmethod
def source() -> DataFrame:
input_path = _input_path(f"parse_unstrut-src_{date}.csv")
return _to_dataframe("Source", input_path)
return Facade()
def main():
PythonParseAPI(is_debug=False).process()
if __name__ == "__main__":
import plac
plac.call(main)
|
[
"base.BaseObject.__init__",
"dataingest.grammar.dmo.CollectionNameGenerator",
"pandas.read_csv",
"plac.call",
"dataingest.grammar.svc.PerformPythonTransformation",
"dataingest.grammar.svc.ParsePythonImports",
"dataingest.grammar.dmo.PythonDirectoryLoader",
"dataingest.grammar.svc.ParsePythonFiles",
"os.path.join"
] |
[((4324, 4339), 'plac.call', 'plac.call', (['main'], {}), '(main)\n', (4333, 4339), False, 'import plac\n'), ((890, 925), 'base.BaseObject.__init__', 'BaseObject.__init__', (['self', '__name__'], {}), '(self, __name__)\n', (909, 925), False, 'from base import BaseObject\n'), ((1273, 1321), 'dataingest.grammar.dmo.CollectionNameGenerator', 'CollectionNameGenerator', ([], {'is_debug': 'self._is_debug'}), '(is_debug=self._is_debug)\n', (1296, 1321), False, 'from dataingest.grammar.dmo import CollectionNameGenerator\n'), ((2660, 2738), 'os.path.join', 'os.path.join', (["os.environ['CODE_BASE']", '"""resources/output/transform"""', 'file_name'], {}), "(os.environ['CODE_BASE'], 'resources/output/transform', file_name)\n", (2672, 2738), False, 'import os\n'), ((2926, 2977), 'pandas.read_csv', 'pd.read_csv', (['input_path'], {'sep': '"""\t"""', 'encoding': '"""utf-8"""'}), "(input_path, sep='\\t', encoding='utf-8')\n", (2937, 2977), True, 'import pandas as pd\n'), ((1391, 1493), 'dataingest.grammar.svc.PerformPythonTransformation', 'PerformPythonTransformation', ([], {'df_parse': 'df', 'is_debug': 'self._is_debug', 'collection_name': 'collection_name'}), '(df_parse=df, is_debug=self._is_debug,\n collection_name=collection_name)\n', (1418, 1493), False, 'from dataingest.grammar.svc import PerformPythonTransformation\n'), ((1857, 1934), 'dataingest.grammar.dmo.PythonDirectoryLoader', 'PythonDirectoryLoader', ([], {'is_debug': 'self._is_debug', 'directory_path': 'directory_path'}), '(is_debug=self._is_debug, directory_path=directory_path)\n', (1878, 1934), False, 'from dataingest.grammar.dmo import PythonDirectoryLoader\n'), ((2003, 2057), 'dataingest.grammar.svc.ParsePythonFiles', 'ParsePythonFiles', ([], {'files': 'files', 'is_debug': 'self._is_debug'}), '(files=files, is_debug=self._is_debug)\n', (2019, 2057), False, 'from dataingest.grammar.svc import ParsePythonFiles\n'), ((2225, 2300), 'dataingest.grammar.svc.ParsePythonImports', 'ParsePythonImports', ([], {'files': 
'files', 'df_files': 'df_files', 'is_debug': 'self._is_debug'}), '(files=files, df_files=df_files, is_debug=self._is_debug)\n', (2243, 2300), False, 'from dataingest.grammar.svc import ParsePythonImports\n')]
|
"""Package for loading and running the nuclei and cell segmentation models programmaticly."""
import os
import sys
import cv2
import imageio
import numpy as np
import torch
import torch.nn
import torch.nn.functional as F
from skimage import transform, util
from hpacellseg.constants import (MULTI_CHANNEL_CELL_MODEL_URL,
NUCLEI_MODEL_URL, TWO_CHANNEL_CELL_MODEL_URL)
from hpacellseg.utils import download_with_url
NORMALIZE = {"mean": [124 / 255, 117 / 255, 104 / 255], "std": [1 / (0.0167 * 255)] * 3}
class CellSegmentator(object):
"""Uses pretrained DPN-Unet models to segment cells from images."""
def __init__(
self,
nuclei_model="./nuclei_model.pth",
cell_model="./cell_model.pth",
model_width_height=None,
device="cuda",
multi_channel_model=True,
return_without_scale_restore=False,
scale_factor=0.25,
padding=False
):
if device != "cuda" and device != "cpu" and "cuda" not in device:
raise ValueError(f"{device} is not a valid device (cuda/cpu)")
if device != "cpu":
try:
assert torch.cuda.is_available()
except AssertionError:
print("No GPU found, using CPU.", file=sys.stderr)
device = "cpu"
self.device = device
if isinstance(nuclei_model, str):
if not os.path.exists(nuclei_model):
print(
f"Could not find {nuclei_model}. Downloading it now",
file=sys.stderr,
)
download_with_url(NUCLEI_MODEL_URL, nuclei_model)
nuclei_model = torch.load(
nuclei_model, map_location=torch.device(self.device)
)
if isinstance(nuclei_model, torch.nn.DataParallel) and device == "cpu":
nuclei_model = nuclei_model.module
self.nuclei_model = nuclei_model.to(self.device)
self.multi_channel_model = multi_channel_model
if isinstance(cell_model, str):
if not os.path.exists(cell_model):
print(
f"Could not find {cell_model}. Downloading it now", file=sys.stderr
)
if self.multi_channel_model:
download_with_url(MULTI_CHANNEL_CELL_MODEL_URL, cell_model)
else:
download_with_url(TWO_CHANNEL_CELL_MODEL_URL, cell_model)
cell_model = torch.load(cell_model, map_location=torch.device(self.device))
self.cell_model = cell_model.to(self.device)
self.model_width_height = model_width_height
self.return_without_scale_restore = return_without_scale_restore
self.scale_factor = scale_factor
self.padding = padding
def _image_conversion(self, images):
microtubule_imgs, er_imgs, nuclei_imgs = images
if self.multi_channel_model:
if not isinstance(er_imgs, list):
raise ValueError("Please speicify the image path(s) for er channels!")
else:
if not er_imgs is None:
raise ValueError(
"second channel should be None for two channel model predition!"
)
if not isinstance(microtubule_imgs, list):
raise ValueError("The microtubule images should be a list")
if not isinstance(nuclei_imgs, list):
raise ValueError("The microtubule images should be a list")
if er_imgs:
if not len(microtubule_imgs) == len(er_imgs) == len(nuclei_imgs):
raise ValueError("The lists of images needs to be the same length")
else:
if not len(microtubule_imgs) == len(nuclei_imgs):
raise ValueError("The lists of images needs to be the same length")
if not all(isinstance(item, np.ndarray) for item in microtubule_imgs):
microtubule_imgs = [
os.path.expanduser(item) for _, item in enumerate(microtubule_imgs)
]
nuclei_imgs = [
os.path.expanduser(item) for _, item in enumerate(nuclei_imgs)
]
microtubule_imgs = list(
map(lambda item: imageio.imread(item), microtubule_imgs)
)
nuclei_imgs = list(map(lambda item: imageio.imread(item), nuclei_imgs))
if er_imgs:
er_imgs = [os.path.expanduser(item) for _, item in enumerate(er_imgs)]
er_imgs = list(map(lambda item: imageio.imread(item), er_imgs))
if not er_imgs:
er_imgs = [
np.zeros(item.shape, dtype=item.dtype)
for _, item in enumerate(microtubule_imgs)
]
cell_imgs = list(
map(
lambda item: np.dstack((item[0], item[1], item[2])),
list(zip(microtubule_imgs, er_imgs, nuclei_imgs)),
)
)
return cell_imgs
def _pad(self, image):
rows, cols = image.shape[:2]
self.scaled_shape = rows, cols
img_pad= cv2.copyMakeBorder(
image,
32,
(32 - rows % 32),
32,
(32 - cols % 32),
cv2.BORDER_REFLECT,
)
return img_pad
def pred_nuclei(self, images):
def _preprocess(images):
if isinstance(images[0], str):
raise NotImplementedError('Currently the model requires images as numpy arrays, not paths.')
# images = [imageio.imread(image_path) for image_path in images]
self.target_shapes = [image.shape for image in images]
#print(images.shape)
#resize like in original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
if self.model_width_height:
images = np.array([transform.resize(image, (self.model_width_height,self.model_width_height))
for image in images])
else:
images = [transform.rescale(image, self.scale_factor) for image in images]
if self.padding:
images = [self._pad(image) for image in images]
nuc_images = np.array([np.dstack((image[..., 2], image[..., 2], image[..., 2])) if len(image.shape) >= 3
else np.dstack((image, image, image)) for image in images])
nuc_images = nuc_images.transpose([0, 3, 1, 2])
#print("nuc", nuc_images.shape)
return nuc_images
def _segment_helper(imgs):
with torch.no_grad():
mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
std = torch.as_tensor(NORMALIZE["std"], device=self.device)
imgs = torch.tensor(imgs).float()
imgs = imgs.to(self.device)
imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
imgs = self.nuclei_model(imgs)
imgs = F.softmax(imgs, dim=1)
return imgs
preprocessed_imgs = _preprocess(images)
predictions = _segment_helper(preprocessed_imgs)
predictions = predictions.to("cpu").numpy()
#dont restore scaling, just save and scale later ...
predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
for pred, target_shape in zip(predictions, self.target_shapes)]
return predictions
def _restore_scaling(self, n_prediction, target_shape):
"""Restore an image from scaling and padding.
This method is intended for internal use.
It takes the output from the nuclei model as input.
"""
n_prediction = n_prediction.transpose([1, 2, 0])
if self.padding:
n_prediction = n_prediction[
32 : 32 + self.scaled_shape[0], 32 : 32 + self.scaled_shape[1], ...
]
n_prediction[..., 0] = 0
if not self.return_without_scale_restore:
n_prediction = cv2.resize(
n_prediction,
(target_shape[0], target_shape[1]),
#try INTER_NEAREST_EXACT
interpolation=cv2.INTER_NEAREST_EXACT,
)
return n_prediction
def pred_cells(self, images, precombined=False):
def _preprocess(images):
self.target_shapes = [image.shape for image in images]
for image in images:
if not len(image.shape) == 3:
raise ValueError("image should has 3 channels")
#resize like in original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
if self.model_width_height:
images = np.array([transform.resize(image, (self.model_width_height,self.model_width_height))
for image in images])
else:
images = np.array([transform.rescale(image, self.scale_factor, multichannel=True) for image in images])
if self.padding:
images = np.array([self._pad(image) for image in images])
cell_images = images.transpose([0, 3, 1, 2])
return cell_images
def _segment_helper(imgs):
with torch.no_grad():
mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
std = torch.as_tensor(NORMALIZE["std"], device=self.device)
imgs = torch.tensor(imgs).float()
imgs = imgs.to(self.device)
imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
imgs = self.cell_model(imgs)
imgs = F.softmax(imgs, dim=1)
return imgs
if not precombined:
images = self._image_conversion(images)
preprocessed_imgs = _preprocess(images)
predictions = _segment_helper(preprocessed_imgs)
predictions = predictions.to("cpu").numpy()
predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
for pred, target_shape in zip(predictions, self.target_shapes)]
return predictions
|
[
"numpy.dstack",
"skimage.transform.rescale",
"skimage.util.img_as_ubyte",
"imageio.imread",
"os.path.exists",
"cv2.copyMakeBorder",
"numpy.zeros",
"torch.nn.functional.softmax",
"torch.cuda.is_available",
"skimage.transform.resize",
"torch.device",
"torch.as_tensor",
"hpacellseg.utils.download_with_url",
"torch.tensor",
"torch.no_grad",
"os.path.expanduser",
"cv2.resize"
] |
[((5151, 5241), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(32)', '(32 - rows % 32)', '(32)', '(32 - cols % 32)', 'cv2.BORDER_REFLECT'], {}), '(image, 32, 32 - rows % 32, 32, 32 - cols % 32, cv2.\n BORDER_REFLECT)\n', (5169, 5241), False, 'import cv2\n'), ((8231, 8335), 'cv2.resize', 'cv2.resize', (['n_prediction', '(target_shape[0], target_shape[1])'], {'interpolation': 'cv2.INTER_NEAREST_EXACT'}), '(n_prediction, (target_shape[0], target_shape[1]), interpolation=\n cv2.INTER_NEAREST_EXACT)\n', (8241, 8335), False, 'import cv2\n'), ((1204, 1229), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1227, 1229), False, 'import torch\n'), ((1454, 1482), 'os.path.exists', 'os.path.exists', (['nuclei_model'], {}), '(nuclei_model)\n', (1468, 1482), False, 'import os\n'), ((1652, 1701), 'hpacellseg.utils.download_with_url', 'download_with_url', (['NUCLEI_MODEL_URL', 'nuclei_model'], {}), '(NUCLEI_MODEL_URL, nuclei_model)\n', (1669, 1701), False, 'from hpacellseg.utils import download_with_url\n'), ((2124, 2150), 'os.path.exists', 'os.path.exists', (['cell_model'], {}), '(cell_model)\n', (2138, 2150), False, 'import os\n'), ((4015, 4039), 'os.path.expanduser', 'os.path.expanduser', (['item'], {}), '(item)\n', (4033, 4039), False, 'import os\n'), ((4141, 4165), 'os.path.expanduser', 'os.path.expanduser', (['item'], {}), '(item)\n', (4159, 4165), False, 'import os\n'), ((4683, 4721), 'numpy.zeros', 'np.zeros', (['item.shape'], {'dtype': 'item.dtype'}), '(item.shape, dtype=item.dtype)\n', (4691, 4721), True, 'import numpy as np\n'), ((6782, 6797), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6795, 6797), False, 'import torch\n'), ((6822, 6876), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['mean']"], {'device': 'self.device'}), "(NORMALIZE['mean'], device=self.device)\n", (6837, 6876), False, 'import torch\n'), ((6899, 6952), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['std']"], {'device': 'self.device'}), 
"(NORMALIZE['std'], device=self.device)\n", (6914, 6952), False, 'import torch\n'), ((7197, 7219), 'torch.nn.functional.softmax', 'F.softmax', (['imgs'], {'dim': '(1)'}), '(imgs, dim=1)\n', (7206, 7219), True, 'import torch.nn.functional as F\n'), ((7512, 7535), 'skimage.util.img_as_ubyte', 'util.img_as_ubyte', (['pred'], {}), '(pred)\n', (7529, 7535), False, 'from skimage import transform, util\n'), ((9498, 9513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9511, 9513), False, 'import torch\n'), ((9538, 9592), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['mean']"], {'device': 'self.device'}), "(NORMALIZE['mean'], device=self.device)\n", (9553, 9592), False, 'import torch\n'), ((9615, 9668), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['std']"], {'device': 'self.device'}), "(NORMALIZE['std'], device=self.device)\n", (9630, 9668), False, 'import torch\n'), ((9910, 9932), 'torch.nn.functional.softmax', 'F.softmax', (['imgs'], {'dim': '(1)'}), '(imgs, dim=1)\n', (9919, 9932), True, 'import torch.nn.functional as F\n'), ((10244, 10267), 'skimage.util.img_as_ubyte', 'util.img_as_ubyte', (['pred'], {}), '(pred)\n', (10261, 10267), False, 'from skimage import transform, util\n'), ((1784, 1809), 'torch.device', 'torch.device', (['self.device'], {}), '(self.device)\n', (1796, 1809), False, 'import torch\n'), ((2346, 2405), 'hpacellseg.utils.download_with_url', 'download_with_url', (['MULTI_CHANNEL_CELL_MODEL_URL', 'cell_model'], {}), '(MULTI_CHANNEL_CELL_MODEL_URL, cell_model)\n', (2363, 2405), False, 'from hpacellseg.utils import download_with_url\n'), ((2448, 2505), 'hpacellseg.utils.download_with_url', 'download_with_url', (['TWO_CHANNEL_CELL_MODEL_URL', 'cell_model'], {}), '(TWO_CHANNEL_CELL_MODEL_URL, cell_model)\n', (2465, 2505), False, 'from hpacellseg.utils import download_with_url\n'), ((2567, 2592), 'torch.device', 'torch.device', (['self.device'], {}), '(self.device)\n', (2579, 2592), False, 'import torch\n'), ((4478, 4502), 
'os.path.expanduser', 'os.path.expanduser', (['item'], {}), '(item)\n', (4496, 4502), False, 'import os\n'), ((4867, 4905), 'numpy.dstack', 'np.dstack', (['(item[0], item[1], item[2])'], {}), '((item[0], item[1], item[2]))\n', (4876, 4905), True, 'import numpy as np\n'), ((6211, 6254), 'skimage.transform.rescale', 'transform.rescale', (['image', 'self.scale_factor'], {}), '(image, self.scale_factor)\n', (6228, 6254), False, 'from skimage import transform, util\n'), ((4289, 4309), 'imageio.imread', 'imageio.imread', (['item'], {}), '(item)\n', (4303, 4309), False, 'import imageio\n'), ((4391, 4411), 'imageio.imread', 'imageio.imread', (['item'], {}), '(item)\n', (4405, 4411), False, 'import imageio\n'), ((6035, 6110), 'skimage.transform.resize', 'transform.resize', (['image', '(self.model_width_height, self.model_width_height)'], {}), '(image, (self.model_width_height, self.model_width_height))\n', (6051, 6110), False, 'from skimage import transform, util\n'), ((6404, 6460), 'numpy.dstack', 'np.dstack', (['(image[..., 2], image[..., 2], image[..., 2])'], {}), '((image[..., 2], image[..., 2], image[..., 2]))\n', (6413, 6460), True, 'import numpy as np\n'), ((6526, 6558), 'numpy.dstack', 'np.dstack', (['(image, image, image)'], {}), '((image, image, image))\n', (6535, 6558), True, 'import numpy as np\n'), ((6976, 6994), 'torch.tensor', 'torch.tensor', (['imgs'], {}), '(imgs)\n', (6988, 6994), False, 'import torch\n'), ((8983, 9058), 'skimage.transform.resize', 'transform.resize', (['image', '(self.model_width_height, self.model_width_height)'], {}), '(image, (self.model_width_height, self.model_width_height))\n', (8999, 9058), False, 'from skimage import transform, util\n'), ((9168, 9230), 'skimage.transform.rescale', 'transform.rescale', (['image', 'self.scale_factor'], {'multichannel': '(True)'}), '(image, self.scale_factor, multichannel=True)\n', (9185, 9230), False, 'from skimage import transform, util\n'), ((9692, 9710), 'torch.tensor', 'torch.tensor', (['imgs'], 
{}), '(imgs)\n', (9704, 9710), False, 'import torch\n'), ((4586, 4606), 'imageio.imread', 'imageio.imread', (['item'], {}), '(item)\n', (4600, 4606), False, 'import imageio\n')]
|