code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME>
#
import sys
from omsdk.http.sdkwsmanbase import WsManProtocolBase
from omsdk.http.sdkhttpep import HttpEndPoint, HttpEndPointOptions
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
class WsManProtocol(WsManProtocolBase):
    """WS-Management protocol implementation backed by a plain HTTP endpoint.

    Thin adapter: every ``_proto_*`` hook of the base class is delegated to
    an :class:`HttpEndPoint` created with SOAP-over-HTTP headers.
    """

    def __init__(self, ipaddr, creds, pOptions):
        # Python 2 needs the explicit (cls, self) form of super().
        if PY2:
            super(WsManProtocol, self).__init__(ipaddr, creds, pOptions)
        else:
            super().__init__(ipaddr, creds, pOptions)
        soap_headers = {
            'Content-Type': 'application/soap+xml;charset=UTF-8'
        }
        self.proto = HttpEndPoint(ipaddr, creds, pOptions, soap_headers)

    def _proto_connect(self):
        """Open the underlying HTTP connection."""
        self.proto.connect()

    def _proto_ship_payload(self, payload):
        """Send *payload* over the endpoint and return the raw response."""
        return self.proto.ship_payload(payload)

    def _proto_endpoint(self):
        """Return the endpoint descriptor of the underlying transport."""
        return self.proto.endpoint

    def _proto_reset(self):
        """Reset the underlying HTTP endpoint state."""
        return self.proto.reset()
| [
"omsdk.http.sdkhttpep.HttpEndPoint"
] | [((1437, 1483), 'omsdk.http.sdkhttpep.HttpEndPoint', 'HttpEndPoint', (['ipaddr', 'creds', 'pOptions', 'headers'], {}), '(ipaddr, creds, pOptions, headers)\n', (1449, 1483), False, 'from omsdk.http.sdkhttpep import HttpEndPoint, HttpEndPointOptions\n')] |
import pymysql

# Connect to the database.
# FIX: pymysql expects ``port`` as an int; passing the string "3306" raises
# a TypeError inside the socket layer.
conexion = pymysql.connect(host="localhost",
                        port=3306,
                        user="inventario2019",
                        password="<PASSWORD>.",
                        database="inventario2019",
                        )
#cursor = conexion.cursor()

# Retrieve records from the 'Usuarios' table
#registros = "SELECT * FROM inventario_energiamodel;"

# Show records
#cursor.execute(registros)
#filas = cursor.fetchall()
#for fila in filas:
#    print(fila)

# Finalize
#conexion.commit()
conexion.close()
"pymysql.connect"
] | [((56, 180), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""localhost"""', 'port': '"""3306"""', 'user': '"""inventario2019"""', 'password': '"""<PASSWORD>."""', 'database': '"""inventario2019"""'}), "(host='localhost', port='3306', user='inventario2019',\n password='<PASSWORD>.', database='inventario2019')\n", (71, 180), False, 'import pymysql\n')] |
from __future__ import print_function, division
from PIL import Image
from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize
import numpy as np
import random
import tarfile
import io
import os
import pandas as pd
import torch
from torch.utils.data import Dataset
# %% custom dataset
class PlacesDataset(Dataset):
    # Dataset of images listed in a text file, served either from a plain
    # directory or lazily extracted from an (uncompressed) tar archive.
    def __init__(self, txt_path='filelist.txt', img_dir='data', transform=None, test=False):
        """
        Initialize data set as a list of IDs corresponding to each item of data set

        :param img_dir: path to image files as a uncompressed tar archive
        :param txt_path: a text file containing names of all of images line by line
        :param transform: apply some transforms like cropping, rotating, etc on input image
        :param test: is inference time or not

        :return: a 3-value dict containing input image (y_descreen) as ground truth,
        input image X as halftone image and edge-map (y_edge) of ground truth image to feed into the network.
        """
        df = pd.read_csv(txt_path, sep=' ', index_col=0)
        self.img_names = df.index.values
        self.txt_path = txt_path
        self.img_dir = img_dir
        self.transform = transform
        self.to_tensor = ToTensor()
        self.to_pil = ToPILImage()
        # If img_dir names a tar archive, read members from it instead of a folder.
        self.get_image_selector = True if img_dir.__contains__('tar') else False
        self.tf = tarfile.open(self.img_dir) if self.get_image_selector else None
        # Ground-truth pipeline drops the last transform (assumed to be the
        # noise transform) unless we are at inference time.
        self.transform_gt = transform if test else Compose(self.transform.transforms[:-1])  # omit noise of ground truth

    def get_image_from_tar(self, name):
        """
        Gets a image by a name gathered from file list csv file

        :param name: name of targeted image
        :return: a PIL image
        """
        # tarinfo = self.tf.getmember(name)
        image = self.tf.extractfile(name)
        image = image.read()
        image = Image.open(io.BytesIO(image))
        return image

    def get_image_from_folder(self, name):
        """
        gets a image by a name gathered from file list text file

        :param name: name of targeted image
        :return: a PIL image
        """
        image = Image.open(os.path.join(self.img_dir, name))
        return image

    def __len__(self):
        """
        Return the length of data set using list of IDs

        :return: number of samples in data set
        """
        return len(self.img_names)

    def __getitem__(self, index):
        """
        Generate one item of data set. Here we apply our preprocessing things like halftone styles and
        subtractive color process using CMYK color model, generating edge-maps, etc.

        :param index: index of item in IDs list
        :return: a sample of data as a dict
        """
        if index == (self.__len__() - 1) and self.get_image_selector:  # Close tarfile opened in __init__
            self.tf.close()
        if self.get_image_selector:  # note: we prefer to extract then process!
            y_descreen = self.get_image_from_tar(self.img_names[index])
        else:
            y_descreen = self.get_image_from_folder(self.img_names[index])
        # Re-seeding with the same value before each transform call makes any
        # random transforms apply identically to the image and its noisy copy.
        seed = np.random.randint(2147483647)
        random.seed(seed)
        if self.transform is not None:
            y_noise = self.noisy_image(y_descreen)
            y_descreen = self.transform(y_descreen)
            random.seed(seed)
            y_noise = self.transform_gt(y_noise)
        sample = {'y_descreen': y_descreen,
                  'y_noise': y_noise}
        return sample

    def noisy_image(self, image):
        """
        Add Salt and Pepper noise to image and return image as same type as input.

        :param image: PIL image
        :return: PIL image
        """
        if type(image) == torch.Tensor:
            image = self.to_pil(image)
        image = np.array(image)
        s_vs_p = 0.5   # salt-to-pepper ratio
        amount = 0.015  # fraction of pixels to corrupt
        out = np.copy(image)
        # Salt mode: set randomly chosen coordinates to 1.
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = tuple([np.random.randint(0, i - 1, int(num_salt)) for i in image.shape])
        out[coords] = 1
        # Pepper mode: set randomly chosen coordinates to 0.
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple([np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape])
        out[coords] = 0
        out = ToPILImage()(out)
        return out
class RandomNoise(object):
    """Additively perturb a tensor with Gaussian noise with probability ``p``.

    With probability ``p`` a noise tensor drawn from N(mean, std) of the same
    shape as the input is added; otherwise the input is returned untouched.
    """

    def __init__(self, p, mean=0, std=0.1):
        self.p = p
        self.mean = mean
        self.std = std

    def __call__(self, img):
        # Guard clause: skip the perturbation entirely when the draw misses.
        if random.random() > self.p:
            return img
        noise = torch.empty(*img.size(), dtype=torch.float, requires_grad=False)
        noise.normal_(self.mean, self.std)
        return img + noise
class Blend(object):
    """
    Blend two input tensors with respect to the alpha value as a weight if a
    random number drawn per example is lower than p.
    """

    def __init__(self, p=0.5):
        # p: per-example probability that blending (rather than pass-through)
        # is applied.
        self.p = p

    def __call__(self, halftone, ground_truth, alpha=0.5):
        """
        :param halftone: First tensor to be blended (batch_size, channel_size, height, width)
        :param ground_truth: Second tensor to be blended with size (batch_size, channel_size, height, width)
        :param alpha: weight of linear addition of two tensors
        :return: A tensor with size of (batch_size, channel_size, height, width)
        """
        batch = halftone.size()[0]
        p = torch.zeros(batch).new_full((batch,), self.p)
        rand = torch.zeros(p.size()[0]).uniform_()
        blend = torch.zeros(halftone.size())
        mask = rand < p
        blend[mask] = halftone[mask] * (1.0 - alpha) + ground_truth[mask] * alpha
        # FIX: use the complement mask instead of ``rand > p``.  The old pair
        # of conditions (rand < p / rand > p) left examples with rand == p
        # unassigned, silently returning all-zero images for them.
        blend[~mask] = halftone[~mask]
        return blend
class UnNormalizeNative(object):
    """
    Undo a torchvision ``Normalize`` given the original mean and std.
    """

    def __init__(self, mean, std):
        self.mean = torch.tensor(mean)
        self.std = torch.tensor(std)

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be un-normalized.
        Returns:
            Tensor: Un-normalized image.
        """
        # x_norm = (x - mean) / std  =>  x = x_norm * std + mean, which is a
        # Normalize with mean' = -mean/std and std' = 1/std.
        # FIX: the previous code referenced the undefined locals ``mean`` and
        # ``std`` (NameError at call time) and returned the Normalize object
        # itself instead of applying it to ``tensor``.
        inverse = Normalize((-self.mean / self.std).tolist(), (1.0 / self.std).tolist())
        return inverse(tensor)
"numpy.copy",
"numpy.ceil",
"tarfile.open",
"torchvision.transforms.ToPILImage",
"pandas.read_csv",
"io.BytesIO",
"os.path.join",
"random.seed",
"numpy.array",
"numpy.random.randint",
"torch.tensor",
"random.random",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose"
] | [((1046, 1089), 'pandas.read_csv', 'pd.read_csv', (['txt_path'], {'sep': '""" """', 'index_col': '(0)'}), "(txt_path, sep=' ', index_col=0)\n", (1057, 1089), True, 'import pandas as pd\n'), ((1255, 1265), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1263, 1265), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((1288, 1300), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (1298, 1300), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((3181, 3210), 'numpy.random.randint', 'np.random.randint', (['(2147483647)'], {}), '(2147483647)\n', (3198, 3210), True, 'import numpy as np\n'), ((3219, 3236), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3230, 3236), False, 'import random\n'), ((3863, 3878), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3871, 3878), True, 'import numpy as np\n'), ((3937, 3951), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (3944, 3951), True, 'import numpy as np\n'), ((3971, 4008), 'numpy.ceil', 'np.ceil', (['(amount * image.size * s_vs_p)'], {}), '(amount * image.size * s_vs_p)\n', (3978, 4008), True, 'import numpy as np\n'), ((4144, 4189), 'numpy.ceil', 'np.ceil', (['(amount * image.size * (1.0 - s_vs_p))'], {}), '(amount * image.size * (1.0 - s_vs_p))\n', (4151, 4189), True, 'import numpy as np\n'), ((5919, 5937), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (5931, 5937), False, 'import torch\n'), ((5957, 5974), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (5969, 5974), False, 'import torch\n'), ((1400, 1426), 'tarfile.open', 'tarfile.open', (['self.img_dir'], {}), '(self.img_dir)\n', (1412, 1426), False, 'import tarfile\n'), ((1515, 1554), 'torchvision.transforms.Compose', 'Compose', (['self.transform.transforms[:-1]'], {}), '(self.transform.transforms[:-1])\n', (1522, 1554), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, 
Normalize\n'), ((1930, 1947), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (1940, 1947), False, 'import io\n'), ((2205, 2237), 'os.path.join', 'os.path.join', (['self.img_dir', 'name'], {}), '(self.img_dir, name)\n', (2217, 2237), False, 'import os\n'), ((3392, 3409), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3403, 3409), False, 'import random\n'), ((4319, 4331), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (4329, 4331), False, 'from torchvision.transforms import ToTensor, ToPILImage, Compose, Normalize\n'), ((4538, 4553), 'random.random', 'random.random', ([], {}), '()\n', (4551, 4553), False, 'import random\n')] |
#!/usr/bin/env python
'''
python -m spacy download en_core_web_md
spacy has a number of models and languages
https://spacy.io/usage/models
'''
import spacy

# Load the model once.  FIX: the previous code called ``spacy.load`` twice,
# discarding the first (expensive) load.
nlp = spacy.load('en_core_web_md')

# process a sentence using the model
doc = nlp("This is some text that I am processing with Spacy")

# It's that simple - all of the vectors and words are assigned after this point

# Get the vector for 'text':
doc[3].vector

# Get the mean vector for the entire sentence (useful for sentence classification etc.)
doc.vector
| [
"spacy.load"
] | [((163, 191), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (173, 191), False, 'import spacy\n'), ((199, 227), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (209, 227), False, 'import spacy\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-19 21:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates the verbose names of the
    # ``requisicionarticulo`` model and re-points ``requisicioninsumo.activo``
    # at ``inventario.ArticuloInsumo`` with CASCADE deletion.

    dependencies = [
        ('inventario', '0006_auto_20160319_1726'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='requisicionarticulo',
            options={'verbose_name': 'Requisici\xf3n de piscina', 'verbose_name_plural': 'Requesiciones de piscina'},
        ),
        migrations.AlterField(
            model_name='requisicioninsumo',
            name='activo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventario.ArticuloInsumo', verbose_name='Articulo'),
        ),
    ]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.ForeignKey"
] | [((335, 503), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""requisicionarticulo"""', 'options': "{'verbose_name': 'Requisición de piscina', 'verbose_name_plural':\n 'Requesiciones de piscina'}"}), "(name='requisicionarticulo', options={\n 'verbose_name': 'Requisición de piscina', 'verbose_name_plural':\n 'Requesiciones de piscina'})\n", (363, 503), False, 'from django.db import migrations, models\n'), ((654, 778), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""inventario.ArticuloInsumo"""', 'verbose_name': '"""Articulo"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'inventario.ArticuloInsumo', verbose_name='Articulo')\n", (671, 778), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
import json
import logging
import azure.functions as func
from __app__.commons.blockblob import AzureStorageBlockBlob
from __app__.commons.faceapi import AzureCognitiveFaceAPI
from __app__.commons.config import Config
from __app__.commons.cosmosdb import AssetDB, UserDB
config = Config()
"""
POST /api/createperson?user_id={user_id}
body:
{
"person_name": <person_name> // required
}
"""
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Create a Face API person for the given user and prepare a training asset.

    Expects ``user_id`` as a query-string parameter and ``person_name`` in the
    JSON body.  Returns 400 on missing parameters or downstream failures, 409
    when the user does not exist or has no ``person_group_id``, and the created
    person record as JSON on success.
    """
    logging.info('createperson function processed a request.')

    user_id = req.params.get('user_id')
    # FIX: initialize req_body so a parse failure does not end in a NameError
    # below, and catch ValueError *before* Exception -- the old ordering made
    # the ValueError handler unreachable (ValueError subclasses Exception).
    req_body = {}
    try:
        req_body = req.get_json()
        logging.info('req_body=%s', req_body)
    except ValueError as ve:
        logging.info(ve)
    except Exception as e:
        logging.info(e)

    person_name = req_body.get('person_name')

    if not user_id or not person_name:
        return func.HttpResponse(
            "Please pass both user_id and person_name on the query string or in the request body",
            status_code=400
        )

    ## Get User Info
    userdb = UserDB(config)
    user = None
    try:
        user = userdb.get_user(user_id)
        if not user:
            logging.info(f"A user with user_id {user_id} not exists")
            return func.HttpResponse(
                f"A user with user_id {user_id} not exists",
                status_code=409
            )
        if 'person_group_id' not in user:
            return func.HttpResponse(
                f"A user with user_id {user_id} does not have person_group_id",
                status_code=409
            )
    except Exception as e:
        logging.info(e)
        return func.HttpResponse(
            str(e),
            status_code=400
        )

    ## Create person
    storage_info = AzureStorageBlockBlob.parse_storage_conn_string(config.get_value('AzureWebJobsStorage'))
    faceapi = AzureCognitiveFaceAPI(
        config.get_value('FACEAPI_ENDPOINT'),
        config.get_value('FACEAPI_SUBKEY'),
        storage_info['AccountName'],
        storage_info['AccountKey'])

    created_person_id = None
    try:
        created_person = faceapi.create_person(user['person_group_id'], person_name)
        created_person_id = created_person.person_id
    except Exception as e:
        logging.info(e)
        return func.HttpResponse(
            str(e),
            status_code=400
        )

    ## Create an asset for train for the person
    asset_id = AssetDB.gen_random_id()
    blobclient = AzureStorageBlockBlob(storage_info['AccountName'], storage_info['AccountKey'])
    try:
        blobclient.create_container(asset_id)
    except Exception as e:
        return func.HttpResponse(
            str(e),
            status_code=400
        )

    ## Update user
    try:
        # FIX: tolerate documents without a 'persons' key instead of raising
        # KeyError (which the old code converted into an opaque 400).
        persons = user.get('persons') or []
        logging.info("persons=%s", persons)
        created_person = {
            "person_id": created_person_id,
            "person_name": person_name,
            "asset_id_for_train": asset_id
        }
        persons.append(created_person)
        user['persons'] = persons
        userdb.upsert_document(document=user)
        return func.HttpResponse(json.dumps(created_person))
    except Exception as e:
        return func.HttpResponse(
            str(e),
            status_code=400
        )
| [
"__app__.commons.cosmosdb.AssetDB.gen_random_id",
"azure.functions.HttpResponse",
"json.dumps",
"__app__.commons.config.Config",
"__app__.commons.cosmosdb.UserDB",
"__app__.commons.blockblob.AzureStorageBlockBlob",
"logging.info"
] | [((305, 313), '__app__.commons.config.Config', 'Config', ([], {}), '()\n', (311, 313), False, 'from __app__.commons.config import Config\n'), ((474, 532), 'logging.info', 'logging.info', (['"""createperson function processed a request."""'], {}), "('createperson function processed a request.')\n", (486, 532), False, 'import logging\n'), ((1013, 1027), '__app__.commons.cosmosdb.UserDB', 'UserDB', (['config'], {}), '(config)\n', (1019, 1027), False, 'from __app__.commons.cosmosdb import AssetDB, UserDB\n'), ((2259, 2282), '__app__.commons.cosmosdb.AssetDB.gen_random_id', 'AssetDB.gen_random_id', ([], {}), '()\n', (2280, 2282), False, 'from __app__.commons.cosmosdb import AssetDB, UserDB\n'), ((2298, 2376), '__app__.commons.blockblob.AzureStorageBlockBlob', 'AzureStorageBlockBlob', (["storage_info['AccountName']", "storage_info['AccountKey']"], {}), "(storage_info['AccountName'], storage_info['AccountKey'])\n", (2319, 2376), False, 'from __app__.commons.blockblob import AzureStorageBlockBlob\n'), ((614, 651), 'logging.info', 'logging.info', (['"""req_body=%s"""', 'req_body'], {}), "('req_body=%s', req_body)\n", (626, 651), False, 'import logging\n'), ((838, 969), 'azure.functions.HttpResponse', 'func.HttpResponse', (['"""Please pass both user_id and person_name on the query string or in the request body"""'], {'status_code': '(400)'}), "(\n 'Please pass both user_id and person_name on the query string or in the request body'\n , status_code=400)\n", (855, 969), True, 'import azure.functions as func\n'), ((2613, 2648), 'logging.info', 'logging.info', (['"""persons=%s"""', 'persons'], {}), "('persons=%s', persons)\n", (2625, 2648), False, 'import logging\n'), ((680, 695), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (692, 695), False, 'import logging\n'), ((727, 743), 'logging.info', 'logging.info', (['ve'], {}), '(ve)\n', (739, 743), False, 'import logging\n'), ((1108, 1165), 'logging.info', 'logging.info', (['f"""A user with user_id {user_id} not 
exists"""'], {}), "(f'A user with user_id {user_id} not exists')\n", (1120, 1165), False, 'import logging\n'), ((1179, 1258), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""A user with user_id {user_id} not exists"""'], {'status_code': '(409)'}), "(f'A user with user_id {user_id} not exists', status_code=409)\n", (1196, 1258), True, 'import azure.functions as func\n'), ((1338, 1445), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""A user with user_id {user_id} does not have person_group_id"""'], {'status_code': '(409)'}), "(\n f'A user with user_id {user_id} does not have person_group_id',\n status_code=409)\n", (1355, 1445), True, 'import azure.functions as func\n'), ((1494, 1509), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (1506, 1509), False, 'import logging\n'), ((2106, 2121), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (2118, 2121), False, 'import logging\n'), ((2930, 2956), 'json.dumps', 'json.dumps', (['created_person'], {}), '(created_person)\n', (2940, 2956), False, 'import json\n')] |
from dsynth.view_datasets.tless import TlessMultiviewDataset
from dsynth import MultiviewWarper
import numpy as np
def test_tless_dataset():
    """Smoke test: warping against the unit-test T-LESS views must pick view '0400'."""
    dataset = TlessMultiviewDataset(obj_id=2, unit_test=True)
    warper = MultiviewWarper(dataset)

    cam_R = np.reshape(dataset[1].cam_R, (3, 3)).astype(np.float32)
    cam_t = np.float32(dataset[1].cam_t)
    cam_K = np.reshape(dataset[1].cam_K, (3, 3)).astype(np.float32)

    W, view_id = warper.match_and_warp((cam_R, cam_t), cam_K, (400, 400))
    assert view_id == '0400'
| [
"dsynth.MultiviewWarper",
"numpy.reshape",
"numpy.float32",
"dsynth.view_datasets.tless.TlessMultiviewDataset"
] | [((156, 203), 'dsynth.view_datasets.tless.TlessMultiviewDataset', 'TlessMultiviewDataset', ([], {'obj_id': '(2)', 'unit_test': '(True)'}), '(obj_id=2, unit_test=True)\n', (177, 203), False, 'from dsynth.view_datasets.tless import TlessMultiviewDataset\n'), ((214, 238), 'dsynth.MultiviewWarper', 'MultiviewWarper', (['dataset'], {}), '(dataset)\n', (229, 238), False, 'from dsynth import MultiviewWarper\n'), ((312, 340), 'numpy.float32', 'np.float32', (['dataset[1].cam_t'], {}), '(dataset[1].cam_t)\n', (322, 340), True, 'import numpy as np\n'), ((248, 284), 'numpy.reshape', 'np.reshape', (['dataset[1].cam_R', '(3, 3)'], {}), '(dataset[1].cam_R, (3, 3))\n', (258, 284), True, 'import numpy as np\n'), ((349, 385), 'numpy.reshape', 'np.reshape', (['dataset[1].cam_K', '(3, 3)'], {}), '(dataset[1].cam_K, (3, 3))\n', (359, 385), True, 'import numpy as np\n')] |
import unittest
from katas.kyu_6.which_are_in import in_array
class InArrayTestCase(unittest.TestCase):
    """Unit tests for the ``in_array`` kata solution."""

    def test_equals(self):
        # Substrings must be reported sorted and without duplicates.
        strings = ['lively', 'alive', 'harp', 'sharp', 'armstrong']
        expected = ['arp', 'live', 'strong']
        self.assertEqual(in_array(['live', 'arp', 'strong'], strings), expected)
| [
"katas.kyu_6.which_are_in.in_array"
] | [((159, 249), 'katas.kyu_6.which_are_in.in_array', 'in_array', (["['live', 'arp', 'strong']", "['lively', 'alive', 'harp', 'sharp', 'armstrong']"], {}), "(['live', 'arp', 'strong'], ['lively', 'alive', 'harp', 'sharp',\n 'armstrong'])\n", (167, 249), False, 'from katas.kyu_6.which_are_in import in_array\n')] |
import types
import sqlite3
from collections import namedtuple
from functools import reduce
import numpy
from glue.lal import LIGOTimeGPS
from glue.ligolw import ligolw, lsctables, table, ilwd
from glue.ligolw.utils import process
def assign_id(row, i):
    # Stamp the row with a sim_inspiral ilwd id built from sample index *i*.
    row.simulation_id = ilwd.ilwdchar("sim_inspiral_table:sim_inspiral:%d" % i)
# Mapping from sampler/posterior parameter names to sim_inspiral column names.
# Values are either a column-name string or a callable taking (row, value)
# that performs the assignment itself.
CMAP = { "right_ascension": "longitude",
    "longitude":"longitude",
    "latitude":"latitude",
    "declination": "latitude",
    "inclination": "inclination",
    "polarization": "polarization",
    # Convert the reference time to a LIGOTimeGPS and set the geocentric time.
    "t_ref": lambda r, t: r.set_time_geocent(LIGOTimeGPS(float(t))),
    "coa_phase": "coa_phase",
    "distance": "distance",
    "mass1": "mass1",
    "mass2": "mass2",
    # SHOEHORN ALERT
    "sample_n": assign_id,
    "alpha1":"alpha1",
    "alpha2":"alpha2",
    "alpha3":"alpha3",
    # Statistical weights are shoehorned into the spare alpha* columns.
    "loglikelihood": "alpha1",
    "joint_prior": "alpha2",
    "joint_s_prior": "alpha3",
    "eccentricity":"alpha4",
    "spin1x":"spin1x",
    "spin1y":"spin1y",
    "spin1z":"spin1z",
    "spin2x":"spin2x",
    "spin2y":"spin2y",
    "spin2z":"spin2z"
}

# Columns instantiated when a fresh table has to be created.
# FIXME: Find way to intersect given cols with valid cols when making table.
# Otherwise, we'll have to add them manually and ensure they all exist
sim_valid_cols = ["process_id", "simulation_id", "inclination", "longitude", "latitude", "polarization", "geocent_end_time", "geocent_end_time_ns", "coa_phase", "distance", "mass1", "mass2", "alpha1", "alpha2", "alpha3","spin1x", "spin1y", "spin1z", "spin2x", "spin2y", "spin2z"]
sngl_valid_cols = ["process_id", "event_id", "snr", "tau0", "tau3"]
multi_valid_cols = ["process_id", "event_id", "snr"]
def append_samples_to_xmldoc(xmldoc, sampdict):
    """Append posterior samples in *sampdict* to the sim_inspiral table of *xmldoc*.

    :param xmldoc: ligolw document; a SimInspiralTable is created if absent
    :param sampdict: mapping of parameter name (or tuple of names) to an
        array of sample values, one entry per parameter
    :return: the (mutated) xmldoc
    """
    try:
        si_table = table.get_table(xmldoc, lsctables.SimInspiralTable.tableName)
        new_table = False
    # Warning: This will also get triggered if there is *more* than one table
    except ValueError:
        si_table = lsctables.New(lsctables.SimInspiralTable, sim_valid_cols)
        new_table = True

    keys = list(sampdict.keys())
    # Just in case the key/value pairs don't come out synchronized
    values = numpy.array([sampdict[k] for k in keys], object)

    # FIX: ``collections.Iterable`` was removed in Python 3.10; import from
    # collections.abc with a Python 2 fallback.
    try:
        from collections.abc import Iterable
    except ImportError:
        from collections import Iterable

    # Flatten the keys
    keys = reduce(list.__add__, [list(i) if isinstance(i, tuple) else [i] for i in keys])

    # Get the process
    # FIXME: Assumed that only we have appended information
    procid = table.get_table(xmldoc, lsctables.ProcessTable.tableName)[-1].process_id

    # map the samples to sim inspiral rows
    # NOTE :The list comprehension is to preserve the grouping of multiple
    # parameters across the transpose operation. It's probably not necessary,
    # so if speed dictates, it can be reworked by flattening before arriving
    # here
    # FIX: ``numpy.object`` (alias of the builtin) was removed in NumPy 1.24.
    for vrow in numpy.array(list(zip(*[vrow_sub.T for vrow_sub in values])), dtype=object):
        #si_table.append(samples_to_siminsp_row(si_table, **dict(zip(keys, vrow.flatten()))))
        vrow = reduce(list.__add__, [list(i) if isinstance(i, Iterable) else [i] for i in vrow])
        si_table.append(samples_to_siminsp_row(si_table, **dict(list(zip(keys, vrow)))))
        si_table[-1].process_id = procid

    if new_table:
        xmldoc.childNodes[0].appendChild(si_table)
    return xmldoc
def append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, neff=0, converged=False,**cols):
    """Append one likelihood result row to the sngl_inspiral table of *xmldoc*.

    :param xmldoc: ligolw document; a SnglInspiralTable is created if absent
    :param loglikelihood: stored in the ``snr`` column
    :param neff: effective sample count, stored in the ``tau0`` column
    :param converged: convergence flag, stored (as int) in the ``tau3`` column
    :param cols: extra column/value pairs set verbatim on the row
    :return: the (mutated) xmldoc
    """
    try:
        si_table = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
        new_table = False
        # NOTE: MultiInspiralTable has no spin columns
        #si_table = table.get_table(xmldoc, lsctables.MultiInspiralTable.tableName)
    # Warning: This will also get triggered if there is *more* than one table
    except ValueError:
        si_table = lsctables.New(lsctables.SnglInspiralTable, sngl_valid_cols + list(cols.keys()))
        new_table = True
        # NOTE: MultiInspiralTable has no spin columns
        #si_table = lsctables.New(lsctables.MultiInspiralTable, multi_valid_cols + cols.keys())

    # Get the process
    # FIXME: Assumed that only we have appended information
    procid = table.get_table(xmldoc, lsctables.ProcessTable.tableName)[-1].process_id

    # map the samples to sim inspiral rows
    si_table.append(likelihood_to_snglinsp_row(si_table, loglikelihood, neff, converged,**cols))
    si_table[-1].process_id = procid

    if new_table:
        xmldoc.childNodes[0].appendChild(si_table)

    return xmldoc
def samples_to_siminsp_row(table, colmap={}, **sampdict):
    """Build a new sim_inspiral row from posterior samples.

    :param table: target table instance (provides RowType and get_next_id);
        note this parameter deliberately shadows the ``table`` module
    :param colmap: unused, kept for interface compatibility
    :param sampdict: parameter-name/value pairs; names are translated to
        columns through the module-level CMAP
    :return: a populated row of ``table``'s row type
    """
    row = table.RowType()
    row.simulation_id = table.get_next_id()

    for sample_key, target in CMAP.items():
        if sample_key not in sampdict:
            continue
        value = sampdict[sample_key]
        if isinstance(target, types.FunctionType):
            # Callable entries (e.g. the time setter) do the assignment themselves.
            target(row, value)
        else:
            setattr(row, target, value)

    return row
def likelihood_to_snglinsp_row(table, loglikelihood, neff=0, converged=False, **cols):
    """Build a new sngl_inspiral row holding one likelihood result.

    The likelihood, effective sample count, and convergence flag are
    shoehorned into the ``snr``, ``tau0``, and ``tau3`` columns.

    :param table: target table instance (provides RowType and get_next_id)
    :param cols: extra column/value pairs set verbatim on the row
    :return: a populated row of ``table``'s row type
    """
    row = table.RowType()
    row.event_id = table.get_next_id()

    for name, value in cols.items():
        setattr(row, name, value)

    row.snr = loglikelihood
    row.tau0 = neff
    row.tau3 = int(converged)
    return row
def db_identify_param(db_fname, process_id, param):
    """
    Extract the recorded value of command-line option *param* for *process_id*
    from the ``process_params`` table of the sqlite database *db_fname*.
    E.g. ``param="event_time"`` looks up the ``--event-time`` option.

    :raises IndexError: if no matching row exists
    """
    cmd_prm = "--" + param.replace("_", "-")
    # FIX: use a parameterized query instead of %-interpolating the values
    # into the SQL text (injection-safe and quoting-safe).
    sql = "select value from process_params where process_id = ? and param = ?"
    # FIX: connect *before* the try block -- if connect() itself failed, the
    # old code raised NameError on ``connection`` in finally, masking the
    # real error.
    connection = sqlite3.connect(db_fname)
    try:
        result = list(connection.execute(sql, (str(process_id), cmd_prm)))[0][0]
    finally:
        connection.close()
    return result
def db_to_samples(db_fname, tbltype, cols):
    """
    Pull samples from db_fname and return objects that resemble rows from an
    XML table.

    :param db_fname: path to a sqlite database
    :param tbltype: lsctables.SimInspiralTable or lsctables.SnglInspiralTable
    :param cols: column names to select
    :return: list of namedtuple samples
    :raises ValueError: if tbltype is not a supported table type
    """
    if "geocent_end_time" in cols:
        # FIX: build a new list rather than appending to the caller's list --
        # the old code mutated the ``cols`` argument in place.
        cols = list(cols) + ["geocent_end_time_ns"]

    # FIXME: Get columns from db
    #if cols is None:
        #colsspec = "*"
    #else:
    colsspec = ", ".join(cols)

    if tbltype == lsctables.SimInspiralTable:
        sql = """select %s from sim_inspiral""" % colsspec
    elif tbltype == lsctables.SnglInspiralTable:
        sql = """select %s from sngl_inspiral""" % colsspec
    else:
        raise ValueError("Don't know SQL for table %s" % tbltype.tableName)

    Sample = namedtuple("Sample", cols)
    samples = []
    # FIX: connect *before* the try block -- if connect() itself failed, the
    # old code raised NameError on ``connection`` in finally, masking the
    # real error.
    connection = sqlite3.connect(db_fname)
    try:
        connection.row_factory = sqlite3.Row
        for row in connection.execute(sql):
            res = dict(list(zip(cols, row)))
            # Fold the nanosecond column back into the seconds column.
            if "geocent_end_time" in res:
                res["geocent_end_time"] += res["geocent_end_time_ns"] * 1e-9
            samples.append(Sample(**res))
    finally:
        connection.close()
    return samples
# TESTING
import sys
if __file__ == sys.argv[0]:
    # NOTE(review): this guard compares __file__ to sys.argv[0]; the
    # conventional ``if __name__ == "__main__":`` guard is more robust --
    # confirm before relying on this script entry point.
    import numpy

    # Not used yet
    # NOTE(review): CMAP as defined above has no "int_var"/"int_val" keys, so
    # these two dels would raise KeyError -- verify against the intended CMAP.
    del CMAP["int_var"]
    del CMAP["int_val"]
    del CMAP["sample_n"]

    # Reworked to resemble usage in pipeline: masses are sampled as a
    # (mass1, mass2) pair under a single tuple key.
    del CMAP["mass1"]
    del CMAP["mass2"]
    CMAP[("mass1", "mass2")] = ("mass1", "mass2")

    # Fake posterior: one row of 10 random samples per CMAP entry.
    ar = numpy.random.random((len(CMAP), 10))
    samp_dict = dict(list(zip(CMAP, ar)))
    ar = samp_dict[("mass1", "mass2")]
    samp_dict[("mass1", "mass2")] = numpy.array([ar, ar])

    # Restore the flat mass columns for the row-mapping step.
    del CMAP[("mass1", "mass2")]
    CMAP["mass1"] = "mass1"
    CMAP["mass2"] = "mass2"
    samp_dict["samp_n"] = numpy.array(list(range(0,10)))
    CMAP["sample_n"] = "sample_n"

    # Build a minimal ligolw document and append the fake samples.
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    process.register_to_xmldoc(xmldoc, sys.argv[0], {})

    append_samples_to_xmldoc(xmldoc, samp_dict)

    def gaussian(x, mu=0, std=1):
        # Normalized Gaussian pdf value at x.
        return 1/numpy.sqrt(numpy.pi*2)/std * numpy.exp(-(x-mu)**2/2/std**2)

    # Fake likelihood results over a grid of masses near (1.4, 1.5).
    m1m, m2m = 1.4, 1.5
    m1, m2 = numpy.random.random(2000).reshape(2,1000)*1.0+1.0
    loglikes = [gaussian(m1i, m1m)*gaussian(m2i, m2m) for m1i, m2i in zip(m1, m2)]

    #loglikelihood = - 7.5**2/2
    #append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, **{"mass1": 1.4, "mass2": 1.4, "ifos": "H1,L1,V1"})
    for m1i, m2i, loglikelihood in zip(m1, m2, loglikes):
        append_likelihood_result_to_xmldoc(xmldoc, loglikelihood, **{"mass1": m1i, "mass2": m2i})

    from glue.ligolw import utils
    utils.write_filename(xmldoc, "iotest.xml.gz", gz=True)
| [
"glue.ligolw.ligolw.LIGO_LW",
"glue.ligolw.table.get_next_id",
"collections.namedtuple",
"numpy.sqrt",
"sqlite3.connect",
"numpy.random.random",
"glue.ligolw.utils.write_filename",
"glue.ligolw.table.get_table",
"glue.ligolw.ligolw.Document",
"glue.ligolw.table.RowType",
"numpy.exp",
"numpy.ar... | [((282, 337), 'glue.ligolw.ilwd.ilwdchar', 'ilwd.ilwdchar', (["('sim_inspiral_table:sim_inspiral:%d' % i)"], {}), "('sim_inspiral_table:sim_inspiral:%d' % i)\n", (295, 337), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((2121, 2169), 'numpy.array', 'numpy.array', (['[sampdict[k] for k in keys]', 'object'], {}), '([sampdict[k] for k in keys], object)\n', (2132, 2169), False, 'import numpy\n'), ((4528, 4543), 'glue.ligolw.table.RowType', 'table.RowType', ([], {}), '()\n', (4541, 4543), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4568, 4587), 'glue.ligolw.table.get_next_id', 'table.get_next_id', ([], {}), '()\n', (4585, 4587), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4938, 4953), 'glue.ligolw.table.RowType', 'table.RowType', ([], {}), '()\n', (4951, 4953), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4973, 4992), 'glue.ligolw.table.get_next_id', 'table.get_next_id', ([], {}), '()\n', (4990, 4992), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((6443, 6469), 'collections.namedtuple', 'namedtuple', (['"""Sample"""', 'cols'], {}), "('Sample', cols)\n", (6453, 6469), False, 'from collections import namedtuple\n'), ((7400, 7421), 'numpy.array', 'numpy.array', (['[ar, ar]'], {}), '([ar, ar])\n', (7411, 7421), False, 'import numpy\n'), ((7621, 7638), 'glue.ligolw.ligolw.Document', 'ligolw.Document', ([], {}), '()\n', (7636, 7638), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((7684, 7735), 'glue.ligolw.utils.process.register_to_xmldoc', 'process.register_to_xmldoc', (['xmldoc', 'sys.argv[0]', '{}'], {}), '(xmldoc, sys.argv[0], {})\n', (7710, 7735), False, 'from glue.ligolw.utils import process\n'), ((8410, 8464), 'glue.ligolw.utils.write_filename', 'utils.write_filename', (['xmldoc', '"""iotest.xml.gz"""'], {'gz': '(True)'}), "(xmldoc, 'iotest.xml.gz', gz=True)\n", (8430, 8464), False, 'from glue.ligolw import 
utils\n'), ((1712, 1773), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.SimInspiralTable.tableName'], {}), '(xmldoc, lsctables.SimInspiralTable.tableName)\n', (1727, 1773), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((3415, 3477), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.SnglInspiralTable.tableName'], {}), '(xmldoc, lsctables.SnglInspiralTable.tableName)\n', (3430, 3477), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((5641, 5666), 'sqlite3.connect', 'sqlite3.connect', (['db_fname'], {}), '(db_fname)\n', (5656, 5666), False, 'import sqlite3\n'), ((6518, 6543), 'sqlite3.connect', 'sqlite3.connect', (['db_fname'], {}), '(db_fname)\n', (6533, 6543), False, 'import sqlite3\n'), ((7662, 7678), 'glue.ligolw.ligolw.LIGO_LW', 'ligolw.LIGO_LW', ([], {}), '()\n', (7676, 7678), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((1920, 1977), 'glue.ligolw.lsctables.New', 'lsctables.New', (['lsctables.SimInspiralTable', 'sim_valid_cols'], {}), '(lsctables.SimInspiralTable, sim_valid_cols)\n', (1933, 1977), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((2407, 2464), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.ProcessTable.tableName'], {}), '(xmldoc, lsctables.ProcessTable.tableName)\n', (2422, 2464), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((4115, 4172), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.ProcessTable.tableName'], {}), '(xmldoc, lsctables.ProcessTable.tableName)\n', (4130, 4172), False, 'from glue.ligolw import ligolw, lsctables, table, ilwd\n'), ((7866, 7906), 'numpy.exp', 'numpy.exp', (['(-(x - mu) ** 2 / 2 / std ** 2)'], {}), '(-(x - mu) ** 2 / 2 / std ** 2)\n', (7875, 7906), False, 'import numpy\n'), ((7837, 7861), 'numpy.sqrt', 'numpy.sqrt', (['(numpy.pi * 2)'], {}), '(numpy.pi * 2)\n', (7847, 7861), False, 'import numpy\n'), ((7935, 
7960), 'numpy.random.random', 'numpy.random.random', (['(2000)'], {}), '(2000)\n', (7954, 7960), False, 'import numpy\n')] |
"""Deploys a Cloud Foundry application using a manifest
"""
from __future__ import print_function
import os
import sys
import json
import cf_api
from cf_api.deploy_manifest import Deploy
from getpass import getpass
print('----------')
# cloud_controller_url = 'https://api.changeme.com'
cloud_controller_url = raw_input('cloud controller url: ').strip()
username = raw_input('username: ').strip()
password = getpass('password: ').strip()
print('----------')
print('Authenticating with UAA...')
cc = cf_api.new_cloud_controller(
cloud_controller_url,
client_id='cf', # the ``cf`` command uses this client and the secret below
client_secret='',
username=username,
password=password,
)
print('Login OK!')
print('----------')
organization_name = raw_input('organization name: ').strip()
# see http://apidocs.cloudfoundry.org/280/organizations/list_all_organizations.html
# for an explanation of the query parameters
print('Searching for organization "{0}"...'.format(organization_name))
req = cc.request('organizations').set_query(q='name:' + organization_name)
res = req.get()
print(str(res.response.status_code) + ' ' + res.response.reason)
print('----------')
if res.has_error:
print(str(res.error_code) + ': ' + str(res.error_message))
sys.exit(1)
space_name = raw_input('space name: ').strip()
# see http://apidocs.cloudfoundry.org/280/spaces/list_all_spaces.html
# for an explanation of the query parameters
print('Searching for space...')
spaces_url = res.resource.spaces_url
req = cc.request(spaces_url).set_query(q='name:' + space_name)
res = req.get()
print(str(res.response.status_code) + ' ' + res.response.reason)
print('----------')
if res.has_error:
print(str(res.error_code) + ': ' + str(res.error_message))
sys.exit(1)
manifest_path = raw_input('manifest path: ').strip()
manifest_path = os.path.abspath(manifest_path)
app_entries = Deploy.parse_manifest(manifest_path, cc)
for app_entry in app_entries:
app_entry.set_org_and_space(organization_name, space_name)
app_entry.set_debug(True)
app_entry.push()
app_entry.wait_for_app_start(tailing=True)
print('Deployed {0} apps successfully!'.format(len(app_entries)))
| [
"cf_api.deploy_manifest.Deploy.parse_manifest",
"getpass.getpass",
"cf_api.new_cloud_controller",
"sys.exit",
"os.path.abspath"
] | [((502, 627), 'cf_api.new_cloud_controller', 'cf_api.new_cloud_controller', (['cloud_controller_url'], {'client_id': '"""cf"""', 'client_secret': '""""""', 'username': 'username', 'password': 'password'}), "(cloud_controller_url, client_id='cf',\n client_secret='', username=username, password=password)\n", (529, 627), False, 'import cf_api\n'), ((1844, 1874), 'os.path.abspath', 'os.path.abspath', (['manifest_path'], {}), '(manifest_path)\n', (1859, 1874), False, 'import os\n'), ((1890, 1930), 'cf_api.deploy_manifest.Deploy.parse_manifest', 'Deploy.parse_manifest', (['manifest_path', 'cc'], {}), '(manifest_path, cc)\n', (1911, 1930), False, 'from cf_api.deploy_manifest import Deploy\n'), ((1269, 1280), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1277, 1280), False, 'import sys\n'), ((1762, 1773), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1770, 1773), False, 'import sys\n'), ((410, 431), 'getpass.getpass', 'getpass', (['"""password: """'], {}), "('password: ')\n", (417, 431), False, 'from getpass import getpass\n')] |
from datetime import date
from django.db import models
from accounts.models import Account
class CleaningRoster(models.Model):
"""
Model representing a randomly-selected roster for the room's cleaning
"""
# Date where the people were selected
date = models.DateField(auto_now_add=True)
cleaners = models.ManyToManyField(Account)
def save(self, *args, **kwargs):
CleaningRoster.objects.filter(date=date.today()).delete()
return super(CleaningRoster, self).save(*args, **kwargs)
def __str__(self):
return "Ménage du %s" % self.date
| [
"datetime.date.today",
"django.db.models.DateField",
"django.db.models.ManyToManyField"
] | [((274, 309), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (290, 309), False, 'from django.db import models\n'), ((326, 357), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Account'], {}), '(Account)\n', (348, 357), False, 'from django.db import models\n'), ((439, 451), 'datetime.date.today', 'date.today', ([], {}), '()\n', (449, 451), False, 'from datetime import date\n')] |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, Set, Type
from pants.backend.python.target_types import PythonLibrary, PythonTests, PythonTestsSources
from pants.base.specs import AddressSpecs, MaybeEmptyDescendantAddresses
from pants.core.goals.tailor import PutativeTarget, PutativeTargets, PutativeTargetsRequest
from pants.engine.fs import PathGlobs, Paths
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
Sources,
SourcesPaths,
SourcesPathsRequest,
Target,
UnexpandedTargets,
)
from pants.engine.unions import UnionRule
from pants.source.filespec import Filespec, matches_filespec
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PutativePythonTargetsRequest:
pass
def classify_source_files(paths: Iterable[str]) -> Dict[Type[Target], Set[str]]:
"""Returns a dict of target type -> files that belong to targets of that type."""
tests_filespec = Filespec(includes=list(PythonTestsSources.default))
test_filenames = set(
matches_filespec(tests_filespec, paths=[os.path.basename(path) for path in paths])
)
test_files = {path for path in paths if os.path.basename(path) in test_filenames}
library_files = set(paths) - test_files
return {PythonTests: test_files, PythonLibrary: library_files}
def group_by_dir(paths: Iterable[str]) -> Dict[str, Set[str]]:
"""For a list of file paths, returns a dict of directory path -> files in that dir."""
ret = defaultdict(set)
for path in paths:
dirname, filename = os.path.split(path)
ret[dirname].add(filename)
return ret
@rule
async def find_putative_targets(
req: PutativePythonTargetsRequest,
) -> PutativeTargets:
all_tgts = await Get(UnexpandedTargets, AddressSpecs([MaybeEmptyDescendantAddresses("")]))
all_sources_paths = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt.get(Sources))) for tgt in all_tgts
)
all_owned_sources = itertools.chain.from_iterable(spaths.files for spaths in all_sources_paths)
all_py_files = await Get(Paths, PathGlobs(["**/*.py"]))
unowned_py_files = set(all_py_files.files) - set(all_owned_sources)
classified_unowned_py_files = classify_source_files(unowned_py_files)
pts = []
for tgt_type, paths in classified_unowned_py_files.items():
for dirname, filenames in group_by_dir(paths).items():
name = "tests" if tgt_type == PythonTests else os.path.basename(dirname)
kwargs = {"name": name} if tgt_type == PythonTests else {}
pts.append(
PutativeTarget.for_target_type(
tgt_type, dirname, name, sorted(filenames), kwargs=kwargs
)
)
return PutativeTargets(pts)
def rules():
return [
*collect_rules(),
UnionRule(PutativeTargetsRequest, PutativePythonTargetsRequest),
]
| [
"logging.getLogger",
"dataclasses.dataclass",
"os.path.split",
"pants.engine.unions.UnionRule",
"itertools.chain.from_iterable",
"pants.base.specs.MaybeEmptyDescendantAddresses",
"collections.defaultdict",
"pants.engine.rules.collect_rules",
"pants.engine.fs.PathGlobs",
"pants.core.goals.tailor.Pu... | [((943, 970), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (960, 970), False, 'import logging\n'), ((974, 996), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (983, 996), False, 'from dataclasses import dataclass\n'), ((1770, 1786), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1781, 1786), False, 'from collections import defaultdict\n'), ((2260, 2335), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(spaths.files for spaths in all_sources_paths)'], {}), '(spaths.files for spaths in all_sources_paths)\n', (2289, 2335), False, 'import itertools\n'), ((3032, 3052), 'pants.core.goals.tailor.PutativeTargets', 'PutativeTargets', (['pts'], {}), '(pts)\n', (3047, 3052), False, 'from pants.core.goals.tailor import PutativeTarget, PutativeTargets, PutativeTargetsRequest\n'), ((1838, 1857), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (1851, 1857), False, 'import os\n'), ((3115, 3178), 'pants.engine.unions.UnionRule', 'UnionRule', (['PutativeTargetsRequest', 'PutativePythonTargetsRequest'], {}), '(PutativeTargetsRequest, PutativePythonTargetsRequest)\n', (3124, 3178), False, 'from pants.engine.unions import UnionRule\n'), ((2373, 2395), 'pants.engine.fs.PathGlobs', 'PathGlobs', (["['**/*.py']"], {}), "(['**/*.py'])\n", (2382, 2395), False, 'from pants.engine.fs import PathGlobs, Paths\n'), ((3090, 3105), 'pants.engine.rules.collect_rules', 'collect_rules', ([], {}), '()\n', (3103, 3105), False, 'from pants.engine.rules import collect_rules, rule\n'), ((1451, 1473), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1467, 1473), False, 'import os\n'), ((2742, 2767), 'os.path.basename', 'os.path.basename', (['dirname'], {}), '(dirname)\n', (2758, 2767), False, 'import os\n'), ((1358, 1380), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1374, 1380), False, 'import os\n'), 
((2068, 2101), 'pants.base.specs.MaybeEmptyDescendantAddresses', 'MaybeEmptyDescendantAddresses', (['""""""'], {}), "('')\n", (2097, 2101), False, 'from pants.base.specs import AddressSpecs, MaybeEmptyDescendantAddresses\n')] |
import torch
from ..kernels import mask as mask_cuda
class OpMask(torch.autograd.Function):
@staticmethod
def forward(ctx, x : torch.Tensor, mask : torch.Tensor, value : float) -> torch.Tensor:
assert x.is_contiguous() and x.is_cuda and x.dtype == torch.float16 and x.ndim == 3
assert mask.is_contiguous() and mask.is_cuda and mask.dtype == torch.bool and mask.ndim == 2
assert x.device == mask.device
batch, n, m = x.size()
assert mask.size() == (batch, m)
out = torch.empty(x.size(), dtype=torch.float16, device=x.device)
mask_cuda(
batch, n, m,
x.data_ptr(),
mask.data_ptr(),
value,
out.data_ptr(),
torch.cuda.current_stream().cuda_stream
)
ctx.save_for_backward(mask)
return out
@staticmethod
def backward(ctx, grad_output : torch.Tensor) -> torch.Tensor:
mask = ctx.saved_tensors[0]
batch, n, m = grad_output.size()
assert grad_output.is_cuda and grad_output.is_contiguous() and grad_output.dtype == torch.float16
grad = torch.empty(grad_output.size(), dtype=torch.float16, device=grad_output.device)
mask_cuda(
batch, n, m,
grad_output.data_ptr(),
mask.data_ptr(),
0.0,
grad.data_ptr(),
torch.cuda.current_stream().cuda_stream
)
return grad, None, None
def mask(x : torch.Tensor, mask : torch.Tensor, value : float) -> torch.Tensor:
return OpMask.apply(x, mask, value)
def mask_inplace(x : torch.Tensor, mask : torch.Tensor, value : float) -> None:
assert x.is_contiguous() and x.is_cuda and x.dtype == torch.float16 and x.ndim == 3
assert mask.is_contiguous() and mask.is_cuda and mask.dtype == torch.bool and mask.ndim == 2
assert x.device == mask.device
batch, n, m = x.size()
assert mask.size() == (batch, m)
mask_cuda(
batch, n, m,
x.data_ptr(),
mask.data_ptr(),
value,
x.data_ptr(),
torch.cuda.current_stream().cuda_stream
)
def maskTH(x : torch.Tensor, mask : torch.Tensor, value : float) -> torch.Tensor:
return torch.where(
mask[:, None, :],
x,
torch.scalar_tensor(value, device=x.device, dtype=x.dtype),
)
| [
"torch.cuda.current_stream",
"torch.scalar_tensor"
] | [((2281, 2339), 'torch.scalar_tensor', 'torch.scalar_tensor', (['value'], {'device': 'x.device', 'dtype': 'x.dtype'}), '(value, device=x.device, dtype=x.dtype)\n', (2300, 2339), False, 'import torch\n'), ((2083, 2110), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (2108, 2110), False, 'import torch\n'), ((740, 767), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (765, 767), False, 'import torch\n'), ((1385, 1412), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (1410, 1412), False, 'import torch\n')] |
import random
import time
import requests
from urllib.parse import urlparse
import os
import ctypes
import time as t
import shutil
from datetime import datetime
from screeninfo import get_monitors
from infi.systray import SysTrayIcon
if not os.path.exists(os.getcwd()+"\Image"):
os.mkdir("Image")
mratio = get_monitors()[0].width/get_monitors()[0].height
APIKEY = ""
def download(src):
mypath = os.getcwd()+r"\Image\image"
r = requests.get(src, stream=True)
if r.status_code == 200:
path = urlparse(src).path
ext = os.path.splitext(path)[1]
with open(mypath+ext, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
return mypath+ext
def choose_wallpaper(query, apikey):
global mratio
while True:
try:
srces = []
URL = "https://api.pexels.com/v1/search?query="+query
for i in range(10):
HEADERS = {
"Authorization": apikey
}
data = requests.get(URL, headers=HEADERS).json()
for photo in data["photos"]:
src = photo["src"]["original"]
dr = max(photo["width"], photo["height"])/min(photo["width"], photo["height"])
if dr >= (mratio - 0.3) and dr <= (mratio + 0.2) and photo["width"] - photo["height"] > 300:
srces.append(src)
URL = data["next_page"]
src = random.choice(srces)
path = download(src)
ctypes.windll.user32.SystemParametersInfoW(20, 0, path, 0)
break
except:
pass
def dt_time(current_time):
if current_time > 17 or current_time <= 5:
time = "night sky"
if current_time > 5 and current_time <= 7:
time = "sunrise"
if current_time > 7 and current_time <= 10:
time = "morning sun"
if current_time > 10 and current_time <= 15:
time = "midday wallpaper"
if current_time > 15 and current_time <= 17:
time = "sunset"
return time
def rcWallpaper(systray):
global APIKEY
no = datetime.now()
cu = int(no.strftime("%H"))
ti = dt_time(cu)
choose_wallpaper(ti, APIKEY)
now = datetime.now()
current_time = int(now.strftime("%H"))
time = dt_time(current_time)
choose_wallpaper(time, APIKEY)
menu_options = (("Change Wallpaper", None, rcWallpaper),)
systray = SysTrayIcon("icon.ico", "Auto Wallpaper Updater", menu_options) # I realize there is no icon there, you can add one in the same directory if you would like.
systray.start()
while True:
n = datetime.now()
ct = int(n.strftime("%H"))
if ct is not current_time:
time = dt_time(ct)
choose_wallpaper(time, APIKEY)
current_time = ct
else:
pass
t.sleep(60)
| [
"ctypes.windll.user32.SystemParametersInfoW",
"random.choice",
"urllib.parse.urlparse",
"shutil.copyfileobj",
"os.path.splitext",
"time.sleep",
"requests.get",
"os.getcwd",
"datetime.datetime.now",
"os.mkdir",
"infi.systray.SysTrayIcon",
"screeninfo.get_monitors"
] | [((2293, 2307), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2305, 2307), False, 'from datetime import datetime\n'), ((2476, 2539), 'infi.systray.SysTrayIcon', 'SysTrayIcon', (['"""icon.ico"""', '"""Auto Wallpaper Updater"""', 'menu_options'], {}), "('icon.ico', 'Auto Wallpaper Updater', menu_options)\n", (2487, 2539), False, 'from infi.systray import SysTrayIcon\n'), ((284, 301), 'os.mkdir', 'os.mkdir', (['"""Image"""'], {}), "('Image')\n", (292, 301), False, 'import os\n'), ((442, 472), 'requests.get', 'requests.get', (['src'], {'stream': '(True)'}), '(src, stream=True)\n', (454, 472), False, 'import requests\n'), ((2185, 2199), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2197, 2199), False, 'from datetime import datetime\n'), ((2670, 2684), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2682, 2684), False, 'from datetime import datetime\n'), ((2867, 2878), 'time.sleep', 't.sleep', (['(60)'], {}), '(60)\n', (2874, 2878), True, 'import time as t\n'), ((406, 417), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (415, 417), False, 'import os\n'), ((257, 268), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (266, 268), False, 'import os\n'), ((312, 326), 'screeninfo.get_monitors', 'get_monitors', ([], {}), '()\n', (324, 326), False, 'from screeninfo import get_monitors\n'), ((336, 350), 'screeninfo.get_monitors', 'get_monitors', ([], {}), '()\n', (348, 350), False, 'from screeninfo import get_monitors\n'), ((517, 530), 'urllib.parse.urlparse', 'urlparse', (['src'], {}), '(src)\n', (525, 530), False, 'from urllib.parse import urlparse\n'), ((550, 572), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (566, 572), False, 'import os\n'), ((670, 698), 'shutil.copyfileobj', 'shutil.copyfileobj', (['r.raw', 'f'], {}), '(r.raw, f)\n', (688, 698), False, 'import shutil\n'), ((1531, 1551), 'random.choice', 'random.choice', (['srces'], {}), '(srces)\n', (1544, 1551), False, 'import random\n'), ((1597, 1655), 
'ctypes.windll.user32.SystemParametersInfoW', 'ctypes.windll.user32.SystemParametersInfoW', (['(20)', '(0)', 'path', '(0)'], {}), '(20, 0, path, 0)\n', (1639, 1655), False, 'import ctypes\n'), ((1050, 1084), 'requests.get', 'requests.get', (['URL'], {'headers': 'HEADERS'}), '(URL, headers=HEADERS)\n', (1062, 1084), False, 'import requests\n')] |
import numpy as np
import fcl
import torch
# R = np.array([[0.0, -1.0, 0.0],
# [1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0]])
R = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
T = np.array([1.0, 1.865, 0])
g1 = fcl.Box(1,2,3)
t1 = fcl.Transform()
o1 = fcl.CollisionObject(g1, t1)
# g2 = fcl.Cone(1,3)
g2 = fcl.Cylinder(0.01, 1000)
t2 = fcl.Transform()
o2 = fcl.CollisionObject(g2, t2)
# request = fcl.DistanceRequest(gjk_solver_type=fcl.GJKSolverType.GST_INDEP)
# result = fcl.DistanceResult()
request = fcl.CollisionRequest(enable_contact=True)
result = fcl.CollisionResult()
# ret = fcl.distance(o1, o2, request, result)
# ret = fcl.collide(o1, o2, request, result)
size = 50, 50
yy, xx = torch.meshgrid(torch.linspace(-5, 5, size[0]), torch.linspace(-5, 5, size[1]))
grid_points = torch.stack([xx, yy], axis=2).reshape((-1, 2))
grid_labels = torch.zeros_like(grid_points)[:, 0]
for i, (x, y) in enumerate(grid_points):
print(x, y)
o2.setTranslation([x, y, 0])
fcl.update()
ret = fcl.collide(o1, o2, request, result)
grid_labels[i] = result.is_collision
print(result.is_collision)
import matplotlib.pyplot as plt
plt.scatter(grid_points[grid_labels==True, 0], grid_points[grid_labels==True, 1])
plt.show()
# print(ret, result.contacts[0].penetration_depth) | [
"fcl.Cylinder",
"fcl.update",
"matplotlib.pyplot.show",
"fcl.Transform",
"fcl.collide",
"torch.stack",
"fcl.CollisionObject",
"numpy.array",
"matplotlib.pyplot.scatter",
"fcl.CollisionRequest",
"torch.zeros_like",
"fcl.Box",
"torch.linspace",
"fcl.CollisionResult"
] | [((151, 212), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (159, 212), True, 'import numpy as np\n'), ((247, 272), 'numpy.array', 'np.array', (['[1.0, 1.865, 0]'], {}), '([1.0, 1.865, 0])\n', (255, 272), True, 'import numpy as np\n'), ((279, 295), 'fcl.Box', 'fcl.Box', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (286, 295), False, 'import fcl\n'), ((299, 314), 'fcl.Transform', 'fcl.Transform', ([], {}), '()\n', (312, 314), False, 'import fcl\n'), ((320, 347), 'fcl.CollisionObject', 'fcl.CollisionObject', (['g1', 't1'], {}), '(g1, t1)\n', (339, 347), False, 'import fcl\n'), ((375, 399), 'fcl.Cylinder', 'fcl.Cylinder', (['(0.01)', '(1000)'], {}), '(0.01, 1000)\n', (387, 399), False, 'import fcl\n'), ((405, 420), 'fcl.Transform', 'fcl.Transform', ([], {}), '()\n', (418, 420), False, 'import fcl\n'), ((426, 453), 'fcl.CollisionObject', 'fcl.CollisionObject', (['g2', 't2'], {}), '(g2, t2)\n', (445, 453), False, 'import fcl\n'), ((574, 615), 'fcl.CollisionRequest', 'fcl.CollisionRequest', ([], {'enable_contact': '(True)'}), '(enable_contact=True)\n', (594, 615), False, 'import fcl\n'), ((625, 646), 'fcl.CollisionResult', 'fcl.CollisionResult', ([], {}), '()\n', (644, 646), False, 'import fcl\n'), ((1212, 1301), 'matplotlib.pyplot.scatter', 'plt.scatter', (['grid_points[grid_labels == True, 0]', 'grid_points[grid_labels == True, 1]'], {}), '(grid_points[grid_labels == True, 0], grid_points[grid_labels ==\n True, 1])\n', (1223, 1301), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1302, 1304), True, 'import matplotlib.pyplot as plt\n'), ((778, 808), 'torch.linspace', 'torch.linspace', (['(-5)', '(5)', 'size[0]'], {}), '(-5, 5, size[0])\n', (792, 808), False, 'import torch\n'), ((810, 840), 'torch.linspace', 'torch.linspace', (['(-5)', '(5)', 'size[1]'], {}), '(-5, 5, size[1])\n', (824, 840), False, 
'import torch\n'), ((917, 946), 'torch.zeros_like', 'torch.zeros_like', (['grid_points'], {}), '(grid_points)\n', (933, 946), False, 'import torch\n'), ((1047, 1059), 'fcl.update', 'fcl.update', ([], {}), '()\n', (1057, 1059), False, 'import fcl\n'), ((1070, 1106), 'fcl.collide', 'fcl.collide', (['o1', 'o2', 'request', 'result'], {}), '(o1, o2, request, result)\n', (1081, 1106), False, 'import fcl\n'), ((856, 885), 'torch.stack', 'torch.stack', (['[xx, yy]'], {'axis': '(2)'}), '([xx, yy], axis=2)\n', (867, 885), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
router structs module.
"""
from werkzeug.routing import Map
from pyrin.core.structs import DTO
class CoreURLMap(Map):
"""
core url map class.
this extends the `Map` class to add some functionalities to it.
"""
def __init__(self, rules=None, default_subdomain="", charset="utf-8",
strict_slashes=True, merge_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors="replace", host_matching=False):
"""
initializes an instance of CoreURLMap.
:param RouteBase rules: sequence of url rules for this map.
:param str default_subdomain: the default subdomain for rules without
a subdomain defined.
:param str charset: charset of the url.
defaults to `utf-8` if not provided.
:param bool strict_slashes: if a rule ends with a slash but the matched
url does not, redirect to the url with a trailing slash.
:param bool merge_slashes: merge consecutive slashes when matching or building
urls. matches will redirect to the normalized url.
slashes in variable parts are not merged.
:param bool redirect_defaults: this will redirect to the default rule if it
wasn't visited that way. this helps creating
unique urls.
:param dict converters: a dict of converters that adds additional converters
to the list of converters. if you redefine one
converter this will override the original one.
:param bool sort_parameters: if set to `True` the url parameters are sorted.
see `url_encode` for more details.
:param function sort_key: the sort key function for `url_encode`.
:param str encoding_errors: the error method to use for decoding.
:param bool host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. if
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
"""
super().__init__(rules, default_subdomain, charset, strict_slashes,
merge_slashes, redirect_defaults, converters,
sort_parameters, sort_key, encoding_errors,
host_matching)
# a dict containing the mapping between each url and its available routes.
# in the form of: {str url: [RouteBase route]}
self._routes_by_url = DTO()
def add(self, rule_factory):
"""
adds a new rule or factory to the map and binds it.
requires that the route is not bound to another map.
:param RouteBase | RuleFactory rule_factory: a route or rule factory instance.
"""
super().add(rule_factory)
for route in rule_factory.get_rules(self):
self._routes_by_url.setdefault(route.rule, []).append(route)
def remove(self, rule_factory):
"""
removes a rule or factory from the map and unbinds it.
requires that the rule is bounded to current map.
:param RouteBase | RuleFactory rule_factory: a `RouteBase` or `RuleFactory`
instance to be removed from this map.
:raises RouteIsNotBoundedError: route is not bounded error.
:raises RouteIsNotBoundedToMapError: route is not bounded to map error.
"""
for rule in rule_factory.get_rules(self):
rule.unbind(self)
self._rules.remove(rule)
self._remove_by_endpoint(rule)
self._remove_by_url(rule)
self._remap = True
def _remove_by_endpoint(self, route):
"""
removes given route from endpoint map.
:param RouteBase route: route instance to be removed from endpoint map.
"""
routes = self._rules_by_endpoint.get(route.endpoint)
if routes is None:
routes = []
if route in routes:
routes.remove(route)
self._rules_by_endpoint[route.endpoint] = routes
if len(routes) <= 0:
self._rules_by_endpoint.pop(route.endpoint, None)
def _remove_by_url(self, route):
"""
removes given route from url map.
:param RouteBase route: route instance to be removed from url map.
"""
routes = self._routes_by_url.get(route.rule)
if routes is None:
routes = []
if route in routes:
routes.remove(route)
self._routes_by_url[route.rule] = routes
if len(routes) <= 0:
self._routes_by_url.pop(route.rule, None)
def get_routes_by_endpoint(self, endpoint):
"""
gets the available routes for given endpoint.
:param str endpoint: endpoint to get its routes.
:returns: list[RouteBase]
:rtype: list
"""
routes = self._rules_by_endpoint.get(endpoint)
if routes is None:
return []
return routes
def get_routes_by_url(self, url):
"""
gets the available routes for given url.
:param str url: url to get its routes.
:returns: list[RouteBase]
:rtype: list
"""
routes = self._routes_by_url.get(url)
if routes is None:
return []
return routes
def count_routes_by_endpoint(self, endpoint):
"""
counts routes for given endpoint.
:param str endpoint: endpoint to count its routes.
:rtype: int
"""
return len(self.get_routes_by_endpoint(endpoint))
def count_routes_by_url(self, url):
"""
counts routes for given url.
:param str url: url to count its routes.
:rtype: int
"""
return len(self.get_routes_by_url(url))
def get_routes(self):
"""
gets a tuple of all registered routes.
:rtype: tuple[pyrin.api.router.handlers.base.RouteBase]
"""
return tuple(self._rules)
| [
"pyrin.core.structs.DTO"
] | [((2878, 2883), 'pyrin.core.structs.DTO', 'DTO', ([], {}), '()\n', (2881, 2883), False, 'from pyrin.core.structs import DTO\n')] |
import torch
import torch.nn as nn
from torch import autograd
from Configs import Global_Config
def calc_Dw_loss(probs: torch.Tensor, label: int):
labels = torch.full((probs.size(0),), label, dtype=torch.float, device=Global_Config.device)
criterion = nn.BCELoss()
adversarial_loss = criterion(probs, labels)
return adversarial_loss
def R1_regulazation(r1_coefficient, probs, ws):
return (r1_coefficient / 2) * compute_grad2(probs, ws).mean()
def compute_grad2(probs, w_input):
batch_size = w_input.size(0)
grad_dout = autograd.grad(
outputs=probs.sum(), inputs=w_input,
create_graph=True, retain_graph=True, only_inputs=True
)[0]
grad_dout2 = grad_dout.pow(2)
reg = grad_dout2.view(batch_size, -1).sum(1)
return reg
| [
"torch.nn.BCELoss"
] | [((262, 274), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (272, 274), True, 'import torch.nn as nn\n')] |
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from options import *
class SeVersion(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
version_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
patch_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.2) Patch level for service engine"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'version',
'patch',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'version': version_schema,
'patch': patch_schema,
}
class ControllerVersion(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
version_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
patch_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.2) Patch level for controller"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'version',
'patch',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'version': version_schema,
'patch': patch_schema,
}
| [
"heat.common.i18n._"
] | [((490, 495), 'heat.common.i18n._', '_', (['""""""'], {}), "('')\n", (491, 495), False, 'from heat.common.i18n import _\n'), ((637, 642), 'heat.common.i18n._', '_', (['""""""'], {}), "('')\n", (638, 642), False, 'from heat.common.i18n import _\n'), ((782, 841), 'heat.common.i18n._', '_', (['"""(Introduced in: 17.2.2) Patch level for service engine"""'], {}), "('(Introduced in: 17.2.2) Patch level for service engine')\n", (783, 841), False, 'from heat.common.i18n import _\n'), ((1308, 1313), 'heat.common.i18n._', '_', (['""""""'], {}), "('')\n", (1309, 1313), False, 'from heat.common.i18n import _\n'), ((1455, 1460), 'heat.common.i18n._', '_', (['""""""'], {}), "('')\n", (1456, 1460), False, 'from heat.common.i18n import _\n'), ((1600, 1655), 'heat.common.i18n._', '_', (['"""(Introduced in: 17.2.2) Patch level for controller"""'], {}), "('(Introduced in: 17.2.2) Patch level for controller')\n", (1601, 1655), False, 'from heat.common.i18n import _\n')] |
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class StackSAModuleMSG(nn.Module):
    """Multi-scale-grouping (MSG) set-abstraction layer for stacked point sets.

    Points of all samples in a batch are stacked along dim 0 and delimited by
    per-sample counts (``xyz_batch_cnt``).  For each scale, neighbors are
    gathered with a ball query, passed through a shared per-point MLP and
    pooled over the neighborhood; the per-scale outputs are concatenated
    along the channel dimension.
    """

    def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
                 use_xyz: bool = True, pool_method='max_pool'):
        """
        Args:
            radii: list of float, list of radii to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: if True, the 3 local xyz offsets are appended to the grouped features
            pool_method: max_pool / avg_pool
        """
        super().__init__()
        assert len(radii) == len(nsamples) == len(mlps)

        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for radius, nsample, spec in zip(radii, nsamples, mlps):
            self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
            # Copy the spec: the original code did `mlp_spec[0] += 3` on the
            # caller's list, silently mutating the `mlps` argument in place.
            mlp_spec = list(spec)
            if use_xyz:
                mlp_spec[0] += 3  # grouped features carry 3 extra xyz-offset channels
            self.mlps.append(self._make_shared_mlp(mlp_spec))
        self.pool_method = pool_method

        self.init_weights()

    @staticmethod
    def _make_shared_mlp(channels):
        """Build the Conv2d(1x1)/BatchNorm/ReLU stack realizing a shared per-point MLP."""
        layers = []
        for k in range(len(channels) - 1):
            layers.extend([
                nn.Conv2d(channels[k], channels[k + 1], kernel_size=1, bias=False),
                nn.BatchNorm2d(channels[k + 1]),
                nn.ReLU()
            ])
        return nn.Sequential(*layers)

    def init_weights(self):
        """Kaiming-normal init for conv weights, zero biases, unit BN scales."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
        r"""
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0)  # (1, C, M1 + M2 ..., nsample)
            new_features = self.mlps[k](new_features)  # (1, C, M1 + M2 ..., nsample)

            # Pool over the neighborhood (last) dimension.
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)
        return new_xyz, new_features
class StackSAModulePyramid(nn.Module):
def __init__(self, *, mlps: List[List[int]], nsamples, use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
self.num_pyramid_levels = len(nsamples)
assert len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(self.num_pyramid_levels):
nsample = nsamples[i]
self.groupers.append(pointnet2_utils.QueryAndGroupPyramid(nsample, use_xyz=use_xyz))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
shared_mlps = []
for k in range(len(mlp_spec) - 1):
shared_mlps.extend([
nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[k + 1]),
nn.ReLU()
])
self.mlps.append(nn.Sequential(*shared_mlps))
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz_list, new_xyz_r_list, new_xyz_batch_cnt_list, features=None, batch_size=None, num_rois=None):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz_list: [(B, N x grid_size^3, 3)]
:param new_xyz_r_list: [(B, N x grid_size^3, 1)]
:param new_xyz_batch_cnt_list: (batch_size)
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(self.num_pyramid_levels):
new_xyz = new_xyz_list[i]
new_xyz_r = new_xyz_r_list[i]
new_xyz_batch_cnt = new_xyz_batch_cnt_list[i]
new_xyz = new_xyz.view(-1, 3).contiguous()
new_xyz_r = new_xyz_r.view(-1, 1).contiguous()
new_features, _ = self.groupers[i](
xyz, xyz_batch_cnt, new_xyz, new_xyz_r, new_xyz_batch_cnt, features
)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample)
new_features = self.mlps[i](new_features) # (1, C, M1 + M2 ..., nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
num_features = new_features.shape[1]
new_features = new_features.view(batch_size * num_rois, -1, num_features)
new_features_list.append(new_features)
new_features = torch.cat(new_features_list, dim=1) # (B x N, \sum(grid_size^3), C)
return new_features
class StackSAModuleMSGDeform(nn.Module):
"""
Set abstraction with single radius prediction for each roi
"""
def __init__(self, *, temperatures: List[float], div_coefs: List[float], radii: List[float],
nsamples: List[int], predict_nsamples: List[int],
mlps: List[List[int]], pmlps: List[List[int]], pfcs: List[List[int]],
grid_size: int, use_xyz: bool = True):
"""
:param radii: list of float, list of radii to group with
:param nsamples: list of int, number of samples in each ball query
:param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
:param use_xyz:
:param pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.grid_size = grid_size
self.MIN_R = 0.01
self.radii_list = radii
self.div_coef_list = div_coefs
self.norm_groupers = nn.ModuleList()
self.deform_groupers = nn.ModuleList()
self.feat_mlps = nn.ModuleList()
self.predict_mlps = nn.ModuleList()
self.predict_fcs = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
predict_nsample = predict_nsamples[i]
temperature = temperatures[i]
self.norm_groupers.append(
pointnet2_utils.QueryAndGroup(radius, predict_nsample, use_xyz=use_xyz)
)
self.deform_groupers.append(
pointnet2_utils.QueryAndGroupDeform(temperature, nsample, use_xyz=use_xyz)
)
mlp_spec = mlps[i]
predict_mlp_spec = pmlps[i]
if use_xyz:
mlp_spec[0] += 3
predict_mlp_spec[0] += 3
self.feat_mlps.append(self._make_mlp_layer(mlp_spec))
self.predict_mlps.append(self._make_mlp_layer(predict_mlp_spec))
fc_spec = pfcs[i]
self.predict_fcs.append(self._make_fc_layer(fc_spec))
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_mlp_layer(self, mlp_spec):
mlps = []
for i in range(len(mlp_spec) - 1):
mlps.extend([
nn.Conv2d(mlp_spec[i], mlp_spec[i + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[i + 1]),
nn.ReLU()
])
return nn.Sequential(*mlps)
def _make_fc_layer(self, fc_spec):
assert len(fc_spec) == 2
return nn.Linear(fc_spec[0], fc_spec[1], bias = True)
def forward(self, xyz, xyz_batch_cnt, rois, roi_features, features=None, temperature_decay=None):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param rois: (B, num_rois, grid_size^3, 3) roi grid points
:param roi_features: (B, num_rois, C) roi features
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
batch_size = rois.shape[0]
num_rois = rois.shape[1]
new_xyz = rois.view(batch_size, -1, 3).contiguous()
new_xyz_batch_cnt = new_xyz.new_full((batch_size), new_xyz.shape[1]).int()
new_xyz = new_xyz.view(-1, 3).contiguous()
new_features_list = []
for k in range(len(self.norm_groupers)):
# radius prediction
predicted_features, ball_idxs = self.norm_groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
) # (M, C, nsample)
predicted_features = predicted_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M, nsample)
predicted_features = self.predict_mlps[k](predicted_features) # (1, C, M, nsample)
predicted_features = F.max_pool2d(
predicted_features, kernel_size=[1, predicted_features.size(3)]
).squeeze(dim=-1) # (1, C, M)
# M = batch_size * num_rois * grid_size^3
predicted_features = predicted_features.squeeze(0).permute(0, 1).contiguous() # (M, C)
num_predicted_features = predicted_features.shape[1]
predicted_features = predicted_features.view(batch_size, num_rois, self.grid_size ** 3, num_predicted_features)
predicted_features = predicted_features.view(batch_size, num_rois, -1).contiguous()
predicted_residual_r = self.predict_fcs[k](torch.cat([predicted_features, roi_features], dim = 2)) # (batch_size, num_rois, C -> 1)
new_xyz_r = predicted_residual_r / self.div_coef_list[k] + self.radii_list[k]
# constrain predicted radius above MIN_R
new_xyz_r = torch.clamp(new_xyz_r, min = self.MIN_R)
new_xyz_r = new_xyz_r.unsqueeze(2).repeat(1, 1, self.grid_size ** 3, 1) # (batch_size, num_rois, grid_size^3, 1)
new_xyz_r = new_xyz_r.view(-1, 1).contiguous()
# feature extraction
# new_features (M, C, nsample) weights (M, nsample)
new_features, new_weights, ball_idxs = self.deform_groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_r, new_xyz_batch_cnt, features, temperature_decay
)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M, nsample)
new_features = self.feat_mlps[k](new_features) # (1, C, M, nsample)
# multiply after mlps
new_weights = new_weights.unsqueeze(0).unsqueeze(0) # (1, 1, M, nsample)
new_features = new_weights * new_features
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
new_features = torch.cat(new_features_list, dim=1) # (M1 + M2 ..., C)
return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
def __init__(self, *, mlp: List[int]):
"""
Args:
mlp: list of int
"""
super().__init__()
shared_mlps = []
for k in range(len(mlp) - 1):
shared_mlps.extend([
nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp[k + 1]),
nn.ReLU()
])
self.mlp = nn.Sequential(*shared_mlps)
def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
"""
Args:
unknown: (N1 + N2 ..., 3)
known: (M1 + M2 ..., 3)
unknow_feats: (N1 + N2 ..., C1)
known_feats: (M1 + M2 ..., C2)
Returns:
new_features: (N1 + N2 ..., C_out)
"""
dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=-1, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
if unknown_feats is not None:
new_features = torch.cat([interpolated_feats, unknown_feats], dim=1) # (N1 + N2 ..., C2 + C1)
else:
new_features = interpolated_feats
new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1)
new_features = self.mlp(new_features)
new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C)
return new_features
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Linear",
"torch.clamp",
"torch.cat"
] | [((763, 778), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (776, 778), True, 'import torch.nn as nn\n'), ((799, 814), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (812, 814), True, 'import torch.nn as nn\n'), ((3674, 3709), 'torch.cat', 'torch.cat', (['new_features_list'], {'dim': '(1)'}), '(new_features_list, dim=1)\n', (3683, 3709), False, 'import torch\n'), ((4335, 4350), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4348, 4350), True, 'import torch.nn as nn\n'), ((4371, 4386), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4384, 4386), True, 'import torch.nn as nn\n'), ((7348, 7383), 'torch.cat', 'torch.cat', (['new_features_list'], {'dim': '(1)'}), '(new_features_list, dim=1)\n', (7357, 7383), False, 'import torch\n'), ((8463, 8478), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8476, 8478), True, 'import torch.nn as nn\n'), ((8510, 8525), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8523, 8525), True, 'import torch.nn as nn\n'), ((8552, 8567), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8565, 8567), True, 'import torch.nn as nn\n'), ((8597, 8612), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8610, 8612), True, 'import torch.nn as nn\n'), ((8640, 8655), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8653, 8655), True, 'import torch.nn as nn\n'), ((10457, 10477), 'torch.nn.Sequential', 'nn.Sequential', (['*mlps'], {}), '(*mlps)\n', (10470, 10477), True, 'import torch.nn as nn\n'), ((10566, 10610), 'torch.nn.Linear', 'nn.Linear', (['fc_spec[0]', 'fc_spec[1]'], {'bias': '(True)'}), '(fc_spec[0], fc_spec[1], bias=True)\n', (10575, 10610), True, 'import torch.nn as nn\n'), ((14011, 14046), 'torch.cat', 'torch.cat', (['new_features_list'], {'dim': '(1)'}), '(new_features_list, dim=1)\n', (14020, 14046), False, 'import torch\n'), ((14557, 14584), 'torch.nn.Sequential', 'nn.Sequential', (['*shared_mlps'], {}), '(*shared_mlps)\n', (14570, 14584), 
True, 'import torch.nn as nn\n'), ((15116, 15159), 'torch.sum', 'torch.sum', (['dist_recip'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dist_recip, dim=-1, keepdim=True)\n', (15125, 15159), False, 'import torch\n'), ((12958, 12996), 'torch.clamp', 'torch.clamp', (['new_xyz_r'], {'min': 'self.MIN_R'}), '(new_xyz_r, min=self.MIN_R)\n', (12969, 12996), False, 'import torch\n'), ((15351, 15404), 'torch.cat', 'torch.cat', (['[interpolated_feats, unknown_feats]'], {'dim': '(1)'}), '([interpolated_feats, unknown_feats], dim=1)\n', (15360, 15404), False, 'import torch\n'), ((1434, 1461), 'torch.nn.Sequential', 'nn.Sequential', (['*shared_mlps'], {}), '(*shared_mlps)\n', (1447, 1461), True, 'import torch.nn as nn\n'), ((1650, 1683), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (1673, 1683), True, 'import torch.nn as nn\n'), ((1834, 1866), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1.0)'], {}), '(m.weight, 1.0)\n', (1851, 1866), True, 'import torch.nn as nn\n'), ((1883, 1911), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1900, 1911), True, 'import torch.nn as nn\n'), ((4988, 5015), 'torch.nn.Sequential', 'nn.Sequential', (['*shared_mlps'], {}), '(*shared_mlps)\n', (5001, 5015), True, 'import torch.nn as nn\n'), ((5204, 5237), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (5227, 5237), True, 'import torch.nn as nn\n'), ((5388, 5420), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1.0)'], {}), '(m.weight, 1.0)\n', (5405, 5420), True, 'import torch.nn as nn\n'), ((5437, 5465), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5454, 5465), True, 'import torch.nn as nn\n'), ((9697, 9730), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (9720, 9730), True, 'import torch.nn as nn\n'), ((9881, 9913), 
'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1.0)'], {}), '(m.weight, 1.0)\n', (9898, 9913), True, 'import torch.nn as nn\n'), ((9930, 9958), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (9947, 9958), True, 'import torch.nn as nn\n'), ((10017, 10050), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (10040, 10050), True, 'import torch.nn as nn\n'), ((12700, 12752), 'torch.cat', 'torch.cat', (['[predicted_features, roi_features]'], {'dim': '(2)'}), '([predicted_features, roi_features], dim=2)\n', (12709, 12752), False, 'import torch\n'), ((1743, 1771), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1760, 1771), True, 'import torch.nn as nn\n'), ((5297, 5325), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5314, 5325), True, 'import torch.nn as nn\n'), ((9790, 9818), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (9807, 9818), True, 'import torch.nn as nn\n'), ((10110, 10138), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (10127, 10138), True, 'import torch.nn as nn\n'), ((10284, 10350), 'torch.nn.Conv2d', 'nn.Conv2d', (['mlp_spec[i]', 'mlp_spec[i + 1]'], {'kernel_size': '(1)', 'bias': '(False)'}), '(mlp_spec[i], mlp_spec[i + 1], kernel_size=1, bias=False)\n', (10293, 10350), True, 'import torch.nn as nn\n'), ((10368, 10399), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mlp_spec[i + 1]'], {}), '(mlp_spec[i + 1])\n', (10382, 10399), True, 'import torch.nn as nn\n'), ((10417, 10426), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10424, 10426), True, 'import torch.nn as nn\n'), ((14395, 14451), 'torch.nn.Conv2d', 'nn.Conv2d', (['mlp[k]', 'mlp[k + 1]'], {'kernel_size': '(1)', 'bias': '(False)'}), '(mlp[k], mlp[k + 1], kernel_size=1, bias=False)\n', (14404, 14451), True, 'import torch.nn as 
nn\n'), ((14469, 14495), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mlp[k + 1]'], {}), '(mlp[k + 1])\n', (14483, 14495), True, 'import torch.nn as nn\n'), ((14513, 14522), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14520, 14522), True, 'import torch.nn as nn\n'), ((1235, 1301), 'torch.nn.Conv2d', 'nn.Conv2d', (['mlp_spec[k]', 'mlp_spec[k + 1]'], {'kernel_size': '(1)', 'bias': '(False)'}), '(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False)\n', (1244, 1301), True, 'import torch.nn as nn\n'), ((1323, 1354), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mlp_spec[k + 1]'], {}), '(mlp_spec[k + 1])\n', (1337, 1354), True, 'import torch.nn as nn\n'), ((1376, 1385), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1383, 1385), True, 'import torch.nn as nn\n'), ((4789, 4855), 'torch.nn.Conv2d', 'nn.Conv2d', (['mlp_spec[k]', 'mlp_spec[k + 1]'], {'kernel_size': '(1)', 'bias': '(False)'}), '(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False)\n', (4798, 4855), True, 'import torch.nn as nn\n'), ((4877, 4908), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mlp_spec[k + 1]'], {}), '(mlp_spec[k + 1])\n', (4891, 4908), True, 'import torch.nn as nn\n'), ((4930, 4939), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4937, 4939), True, 'import torch.nn as nn\n')] |
# Generated by Django 2.2.10 on 2020-09-09 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('engine', '0024_change_shape_type_choices'),
]
operations = [
migrations.AddField(
model_name='job',
name='version',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='task',
name='times_annotated',
field=models.PositiveIntegerField(default=1),
),
]
| [
"django.db.models.PositiveIntegerField"
] | [((339, 377), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (366, 377), False, 'from django.db import migrations, models\n'), ((504, 542), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)'}), '(default=1)\n', (531, 542), False, 'from django.db import migrations, models\n')] |
"""User endpoints."""
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template import loader
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from rest_framework import status
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from api_v1.models import Role, User
from api_v1.serializers.user import UsersSerializer
from api_v1.utils.database import get_model_object
from api_v1.utils.tokens import account_activation_token
from api_v1.utils.app_utils.send_mail import SendMail
class UserViewSet(ViewSet):
"""Users viewset."""
serializer_class = UsersSerializer
queryset = User.objects.all()
def create(self, request):
"""
Create a user
If successful, response payload with:
- status: 200
- data
If unsuccessful, a response payload with:
- status: 400
- error: Bad Request
- message
Status Code: 400
Request
-------
method: post
url: /api/v1/register/
"""
serializer = UsersSerializer(
data=request.data)
current_site = get_current_site(request)
if serializer.is_valid():
role = get_model_object(Role, 'role_type', 'User')
user_instance = serializer.save(role=role)
user_instance.set_password(user_instance.password)
user_instance.save()
domain = current_site.domain
# account verification
token = account_activation_token.make_token(user_instance)
uid = urlsafe_base64_encode(force_bytes(
user_instance.pk))
to_email = [
user_instance.email,
]
email_verify_template = \
'auth/email_verification.html'
subject = 'Telly Account Verification'
context = {
'template_type': 'Verify your email',
'small_text_detail': 'Thank you for '
'creating a Telly account. '
'Please verify your email '
'address to set up your account.',
'email': user_instance.email,
'domain': domain,
'uid': uid,
'token': token,
'verification_link': f"{domain}/api/v1/activate/{uid}/{token}"
}
send_mail = SendMail(
email_verify_template, context, subject, to_email)
send_mail.send()
data = {
'status': 'success',
'data': serializer.data
}
return Response(data, status=status.HTTP_201_CREATED)
data = {
'status': 'error',
'data': serializer.errors
}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
return Response('Thank you for your email confirmation. Now you can login your account.')
else:
return Response('Activation link is invalid!')
| [
"api_v1.utils.tokens.account_activation_token.make_token",
"api_v1.serializers.user.UsersSerializer",
"api_v1.models.User.objects.all",
"django.contrib.auth.login",
"api_v1.utils.tokens.account_activation_token.check_token",
"rest_framework.response.Response",
"api_v1.models.User.objects.get",
"django... | [((911, 929), 'api_v1.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (927, 929), False, 'from api_v1.models import Role, User\n'), ((1366, 1400), 'api_v1.serializers.user.UsersSerializer', 'UsersSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (1381, 1400), False, 'from api_v1.serializers.user import UsersSerializer\n'), ((1437, 1462), 'django.contrib.sites.shortcuts.get_current_site', 'get_current_site', (['request'], {}), '(request)\n', (1453, 1462), False, 'from django.contrib.sites.shortcuts import get_current_site\n'), ((3147, 3197), 'rest_framework.response.Response', 'Response', (['data'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(data, status=status.HTTP_400_BAD_REQUEST)\n', (3155, 3197), False, 'from rest_framework.response import Response\n'), ((1516, 1559), 'api_v1.utils.database.get_model_object', 'get_model_object', (['Role', '"""role_type"""', '"""User"""'], {}), "(Role, 'role_type', 'User')\n", (1532, 1559), False, 'from api_v1.utils.database import get_model_object\n'), ((1808, 1858), 'api_v1.utils.tokens.account_activation_token.make_token', 'account_activation_token.make_token', (['user_instance'], {}), '(user_instance)\n', (1843, 1858), False, 'from api_v1.utils.tokens import account_activation_token\n'), ((2751, 2810), 'api_v1.utils.app_utils.send_mail.SendMail', 'SendMail', (['email_verify_template', 'context', 'subject', 'to_email'], {}), '(email_verify_template, context, subject, to_email)\n', (2759, 2810), False, 'from api_v1.utils.app_utils.send_mail import SendMail\n'), ((2840, 2856), 'django.core.mail.send_mail.send', 'send_mail.send', ([], {}), '()\n', (2854, 2856), False, 'from django.core.mail import send_mail\n'), ((2989, 3035), 'rest_framework.response.Response', 'Response', (['data'], {'status': 'status.HTTP_201_CREATED'}), '(data, status=status.HTTP_201_CREATED)\n', (2997, 3035), False, 'from rest_framework.response import Response\n'), ((3333, 3357), 
'api_v1.models.User.objects.get', 'User.objects.get', ([], {'pk': 'uid'}), '(pk=uid)\n', (3349, 3357), False, 'from api_v1.models import Role, User\n'), ((3487, 3536), 'api_v1.utils.tokens.account_activation_token.check_token', 'account_activation_token.check_token', (['user', 'token'], {}), '(user, token)\n', (3523, 3536), False, 'from api_v1.utils.tokens import account_activation_token\n'), ((3608, 3628), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (3613, 3628), False, 'from django.contrib.auth import authenticate, login\n'), ((3648, 3735), 'rest_framework.response.Response', 'Response', (['"""Thank you for your email confirmation. Now you can login your account."""'], {}), "(\n 'Thank you for your email confirmation. Now you can login your account.')\n", (3656, 3735), False, 'from rest_framework.response import Response\n'), ((3764, 3803), 'rest_framework.response.Response', 'Response', (['"""Activation link is invalid!"""'], {}), "('Activation link is invalid!')\n", (3772, 3803), False, 'from rest_framework.response import Response\n'), ((1899, 1928), 'django.utils.encoding.force_bytes', 'force_bytes', (['user_instance.pk'], {}), '(user_instance.pk)\n', (1910, 1928), False, 'from django.utils.encoding import force_bytes, force_text\n'), ((3283, 3312), 'django.utils.http.urlsafe_base64_decode', 'urlsafe_base64_decode', (['uidb64'], {}), '(uidb64)\n', (3304, 3312), False, 'from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\n')] |
# -*- coding: utf-8 -*-
import librosa.display
import librosa as lb
import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
import time
import multiprocessing
import itertools
import sys
from collections import OrderedDict
from more_itertools import unique_everseen
from scipy.stats import skew
from scipy.stats import kurtosis
#This python script will load in songs and extract features from the waveform. It will then create a dictionary of all the results, ready for plotting in another script.
#At the top we have a load of functions pre-defined, skip down to __main__ to see the steps we run
#a function to split up a song into TIME chunks
def splitT(mint,maxt,songdat):
splittime=[]
for i in range(mint,maxt):
splittime.append(songdat[:,i]) # first axis is freq, second axis is time. Return all freq for specific time range.
return (np.array(splittime))
#a function to split up a song into FREQ chunks
def splitF(minv, maxv, songdat):
splitfreq = []
for i in range(minv,maxv):
splitfreq.append(songdat[i,:]) # first axis is freq, second axis is time. Return all time for specific freq range.
return (np.array(splitfreq))
#This is the main function which gets features from the songs. Most values returned are the mean of the whole time series, hence '_a'.
def get_features_mean(song,sr,hop_length,n_fft):
try:
print('extracting features...')
y_harmonic, y_percussive = lb.effects.hpss(song) #split song into harmonic and percussive parts
stft_harmonic=lb.core.stft(y_harmonic, n_fft=n_fft, hop_length=hop_length) #Compute power spectrogram.
stft_percussive=lb.core.stft(y_percussive, n_fft=n_fft, hop_length=hop_length) #Compute power spectrogram.
#stft_all=lb.core.stft(song, n_fft=n_fft, hop_length=hop_length) #Compute power spectrogram.
band_resolution=[5] #[5,25] Choose number of bands, do low and high resolution?
bands_dict=OrderedDict()
for no_bands in band_resolution:
bands=np.logspace(1.3,4,no_bands)/10 #note that as n_fft is 2050 (I've decided this is sensible resolution), bands/10=freq
bands_int=bands.astype(int)
bands_int_unique=list(unique_everseen(bands_int)) #removing double entries less than 100Hz, because logspace bunches up down there and we don't need doubles when rounding to the nearest 10 Hz.
for i in range(0,len(bands_int_unique)-1):
_h=lb.feature.rmse(y=(splitF(bands_int_unique[i],bands_int_unique[i+1],stft_harmonic)))
_p=lb.feature.rmse(y=(splitF(bands_int_unique[i],bands_int_unique[i+1],stft_percussive)))
#Calculate statistics for harmoinc and percussive over the time series.
rms_h=np.mean(np.abs(_h))
std_h=np.std(np.abs(_h))
skew_h=skew(np.mean(np.abs(_h), axis=0)) #skew of the time series (avg along freq axis, axis=0)
kurtosis_h=kurtosis(np.mean(np.abs(_h), axis=0), fisher=True, bias=True) #kurtosis of time series (avg along freq axis=0)
rms_p=np.mean(np.abs(_p))
std_p=np.std(np.abs(_p))
skew_p=skew(np.mean(np.abs(_p), axis=0)) #skew of the time series (avg along freq axis, axis=0)
kurtosis_p=kurtosis(np.mean(np.abs(_p), axis=0), fisher=True, bias=True) #kurtosis of time series (avg along freq axis=0)
#Append results to dict, with numbers as band labels
bands_dict.update({'{0}band_rms_h{1}'.format(no_bands,i):rms_h,'{0}band_rms_p{1}'.format(no_bands,i):rms_p})
bands_dict.update({'{0}band_std_h{1}'.format(no_bands,i):std_h,'{0}band_std_p{1}'.format(no_bands,i):std_p})
bands_dict.update({'{0}band_skew_h{1}'.format(no_bands,i):skew_h,'{0}band_skew_p{1}'.format(no_bands,i):skew_p})
bands_dict.update({'{0}band_kurtosis_h{1}'.format(no_bands,i):kurtosis_h,'{0}band_kurtosis_p{1}'.format(no_bands,i):kurtosis_p})
#stft=lb.feature.chroma_stft(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute a chromagram from a waveform or power spectrogram.
#stft_a=np.mean(stft[0])
#stft_std=np.std(stft[0])
#rmse=lb.feature.rmse(y=song) #Compute root-mean-square (RMS) energy for each frame, either from the audio samples y or from a spectrogram S.
#rmse_a=np.mean(rmse)
#rmse_std=np.std(rmse)
rmseH=np.abs(lb.feature.rmse(y=stft_harmonic)) #Compute root-mean-square (RMS) energy for harmonic
rmseH_a=np.mean(rmseH)
rmseH_std=np.std(rmseH)
rmseH_skew=skew(np.mean(rmseH, axis=0))
rmseH_kurtosis=kurtosis(np.mean(rmseH, axis=0), fisher=True, bias=True)
rmseP=np.abs(lb.feature.rmse(y=stft_percussive)) #Compute root-mean-square (RMS) energy for percussive
rmseP_a=np.mean(rmseP)
rmseP_std=np.std(rmseP)
rmseP_skew=skew(np.mean(rmseP, axis=0))
rmseP_kurtosis=kurtosis(np.mean(rmseP, axis=0), fisher=True, bias=True)
centroid=lb.feature.spectral_centroid(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute the spectral centroid.
centroid_a=np.mean(centroid)
centroid_std=np.std(centroid)
bw=lb.feature.spectral_bandwidth(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute p’th-order spectral bandwidth:
bw_a=np.mean(bw)
bw_std=np.std(bw)
contrast=lb.feature.spectral_contrast(song, sr, n_fft=n_fft, hop_length=hop_length) #Compute spectral contrast [R16]
contrast_a=np.mean(contrast)
contrast_std=np.std(contrast)
polyfeat=lb.feature.poly_features(y_harmonic, sr, n_fft=n_fft, hop_length=hop_length) #Get coefficients of fitting an nth-order polynomial to the columns of a spectrogram.
polyfeat_a=np.mean(polyfeat[0])
polyfeat_std=np.std(polyfeat[0])
tonnetz=lb.feature.tonnetz(librosa.effects.harmonic(y_harmonic), sr) #Computes the tonal centroid features (tonnetz), following the method of [R17].
tonnetz_a=np.mean(tonnetz)
tonnetz_std=np.std(tonnetz)
zcr=lb.feature.zero_crossing_rate(song, sr, hop_length=hop_length) #zero crossing rate
zcr_a=np.mean(zcr)
zcr_std=np.std(zcr)
onset_env=lb.onset.onset_strength(y_percussive, sr=sr)
onset_a=np.mean(onset_env)
onset_std=np.std(onset_env)
D = librosa.stft(song)
times = librosa.frames_to_time(np.arange(D.shape[1])) #not returned, but could be if you want to plot things as a time series
bpm,beats=lb.beat.beat_track(y=y_percussive, sr=sr, onset_envelope=onset_env, units='time')
beats_a=np.mean(beats)
beats_std=np.std(beats)
features_dict=OrderedDict({'rmseP_a':rmseP_a,'rmseP_std':rmseP_std,'rmseH_a':rmseH_a,'rmseH_std':rmseH_std,'centroid_a':centroid_a,'centroid_std':centroid_std,'bw_a':bw_a,'bw_std':bw_std,'contrast_a':contrast_a,'contrast_std':contrast_std,'polyfeat_a':polyfeat_a,'polyfeat_std':polyfeat_std,'tonnetz_a':tonnetz_a,'tonnetz_std':tonnetz_std,'zcr_a':zcr_a,'zcr_std':zcr_std,'onset_a':onset_a,'onset_std':onset_std,'bpm':bpm, 'rmseP_skew':rmseP_skew, 'rmseP_kurtosis':rmseP_kurtosis, 'rmseH_skew':rmseH_skew, 'rmseH_kurtosis':rmseH_kurtosis})
combine_features={**features_dict,**bands_dict}
print('features extracted successfully')
return combine_features
except:
print('.'*20+'FAILED'+'.'*20)
print('.'*40)
#a function to look at beat tracking... not used in machine learning yet, just random investigations.
def beattrack(song,sr,hop_length,n_fft):
y_harmonic, y_percussive = lb.effects.hpss(song)
beattrack=lb.beat.beat_track(y=y_percussive, sr=sr, onset_envelope=None, hop_length=hop_length, start_bpm=120.0, tightness=100, trim=True, bpm=None, units='frames')
#load music function, accepts any format i've encountered: mp3,wav,wma bla bla
def load_music(songname1,songpath1):
try:
print('loading the song: {0} ......... located here: {1} '.format(songname1, songpath1))
songdata1, sr1 = lb.load(songpath1) #librosa library used to grab songdata and sample rate
print ('done........ '+songname1)
return [songname1,songdata1,sr1]
except: #the song could be corrupt? you could be trying to load something which isn't a song?
print('..............................FAILED...............................')
print(songpath1)
print('...................................................................')
#functions for saving/loading the python dictionaries to disk
def save_obj(obj, name):
    """Pickle *obj* to ``<name>.pkl`` using the highest available protocol."""
    target = name + '.pkl'
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored in ``<name>.pkl``."""
    source = name + '.pkl'
    with open(source, 'rb') as handle:
        return pickle.load(handle)
#If you want a grid-plot to test anything out, this will help. Although I've made sure get_features returns only averaged values, not time-series data, so meh.
def gridplot(data_dict,feature,size,N,ind):
    """Plot one feature for every song in *data_dict* on a size x size grid.

    Each subplot shows the N-point moving average of
    ``data_dict[key][feature][ind]`` (assumes that entry is a time series —
    TODO confirm against the feature-extraction output) and is titled with
    the first 3 characters of the song name.  The figure is saved to
    'test.png'.
    """
    f, axarr = plt.subplots(size, size, sharey=True)
    # (i, j) walks the grid column-first: j advances each song, i advances
    # when a column fills up.
    i=0
    j=0
    for key in data_dict:
        #print (i,j)
        # np.ones((N,))/N convolved in 'valid' mode = N-point moving average
        axarr[i,j].plot(np.convolve(data_dict[key][feature][ind],np.ones((N,))/N, mode='valid'))
        axarr[i, j].set_title(key[:3])
        if j==size-1: i+=1
        j=0 if j==size-1 else j+1
    # hide duplicated y tick labels on every column except the first
    for i in range(1,size,1):
        plt.setp([a.get_yticklabels() for a in axarr[:, i]], visible=False)
    plt.savefig('test.png')
#OK so here we go...
if __name__ == "__main__":
    # Pipeline: load every song in the given folder (in parallel), extract
    # features from each (in parallel), then pickle the resulting dictionary.
    start_load=time.time() #we're going to want know how long this takes...
    num_workers = multiprocessing.cpu_count() #if we don't use multiple cores we may as well give up now. This is how many your computer has.
    print('you have {0} cores available to do your bidding...'.format(num_workers))
    # NOTE(review): the detected cpu_count above is immediately overridden here.
    num_workers=32 #I was playing around with changing this
    #multi=int(sys.argv[1]) #at one point I was testing this as a command line input
    n_fft1=2050 #important parameter here; this is the size of the fft window. these are sensible values
    hop_length1=441 #n_fft/5 is a sensisble value. too large and you don't sample properly.
    #create song database, songdb:
    songname_tmp=[]
    songpath_tmp=[]
    #path='./audio_alex/'
    path=sys.argv[1] #the only command line input is the path to the folder of music
    print(path)
    savefile=str(path)+'_data' #it's saved with the same folder name but with _data.pkl on the end.
    #now load song data in
    for song in os.listdir(path):
        #print (song)
        songname_tmp.append(song)
        songpath_tmp.append(path+'/'+song)
    #print(songname)
    songname=songname_tmp #i'm just reassigning the name incase of tests with commented out lines...
    songpath=songpath_tmp
    #if you want to test this on a small number of songs first (e.g. 32), replace previous two lines with the following:
    #songname=songname_tmp[:31] #remember indices starts at zero.
    #songname=songname_tmp[:31]
    print('loading songs...')
    #Here we go with multi-processing, loading all our song data in
    with multiprocessing.Pool(processes=num_workers) as pool:
        songdb=pool.starmap(load_music,zip(songname,songpath)) #btw a starmap is a way to pass multiple arguments to a function using multi-process
        pool.close()
        pool.join()
    print('finished loading songs into songdb')
    #print (songdb)
    print ('loaded {0} songs into memory'.format(len(songdb)))
    # load_music returns None on decode failure; drop those entries
    songdb=[x for x in songdb if x is not None] #remove any entries where loading may have failed for any reason (rare cases)
    #parse song data to individual lists ready for feature extraction function (because we can't slice nested lists)
    song_name=[] #text
    song_data=[] #list of numbers
    song_sr=[] #sample rate
    for song1 in songdb:
        song_name.append(song1[0])
        song_data.append(song1[1])
        song_sr.append(song1[2])
    start_feat = time.time() #note the time
    print("Data is all ready, now extracting features from the songs...")
    #extract features from songs with multiprocesssing
    # maxtasksperchild=1 recycles each worker after one song, keeping memory
    # use bounded while processing large audio arrays
    with multiprocessing.Pool(processes=num_workers,maxtasksperchild=1) as pool:
        res=pool.starmap(get_features_mean,zip(song_data,song_sr,itertools.repeat(hop_length1),itertools.repeat(n_fft1)))
        pool.close()
        pool.join()
    #concatenate each songs features (res) into dictionary
    print('concatenating results into a massive dictionary...')
    data_dict_mean={}
    for i in range(0,len(songdb)):
        data_dict_mean.update({song_name[i]:res[i]})
    #print features to screen to check
    print('The features extracted from the songs are: ')
    print(res[0].keys())
    print('saving dictionary to disk...')
    save_obj(data_dict_mean,savefile)
    end_feat=time.time() #note finish time
    print("loading time: {0} seconds".format(start_feat-start_load))
    print("feature extraction time: {0} seconds".format(end_feat-start_feat))
    print("total time: {0} seconds".format(end_feat-start_load))
    print('finished')
| [
"librosa.feature.poly_features",
"librosa.feature.zero_crossing_rate",
"multiprocessing.cpu_count",
"numpy.array",
"librosa.feature.spectral_centroid",
"librosa.feature.spectral_bandwidth",
"librosa.feature.spectral_contrast",
"librosa.load",
"numpy.arange",
"numpy.mean",
"itertools.repeat",
"... | [((881, 900), 'numpy.array', 'np.array', (['splittime'], {}), '(splittime)\n', (889, 900), True, 'import numpy as np\n'), ((1169, 1188), 'numpy.array', 'np.array', (['splitfreq'], {}), '(splitfreq)\n', (1177, 1188), True, 'import numpy as np\n'), ((7649, 7670), 'librosa.effects.hpss', 'lb.effects.hpss', (['song'], {}), '(song)\n', (7664, 7670), True, 'import librosa as lb\n'), ((7685, 7849), 'librosa.beat.beat_track', 'lb.beat.beat_track', ([], {'y': 'y_percussive', 'sr': 'sr', 'onset_envelope': 'None', 'hop_length': 'hop_length', 'start_bpm': '(120.0)', 'tightness': '(100)', 'trim': '(True)', 'bpm': 'None', 'units': '"""frames"""'}), "(y=y_percussive, sr=sr, onset_envelope=None, hop_length=\n hop_length, start_bpm=120.0, tightness=100, trim=True, bpm=None, units=\n 'frames')\n", (7703, 7849), True, 'import librosa as lb\n'), ((9034, 9071), 'matplotlib.pyplot.subplots', 'plt.subplots', (['size', 'size'], {'sharey': '(True)'}), '(size, size, sharey=True)\n', (9046, 9071), True, 'import matplotlib.pyplot as plt\n'), ((9442, 9465), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.png"""'], {}), "('test.png')\n", (9453, 9465), True, 'import matplotlib.pyplot as plt\n'), ((9541, 9552), 'time.time', 'time.time', ([], {}), '()\n', (9550, 9552), False, 'import time\n'), ((9624, 9651), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9649, 9651), False, 'import multiprocessing\n'), ((10576, 10592), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (10586, 10592), False, 'import os\n'), ((12129, 12140), 'time.time', 'time.time', ([], {}), '()\n', (12138, 12140), False, 'import time\n'), ((13046, 13057), 'time.time', 'time.time', ([], {}), '()\n', (13055, 13057), False, 'import time\n'), ((1459, 1480), 'librosa.effects.hpss', 'lb.effects.hpss', (['song'], {}), '(song)\n', (1474, 1480), True, 'import librosa as lb\n'), ((1550, 1610), 'librosa.core.stft', 'lb.core.stft', (['y_harmonic'], {'n_fft': 'n_fft', 'hop_length': 
'hop_length'}), '(y_harmonic, n_fft=n_fft, hop_length=hop_length)\n', (1562, 1610), True, 'import librosa as lb\n'), ((1663, 1725), 'librosa.core.stft', 'lb.core.stft', (['y_percussive'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(y_percussive, n_fft=n_fft, hop_length=hop_length)\n', (1675, 1725), True, 'import librosa as lb\n'), ((1962, 1975), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1973, 1975), False, 'from collections import OrderedDict\n'), ((4557, 4571), 'numpy.mean', 'np.mean', (['rmseH'], {}), '(rmseH)\n', (4564, 4571), True, 'import numpy as np\n'), ((4590, 4603), 'numpy.std', 'np.std', (['rmseH'], {}), '(rmseH)\n', (4596, 4603), True, 'import numpy as np\n'), ((4860, 4874), 'numpy.mean', 'np.mean', (['rmseP'], {}), '(rmseP)\n', (4867, 4874), True, 'import numpy as np\n'), ((4893, 4906), 'numpy.std', 'np.std', (['rmseP'], {}), '(rmseP)\n', (4899, 4906), True, 'import numpy as np\n'), ((5053, 5127), 'librosa.feature.spectral_centroid', 'lb.feature.spectral_centroid', (['song', 'sr'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(song, sr, n_fft=n_fft, hop_length=hop_length)\n', (5081, 5127), True, 'import librosa as lb\n'), ((5179, 5196), 'numpy.mean', 'np.mean', (['centroid'], {}), '(centroid)\n', (5186, 5196), True, 'import numpy as np\n'), ((5218, 5234), 'numpy.std', 'np.std', (['centroid'], {}), '(centroid)\n', (5224, 5234), True, 'import numpy as np\n'), ((5246, 5321), 'librosa.feature.spectral_bandwidth', 'lb.feature.spectral_bandwidth', (['song', 'sr'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(song, sr, n_fft=n_fft, hop_length=hop_length)\n', (5275, 5321), True, 'import librosa as lb\n'), ((5375, 5386), 'numpy.mean', 'np.mean', (['bw'], {}), '(bw)\n', (5382, 5386), True, 'import numpy as np\n'), ((5402, 5412), 'numpy.std', 'np.std', (['bw'], {}), '(bw)\n', (5408, 5412), True, 'import numpy as np\n'), ((5430, 5504), 'librosa.feature.spectral_contrast', 'lb.feature.spectral_contrast', (['song', 'sr'], 
{'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(song, sr, n_fft=n_fft, hop_length=hop_length)\n', (5458, 5504), True, 'import librosa as lb\n'), ((5557, 5574), 'numpy.mean', 'np.mean', (['contrast'], {}), '(contrast)\n', (5564, 5574), True, 'import numpy as np\n'), ((5596, 5612), 'numpy.std', 'np.std', (['contrast'], {}), '(contrast)\n', (5602, 5612), True, 'import numpy as np\n'), ((5630, 5706), 'librosa.feature.poly_features', 'lb.feature.poly_features', (['y_harmonic', 'sr'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(y_harmonic, sr, n_fft=n_fft, hop_length=hop_length)\n', (5654, 5706), True, 'import librosa as lb\n'), ((5812, 5832), 'numpy.mean', 'np.mean', (['polyfeat[0]'], {}), '(polyfeat[0])\n', (5819, 5832), True, 'import numpy as np\n'), ((5854, 5873), 'numpy.std', 'np.std', (['polyfeat[0]'], {}), '(polyfeat[0])\n', (5860, 5873), True, 'import numpy as np\n'), ((6049, 6065), 'numpy.mean', 'np.mean', (['tonnetz'], {}), '(tonnetz)\n', (6056, 6065), True, 'import numpy as np\n'), ((6086, 6101), 'numpy.std', 'np.std', (['tonnetz'], {}), '(tonnetz)\n', (6092, 6101), True, 'import numpy as np\n'), ((6114, 6176), 'librosa.feature.zero_crossing_rate', 'lb.feature.zero_crossing_rate', (['song', 'sr'], {'hop_length': 'hop_length'}), '(song, sr, hop_length=hop_length)\n', (6143, 6176), True, 'import librosa as lb\n'), ((6212, 6224), 'numpy.mean', 'np.mean', (['zcr'], {}), '(zcr)\n', (6219, 6224), True, 'import numpy as np\n'), ((6241, 6252), 'numpy.std', 'np.std', (['zcr'], {}), '(zcr)\n', (6247, 6252), True, 'import numpy as np\n'), ((6271, 6315), 'librosa.onset.onset_strength', 'lb.onset.onset_strength', (['y_percussive'], {'sr': 'sr'}), '(y_percussive, sr=sr)\n', (6294, 6315), True, 'import librosa as lb\n'), ((6332, 6350), 'numpy.mean', 'np.mean', (['onset_env'], {}), '(onset_env)\n', (6339, 6350), True, 'import numpy as np\n'), ((6369, 6386), 'numpy.std', 'np.std', (['onset_env'], {}), '(onset_env)\n', (6375, 6386), True, 'import numpy as np\n'), 
((6570, 6656), 'librosa.beat.beat_track', 'lb.beat.beat_track', ([], {'y': 'y_percussive', 'sr': 'sr', 'onset_envelope': 'onset_env', 'units': '"""time"""'}), "(y=y_percussive, sr=sr, onset_envelope=onset_env, units=\n 'time')\n", (6588, 6656), True, 'import librosa as lb\n'), ((6668, 6682), 'numpy.mean', 'np.mean', (['beats'], {}), '(beats)\n', (6675, 6682), True, 'import numpy as np\n'), ((6701, 6714), 'numpy.std', 'np.std', (['beats'], {}), '(beats)\n', (6707, 6714), True, 'import numpy as np\n'), ((6738, 7336), 'collections.OrderedDict', 'OrderedDict', (["{'rmseP_a': rmseP_a, 'rmseP_std': rmseP_std, 'rmseH_a': rmseH_a,\n 'rmseH_std': rmseH_std, 'centroid_a': centroid_a, 'centroid_std':\n centroid_std, 'bw_a': bw_a, 'bw_std': bw_std, 'contrast_a': contrast_a,\n 'contrast_std': contrast_std, 'polyfeat_a': polyfeat_a, 'polyfeat_std':\n polyfeat_std, 'tonnetz_a': tonnetz_a, 'tonnetz_std': tonnetz_std,\n 'zcr_a': zcr_a, 'zcr_std': zcr_std, 'onset_a': onset_a, 'onset_std':\n onset_std, 'bpm': bpm, 'rmseP_skew': rmseP_skew, 'rmseP_kurtosis':\n rmseP_kurtosis, 'rmseH_skew': rmseH_skew, 'rmseH_kurtosis': rmseH_kurtosis}"], {}), "({'rmseP_a': rmseP_a, 'rmseP_std': rmseP_std, 'rmseH_a': rmseH_a,\n 'rmseH_std': rmseH_std, 'centroid_a': centroid_a, 'centroid_std':\n centroid_std, 'bw_a': bw_a, 'bw_std': bw_std, 'contrast_a': contrast_a,\n 'contrast_std': contrast_std, 'polyfeat_a': polyfeat_a, 'polyfeat_std':\n polyfeat_std, 'tonnetz_a': tonnetz_a, 'tonnetz_std': tonnetz_std,\n 'zcr_a': zcr_a, 'zcr_std': zcr_std, 'onset_a': onset_a, 'onset_std':\n onset_std, 'bpm': bpm, 'rmseP_skew': rmseP_skew, 'rmseP_kurtosis':\n rmseP_kurtosis, 'rmseH_skew': rmseH_skew, 'rmseH_kurtosis': rmseH_kurtosis}\n )\n", (6749, 7336), False, 'from collections import OrderedDict\n'), ((8088, 8106), 'librosa.load', 'lb.load', (['songpath1'], {}), '(songpath1)\n', (8095, 8106), True, 'import librosa as lb\n'), ((8676, 8720), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), 
'(obj, f, pickle.HIGHEST_PROTOCOL)\n', (8687, 8720), False, 'import pickle\n'), ((8799, 8813), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8810, 8813), False, 'import pickle\n'), ((11217, 11260), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_workers'}), '(processes=num_workers)\n', (11237, 11260), False, 'import multiprocessing\n'), ((12306, 12369), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_workers', 'maxtasksperchild': '(1)'}), '(processes=num_workers, maxtasksperchild=1)\n', (12326, 12369), False, 'import multiprocessing\n'), ((4455, 4487), 'librosa.feature.rmse', 'lb.feature.rmse', ([], {'y': 'stft_harmonic'}), '(y=stft_harmonic)\n', (4470, 4487), True, 'import librosa as lb\n'), ((4628, 4650), 'numpy.mean', 'np.mean', (['rmseH'], {'axis': '(0)'}), '(rmseH, axis=0)\n', (4635, 4650), True, 'import numpy as np\n'), ((4684, 4706), 'numpy.mean', 'np.mean', (['rmseH'], {'axis': '(0)'}), '(rmseH, axis=0)\n', (4691, 4706), True, 'import numpy as np\n'), ((4754, 4788), 'librosa.feature.rmse', 'lb.feature.rmse', ([], {'y': 'stft_percussive'}), '(y=stft_percussive)\n', (4769, 4788), True, 'import librosa as lb\n'), ((4931, 4953), 'numpy.mean', 'np.mean', (['rmseP'], {'axis': '(0)'}), '(rmseP, axis=0)\n', (4938, 4953), True, 'import numpy as np\n'), ((4987, 5009), 'numpy.mean', 'np.mean', (['rmseP'], {'axis': '(0)'}), '(rmseP, axis=0)\n', (4994, 5009), True, 'import numpy as np\n'), ((6457, 6478), 'numpy.arange', 'np.arange', (['D.shape[1]'], {}), '(D.shape[1])\n', (6466, 6478), True, 'import numpy as np\n'), ((2035, 2064), 'numpy.logspace', 'np.logspace', (['(1.3)', '(4)', 'no_bands'], {}), '(1.3, 4, no_bands)\n', (2046, 2064), True, 'import numpy as np\n'), ((2226, 2252), 'more_itertools.unique_everseen', 'unique_everseen', (['bands_int'], {}), '(bands_int)\n', (2241, 2252), False, 'from more_itertools import unique_everseen\n'), ((12447, 12476), 'itertools.repeat', 'itertools.repeat', (['hop_length1'], 
{}), '(hop_length1)\n', (12463, 12476), False, 'import itertools\n'), ((12477, 12501), 'itertools.repeat', 'itertools.repeat', (['n_fft1'], {}), '(n_fft1)\n', (12493, 12501), False, 'import itertools\n'), ((2780, 2790), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2786, 2790), True, 'import numpy as np\n'), ((2821, 2831), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2827, 2831), True, 'import numpy as np\n'), ((3114, 3124), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3120, 3124), True, 'import numpy as np\n'), ((3155, 3165), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3161, 3165), True, 'import numpy as np\n'), ((9200, 9213), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (9207, 9213), True, 'import numpy as np\n'), ((2869, 2879), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2875, 2879), True, 'import numpy as np\n'), ((2990, 3000), 'numpy.abs', 'np.abs', (['_h'], {}), '(_h)\n', (2996, 3000), True, 'import numpy as np\n'), ((3203, 3213), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3209, 3213), True, 'import numpy as np\n'), ((3324, 3334), 'numpy.abs', 'np.abs', (['_p'], {}), '(_p)\n', (3330, 3334), True, 'import numpy as np\n')] |
import requests
import pandas as pd
import pandas.io.json
import json
import ast
# Target city for all AMap queries below; switch city by editing this constant.
city_name = '深圳市'
# city_name = '成都市'
# city_name = '北京市'
# anyone using this code needs to get their own keys from amap.com
# NOTE(review): these open() calls never close their file handles; acceptable
# for a one-shot script, but a `with` block would be tidier.
api_key_web_service = open('../api_key_web_service.txt', encoding='utf-8').read() # Web Service API key
api_key_web_end = open('../api_key_web_end.txt', encoding='utf-8').read() # Web End API key
def getEnglishCityName():
    """Query the AMap place-text API for the English name of ``city_name``.

    Returns the first whitespace-separated word of the first POI's English
    name (e.g. "Shenzhen" from "Shenzhen Shi").
    """
    query = {
        'keywords': city_name,
        'output':'json',
        'offset':'1',
        'page':'1',
        'key': api_key_web_service,
        'citylimit':'true',
        'language':'en'
    }
    response = requests.get(url='https://restapi.amap.com/v3/place/text?', params=query)
    payload = response.json()
    return str(payload['pois'][0]['name']).split(" ")[0]
def getEnglishName(location):
    """Look up the English name of the metro station at *location*.

    Queries POI type 150500 (subway stations) within ``city_name`` and strips
    any parenthesised suffix from the returned name.
    """
    query = {
        'location': location,
        'city': city_name,
        'output':'json',
        'offset':'1',
        'page':'1',
        'key': api_key_web_service,
        'citylimit':'true',
        'language':'en',
        'types': '150500'
    }
    response = requests.get(url='https://restapi.amap.com/v3/place/text?', params=query)
    payload = response.json()
    return str(payload['pois'][0]['name']).split('(')[0]
def getEnglishNameBatches(location_list):
    """Return English station names for every coordinate in *location_list*.

    The AMap batch endpoint accepts at most 20 sub-requests per call, so the
    list is consumed in chunks of up to 20.
    """
    collected = []
    remaining = location_list
    while remaining:
        chunk_size = min(20, len(remaining))
        print("len of locations: " + str(chunk_size))
        collected.extend(getOneBatch(remaining, chunk_size))
        remaining = remaining[chunk_size:]
    print(collected)
    return collected
def getOneBatch(location_list, len_of_locations):
    """Retrieve up to 20 English station names in one AMap batch request.

    :param location_list: list of "lng,lat" coordinate strings; only the
        first *len_of_locations* entries are queried
    :param len_of_locations: number of stations to look up (max 20, the
        AMap batch endpoint limit)
    :return: list of English station names (text before any "(" suffix)
    """
    main_query = ('/v3/place/text?city=' + city_name
                  + '&output=json&offset=1&page=1&key=' + api_key_web_service
                  + '&citylimit=true&language=en&types=150500&location=')
    # Build the batch body as real Python objects instead of concatenating a
    # JSON string and re-parsing it with json.loads — the old string approach
    # was fragile (it broke as soon as any value needed JSON escaping).
    body = {"ops": [{"url": main_query + location_list[x]}
                    for x in range(len_of_locations)]}
    url = 'https://restapi.amap.com/v3/batch'
    params = {'key': api_key_web_service}
    responseBatchEn = requests.post(url, params=params, json=body)
    dataEn = responseBatchEn.json()
    return [str(dataEn[x]['body']['pois'][0]['name']).split('(')[0]
            for x in range(len_of_locations)]
def getOneLine(line_num, select_line=0):
    """Fetch one subway line from the AMap bus/linename API.

    :param line_num: the subway line to be returned
    :param select_line: index of the returned bus line to use — sometimes the
        first result is not the correct one, or a line has a split.
        Defaults to the first result (0).
    :return: ``[coords, line_colour, stations]`` where *coords* is a list of
        ``[lat, lng]`` float pairs for the polyline, *line_colour* is the hex
        UI colour, and *stations* is a list of
        ``[english_name, chinese_name, lat, lng]`` entries.
    """
    busline_number = select_line
    URL_zh = 'http://restapi.amap.com/v3/bus/linename?'
    PARAMS_zh = {'s': 'rsv3',
                 'key': api_key_web_end,
                 'output': 'json',
                 'pageIndex': '1',
                 'city': city_name,
                 'offset': '10',
                 'keywords': line_num,
                 'extensions': 'all'
                 }
    rZh = requests.get(url=URL_zh, params=PARAMS_zh)
    dataZh = rZh.json()
    busstops = dataZh['buslines'][busline_number]['busstops']
    number_of_stations = len(busstops)
    print(number_of_stations)
    station_names_zh = [stop['name'] for stop in busstops]
    station_coords_zh = [stop['location'] for stop in busstops]
    list_of_eng_stations = getEnglishNameBatches(station_coords_zh)
    print("Chinese/English names")
    # Build the station table directly as Python lists instead of assembling
    # a Python-literal string and round-tripping it through ast.literal_eval,
    # which broke on any station name containing a quote character.
    stations_en_zh = []
    for x in range(len(list_of_eng_stations)):
        print(station_names_zh[x] + " is " + str(list_of_eng_stations[x]))
        stations_en_zh.append([list_of_eng_stations[x],
                               station_names_zh[x],
                               float(get_lat(station_coords_zh[x])),
                               float(get_lon(station_coords_zh[x]))])
    print(stations_en_zh)
    subway_data = pandas.io.json.json_normalize(data=dataZh['buslines'],
                                                errors='ignore',
                                                record_prefix='_')
    df = pd.DataFrame(subway_data)
    # Split each line's polyline string ("lng,lat;lng,lat;...") into points;
    # only the selected line's polyline is used below (the old code allocated
    # a fixed 100-slot scratch list for this).
    coords_list = [str(polyline).split(';') for polyline in df['polyline']]
    coords_list_float = [[float(get_lat(point)), float(get_lon(point))]
                         for point in coords_list[busline_number]]
    line_colour = str(df['uicolor'].iloc[0])
    return [coords_list_float, line_colour, stations_en_zh]
def getZhongWen():
    """Download every configured subway line and write them out as one JSON file.

    The hard-coded ``line_lists`` below selects the Shenzhen line set; the
    commented alternatives cover other cities that were used previously.
    """
    line_num = 1
    print()
    # Chengdu
    # line_lists = [getOneLine(1), getOneLine(1, 2), getOneLine(2), getOneLine(3), getOneLine(4), getOneLine(5), getOneLine(7), getOneLine(10), getOneLine(18)]
    # Shenzhen
    line_lists = [getOneLine(1), getOneLine(2), getOneLine(3), getOneLine(4),getOneLine(5),getOneLine(6,2), getOneLine(7),getOneLine(9), getOneLine(10),getOneLine(11)]
    # Beijing / Guangzhou / Shanghai variants previously listed here
    lines = [entry[0] for entry in line_lists]
    lineColors = ['#' + entry[1] for entry in line_lists]
    stations = [entry[2] for entry in line_lists]
    icon = "./markers/" + getEnglishCityName() + ".png"
    json_dict = {
        "city": city_name,
        "lines": lines,
        "lineColors": lineColors,
        "stations": stations,
        "icon": icon
    }
    # convert to standardised JSON; keep Chinese characters readable
    # (ensure_ascii=False avoids \uXXXX escaping)
    json_output = json.dumps(json_dict, ensure_ascii=False)  # , indent=4)
    create_file(json_output)
def create_file(json_output):
    """Write *json_output* to '<city_name>_output_lines.json' as UTF-8."""
    target = city_name + '_output_lines.json'
    with open(target, 'w', encoding='utf-8') as handle:
        handle.write(target and json_output)
    print('--- end of csv file')
def get_lat(coords):
    """Return the text before the first comma of *coords* (a "x,y" string)."""
    return str(coords).partition(',')[0]
def get_lon(coords):
    """Return the second comma-separated component of *coords* (a "x,y" string)."""
    parts = str(coords).split(',')
    return parts[1]
# getEnglish()
# Script entry point: fetch all configured subway lines and write the JSON file.
print('starting')
getZhongWen()
"json.loads",
"requests.post",
"json.dumps",
"requests.get",
"ast.literal_eval",
"pandas.DataFrame"
] | [((817, 853), 'requests.get', 'requests.get', ([], {'url': 'URL', 'params': 'PARAMS'}), '(url=URL, params=PARAMS)\n', (829, 853), False, 'import requests\n'), ((1455, 1491), 'requests.get', 'requests.get', ([], {'url': 'URL', 'params': 'PARAMS'}), '(url=URL, params=PARAMS)\n', (1467, 1491), False, 'import requests\n'), ((3149, 3168), 'json.loads', 'json.loads', (['BPARAMS'], {}), '(BPARAMS)\n', (3159, 3168), False, 'import json\n'), ((3277, 3321), 'requests.post', 'requests.post', (['url'], {'params': 'params', 'json': 'body'}), '(url, params=params, json=body)\n', (3290, 3321), False, 'import requests\n'), ((4380, 4422), 'requests.get', 'requests.get', ([], {'url': 'URL_zh', 'params': 'PARAMS_zh'}), '(url=URL_zh, params=PARAMS_zh)\n', (4392, 4422), False, 'import requests\n'), ((5538, 5570), 'ast.literal_eval', 'ast.literal_eval', (['stations_en_zh'], {}), '(stations_en_zh)\n', (5554, 5570), False, 'import ast\n'), ((5781, 5806), 'pandas.DataFrame', 'pd.DataFrame', (['subway_data'], {}), '(subway_data)\n', (5793, 5806), True, 'import pandas as pd\n'), ((7990, 8031), 'json.dumps', 'json.dumps', (['json_dict'], {'ensure_ascii': '(False)'}), '(json_dict, ensure_ascii=False)\n', (8000, 8031), False, 'import json\n')] |
import json
import os
from api_swgoh_help import api_swgoh_help, settings
from env import get_env
from initialise_data_structures import initialise_data_structures
from texttable import Texttable
from data_lookups import mod_set_stats, mod_slots, unit_stats, primary_stat_names_map
# Module-level cache of static game data (used for toon name lookups below).
saved_data = initialise_data_structures()
def add_stat(stats, stat_name, value, upgrade_tier):
    """Append a ``[value, upgrade_tier]`` pair to ``stats[stat_name]``,
    creating the list on first use."""
    stats.setdefault(stat_name, []).append([value, upgrade_tier])
def get_mods(allycode=0, force_reload=False, unit_exclusions=None, unit_inclusions=None):
    """Fetch (or load from cache) the mod inventory for one SWGOH player.

    :param allycode: player ally code; 0 means "use the ally code from env".
    :param force_reload: skip the 'saved-mods.json' cache and hit the API.
    :param unit_exclusions: unit names to skip entirely, or None.
    :param unit_inclusions: if given, only unit names in this list are kept.
    :return: dict with keys "mods", "stats", "chars", "char_name_map".

    Side effects: prints a summary table of missing/upgradable mods and
    rewrites 'saved-mods.json'.
    NOTE(review): when force_reload is True the cache file is rewritten from
    an empty dict, so cached data for *other* ally codes is discarded —
    confirm whether that is intended.
    """
    env_data = get_env()
    if allycode == 0:
        allycode = env_data["allycode"]
    saved_mods = {}
    if not force_reload and os.path.isfile('saved-mods.json'):
        with open('saved-mods.json', 'r', encoding='utf-8') as f:
            saved_mods = json.load(f)
        if allycode in saved_mods:
            return saved_mods[allycode]
    # Change the settings below
    creds = settings(env_data["username"], env_data["password"])
    client = api_swgoh_help(creds)
    players_response = client.fetchRoster([allycode], enums=False)
    units_without_mods = {}
    units_upgradable_mods = {}
    stats = {}
    mods = {}
    chars = {}
    char_name_map = {}
    for unit_id, unit_array in players_response[0].items():
        unit = unit_array[0]
        if unit_exclusions and saved_data["toons"][unit_id]["nameKey"] in unit_exclusions:
            continue
        # BUGFIX: was `... in unit_exclusions not in unit_inclusions`, a
        # chained comparison that never implemented the inclusion filter.
        if unit_inclusions and saved_data["toons"][unit_id]["nameKey"] not in unit_inclusions:
            continue
        if unit["level"] < 50:
            continue
        chars[unit_id] = {
            "char_name": saved_data["toons"][unit_id]["nameKey"],
            "starLevel": unit["starLevel"],
            "level": unit["level"],
            "mods": []
        }
        char_name_map[saved_data["toons"][unit_id]["nameKey"]] = unit_id
        for x in range(len(unit["mods"])):
            mod = unit["mods"][x]
            mod_slot = mod_slots[x]
            if "id" not in mod:
                # no mod equipped in this slot; type 1 presumably means
                # "character" (vs ship) — TODO confirm against the API docs
                if unit["type"] == 1:
                    unit_without_mod = units_without_mods.get(unit_id, [])
                    unit_without_mod.append(mod_slot)
                    units_without_mods[unit_id] = unit_without_mod
                continue
            chars[unit_id]["mods"].append(mod["id"])
            mod_stats = {}
            mods[mod["id"]] = {
                "set": mod_set_stats[mod["set"]],
                "slot": mod_slot,
                "stats": mod_stats,
                "primary": primary_stat_names_map[mod["stat"][0][0]],
                "char_name": saved_data["toons"][unit_id]["nameKey"]
            }
            # up to 5 stat entries per mod: [stat_id, value, upgrade_tier]
            for i in range(5):
                if i > len(mod["stat"]) - 1:
                    print(unit_id, "appears to not have the correct amount of mod slots")
                    break
                else:
                    if mod["stat"][i][0] == 0:
                        # stat id 0 = empty secondary slot, i.e. the mod can
                        # still be upgraded to unlock it
                        upgradable_mods = units_upgradable_mods.get(unit_id, [])
                        upgradable_mods.append(mod_slot)
                        units_upgradable_mods[unit_id] = upgradable_mods
                        name = unit_stats[0]
                        mod_stats[name] = 0
                    else:
                        name = unit_stats[mod["stat"][i][0]]
                        mod_stats[name] = mod["stat"][i][1]
                        add_stat(stats, name, mod["stat"][i][1], mod["stat"][i][2])
    # Summary table of units with missing or upgradable mods
    table = Texttable()
    table.set_cols_align(["l", "l", "l"])
    table.set_cols_valign(["m", "m", "m"])
    rows = [["Character", "Missing Mods", "Upgradable Mods"]]
    table_units = set(units_without_mods.keys())
    table_units.update(set(units_upgradable_mods.keys()))
    for unit_id in table_units:
        rows.append([
            saved_data["toons"][unit_id]["nameKey"],
            ", ".join(units_without_mods.get(unit_id, [])),
            ", ".join(units_upgradable_mods.get(unit_id, []))
        ])
    table.add_rows(rows)
    print(table.draw())
    # save data
    saved_mods[allycode] = {"mods": mods, "stats": stats, "chars": chars, "char_name_map": char_name_map}
    with open('saved-mods.json', 'w', encoding='utf-8') as f:
        json.dump(saved_mods, f, ensure_ascii=False, indent=4)
    return saved_mods[allycode]
# run with force reload to update cache of stored data
# Collect every known toon name so the inclusion filter admits all of them.
all_units = [j["nameKey"] for j in saved_data["toons"].values()]
exclusions = []
get_mods(force_reload=True, unit_exclusions=exclusions, unit_inclusions=all_units)
| [
"texttable.Texttable",
"api_swgoh_help.api_swgoh_help",
"json.dump",
"initialise_data_structures.initialise_data_structures",
"api_swgoh_help.settings",
"os.path.isfile",
"json.load",
"env.get_env"
] | [((296, 324), 'initialise_data_structures.initialise_data_structures', 'initialise_data_structures', ([], {}), '()\n', (322, 324), False, 'from initialise_data_structures import initialise_data_structures\n'), ((631, 640), 'env.get_env', 'get_env', ([], {}), '()\n', (638, 640), False, 'from env import get_env\n'), ((1011, 1063), 'api_swgoh_help.settings', 'settings', (["env_data['username']", "env_data['password']"], {}), "(env_data['username'], env_data['password'])\n", (1019, 1063), False, 'from api_swgoh_help import api_swgoh_help, settings\n'), ((1077, 1098), 'api_swgoh_help.api_swgoh_help', 'api_swgoh_help', (['creds'], {}), '(creds)\n', (1091, 1098), False, 'from api_swgoh_help import api_swgoh_help, settings\n'), ((3557, 3568), 'texttable.Texttable', 'Texttable', ([], {}), '()\n', (3566, 3568), False, 'from texttable import Texttable\n'), ((752, 785), 'os.path.isfile', 'os.path.isfile', (['"""saved-mods.json"""'], {}), "('saved-mods.json')\n", (766, 785), False, 'import os\n'), ((4307, 4361), 'json.dump', 'json.dump', (['saved_mods', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(saved_mods, f, ensure_ascii=False, indent=4)\n', (4316, 4361), False, 'import json\n'), ((878, 890), 'json.load', 'json.load', (['f'], {}), '(f)\n', (887, 890), False, 'import json\n')] |
import streamlit as st
from streamlit_yellowbrick import st_yellowbrick
def run_regression():
with st.sidebar.form(key="regression_form"):
regression_visualizers = st.multiselect(
"Choose Regression Visualizers",
[
"Residuals Plot",
"Prediction Error Plot",
"Alpha Section",
],
)
submit_button = st.form_submit_button(label="Show")
if "Residuals Plot" in regression_visualizers:
with st.beta_expander("Collapse", expanded=True):
agree = st.checkbox("What is a Residuals Plot?", value=False)
if agree:
st.markdown(
"""
Residuals, in the context of regression models, are the difference between
the observed value of the target variable (y) and the predicted value (ŷ),
i.e. the error of the prediction. The residuals plot shows the difference
between residuals on the vertical axis and the dependent variable on the
horizontal axis, allowing you to detect regions within the target that may
be susceptible to more or less error.
"""
)
col1, col2 = st.beta_columns(2)
with col1:
residuals_plot()
col2.code(
"""
import streamlit as st
from streamlit_yellowbrick import st_yellowbrick
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import ResidualsPlot
# Load a regression dataset
X, y = load_concrete()
# Create the train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Instantiate the linear model and visualizer
model = Ridge()
visualizer = ResidualsPlot(model)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
st_yellowbrick(visualizer) # Finalize and render the figure
""",
language="python",
)
if "Prediction Error Plot" in regression_visualizers:
with st.beta_expander("Collapse", expanded=True):
agree = st.checkbox("What is a Prediction Error Plot?", value=False)
if agree:
st.markdown(
"""
A prediction error plot shows the actual targets from the dataset against
the predicted values generated by our model. This allows us to see how
much variance is in the model. Data scientists can diagnose regression
models using this plot by comparing against the 45 degree line, where
the prediction exactly matches the model.
"""
)
col1, col2 = st.beta_columns(2)
with col1:
prediction_error()
col2.code(
"""
import streamlit as st
from streamlit_yellowbrick import st_yellowbrick
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import PredictionError
# Load a regression dataset
X, y = load_concrete()
# Create the train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Instantiate the linear model and visualizer
model = Lasso()
visualizer = PredictionError(model)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
st_yellowbrick(visualizer) # Finalize and render the figure
""",
language="python",
)
if "Alpha Section" in regression_visualizers:
with st.beta_expander("Collapse", expanded=True):
agree = st.checkbox("What is Alpha Section?", value=False)
if agree:
st.markdown(
"""
Regularization is designed to penalize model complexity, therefore the higher
the alpha, the less complex the model, decreasing the error due to variance
(overfit). Alphas that are too high on the other hand increase the error due
to bias (underfit). It is important, therefore to choose an optimal alpha
such that the error is minimized in both directions.
The `AlphaSelection` Visualizer demonstrates how different values of alpha
influence model selection during the regularization of linear models.
Generally speaking, alpha increases the affect of regularization, e.g.
if alpha is zero there is no regularization and the higher the alpha,
the more the regularization parameter influences the final model.
"""
)
col1, col2 = st.beta_columns(2)
with col1:
alpha_selection()
col2.code(
"""
import numpy as np
from sklearn.linear_model import LassoCV
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import AlphaSelection
# Load the regression dataset
X, y = load_concrete()
# Create a list of alphas to cross-validate against
alphas = np.logspace(-10, 1, 400)
# Instantiate the linear model and visualizer
model = LassoCV(alphas=alphas)
visualizer = AlphaSelection(model)
visualizer.fit(X, y)
st_yellowbrick(visualizer)
""",
language="python",
)
return None
def residuals_plot():
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import ResidualsPlot
# Load a regression dataset
X, y = load_concrete()
# Create the train and test data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Instantiate the linear model and visualizer
model = Ridge()
visualizer = ResidualsPlot(model)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
return st_yellowbrick(visualizer) # Finalize and render the figure
def prediction_error():
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import PredictionError
# Load a regression dataset
X, y = load_concrete()
# Create the train and test data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Instantiate the linear model and visualizer
model = Lasso()
visualizer = PredictionError(model)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
return st_yellowbrick(visualizer) # Finalize and render the figure
def alpha_selection():
import numpy as np
from sklearn.linear_model import LassoCV
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import AlphaSelection
# Load the regression dataset
X, y = load_concrete()
# Create a list of alphas to cross-validate against
alphas = np.logspace(-10, 1, 400)
# Instantiate the linear model and visualizer
model = LassoCV(alphas=alphas)
visualizer = AlphaSelection(model)
visualizer.fit(X, y)
return st_yellowbrick(visualizer)
| [
"streamlit.checkbox",
"yellowbrick.datasets.load_concrete",
"yellowbrick.regressor.PredictionError",
"yellowbrick.regressor.AlphaSelection",
"streamlit.markdown",
"sklearn.linear_model.LassoCV",
"sklearn.linear_model.Lasso",
"streamlit.beta_columns",
"sklearn.model_selection.train_test_split",
"ye... | [((6089, 6104), 'yellowbrick.datasets.load_concrete', 'load_concrete', ([], {}), '()\n', (6102, 6104), False, 'from yellowbrick.datasets import load_concrete\n'), ((6182, 6236), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (6198, 6236), False, 'from sklearn.model_selection import train_test_split\n'), ((6314, 6321), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (6319, 6321), False, 'from sklearn.linear_model import Ridge\n'), ((6339, 6359), 'yellowbrick.regressor.ResidualsPlot', 'ResidualsPlot', (['model'], {}), '(model)\n', (6352, 6359), False, 'from yellowbrick.regressor import ResidualsPlot\n'), ((6528, 6554), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', (['visualizer'], {}), '(visualizer)\n', (6542, 6554), False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((6865, 6880), 'yellowbrick.datasets.load_concrete', 'load_concrete', ([], {}), '()\n', (6878, 6880), False, 'from yellowbrick.datasets import load_concrete\n'), ((6958, 7012), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (6974, 7012), False, 'from sklearn.model_selection import train_test_split\n'), ((7090, 7097), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (7095, 7097), False, 'from sklearn.linear_model import Lasso\n'), ((7115, 7137), 'yellowbrick.regressor.PredictionError', 'PredictionError', (['model'], {}), '(model)\n', (7130, 7137), False, 'from yellowbrick.regressor import PredictionError\n'), ((7306, 7332), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', (['visualizer'], {}), '(visualizer)\n', (7320, 7332), False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((7610, 7625), 'yellowbrick.datasets.load_concrete', 'load_concrete', ([], {}), '()\n', (7623, 7625), False, 'from 
yellowbrick.datasets import load_concrete\n'), ((7696, 7720), 'numpy.logspace', 'np.logspace', (['(-10)', '(1)', '(400)'], {}), '(-10, 1, 400)\n', (7707, 7720), True, 'import numpy as np\n'), ((7784, 7806), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {'alphas': 'alphas'}), '(alphas=alphas)\n', (7791, 7806), False, 'from sklearn.linear_model import LassoCV\n'), ((7824, 7845), 'yellowbrick.regressor.AlphaSelection', 'AlphaSelection', (['model'], {}), '(model)\n', (7838, 7845), False, 'from yellowbrick.regressor import AlphaSelection\n'), ((7882, 7908), 'streamlit_yellowbrick.st_yellowbrick', 'st_yellowbrick', (['visualizer'], {}), '(visualizer)\n', (7896, 7908), False, 'from streamlit_yellowbrick import st_yellowbrick\n'), ((106, 144), 'streamlit.sidebar.form', 'st.sidebar.form', ([], {'key': '"""regression_form"""'}), "(key='regression_form')\n", (121, 144), True, 'import streamlit as st\n'), ((179, 292), 'streamlit.multiselect', 'st.multiselect', (['"""Choose Regression Visualizers"""', "['Residuals Plot', 'Prediction Error Plot', 'Alpha Section']"], {}), "('Choose Regression Visualizers', ['Residuals Plot',\n 'Prediction Error Plot', 'Alpha Section'])\n", (193, 292), True, 'import streamlit as st\n'), ((411, 446), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Show"""'}), "(label='Show')\n", (432, 446), True, 'import streamlit as st\n'), ((513, 556), 'streamlit.beta_expander', 'st.beta_expander', (['"""Collapse"""'], {'expanded': '(True)'}), "('Collapse', expanded=True)\n", (529, 556), True, 'import streamlit as st\n'), ((579, 632), 'streamlit.checkbox', 'st.checkbox', (['"""What is a Residuals Plot?"""'], {'value': '(False)'}), "('What is a Residuals Plot?', value=False)\n", (590, 632), True, 'import streamlit as st\n'), ((1306, 1324), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (1321, 1324), True, 'import streamlit as st\n'), ((2303, 2346), 'streamlit.beta_expander', 'st.beta_expander', 
(['"""Collapse"""'], {'expanded': '(True)'}), "('Collapse', expanded=True)\n", (2319, 2346), True, 'import streamlit as st\n'), ((2369, 2429), 'streamlit.checkbox', 'st.checkbox', (['"""What is a Prediction Error Plot?"""'], {'value': '(False)'}), "('What is a Prediction Error Plot?', value=False)\n", (2380, 2429), True, 'import streamlit as st\n'), ((3001, 3019), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (3016, 3019), True, 'import streamlit as st\n'), ((3996, 4039), 'streamlit.beta_expander', 'st.beta_expander', (['"""Collapse"""'], {'expanded': '(True)'}), "('Collapse', expanded=True)\n", (4012, 4039), True, 'import streamlit as st\n'), ((4062, 4112), 'streamlit.checkbox', 'st.checkbox', (['"""What is Alpha Section?"""'], {'value': '(False)'}), "('What is Alpha Section?', value=False)\n", (4073, 4112), True, 'import streamlit as st\n'), ((5167, 5185), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (5182, 5185), True, 'import streamlit as st\n'), ((672, 1252), 'streamlit.markdown', 'st.markdown', (['"""\n Residuals, in the context of regression models, are the difference between\n the observed value of the target variable (y) and the predicted value (ŷ),\n i.e. the error of the prediction. The residuals plot shows the difference\n between residuals on the vertical axis and the dependent variable on the\n horizontal axis, allowing you to detect regions within the target that may\n be susceptible to more or less error.\n """'], {}), '(\n """\n Residuals, in the context of regression models, are the difference between\n the observed value of the target variable (y) and the predicted value (ŷ),\n i.e. the error of the prediction. 
The residuals plot shows the difference\n between residuals on the vertical axis and the dependent variable on the\n horizontal axis, allowing you to detect regions within the target that may\n be susceptible to more or less error.\n """\n )\n', (683, 1252), True, 'import streamlit as st\n'), ((2469, 2947), 'streamlit.markdown', 'st.markdown', (['"""\n A prediction error plot shows the actual targets from the dataset against\n the predicted values generated by our model. This allows us to see how\n much variance is in the model. Data scientists can diagnose regression\n models using this plot by comparing against the 45 degree line, where\n the prediction exactly matches the model.\n """'], {}), '(\n """\n A prediction error plot shows the actual targets from the dataset against\n the predicted values generated by our model. This allows us to see how\n much variance is in the model. Data scientists can diagnose regression\n models using this plot by comparing against the 45 degree line, where\n the prediction exactly matches the model.\n """\n )\n', (2480, 2947), True, 'import streamlit as st\n'), ((4152, 5113), 'streamlit.markdown', 'st.markdown', (['"""\n Regularization is designed to penalize model complexity, therefore the higher\n the alpha, the less complex the model, decreasing the error due to variance\n (overfit). Alphas that are too high on the other hand increase the error due\n to bias (underfit). 
It is important, therefore to choose an optimal alpha\n such that the error is minimized in both directions.\n\n The `AlphaSelection` Visualizer demonstrates how different values of alpha\n influence model selection during the regularization of linear models.\n Generally speaking, alpha increases the affect of regularization, e.g.\n if alpha is zero there is no regularization and the higher the alpha,\n the more the regularization parameter influences the final model.\n """'], {}), '(\n """\n Regularization is designed to penalize model complexity, therefore the higher\n the alpha, the less complex the model, decreasing the error due to variance\n (overfit). Alphas that are too high on the other hand increase the error due\n to bias (underfit). It is important, therefore to choose an optimal alpha\n such that the error is minimized in both directions.\n\n The `AlphaSelection` Visualizer demonstrates how different values of alpha\n influence model selection during the regularization of linear models.\n Generally speaking, alpha increases the affect of regularization, e.g.\n if alpha is zero there is no regularization and the higher the alpha,\n the more the regularization parameter influences the final model.\n """\n )\n', (4163, 5113), True, 'import streamlit as st\n')] |
# Generated by Django 2.2.20 on 2021-05-06 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("curation_portal", "0008_add_flags")]
operations = [
migrations.AddField(
model_name="curationresult",
name="flag_low_pext",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="curationresult",
name="flag_pext_less_than_half_max",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="curationresult",
name="flag_uninformative_pext",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="curationresult",
name="flag_weak_gene_conservation",
field=models.BooleanField(default=False),
),
]
| [
"django.db.models.BooleanField"
] | [((334, 368), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (353, 368), False, 'from django.db import migrations, models\n'), ((518, 552), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (537, 552), False, 'from django.db import migrations, models\n'), ((697, 731), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (716, 731), False, 'from django.db import migrations, models\n'), ((880, 914), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (899, 914), False, 'from django.db import migrations, models\n')] |
import pytest as pt
from dataclass_tools.tools import deserialize_dataclass
from gl_hsc_scantling.elements import StructuralElement
from .exp_output import ExpStiffenerElement, ExpStiffenerSection
from .fixtures_laminates import *
from .fixtures_stiffener_sections import *
from .fixtures_vessel import *
@pt.fixture
def stiffener_bottom_01_input():
return {
"name": "Bottom Stiffener 01",
"vessel": "catamaran",
"x": 8,
"z": -0.3,
"element_type": "Stiffener",
"span": 1,
"spacing_1": 0.4,
"spacing_2": 0.4,
"stiff_att_plate": 1,
"att_plate_1": "et_0900_20x",
"att_plate_2": "et_0900_20x",
"stiff_section": "lbar_01",
"location": "bottom",
"deadrise": 16,
}
@pt.fixture
def stiffener_bottom_01(et_0900_20x, lbar_01, vessel_ex1, stiffener_bottom_01_input):
laminates = {et_0900_20x.name: et_0900_20x}
stiff_sections = {lbar_01.name: lbar_01}
vessels = {vessel_ex1.name: vessel_ex1}
collections = {
"laminates": laminates,
"stiffener_sections": stiff_sections,
"vessels": vessels,
}
return deserialize_dataclass(
dct=stiffener_bottom_01_input,
dataclass=StructuralElement,
build_instance=True,
dict_of_collections=collections,
)
@pt.fixture
def stiffener_bottom_01_exp():
return ExpStiffenerElement(
stiffener_section=ExpStiffenerSection(
bend_stiffness_NA=8.33124543714909,
z_NA=0.0115411623450635,
web_shear_stiffness=2707.26407307195,
stiffness=23856.1306743935,
),
pressures={"sea": 18.46875, "impact": 21.3672413793103},
)
@pt.fixture
def stiffener_side_01_input():
return {
"name": "Side Stiffener 01",
"vessel": "catamaran",
"x": 4,
"z": 0.5,
"element_type": "Stiffener",
"span": 1,
"spacing_1": 0.4,
"spacing_2": 0.4,
"stiff_att_plate": 1,
"stiff_att_angle": 20,
"att_plate_1": "et_0900_20x",
"att_plate_2": "et_0900_20x",
"stiff_section": "lbar_01",
"location": "side",
}
@pt.fixture
def stiffener_side_01(et_0900_20x, lbar_01, vessel_ex1, stiffener_side_01_input):
laminates = {et_0900_20x.name: et_0900_20x}
stiff_sections = {lbar_01.name: lbar_01}
vessels = {vessel_ex1.name: vessel_ex1}
collections = {
"laminates": laminates,
"stiffener_sections": stiff_sections,
"vessels": vessels,
}
return deserialize_dataclass(
dct=stiffener_side_01_input,
dataclass=StructuralElement,
build_instance=True,
dict_of_collections=collections,
)
@pt.fixture
def stiffener_side_01_exp():
return ExpStiffenerElement(
stiffener_section=ExpStiffenerSection(
bend_stiffness_NA=7.98195791502877,
z_NA=0.0112865125011699,
web_shear_stiffness=2707.26407307195,
stiffness=23856.1306743935,
),
pressures={"sea": 10},
)
| [
"dataclass_tools.tools.deserialize_dataclass"
] | [((1161, 1301), 'dataclass_tools.tools.deserialize_dataclass', 'deserialize_dataclass', ([], {'dct': 'stiffener_bottom_01_input', 'dataclass': 'StructuralElement', 'build_instance': '(True)', 'dict_of_collections': 'collections'}), '(dct=stiffener_bottom_01_input, dataclass=\n StructuralElement, build_instance=True, dict_of_collections=collections)\n', (1182, 1301), False, 'from dataclass_tools.tools import deserialize_dataclass\n'), ((2568, 2706), 'dataclass_tools.tools.deserialize_dataclass', 'deserialize_dataclass', ([], {'dct': 'stiffener_side_01_input', 'dataclass': 'StructuralElement', 'build_instance': '(True)', 'dict_of_collections': 'collections'}), '(dct=stiffener_side_01_input, dataclass=\n StructuralElement, build_instance=True, dict_of_collections=collections)\n', (2589, 2706), False, 'from dataclass_tools.tools import deserialize_dataclass\n')] |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.db.models import Count, Sum, F, Func
from datetime import datetime
import json
from postgresqleu.util.db import exec_to_dict
from postgresqleu.util.request import get_int_or_error
from .models import ConferenceRegistration
from .models import VolunteerSlot, VolunteerAssignment
from .util import send_conference_notification_template, get_conference_or_404
def _check_admin(request, conference):
if request.user.is_superuser:
return True
else:
return conference.administrators.filter(pk=request.user.id).exists() or conference.series.administrators.filter(pk=request.user.id).exists()
def _get_conference_and_reg(request, urlname):
conference = get_conference_or_404(urlname)
is_admin = _check_admin(request, conference)
if is_admin:
reg = ConferenceRegistration.objects.get(conference=conference, attendee=request.user)
else:
try:
reg = conference.volunteers.get(attendee=request.user)
except ConferenceRegistration.DoesNotExist:
raise Http404("Volunteer entry not found")
return (conference, is_admin, reg)
def send_volunteer_notification(conference, assignment, subject, template):
if not conference.notifyvolunteerstatus:
return
# No filter aggregates in our version of Django, so direct SQL it is
pending = exec_to_dict("SELECT count(*) FILTER (WHERE NOT org_confirmed) AS admin, count(*) FILTER (WHERE NOT vol_confirmed) AS volunteer FROM confreg_volunteerassignment a INNER JOIN confreg_volunteerslot s ON s.id=a.slot_id WHERE s.conference_id=%(confid)s", {
'confid': conference.id,
})[0]
send_conference_notification_template(
conference,
subject,
'confreg/mail/{}'.format(template), {
'conference': conference,
'assignment': assignment,
'pending': pending,
},
)
def _get_volunteer_stats(conference):
stats = ConferenceRegistration.objects.filter(conference=conference) \
.filter(volunteers_set=conference) \
.annotate(num_assignments=Count('volunteerassignment')) \
.annotate(total_time=Sum(Func(
Func(F('volunteerassignment__slot__timerange'), function='upper'),
Func(F('volunteerassignment__slot__timerange'), function='lower'),
function='age'))) \
.order_by('lastname', 'firstname')
return [{
'name': r.fullname,
'count': r.num_assignments,
'time': str(r.total_time or '0:00:00'),
} for r in stats]
def _slot_return_data(slot):
return {
'id': slot.id,
'max_staff': slot.max_staff,
'min_staff': slot.min_staff,
'assignments': [{
'id': a.id,
'volid': a.reg.id,
'volunteer': a.reg.fullname,
'vol_confirmed': a.vol_confirmed,
'org_confirmed': a.org_confirmed,
} for a in slot.volunteerassignment_set.all()],
}
@login_required
@transaction.atomic
def volunteerschedule_api(request, urlname, adm=False):
try:
(conference, can_admin, reg) = _get_conference_and_reg(request, urlname)
except ConferenceRegistration.DoesNotExist:
raise PermissionDenied()
is_admin = can_admin and adm
if request.method == 'GET':
# GET just always returns the complete volunteer schedule
slots = VolunteerSlot.objects.prefetch_related('volunteerassignment_set', 'volunteerassignment_set__reg').filter(conference=conference)
return HttpResponse(json.dumps({
'slots': [_slot_return_data(slot) for slot in slots],
'volunteers': [{
'id': vol.id,
'name': vol.fullname,
} for vol in conference.volunteers.all().order_by('firstname', 'lastname')],
'meta': {
'isadmin': is_admin,
'regid': reg.id,
},
'stats': _get_volunteer_stats(conference),
}), content_type='application/json')
if request.method != 'POST':
raise Http404()
if 'op' not in request.POST:
raise Http404()
slotid = get_int_or_error(request.POST, 'slotid')
volid = get_int_or_error(request.POST, 'volid')
# We should always have a valid slot
slot = get_object_or_404(VolunteerSlot, conference=conference, pk=slotid)
err = None
if request.POST['op'] == 'signup':
if volid != 0:
raise PermissionDenied("Invalid post data")
err = _signup(request, conference, reg, is_admin, slot)
elif request.POST['op'] == 'remove':
err = _remove(request, conference, reg, is_admin, slot, volid)
elif request.POST['op'] == 'confirm':
err = _confirm(request, conference, reg, is_admin, slot, volid)
elif request.POST['op'] == 'add':
err = _add(request, conference, reg, is_admin, slot, volid)
else:
raise Http404()
if err:
return HttpResponse(
json.dumps({'err': err}),
content_type='application/json',
status=500,
)
# Req-query the database to pick up any changes, and return the complete object
slot = VolunteerSlot.objects.prefetch_related('volunteerassignment_set', 'volunteerassignment_set__reg').filter(conference=conference, pk=slot.pk)[0]
return HttpResponse(json.dumps({
'err': None,
'slot': _slot_return_data(slot),
'stats': _get_volunteer_stats(conference),
}), content_type='application/json')
@login_required
def volunteerschedule(request, urlname, adm=False):
try:
(conference, can_admin, reg) = _get_conference_and_reg(request, urlname)
except ConferenceRegistration.DoesNotExist:
return HttpResponse("Must be registered for conference to view volunteer schedule")
is_admin = can_admin and adm
slots = VolunteerSlot.objects.filter(conference=conference).order_by('timerange', 'title')
return render(request, 'confreg/volunteer_schedule.html', {
'basetemplate': is_admin and 'confreg/confadmin_base.html' or 'confreg/volunteer_base.html',
'conference': conference,
'admin': is_admin,
'can_admin': can_admin,
'reg': reg,
'slots': slots,
'helplink': 'volunteers',
})
def _signup(request, conference, reg, adm, slot):
if VolunteerAssignment.objects.filter(slot=slot, reg=reg).exists():
return "Already a volunteer for selected slot"
elif slot.countvols >= slot.max_staff:
return "Volunteer slot is already full"
elif VolunteerAssignment.objects.filter(reg=reg, slot__timerange__overlap=slot.timerange).exists():
return "Cannot sign up for an overlapping slot"
else:
a = VolunteerAssignment(slot=slot, reg=reg, vol_confirmed=True, org_confirmed=False)
a.save()
send_volunteer_notification(conference, a, 'Volunteer signed up', 'admin_notify_volunteer_signup.txt')
def _add(request, conference, reg, adm, slot, volid):
addreg = get_object_or_404(ConferenceRegistration, conference=conference, id=volid)
if VolunteerAssignment.objects.filter(slot=slot, reg=addreg).exists():
return "Already a volunteer for selected slot"
elif slot.countvols >= slot.max_staff:
return "Volunteer slot is already full"
elif VolunteerAssignment.objects.filter(reg=addreg, slot__timerange__overlap=slot.timerange).exists():
return "Cannot add to an overlapping slot"
else:
VolunteerAssignment(slot=slot, reg=addreg, vol_confirmed=False, org_confirmed=True).save()
def _remove(request, conference, reg, is_admin, slot, aid):
if is_admin:
a = get_object_or_404(VolunteerAssignment, slot=slot, id=aid)
else:
a = get_object_or_404(VolunteerAssignment, slot=slot, reg=reg, id=aid)
if a.org_confirmed and not is_admin:
return "Cannot remove a confirmed assignment. Please contact the volunteer schedule coordinator for manual processing."
else:
a.delete()
def _confirm(request, conference, reg, is_admin, slot, aid):
if is_admin:
# Admins can make organization confirms
a = get_object_or_404(VolunteerAssignment, slot=slot, id=aid)
if a.org_confirmed:
return "Assignment already confirmed"
else:
a.org_confirmed = True
a.save()
else:
# Regular users can confirm their own sessions only
a = get_object_or_404(VolunteerAssignment, slot=slot, reg=reg, id=aid)
if a.vol_confirmed:
return "Assignment already confirmed"
else:
a.vol_confirmed = True
a.save()
send_volunteer_notification(conference, a, 'Volunteer slot confirmed', 'admin_notify_volunteer_confirmed.txt')
def ical(request, urlname, token):
conference = get_conference_or_404(urlname)
reg = get_object_or_404(ConferenceRegistration, regtoken=token)
assignments = VolunteerAssignment.objects.filter(reg=reg).order_by('slot__timerange')
resp = render(request, 'confreg/volunteer_schedule.ical', {
'conference': conference,
'assignments': assignments,
'now': datetime.utcnow(),
}, content_type='text/calendar')
resp['Content-Disposition'] = 'attachment; filename="{}_volunteer.ical"'.format(conference.urlname)
return resp
| [
"django.shortcuts.render",
"django.core.exceptions.PermissionDenied",
"postgresqleu.util.request.get_int_or_error",
"datetime.datetime.utcnow",
"django.db.models.Count",
"django.http.HttpResponse",
"django.shortcuts.get_object_or_404",
"json.dumps",
"postgresqleu.util.db.exec_to_dict",
"django.db.... | [((4615, 4655), 'postgresqleu.util.request.get_int_or_error', 'get_int_or_error', (['request.POST', '"""slotid"""'], {}), "(request.POST, 'slotid')\n", (4631, 4655), False, 'from postgresqleu.util.request import get_int_or_error\n'), ((4668, 4707), 'postgresqleu.util.request.get_int_or_error', 'get_int_or_error', (['request.POST', '"""volid"""'], {}), "(request.POST, 'volid')\n", (4684, 4707), False, 'from postgresqleu.util.request import get_int_or_error\n'), ((4761, 4827), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['VolunteerSlot'], {'conference': 'conference', 'pk': 'slotid'}), '(VolunteerSlot, conference=conference, pk=slotid)\n', (4778, 4827), False, 'from django.shortcuts import render, get_object_or_404\n'), ((6424, 6709), 'django.shortcuts.render', 'render', (['request', '"""confreg/volunteer_schedule.html"""', "{'basetemplate': is_admin and 'confreg/confadmin_base.html' or\n 'confreg/volunteer_base.html', 'conference': conference, 'admin':\n is_admin, 'can_admin': can_admin, 'reg': reg, 'slots': slots,\n 'helplink': 'volunteers'}"], {}), "(request, 'confreg/volunteer_schedule.html', {'basetemplate': \n is_admin and 'confreg/confadmin_base.html' or\n 'confreg/volunteer_base.html', 'conference': conference, 'admin':\n is_admin, 'can_admin': can_admin, 'reg': reg, 'slots': slots,\n 'helplink': 'volunteers'})\n", (6430, 6709), False, 'from django.shortcuts import render, get_object_or_404\n'), ((7486, 7560), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ConferenceRegistration'], {'conference': 'conference', 'id': 'volid'}), '(ConferenceRegistration, conference=conference, id=volid)\n', (7503, 7560), False, 'from django.shortcuts import render, get_object_or_404\n'), ((9346, 9403), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ConferenceRegistration'], {'regtoken': 'token'}), '(ConferenceRegistration, regtoken=token)\n', (9363, 9403), False, 'from django.shortcuts import render, 
get_object_or_404\n'), ((1592, 1880), 'postgresqleu.util.db.exec_to_dict', 'exec_to_dict', (['"""SELECT count(*) FILTER (WHERE NOT org_confirmed) AS admin, count(*) FILTER (WHERE NOT vol_confirmed) AS volunteer FROM confreg_volunteerassignment a INNER JOIN confreg_volunteerslot s ON s.id=a.slot_id WHERE s.conference_id=%(confid)s"""', "{'confid': conference.id}"], {}), "(\n 'SELECT count(*) FILTER (WHERE NOT org_confirmed) AS admin, count(*) FILTER (WHERE NOT vol_confirmed) AS volunteer FROM confreg_volunteerassignment a INNER JOIN confreg_volunteerslot s ON s.id=a.slot_id WHERE s.conference_id=%(confid)s'\n , {'confid': conference.id})\n", (1604, 1880), False, 'from postgresqleu.util.db import exec_to_dict\n'), ((4533, 4542), 'django.http.Http404', 'Http404', ([], {}), '()\n', (4540, 4542), False, 'from django.http import HttpResponse, Http404\n'), ((4591, 4600), 'django.http.Http404', 'Http404', ([], {}), '()\n', (4598, 4600), False, 'from django.http import HttpResponse, Http404\n'), ((8140, 8197), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['VolunteerAssignment'], {'slot': 'slot', 'id': 'aid'}), '(VolunteerAssignment, slot=slot, id=aid)\n', (8157, 8197), False, 'from django.shortcuts import render, get_object_or_404\n'), ((8220, 8286), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['VolunteerAssignment'], {'slot': 'slot', 'reg': 'reg', 'id': 'aid'}), '(VolunteerAssignment, slot=slot, reg=reg, id=aid)\n', (8237, 8286), False, 'from django.shortcuts import render, get_object_or_404\n'), ((8625, 8682), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['VolunteerAssignment'], {'slot': 'slot', 'id': 'aid'}), '(VolunteerAssignment, slot=slot, id=aid)\n', (8642, 8682), False, 'from django.shortcuts import render, get_object_or_404\n'), ((8913, 8979), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['VolunteerAssignment'], {'slot': 'slot', 'reg': 'reg', 'id': 'aid'}), '(VolunteerAssignment, slot=slot, reg=reg, 
id=aid)\n', (8930, 8979), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3689, 3707), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (3705, 3707), False, 'from django.core.exceptions import PermissionDenied\n'), ((4925, 4962), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', (['"""Invalid post data"""'], {}), "('Invalid post data')\n", (4941, 4962), False, 'from django.core.exceptions import PermissionDenied\n'), ((5447, 5471), 'json.dumps', 'json.dumps', (["{'err': err}"], {}), "({'err': err})\n", (5457, 5471), False, 'import json\n'), ((6205, 6281), 'django.http.HttpResponse', 'HttpResponse', (['"""Must be registered for conference to view volunteer schedule"""'], {}), "('Must be registered for conference to view volunteer schedule')\n", (6217, 6281), False, 'from django.http import HttpResponse, Http404\n'), ((9643, 9660), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (9658, 9660), False, 'from datetime import datetime\n'), ((1289, 1325), 'django.http.Http404', 'Http404', (['"""Volunteer entry not found"""'], {}), "('Volunteer entry not found')\n", (1296, 1325), False, 'from django.http import HttpResponse, Http404\n'), ((5383, 5392), 'django.http.Http404', 'Http404', ([], {}), '()\n', (5390, 5392), False, 'from django.http import HttpResponse, Http404\n'), ((2403, 2431), 'django.db.models.Count', 'Count', (['"""volunteerassignment"""'], {}), "('volunteerassignment')\n", (2408, 2431), False, 'from django.db.models import Count, Sum, F, Func\n'), ((2559, 2600), 'django.db.models.F', 'F', (['"""volunteerassignment__slot__timerange"""'], {}), "('volunteerassignment__slot__timerange')\n", (2560, 2600), False, 'from django.db.models import Count, Sum, F, Func\n'), ((2672, 2713), 'django.db.models.F', 'F', (['"""volunteerassignment__slot__timerange"""'], {}), "('volunteerassignment__slot__timerange')\n", (2673, 2713), False, 'from django.db.models import Count, Sum, F, Func\n')] |
from rest_framework import authentication
from chat.models import Users
from .serializers import UsersSerializer
from rest_framework import viewsets
class UsersViewSet(viewsets.ModelViewSet):
    """Full CRUD API over every User, guarded by session or token auth."""

    queryset = Users.objects.all()
    serializer_class = UsersSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
| [
"chat.models.Users.objects.all"
] | [((375, 394), 'chat.models.Users.objects.all', 'Users.objects.all', ([], {}), '()\n', (392, 394), False, 'from chat.models import Users\n')] |
from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash
from flask_restful import reqparse, abort, Api, Resource
from flask_login import login_required, logout_user, current_user, login_user, LoginManager
from google.cloud import datastore
import datetime
from flask_admin import Admin
from flask_sqlalchemy import SQLAlchemy
from flask_admin.contrib.sqla import ModelView
from flask_marshmallow import Marshmallow
from flask_session import Session
import stripe
import decimal
import os
from forms import LoginForm
from models import Distributor, Zip, Product, RetailProduct, Base, User, ProductCategory, Order, OrderItem
from session import db_session, engine
from sqlalchemy.pool import SingletonThreadPool
# Google Cloud Datastore client used by the landing-page product query.
datastore_client = datastore.Client()
app = Flask(__name__)
app.secret_key = '<KEY>'
# Server-side sessions backed by SQLAlchemy (same sqlite database as orders).
app.config['SESSION_TYPE'] = 'sqlalchemy'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///orders.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Flask-Admin backend mounted under /api/admin.
admin = Admin(app, name='simple123', template_mode='bootstrap3', url='/api/admin')
api = Api(app)
ma = Marshmallow(app)
sess = Session(app)
db = SQLAlchemy()
login_manager = LoginManager(app)
# Create the session-store table and the application schema up front.
sess.app.session_interface.db.create_all()
Base.metadata.create_all(engine)
@login_manager.user_loader
def load_user(user_id):
    """Resolve the logged-in user on every page load, or None if anonymous."""
    if user_id is None:
        return None
    return db_session.query(User).filter(User.id == user_id).first()
@login_manager.unauthorized_handler
def unauthorized():
    """Redirect unauthorized users to Login page, flashing an explanation."""
    flash('You must be logged in to view that page.')
    return redirect('/api/login')
class BaseAdminView(ModelView):
    """Flask-Admin model view restricted to authenticated users."""

    def is_accessible(self):
        # Only logged-in users may open admin pages.
        return current_user.is_authenticated

    def inaccessible_callback(self, name, **kwargs):
        # redirect to login page if user doesn't have access
        return redirect('/api/login')
class ZipView(BaseAdminView):
    """Admin view for zip codes: searchable and filterable, but read-mostly."""
    column_searchable_list = ['zip_code']
    column_filters = ['distributor']
    # Zip rows are reference data; they may be edited but not added/removed here.
    can_create = False
    can_delete = False
class DistributorView(BaseAdminView):
    """Admin view for distributors; zips are managed from ZipView instead."""
    form_excluded_columns = ['zips', ]
class ProductCategoryView(BaseAdminView):
    """Admin view for categories; only the name is exposed in forms/listings."""
    form_columns = ('name',)
    list_columns = ('name',)
class ProductView(BaseAdminView):
    """Admin view for products with an explicit editable-field whitelist."""
    form_columns = ('name', 'description', 'base_price', 'product_category', 'image')
# Register admin CRUD views for every model (custom views where defined above).
admin.add_view(DistributorView(Distributor, db_session))
admin.add_view(ProductView(Product, db_session))
admin.add_view(ProductCategoryView(ProductCategory, db_session))
admin.add_view(BaseAdminView(RetailProduct, db_session))
admin.add_view(BaseAdminView(Order, db_session))
admin.add_view(BaseAdminView(OrderItem, db_session))
admin.add_view(ZipView(Zip, db_session))

# NOTE(review): this parser appears unused — the visible endpoints read
# request.json directly.  Confirm before removing.
parser = reqparse.RequestParser()
parser.add_argument('reg')
@app.route('/api/login', methods=['GET', 'POST'])
def login():
    """User login page.

    GET: serve the log-in page.
    POST: validate credentials; on success redirect to the requested page
    (``next``) or the admin backend, otherwise flash an error and retry.
    """
    # Already-authenticated users skip the form entirely.
    if current_user.is_authenticated:
        return redirect('/api/admin')
    form = LoginForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            email = form.data.get('email')
            password = form.data.get('password')
            # Validate the login attempt against the stored password hash.
            user = db_session.query(User).filter(User.email == email).first()
            if user and user.check_password(password=password):
                login_user(user)
                return redirect(request.args.get('next') or '/api/admin')
        flash('Invalid username/password combination')
        return redirect('/api/login')
    return render_template('login.html',
                           form=form,
                           title='Log in.',
                           template='login-page',
                           body="Log in with your User account.")
class Register(Resource):
    """Registration endpoint: maps a zipcode to its serving distributor."""

    def post(self):
        registration = request.json['reg']
        # Look the zipcode up; unknown zipcodes mean we cannot serve the user.
        match = db_session.query(Zip).filter(
            Zip.zip_code == registration['zipcode']).first()
        if match is None:
            return {}, 404
        # Remember the registration details for the rest of the session.
        session["user"] = registration
        return {'distributor': match.distributor_id}, 201


api.add_resource(Register, '/api/register')
class CartItemSchema(ma.Schema):
    # Serialized shape of one cart line item.
    class Meta:
        fields = ("name", "price", "qty", "total")


cart_item_schema = CartItemSchema()
cart_items_schema = CartItemSchema(many=True)
class Cart(Resource):
    """Session-backed shopping cart: POST adds items, GET renders the cart."""

    def post(self):
        # Add `qty` units of a product to the session cart.
        product_id = request.json.get('product_id')
        qty = int(request.json.get('qty'))
        current_cart = session.get("cart", {})
        prod_cnt = current_cart.get(product_id, 0)
        current_cart[product_id] = prod_cnt + qty
        # Re-assign so the session backend persists the mutation.
        session["cart"] = current_cart
        resp = {'cart': product_id, 'count': len(session.get("cart", {}))}
        return resp, 201

    def get(self):
        current_cart = session.get("cart", {})
        # NOTE(review): cart keys come from JSON (likely str) while
        # RetailProduct.id is presumably an int column — confirm the two
        # agree, otherwise the id_to_prod lookup below raises KeyError.
        products = db_session.query(RetailProduct).join(Product).filter(RetailProduct.id.in_(current_cart.keys()))
        id_to_prod = {}
        cart_items = []
        for product in products:
            id_to_prod[product.id] = product
        grand_total = 0
        for prod_id, qty in current_cart.items():
            retail_prod = id_to_prod[prod_id]
            # Prices are stored in cents; render as dollars with 2 decimals.
            price = (decimal.Decimal(retail_prod.price) / decimal.Decimal("100.0")).quantize(decimal.Decimal('0.00'))
            cart_items.append({
                "name": retail_prod.name,
                "price": '${}'.format(price),
                "qty": qty,
                "total": '${}'.format(price * qty)
            })
            grand_total += price * qty
        return {'cart': cart_items, 'user': session.get('user'),
                'count': len(current_cart), 'total': '${}'.format(grand_total)}


api.add_resource(Cart, '/api/cart')
class RetailProductSchema(ma.Schema):
    # Serialized shape of a distributor's retail product.
    class Meta:
        fields = ("id", "distributor_id", "product_id", "price", "name", "description", "image", "display_price", "can_order")


product_schema = RetailProductSchema()
products_schema = RetailProductSchema(many=True)
class Products(Resource):
    """List a distributor's retail products, optionally filtered by category."""

    def get(self, distributor_id):
        category = request.args.get("cat")
        query = db_session.query(RetailProduct).join(Product).filter(
            RetailProduct.distributor_id == distributor_id)
        # 'All' (and no filter at all) means every category.
        if category and category != 'All':
            query = query.filter(Product.product_category == category)
        return {'products': products_schema.dump(query),
                'user': session.get('user'),
                'count': len(session.get("cart", {}))}


api.add_resource(Products, '/api/products/<int:distributor_id>')
class ProductApi(Resource):
    """Fetch a single retail product by its id."""

    def get(self, id):
        match = db_session.query(RetailProduct).filter(RetailProduct.id == id).first()
        if match is None:
            return "Not found", 404
        return {'product': product_schema.dump(match),
                'user': session.get('user'),
                'count': len(session.get("cart", {}))}


api.add_resource(ProductApi, '/api/product/<int:id>')
# @app.route('/api/register')
# def register():
# response_object = {'status': 'success'}
# if request.method == 'POST':
# post_data = request.get_json()
# response_object['message'] = 'registered!'
# else:
# response_object['message'] = "not registered"
# return jsonify(response_object)
@app.route('/api/charge', methods=['POST'])
def create_charge():
    """Charge the session cart via Stripe and persist the resulting order.

    The client-reported total is cross-checked against the server-side cart
    total (both in integer cents) before any charge is attempted.  On a
    successful charge the order and its line items are written to the
    database and the session cart is emptied.
    """
    post_data = request.get_json()
    # Client sends the total as a "$x.yz" string; convert to integer cents.
    amount = round(float(post_data.get('total').strip("$")) * 100)
    current_cart = session.get("cart", {})
    products = db_session.query(RetailProduct).join(Product).filter(RetailProduct.id.in_(current_cart.keys()))
    id_to_prod = {}
    distributor_id = None
    for product in products:
        if not distributor_id:
            distributor_id = product.distributor_id
        id_to_prod[product.id] = product
    # Recompute the total server-side so a tampered client total is rejected.
    grand_total = 0
    for prod_id, qty in current_cart.items():
        retail_prod = id_to_prod[prod_id]
        price = retail_prod.price
        grand_total += price * qty
    if grand_total != amount:
        # BUG FIX: the original returned a *set* literal
        # ({"something went wrong"}), which is not JSON-serializable and
        # broke the error response; return a JSON object instead.
        return {'error': 'something went wrong'}, 400
    stripe.api_key = '<KEY>'
    charge = stripe.Charge.create(
        amount=amount,
        currency='usd',
        card=post_data.get('token'),
        description="order"
    )
    response_object = {
        'status': 'success',
        'charge': charge
    }
    user = session["user"]
    if charge.status == 'succeeded':
        # NOTE(review): address/city/state/phone are hard-coded placeholders —
        # confirm whether real shipping details should be collected here.
        order = Order(distributor_id=distributor_id, submitted_at=datetime.datetime.now(),
                      payed_at=datetime.datetime.now(), name=user['fllname'], email=user['email'], zip=user['zipcode'],
                      address='1 main', city='Spokane', state='WA', phone='44455566')
        db_session.add(order)
        db_session.commit()
        for prod_id, qty in current_cart.items():
            retail_prod = id_to_prod[prod_id]
            price = retail_prod.price
            oi = OrderItem(order_id=order.id, purchase_price=price, qty=qty, retail_product_id=prod_id)
            db_session.add(oi)
        db_session.commit()
        session["cart"] = {}
    return jsonify(response_object), 200
@app.route('/api/charge/<charge_id>')
def get_charge(charge_id):
    """Look up a previously created Stripe charge by its id."""
    stripe.api_key = '<KEY>'
    charge = stripe.Charge.retrieve(charge_id)
    return jsonify({'status': 'success', 'charge': charge}), 200
@app.route('/')
def root():
    """Render the landing page with up to five deal products from Datastore."""
    deal_products = datastore_client.query(kind="deal_product").fetch(limit=5)
    return render_template('index.html', products=deal_products, num_products=5)
# @app.route('/product_test')
# def product_test():
# start = datetime.datetime.now()
#
# products = datastore_client.query(kind="social_socialagg").fetch(limit=100)
# display_products = []
# for product in products:
# display_products.append(product)
# delta_load = datetime.datetime.now() - start
#
# start = datetime.datetime.now()
# batch = datastore_client.batch()
# with batch:
# for product in display_products:
# product.update({'rel_6_34_backup_pricing_unit': 'ci'})
# batch.put(product)
# delta_save = datetime.datetime.now() - start
#
# return render_template('load_save_delta.html', products=display_products,
# delta_load=delta_load.total_seconds(), num_products=len(display_products),
# delta_save=delta_save.total_seconds()
# )
#
# @app.route('/product_test_loop_put')
# def product_test_loop_put():
# start = datetime.datetime.now()
# products = datastore_client.query(kind="social_socialagg").fetch(limit=100)
# display_products = []
# delta_each = []
# for product in products:
# start_each = datetime.datetime.now()
# product.update({'rel_6_34_backup_pricing_unit': 'ci'})
# datastore_client.put(product)
# delta = datetime.datetime.now() - start_each
# display_products.append([product, delta.total_seconds()])
# delta = datetime.datetime.now() - start
# return render_template('save_each.html', products=disaplay_products,
# microseconds=delta.total_seconds(), num_products=len(display_products),
# project=datastore_client.project
# )
if __name__ == "__main__":
    # Development entry point: bind all interfaces on port 8081.
    app.run(host='0.0.0.0', port=8081)
| [
"flask.render_template",
"flask_login.LoginManager",
"flask.request.args.get",
"flask.Flask",
"session.db_session.query",
"forms.LoginForm",
"flask.jsonify",
"flask_restful.reqparse.RequestParser",
"flask.flash",
"models.OrderItem",
"google.cloud.datastore.Client",
"flask_admin.Admin",
"flas... | [((773, 791), 'google.cloud.datastore.Client', 'datastore.Client', ([], {}), '()\n', (789, 791), False, 'from google.cloud import datastore\n'), ((800, 815), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (805, 815), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((1007, 1081), 'flask_admin.Admin', 'Admin', (['app'], {'name': '"""simple123"""', 'template_mode': '"""bootstrap3"""', 'url': '"""/api/admin"""'}), "(app, name='simple123', template_mode='bootstrap3', url='/api/admin')\n", (1012, 1081), False, 'from flask_admin import Admin\n'), ((1088, 1096), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (1091, 1096), False, 'from flask_restful import reqparse, abort, Api, Resource\n'), ((1102, 1118), 'flask_marshmallow.Marshmallow', 'Marshmallow', (['app'], {}), '(app)\n', (1113, 1118), False, 'from flask_marshmallow import Marshmallow\n'), ((1126, 1138), 'flask_session.Session', 'Session', (['app'], {}), '(app)\n', (1133, 1138), False, 'from flask_session import Session\n'), ((1144, 1156), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (1154, 1156), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((1173, 1190), 'flask_login.LoginManager', 'LoginManager', (['app'], {}), '(app)\n', (1185, 1190), False, 'from flask_login import login_required, logout_user, current_user, login_user, LoginManager\n'), ((1236, 1268), 'models.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (1260, 1268), False, 'from models import Distributor, Zip, Product, RetailProduct, Base, User, ProductCategory, Order, OrderItem\n'), ((2726, 2750), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (2748, 2750), False, 'from flask_restful import reqparse, abort, Api, Resource\n'), ((1588, 1637), 'flask.flash', 'flash', (['"""You must be logged in to view that page."""'], {}), "('You must be logged in to view that 
page.')\n", (1593, 1637), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((1646, 1668), 'flask.redirect', 'redirect', (['"""/api/login"""'], {}), "('/api/login')\n", (1654, 1668), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((3134, 3145), 'forms.LoginForm', 'LoginForm', ([], {}), '()\n', (3143, 3145), False, 'from forms import LoginForm\n'), ((3743, 3873), 'flask.render_template', 'render_template', (['"""login.html"""'], {'form': 'login_form', 'title': '"""Log in."""', 'template': '"""login-page"""', 'body': '"""Log in with your User account."""'}), "('login.html', form=login_form, title='Log in.', template=\n 'login-page', body='Log in with your User account.')\n", (3758, 3873), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((7198, 7216), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7214, 7216), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((7298, 7321), 'flask.session.get', 'session.get', (['"""cart"""', '{}'], {}), "('cart', {})\n", (7309, 7321), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((9078, 9142), 'flask.render_template', 'render_template', (['"""index.html"""'], {'products': 'products', 'num_products': '(5)'}), "('index.html', products=products, num_products=5)\n", (9093, 9142), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((1883, 1905), 'flask.redirect', 'redirect', (['"""/api/login"""'], {}), "('/api/login')\n", (1891, 1905), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((3062, 3084), 'flask.redirect', 'redirect', (['"""/api/admin"""'], {}), "('/api/admin')\n", (3070, 3084), False, 'from flask 
import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((3646, 3692), 'flask.flash', 'flash', (['"""Invalid username/password combination"""'], {}), "('Invalid username/password combination')\n", (3651, 3692), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((3708, 3730), 'flask.redirect', 'redirect', (['"""/api/login"""'], {}), "('/api/login')\n", (3716, 3730), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((4545, 4575), 'flask.request.json.get', 'request.json.get', (['"""product_id"""'], {}), "('product_id')\n", (4561, 4575), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((4631, 4654), 'flask.session.get', 'session.get', (['"""cart"""', '{}'], {}), "('cart', {})\n", (4642, 4654), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((4901, 4924), 'flask.session.get', 'session.get', (['"""cart"""', '{}'], {}), "('cart', {})\n", (4912, 4924), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((8358, 8379), 'session.db_session.add', 'db_session.add', (['order'], {}), '(order)\n', (8372, 8379), False, 'from session import db_session, engine\n'), ((8382, 8401), 'session.db_session.commit', 'db_session.commit', ([], {}), '()\n', (8399, 8401), False, 'from session import db_session, engine\n'), ((8632, 8651), 'session.db_session.commit', 'db_session.commit', ([], {}), '()\n', (8649, 8651), False, 'from session import db_session, engine\n'), ((8685, 8709), 'flask.jsonify', 'jsonify', (['response_object'], {}), '(response_object)\n', (8692, 8709), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((8882, 8915), 'stripe.Charge.retrieve', 'stripe.Charge.retrieve', (['charge_id'], {}), 
'(charge_id)\n', (8904, 8915), False, 'import stripe\n'), ((8933, 8957), 'flask.jsonify', 'jsonify', (['response_object'], {}), '(response_object)\n', (8940, 8957), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((4588, 4611), 'flask.request.json.get', 'request.json.get', (['"""qty"""'], {}), "('qty')\n", (4604, 4611), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((5558, 5577), 'flask.session.get', 'session.get', (['"""user"""'], {}), "('user')\n", (5569, 5577), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((6332, 6351), 'flask.session.get', 'session.get', (['"""user"""'], {}), "('user')\n", (6343, 6351), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((6699, 6718), 'flask.session.get', 'session.get', (['"""user"""'], {}), "('user')\n", (6710, 6718), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((8521, 8611), 'models.OrderItem', 'OrderItem', ([], {'order_id': 'order.id', 'purchase_price': 'price', 'qty': 'qty', 'retail_product_id': 'prod_id'}), '(order_id=order.id, purchase_price=price, qty=qty,\n retail_product_id=prod_id)\n', (8530, 8611), False, 'from models import Distributor, Zip, Product, RetailProduct, Base, User, ProductCategory, Order, OrderItem\n'), ((8611, 8629), 'session.db_session.add', 'db_session.add', (['oi'], {}), '(oi)\n', (8625, 8629), False, 'from session import db_session, engine\n'), ((3509, 3525), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (3519, 3525), False, 'from flask_login import login_required, logout_user, current_user, login_user, LoginManager\n'), ((3554, 3578), 'flask.request.args.get', 'request.args.get', (['"""next"""'], {}), "('next')\n", (3570, 3578), False, 'from flask import Flask, 
render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((3602, 3637), 'flask.redirect', 'redirect', (["(next_page or '/api/admin')"], {}), "(next_page or '/api/admin')\n", (3610, 3637), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((4822, 4845), 'flask.session.get', 'session.get', (['"""cart"""', '{}'], {}), "('cart', {})\n", (4833, 4845), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((5316, 5339), 'decimal.Decimal', 'decimal.Decimal', (['"""0.00"""'], {}), "('0.00')\n", (5331, 5339), False, 'import decimal\n'), ((6370, 6393), 'flask.session.get', 'session.get', (['"""cart"""', '{}'], {}), "('cart', {})\n", (6381, 6393), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((6737, 6760), 'flask.session.get', 'session.get', (['"""cart"""', '{}'], {}), "('cart', {})\n", (6748, 6760), False, 'from flask import Flask, render_template, jsonify, request, session, Blueprint, redirect, flash\n'), ((8155, 8178), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8176, 8178), False, 'import datetime\n'), ((8196, 8219), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8217, 8219), False, 'import datetime\n'), ((7334, 7365), 'session.db_session.query', 'db_session.query', (['RetailProduct'], {}), '(RetailProduct)\n', (7350, 7365), False, 'from session import db_session, engine\n'), ((1410, 1432), 'session.db_session.query', 'db_session.query', (['User'], {}), '(User)\n', (1426, 1432), False, 'from session import db_session, engine\n'), ((4072, 4093), 'session.db_session.query', 'db_session.query', (['Zip'], {}), '(Zip)\n', (4088, 4093), False, 'from session import db_session, engine\n'), ((4938, 4969), 'session.db_session.query', 'db_session.query', (['RetailProduct'], {}), '(RetailProduct)\n', (4954, 4969), False, 'from session import 
db_session, engine\n'), ((5244, 5278), 'decimal.Decimal', 'decimal.Decimal', (['retail_prod.price'], {}), '(retail_prod.price)\n', (5259, 5278), False, 'import decimal\n'), ((5281, 5305), 'decimal.Decimal', 'decimal.Decimal', (['"""100.0"""'], {}), "('100.0')\n", (5296, 5305), False, 'import decimal\n'), ((6043, 6074), 'session.db_session.query', 'db_session.query', (['RetailProduct'], {}), '(RetailProduct)\n', (6059, 6074), False, 'from session import db_session, engine\n'), ((6524, 6555), 'session.db_session.query', 'db_session.query', (['RetailProduct'], {}), '(RetailProduct)\n', (6540, 6555), False, 'from session import db_session, engine\n'), ((3346, 3368), 'session.db_session.query', 'db_session.query', (['User'], {}), '(User)\n', (3362, 3368), False, 'from session import db_session, engine\n')] |
# -*- coding: utf-8 -*-
"""
Overall wrapper class for AMBER
"""
import tensorflow as tf
from keras import backend as K
try:
    from tensorflow import Session
except ImportError:
    # TensorFlow 2.x moved Session under compat.v1.
    from tensorflow.compat.v1 import Session
# AMBER relies on graph-mode execution (explicit Session usage below).
tf.compat.v1.disable_eager_execution()
import os
from . import getter
class Amber:
    """The main wrapper class for AMBER.

    Builds every AMBER component (model space, controller, modeler,
    knowledge/reward functions, manager and training environment) from a
    pair of dictionaries via the ``getter`` factory module, and drives the
    architecture search.

    Parameters
    ----------
    types : dict
        Maps component roles (``controller_type``, ``modeler_type``,
        ``knowledge_fn_type``, ``reward_fn_type``, ``manager_type``,
        ``env_type``) to the concrete implementation names resolved
        by ``getter``.
    specs : dict, optional
        Component specifications; when given, the whole pipeline is
        built immediately via :meth:`from_dict`.

    Attributes
    ----------
    type_dict : dict
    is_built : bool
    model_space : amber.architect.ModelSpace
    controller : amber.architect.BaseController
    """

    def __init__(self, types, specs=None):
        self.type_dict = types
        self.is_built = False
        self.model_space = None
        self.controller = None
        self.model_fn = None
        self.knowledge_fn = None
        self.reward_fn = None
        self.manager = None
        self.env = None
        # use one tf.Session throughout one DA instance
        self.session = Session()
        try:
            K.set_session(self.session)
        except Exception as e:
            # Best effort only: some Keras backends do not expose set_session.
            # BUG FIX: corrected "becasue" typo in the message below.
            print("Failed to set Keras backend because of %s" % e)
        if specs is not None:
            self.from_dict(specs)

    def from_dict(self, d):
        """Build all components from the spec dictionary *d*; returns self."""
        assert type(d) is dict
        print("BUILDING")
        print("-" * 10)
        # Components are constructed in dependency order: the controller needs
        # the model space, the modeler needs the controller, and so on.
        self.model_space = getter.get_model_space(d['model_space'])
        self.controller = getter.get_controller(controller_type=self.type_dict['controller_type'],
                                               model_space=self.model_space,
                                               session=self.session,
                                               **d['controller'])
        self.model_fn = getter.get_modeler(model_fn_type=self.type_dict['modeler_type'],
                                            model_space=self.model_space,
                                            session=self.session,
                                            controller=self.controller,
                                            **d['model_builder'])
        self.knowledge_fn = getter.get_knowledge_fn(knowledge_fn_type=self.type_dict['knowledge_fn_type'],
                                                     knowledge_data_dict=d['knowledge_fn']['data'],
                                                     **d['knowledge_fn']['params'])
        self.reward_fn = getter.get_reward_fn(reward_fn_type=self.type_dict['reward_fn_type'],
                                              knowledge_fn=self.knowledge_fn,
                                              **d['reward_fn'])
        self.manager = getter.get_manager(manager_type=self.type_dict['manager_type'],
                                            model_fn=self.model_fn,
                                            reward_fn=self.reward_fn,
                                            data_dict=d['manager']['data'],
                                            session=self.session,
                                            **d['manager']['params'])
        self.env = getter.get_train_env(env_type=self.type_dict['env_type'],
                                          controller=self.controller,
                                          manager=self.manager,
                                          **d['train_env'])
        self.is_built = True
        return self

    def run(self):
        """Run the search and save the trained controller weights."""
        assert self.is_built
        self.env.train()
        self.controller.save_weights(os.path.join(self.env.working_dir, "controller_weights.h5"))
| [
"os.path.join",
"tensorflow.compat.v1.disable_eager_execution",
"keras.backend.set_session",
"tensorflow.compat.v1.Session"
] | [((232, 270), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (268, 270), True, 'import tensorflow as tf\n'), ((1114, 1123), 'tensorflow.compat.v1.Session', 'Session', ([], {}), '()\n', (1121, 1123), False, 'from tensorflow.compat.v1 import Session\n'), ((1149, 1176), 'keras.backend.set_session', 'K.set_session', (['self.session'], {}), '(self.session)\n', (1162, 1176), True, 'from keras import backend as K\n'), ((3578, 3637), 'os.path.join', 'os.path.join', (['self.env.working_dir', '"""controller_weights.h5"""'], {}), "(self.env.working_dir, 'controller_weights.h5')\n", (3590, 3637), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 30 10:45:11 2021
This script was used to change the filenames of already generated prompts, that had repetitions of over 10 character.
@author: tavastm1
"""
import re
import os
path = 'prompts/GamesAsArt/'
#file_start= 'thinking_game_' # we have two versions of the query
file_start= 'game_'
file_end = 'davinci.txt'
# Find the game events
examples_list = []
index_list = []
# Load every matching prompt file; index_list keeps the filenames in
# parallel with examples_list so indices line up later.
# NOTE(review): file handles opened here are never closed explicitly;
# they are only reclaimed by garbage collection.
for file in os.listdir(path):
    if file.startswith(file_start) and file.endswith(file_end):
        examples_list.append(open(path + file, "r").read())
        index_list.append(file)
# Stackoverflow question 9079797
def repetitions(s):
    """Yield the repeating unit of each tandem repeat found in *s*."""
    tandem = re.compile(r"(.+?)\1+")
    for match in tandem.finditer(s):
        yield match.group(1)
# Append files that have repetitions >10 to a list, later to be discarded
ToBeDiscarded = []
FileNameList = []
# BUG FIX: the original appended one entry *per* long repetition, so a file
# containing several long repetitions appeared multiple times in
# FileNameList and the later os.rename pass crashed on the second attempt.
# Each offending file is now recorded exactly once (every long repetition is
# still printed for inspection).
for i in range(len(examples_list)):
    text = examples_list[i]
    long_reps = [rep for rep in repetitions(text) if len(rep) > 10]
    if long_reps:
        ToBeDiscarded.append(i)
        FileNameList.append(index_list[i])
        for rep in long_reps:
            print(i, rep)
# Discard the game events by renaming the files
# NOTE(review): the first loop only *prints* files discarded on a previous
# run; it does not affect the renaming below.
for file in os.listdir(path):
    if file.startswith('DISCARDED'):
        temp = (open(path + file, "r").read())
        print(file, temp)
# Prefix each flagged file with DISCARDED_ so later runs skip it.
for k in range(len(ToBeDiscarded)):
    temp3 = FileNameList[k]
    os.rename(f'prompts/GamesAsArt/{temp3}', f'prompts/GamesAsArt/DISCARDED_{temp3}')
"os.rename",
"os.listdir",
"re.compile"
] | [((470, 486), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (480, 486), False, 'import os\n'), ((1352, 1368), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1362, 1368), False, 'import os\n'), ((746, 769), 're.compile', 're.compile', (['"""(.+?)\\\\1+"""'], {}), "('(.+?)\\\\1+')\n", (756, 769), False, 'import re\n'), ((1547, 1632), 'os.rename', 'os.rename', (['f"""prompts/GamesAsArt/{temp3}"""', 'f"""prompts/GamesAsArt/DISCARDED_{temp3}"""'], {}), "(f'prompts/GamesAsArt/{temp3}',\n f'prompts/GamesAsArt/DISCARDED_{temp3}')\n", (1556, 1632), False, 'import os\n')] |
import os
from io import StringIO
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
class PipelineTestCase(TestCase):
    """Integration tests for the ``clean_staticfilesjson`` management command."""

    def setUp(self):
        # Start from a clean slate: drop any manifest left by a prior run.
        self.file_path = os.path.join(settings.STATIC_ROOT, "staticfiles.json")
        if os.path.isfile(self.file_path):
            os.remove(self.file_path)

    def test_success(self):
        # collectstatic produces the manifest, then the command re-writes it.
        call_command("collectstatic", "--noinput", stdout=StringIO())
        call_command("clean_staticfilesjson", stdout=StringIO())
        with open(self.file_path) as manifest:
            contents = manifest.read()
        expected_prefix = '{\n  "paths": {\n'
        self.assertTrue(
            contents.startswith(expected_prefix),
            'staticfiles.json doesn\'t start with "{}"'.format(
                contents[: len(expected_prefix)]
            ),
        )

    def test_missing_staticfilesjson(self):
        # Without a prior collectstatic there is no manifest to clean.
        with self.assertRaises(CommandError):
            call_command("clean_staticfilesjson", stdout=StringIO())
| [
"os.path.isfile",
"io.StringIO",
"os.path.join",
"os.remove"
] | [((284, 338), 'os.path.join', 'os.path.join', (['settings.STATIC_ROOT', '"""staticfiles.json"""'], {}), "(settings.STATIC_ROOT, 'staticfiles.json')\n", (296, 338), False, 'import os\n'), ((350, 380), 'os.path.isfile', 'os.path.isfile', (['self.file_path'], {}), '(self.file_path)\n', (364, 380), False, 'import os\n'), ((394, 419), 'os.remove', 'os.remove', (['self.file_path'], {}), '(self.file_path)\n', (403, 419), False, 'import os\n'), ((507, 517), 'io.StringIO', 'StringIO', ([], {}), '()\n', (515, 517), False, 'from io import StringIO\n'), ((572, 582), 'io.StringIO', 'StringIO', ([], {}), '()\n', (580, 582), False, 'from io import StringIO\n'), ((1087, 1097), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1095, 1097), False, 'from io import StringIO\n')] |
import numpy as np
from PIL import Image
from skimage import color
from skimage.feature import hog
from pelops.features.feature_producer import FeatureProducer
class HOGFeatureProducer(FeatureProducer):
    """Feature producer combining HOG descriptors with per-channel
    intensity histograms of the resized chip image.
    """

    def __init__(self, chip_producer, image_size=(224,224), cells=(16, 16), orientations=8, histogram_bins_per_channel=256):
        # image_size: (width, height) every chip is resized to.
        # cells: HOG cell grid; also used as the normalization block size.
        # orientations: number of HOG gradient-orientation bins.
        # histogram_bins_per_channel: bins for each colour-channel histogram.
        self.image_size = image_size
        self.cells = cells
        self.orientations = orientations
        self.histogram_bins_per_channel = histogram_bins_per_channel
        super().__init__(chip_producer)

    def produce_features(self, chip):
        """Takes a chip object and returns a feature vector of size
        self.feat_size. """
        img = self.get_image(chip)
        img = img.resize(self.image_size, Image.BICUBIC)
        img_x, img_y = img.size
        # Calculate histogram of each channel.  Missing channels leave their
        # slots at -1 so the vector length stays constant.
        channels = img.split()
        hist_features = np.full(shape=3 * self.histogram_bins_per_channel, fill_value=-1)
        # We expect RGB images. If something else is passed warn the user and
        # continue.
        if len(channels) < 3:
            print("Non-RBG image! Vector will be padded with -1!")
        if len(channels) > 3:
            print("Non-RBG image! Channels beyond the first three will be ignored!")
            # BUG FIX: the original referenced the undefined name `channel`
            # here (`channels = channel[:3]`), raising NameError for images
            # with more than three channels (e.g. RGBA).
            channels = channels[:3]
        for i, channel in enumerate(channels):
            channel_array = np.array(channel)
            values, _ = np.histogram(channel_array.flat, bins=self.histogram_bins_per_channel)
            start = i * self.histogram_bins_per_channel
            end = (i+1) * self.histogram_bins_per_channel
            hist_features[start:end] = values
        # Calculate HOG features, which require a grayscale image
        img = color.rgb2gray(np.array(img))
        features = hog(
            img,
            orientations=self.orientations,
            pixels_per_cell=(img_x / self.cells[0], img_y / self.cells[1]),
            cells_per_block=self.cells,  # Normalize over the whole image
        )
        return np.concatenate((features, hist_features))

    def set_variables(self):
        # Total feature length: HOG block plus three channel histograms.
        hog_size = self.cells[0] * self.cells[1] * self.orientations
        hist_size = 3 * self.histogram_bins_per_channel
        self.feat_size = hog_size + hist_size
| [
"numpy.histogram",
"numpy.array",
"numpy.concatenate",
"numpy.full",
"skimage.feature.hog"
] | [((907, 972), 'numpy.full', 'np.full', ([], {'shape': '(3 * self.histogram_bins_per_channel)', 'fill_value': '(-1)'}), '(shape=3 * self.histogram_bins_per_channel, fill_value=-1)\n', (914, 972), True, 'import numpy as np\n'), ((1798, 1935), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'self.orientations', 'pixels_per_cell': '(img_x / self.cells[0], img_y / self.cells[1])', 'cells_per_block': 'self.cells'}), '(img, orientations=self.orientations, pixels_per_cell=(img_x / self.\n cells[0], img_y / self.cells[1]), cells_per_block=self.cells)\n', (1801, 1935), False, 'from skimage.feature import hog\n'), ((2040, 2081), 'numpy.concatenate', 'np.concatenate', (['(features, hist_features)'], {}), '((features, hist_features))\n', (2054, 2081), True, 'import numpy as np\n'), ((1395, 1412), 'numpy.array', 'np.array', (['channel'], {}), '(channel)\n', (1403, 1412), True, 'import numpy as np\n'), ((1437, 1507), 'numpy.histogram', 'np.histogram', (['channel_array.flat'], {'bins': 'self.histogram_bins_per_channel'}), '(channel_array.flat, bins=self.histogram_bins_per_channel)\n', (1449, 1507), True, 'import numpy as np\n'), ((1764, 1777), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1772, 1777), True, 'import numpy as np\n')] |
import numpy as np
arr = np.random.choice([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=(100))
print(arr)
arr = np.random.choice([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=100)
print(arr)
arr = np.random.choice([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=(5, 10))
print(arr)
| [
"numpy.random.choice"
] | [((28, 100), 'numpy.random.choice', 'np.random.choice', (['[6, 8, 3, 1, 5]'], {'p': '[0.0, 0.5, 0.2, 0.2, 0.1]', 'size': '(100)'}), '([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=100)\n', (44, 100), True, 'import numpy as np\n'), ((121, 193), 'numpy.random.choice', 'np.random.choice', (['[6, 8, 3, 1, 5]'], {'p': '[0.0, 0.5, 0.2, 0.2, 0.1]', 'size': '(100)'}), '([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=100)\n', (137, 193), True, 'import numpy as np\n'), ((212, 288), 'numpy.random.choice', 'np.random.choice', (['[6, 8, 3, 1, 5]'], {'p': '[0.0, 0.5, 0.2, 0.2, 0.1]', 'size': '(5, 10)'}), '([6, 8, 3, 1, 5], p=[0.0, 0.5, 0.2, 0.2, 0.1], size=(5, 10))\n', (228, 288), True, 'import numpy as np\n')] |
import toolbox as tb
from structures import IP, IS
from statistics import Statistics
from kanji import Kanji
S = Statistics()
| [
"statistics.Statistics"
] | [((114, 126), 'statistics.Statistics', 'Statistics', ([], {}), '()\n', (124, 126), False, 'from statistics import Statistics\n')] |
#!/usr/bin/python3
# get_connection_meta.py: get connection metadata
import sys
import mysql.connector
import cookbook
try:
conn = cookbook.connect()
except mysql.connector.Error as e:
print("Error code: %s" % e.errno)
print("Error message: %s" % e.msg)
sys.exit(1)
try:
#@ _CURRENT_DATABASE_
cursor = conn.cursor()
cursor.execute("SELECT DATABASE()")
row = cursor.fetchone()
cursor.close()
if row is None or len(row) == 0 or row[0] is None:
db = "(no database selected)"
else:
db = row[0]
print("Default database: %s" % db)
#@ _CURRENT_DATABASE_
except mysql.connector.Error as e:
print("Error code: %s" % e.errno)
print("Error message: %s" % e.msg)
sys.exit(1)
conn.close()
| [
"cookbook.connect",
"sys.exit"
] | [((135, 153), 'cookbook.connect', 'cookbook.connect', ([], {}), '()\n', (151, 153), False, 'import cookbook\n'), ((264, 275), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (272, 275), False, 'import sys\n'), ((690, 701), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (698, 701), False, 'import sys\n')] |
from utils.initLog import initLog
def main_function():
logger = initLog('dev', '/home/kuan/DL_template')
logger.critical('CRITICAL')
logger.fatal('FATAL')
logger.error('ERROR')
logger.warning('WARNING')
logger.info('INFO')
logger.debug('DEBUG')
if __name__ == '__main__':
main_function()
| [
"utils.initLog.initLog"
] | [((68, 108), 'utils.initLog.initLog', 'initLog', (['"""dev"""', '"""/home/kuan/DL_template"""'], {}), "('dev', '/home/kuan/DL_template')\n", (75, 108), False, 'from utils.initLog import initLog\n')] |
import asyncio
import asyncssh
import sys
import os
import crypt
from importlib.util import find_spec
class MySSHServerSession(asyncssh.SSHServerSession):
def __init__(self):
self._input = ''
self._data = None
self.devtype = 'iosxr'
self.run_as_shell = False
self.prompt = '#'
self.vendor = 'cisco'
self.sample_data_dir = self.get_testinput_dir()
self.cmd_data = {
'show version':
f'{self.sample_data_dir}/cisco/iosxr/show_version.txt',
'show run hostname':
f'{self.sample_data_dir}/cisco/iosxr/show_run_hostname.txt',
'show interfaces':
f'{self.sample_data_dir}/cisco/iosxr/show_interfaces.txt',
}
def get_testinput_dir(self):
'''Get the dir where the test input data is stored'''
return(os.path.dirname(find_spec('suzieq').loader.path) +
'/../tests/integration/nossim')
def connection_made(self, chan):
self._chan = chan
def shell_requested(self):
self.run_as_shell = True
return True
def _exec_cmd(self, command):
'''The routine to execute command and return data'''
data = 'Command not found\n'
if command in ['exit', 'quit']:
self.eof_received()
return ''
if command in self.cmd_data:
with open(self.cmd_data[command], 'r') as f:
data = f.read()
return data
def exec_requested(self, command):
'''Return the data for the specified command is possible'''
self.run_as_shell = False
command = command.rstrip('\n')
self._data = self._exec_cmd(command)
return True
def data_received(self, input, datatype):
'''Shell handler'''
command = input.rstrip('\n')
data = self._exec_cmd(command)
self._chan.write(data)
self._chan.write(self.prompt)
def session_started(self):
if self._data:
self._chan.write(self._data)
self._chan.exit(0)
elif not self.run_as_shell:
self._chan.exit(1)
elif self.run_as_shell:
self._chan.write(f'{self.prompt}')
class MySSHServer(asyncssh.SSHServer):
def __init__(self):
self.passwords = {'vagrant': '<PASSWORD>'} # password of '<PASSWORD>'
def connection_made(self, conn):
print('SSH connection received from %s.' %
conn.get_extra_info('peername')[0])
def connection_lost(self, exc):
if exc:
print('SSH connection error: ' + str(exc), file=sys.stderr)
else:
print('SSH connection closed.')
def begin_auth(self, username):
# If the user's password is the empty string, no auth is required
return self.passwords.get(username) != ''
def password_auth_supported(self):
return True
def validate_password(self, username, password):
pw = self.passwords.get(username, '*')
return crypt.crypt(password, pw) == pw
def session_requested(self):
return MySSHServerSession()
async def start_server():
await asyncssh.listen('', 10000, server_factory=MySSHServer,
server_host_keys=['/tmp/ssh_host_key'])
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(start_server())
except (OSError, asyncssh.Error) as exc:
sys.exit('Error starting server: ' + str(exc))
loop.run_forever()
| [
"importlib.util.find_spec",
"crypt.crypt",
"asyncio.get_event_loop",
"asyncssh.listen"
] | [((3295, 3319), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3317, 3319), False, 'import asyncio\n'), ((3166, 3265), 'asyncssh.listen', 'asyncssh.listen', (['""""""', '(10000)'], {'server_factory': 'MySSHServer', 'server_host_keys': "['/tmp/ssh_host_key']"}), "('', 10000, server_factory=MySSHServer, server_host_keys=[\n '/tmp/ssh_host_key'])\n", (3181, 3265), False, 'import asyncssh\n'), ((3026, 3051), 'crypt.crypt', 'crypt.crypt', (['password', 'pw'], {}), '(password, pw)\n', (3037, 3051), False, 'import crypt\n'), ((876, 895), 'importlib.util.find_spec', 'find_spec', (['"""suzieq"""'], {}), "('suzieq')\n", (885, 895), False, 'from importlib.util import find_spec\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
# Copyright (C) 2018 Seeed Technology Co.,Ltd.
'''
This is the code for
- `Grove - Sound Sensor <https://www.seeedstudio.com/Grove-Sound-Sensor-p-752.html>`_
Examples:
.. code-block:: python
import time
from grove.grove_sound_sensor import GroveSoundSensor
# connect to alalog pin 2(slot A2)
PIN = 2
sensor = GroveSoundSensor(PIN)
print('Detecting sound...')
while True:
print('Sound value: {0}'.format(sensor.sound))
time.sleep(.3)
'''
import math
import time
from grove.adc import ADC
__all__ = ['GroveSoundSensor']
class GroveSoundSensor(object):
'''
Grove Sound Sensor class
Args:
pin(int): number of analog pin/channel the sensor connected.
'''
def __init__(self, channel):
self.channel = channel
self.adc = ADC()
@property
def sound(self):
'''
Get the sound strength value
Returns:
(int): ratio, 0(0.0%) - 1000(100.0%)
'''
value = self.adc.read(self.channel)
return value
Grove = GroveSoundSensor
def main():
from grove.helper import SlotHelper
sh = SlotHelper(SlotHelper.ADC)
pin = sh.argv2pin()
sensor = GroveSoundSensor(pin)
print('Detecting sound...')
while True:
print('Sound value: {0}'.format(sensor.sound))
time.sleep(.3)
if __name__ == '__main__':
main()
| [
"time.sleep",
"grove.helper.SlotHelper",
"grove.adc.ADC"
] | [((1323, 1349), 'grove.helper.SlotHelper', 'SlotHelper', (['SlotHelper.ADC'], {}), '(SlotHelper.ADC)\n', (1333, 1349), False, 'from grove.helper import SlotHelper\n'), ((999, 1004), 'grove.adc.ADC', 'ADC', ([], {}), '()\n', (1002, 1004), False, 'from grove.adc import ADC\n'), ((1522, 1537), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (1532, 1537), False, 'import time\n')] |
"""
Adding simple implementations or interfaces to some data types that are missing from the standard library.
"""
import lazy_import
from .graph_common import (Edge, EdgeExistedError, InvariantError, Node,
NodeIndexError)
lattice = lazy_import.lazy_module("seutil.ds.lattice")
trie = lazy_import.lazy_module("seutil.ds.trie")
| [
"lazy_import.lazy_module"
] | [((263, 307), 'lazy_import.lazy_module', 'lazy_import.lazy_module', (['"""seutil.ds.lattice"""'], {}), "('seutil.ds.lattice')\n", (286, 307), False, 'import lazy_import\n'), ((315, 356), 'lazy_import.lazy_module', 'lazy_import.lazy_module', (['"""seutil.ds.trie"""'], {}), "('seutil.ds.trie')\n", (338, 356), False, 'import lazy_import\n')] |
"""Support for MQTT platform config setup."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_DISCOVERY,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
Platform,
)
from homeassistant.helpers import config_validation as cv
from . import (
alarm_control_panel as alarm_control_panel_platform,
binary_sensor as binary_sensor_platform,
button as button_platform,
camera as camera_platform,
climate as climate_platform,
cover as cover_platform,
device_tracker as device_tracker_platform,
fan as fan_platform,
humidifier as humidifier_platform,
light as light_platform,
lock as lock_platform,
number as number_platform,
scene as scene_platform,
select as select_platform,
sensor as sensor_platform,
siren as siren_platform,
switch as switch_platform,
vacuum as vacuum_platform,
)
from .const import (
ATTR_PAYLOAD,
ATTR_QOS,
ATTR_RETAIN,
ATTR_TOPIC,
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_CERTIFICATE,
CONF_CLIENT_CERT,
CONF_CLIENT_KEY,
CONF_DISCOVERY_PREFIX,
CONF_KEEPALIVE,
CONF_TLS_INSECURE,
CONF_TLS_VERSION,
CONF_WILL_MESSAGE,
DEFAULT_BIRTH,
DEFAULT_DISCOVERY,
DEFAULT_PREFIX,
DEFAULT_QOS,
DEFAULT_RETAIN,
DEFAULT_WILL,
PROTOCOL_31,
PROTOCOL_311,
)
from .util import _VALID_QOS_SCHEMA, valid_publish_topic
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_TLS_PROTOCOL = "auto"
DEFAULT_VALUES = {
CONF_BIRTH_MESSAGE: DEFAULT_BIRTH,
CONF_DISCOVERY: DEFAULT_DISCOVERY,
CONF_PORT: DEFAULT_PORT,
CONF_TLS_VERSION: DEFAULT_TLS_PROTOCOL,
CONF_WILL_MESSAGE: DEFAULT_WILL,
}
PLATFORM_CONFIG_SCHEMA_BASE = vol.Schema(
{
Platform.ALARM_CONTROL_PANEL.value: vol.All(
cv.ensure_list, [alarm_control_panel_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.BINARY_SENSOR.value: vol.All(
cv.ensure_list, [binary_sensor_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.BUTTON.value: vol.All(
cv.ensure_list, [button_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.CAMERA.value: vol.All(
cv.ensure_list, [camera_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.CLIMATE.value: vol.All(
cv.ensure_list, [climate_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.COVER.value: vol.All(
cv.ensure_list, [cover_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.DEVICE_TRACKER.value: vol.All(
cv.ensure_list, [device_tracker_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.FAN.value: vol.All(
cv.ensure_list, [fan_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.HUMIDIFIER.value: vol.All(
cv.ensure_list, [humidifier_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.LOCK.value: vol.All(
cv.ensure_list, [lock_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.LIGHT.value: vol.All(
cv.ensure_list, [light_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.NUMBER.value: vol.All(
cv.ensure_list, [number_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.SCENE.value: vol.All(
cv.ensure_list, [scene_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.SELECT.value: vol.All(
cv.ensure_list, [select_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.SENSOR.value: vol.All(
cv.ensure_list, [sensor_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.SIREN.value: vol.All(
cv.ensure_list, [siren_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.SWITCH.value: vol.All(
cv.ensure_list, [switch_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
Platform.VACUUM.value: vol.All(
cv.ensure_list, [vacuum_platform.PLATFORM_SCHEMA_MODERN] # type: ignore[has-type]
),
}
)
CLIENT_KEY_AUTH_MSG = (
"client_key and client_cert must both be present in "
"the MQTT broker configuration"
)
MQTT_WILL_BIRTH_SCHEMA = vol.Schema(
{
vol.Inclusive(ATTR_TOPIC, "topic_payload"): valid_publish_topic,
vol.Inclusive(ATTR_PAYLOAD, "topic_payload"): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
},
required=True,
)
CONFIG_SCHEMA_BASE = PLATFORM_CONFIG_SCHEMA_BASE.extend(
{
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
vol.Coerce(int), vol.Range(min=15)
),
vol.Optional(CONF_BROKER): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CERTIFICATE): vol.Any("auto", cv.isfile),
vol.Inclusive(
CONF_CLIENT_KEY, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
): cv.isfile,
vol.Inclusive(
CONF_CLIENT_CERT, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
): cv.isfile,
vol.Optional(CONF_TLS_INSECURE): cv.boolean,
vol.Optional(CONF_TLS_VERSION): vol.Any("auto", "1.0", "1.1", "1.2"),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
),
vol.Optional(CONF_WILL_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_BIRTH_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_DISCOVERY): cv.boolean,
# discovery_prefix must be a valid publish topic because if no
# state topic is specified, it will be created with the given prefix.
vol.Optional(
CONF_DISCOVERY_PREFIX, default=DEFAULT_PREFIX
): valid_publish_topic,
}
)
DEPRECATED_CONFIG_KEYS = [
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_DISCOVERY,
CONF_PASSWORD,
CONF_PORT,
CONF_TLS_VERSION,
CONF_USERNAME,
CONF_WILL_MESSAGE,
]
| [
"voluptuous.Inclusive",
"voluptuous.Any",
"voluptuous.Range",
"voluptuous.Optional",
"voluptuous.Coerce",
"voluptuous.All",
"voluptuous.In"
] | [((1880, 1958), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[alarm_control_panel_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [alarm_control_panel_platform.PLATFORM_SCHEMA_MODERN])\n', (1887, 1958), True, 'import voluptuous as vol\n'), ((2046, 2118), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[binary_sensor_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [binary_sensor_platform.PLATFORM_SCHEMA_MODERN])\n', (2053, 2118), True, 'import voluptuous as vol\n'), ((2199, 2264), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[button_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [button_platform.PLATFORM_SCHEMA_MODERN])\n', (2206, 2264), True, 'import voluptuous as vol\n'), ((2345, 2410), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[camera_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [camera_platform.PLATFORM_SCHEMA_MODERN])\n', (2352, 2410), True, 'import voluptuous as vol\n'), ((2492, 2558), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[climate_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [climate_platform.PLATFORM_SCHEMA_MODERN])\n', (2499, 2558), True, 'import voluptuous as vol\n'), ((2638, 2702), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cover_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [cover_platform.PLATFORM_SCHEMA_MODERN])\n', (2645, 2702), True, 'import voluptuous as vol\n'), ((2791, 2864), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[device_tracker_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [device_tracker_platform.PLATFORM_SCHEMA_MODERN])\n', (2798, 2864), True, 'import voluptuous as vol\n'), ((2942, 3004), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[fan_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [fan_platform.PLATFORM_SCHEMA_MODERN])\n', (2949, 3004), True, 'import voluptuous as vol\n'), ((3089, 3158), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[humidifier_platform.PLATFORM_SCHEMA_MODERN]'], 
{}), '(cv.ensure_list, [humidifier_platform.PLATFORM_SCHEMA_MODERN])\n', (3096, 3158), True, 'import voluptuous as vol\n'), ((3237, 3300), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[lock_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [lock_platform.PLATFORM_SCHEMA_MODERN])\n', (3244, 3300), True, 'import voluptuous as vol\n'), ((3380, 3444), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[light_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [light_platform.PLATFORM_SCHEMA_MODERN])\n', (3387, 3444), True, 'import voluptuous as vol\n'), ((3525, 3590), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[number_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [number_platform.PLATFORM_SCHEMA_MODERN])\n', (3532, 3590), True, 'import voluptuous as vol\n'), ((3670, 3734), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[scene_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [scene_platform.PLATFORM_SCHEMA_MODERN])\n', (3677, 3734), True, 'import voluptuous as vol\n'), ((3815, 3880), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[select_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [select_platform.PLATFORM_SCHEMA_MODERN])\n', (3822, 3880), True, 'import voluptuous as vol\n'), ((3961, 4026), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[sensor_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [sensor_platform.PLATFORM_SCHEMA_MODERN])\n', (3968, 4026), True, 'import voluptuous as vol\n'), ((4106, 4170), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[siren_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [siren_platform.PLATFORM_SCHEMA_MODERN])\n', (4113, 4170), True, 'import voluptuous as vol\n'), ((4251, 4316), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[switch_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [switch_platform.PLATFORM_SCHEMA_MODERN])\n', (4258, 4316), True, 'import voluptuous as vol\n'), ((4397, 4462), 'voluptuous.All', 'vol.All', 
(['cv.ensure_list', '[vacuum_platform.PLATFORM_SCHEMA_MODERN]'], {}), '(cv.ensure_list, [vacuum_platform.PLATFORM_SCHEMA_MODERN])\n', (4404, 4462), True, 'import voluptuous as vol\n'), ((4694, 4736), 'voluptuous.Inclusive', 'vol.Inclusive', (['ATTR_TOPIC', '"""topic_payload"""'], {}), "(ATTR_TOPIC, 'topic_payload')\n", (4707, 4736), True, 'import voluptuous as vol\n'), ((4767, 4811), 'voluptuous.Inclusive', 'vol.Inclusive', (['ATTR_PAYLOAD', '"""topic_payload"""'], {}), "(ATTR_PAYLOAD, 'topic_payload')\n", (4780, 4811), True, 'import voluptuous as vol\n'), ((4832, 4875), 'voluptuous.Optional', 'vol.Optional', (['ATTR_QOS'], {'default': 'DEFAULT_QOS'}), '(ATTR_QOS, default=DEFAULT_QOS)\n', (4844, 4875), True, 'import voluptuous as vol\n'), ((4904, 4953), 'voluptuous.Optional', 'vol.Optional', (['ATTR_RETAIN'], {'default': 'DEFAULT_RETAIN'}), '(ATTR_RETAIN, default=DEFAULT_RETAIN)\n', (4916, 4953), True, 'import voluptuous as vol\n'), ((5067, 5095), 'voluptuous.Optional', 'vol.Optional', (['CONF_CLIENT_ID'], {}), '(CONF_CLIENT_ID)\n', (5079, 5095), True, 'import voluptuous as vol\n'), ((5116, 5171), 'voluptuous.Optional', 'vol.Optional', (['CONF_KEEPALIVE'], {'default': 'DEFAULT_KEEPALIVE'}), '(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE)\n', (5128, 5171), True, 'import voluptuous as vol\n'), ((5248, 5273), 'voluptuous.Optional', 'vol.Optional', (['CONF_BROKER'], {}), '(CONF_BROKER)\n', (5260, 5273), True, 'import voluptuous as vol\n'), ((5294, 5317), 'voluptuous.Optional', 'vol.Optional', (['CONF_PORT'], {}), '(CONF_PORT)\n', (5306, 5317), True, 'import voluptuous as vol\n'), ((5336, 5363), 'voluptuous.Optional', 'vol.Optional', (['CONF_USERNAME'], {}), '(CONF_USERNAME)\n', (5348, 5363), True, 'import voluptuous as vol\n'), ((5384, 5411), 'voluptuous.Optional', 'vol.Optional', (['CONF_PASSWORD'], {}), '(CONF_PASSWORD)\n', (5396, 5411), True, 'import voluptuous as vol\n'), ((5432, 5462), 'voluptuous.Optional', 'vol.Optional', (['CONF_CERTIFICATE'], {}), 
'(CONF_CERTIFICATE)\n', (5444, 5462), True, 'import voluptuous as vol\n'), ((5500, 5574), 'voluptuous.Inclusive', 'vol.Inclusive', (['CONF_CLIENT_KEY', '"""client_key_auth"""'], {'msg': 'CLIENT_KEY_AUTH_MSG'}), "(CONF_CLIENT_KEY, 'client_key_auth', msg=CLIENT_KEY_AUTH_MSG)\n", (5513, 5574), True, 'import voluptuous as vol\n'), ((5617, 5692), 'voluptuous.Inclusive', 'vol.Inclusive', (['CONF_CLIENT_CERT', '"""client_key_auth"""'], {'msg': 'CLIENT_KEY_AUTH_MSG'}), "(CONF_CLIENT_CERT, 'client_key_auth', msg=CLIENT_KEY_AUTH_MSG)\n", (5630, 5692), True, 'import voluptuous as vol\n'), ((5735, 5766), 'voluptuous.Optional', 'vol.Optional', (['CONF_TLS_INSECURE'], {}), '(CONF_TLS_INSECURE)\n', (5747, 5766), True, 'import voluptuous as vol\n'), ((5788, 5818), 'voluptuous.Optional', 'vol.Optional', (['CONF_TLS_VERSION'], {}), '(CONF_TLS_VERSION)\n', (5800, 5818), True, 'import voluptuous as vol\n'), ((5866, 5919), 'voluptuous.Optional', 'vol.Optional', (['CONF_PROTOCOL'], {'default': 'DEFAULT_PROTOCOL'}), '(CONF_PROTOCOL, default=DEFAULT_PROTOCOL)\n', (5878, 5919), True, 'import voluptuous as vol\n'), ((6008, 6039), 'voluptuous.Optional', 'vol.Optional', (['CONF_WILL_MESSAGE'], {}), '(CONF_WILL_MESSAGE)\n', (6020, 6039), True, 'import voluptuous as vol\n'), ((6073, 6105), 'voluptuous.Optional', 'vol.Optional', (['CONF_BIRTH_MESSAGE'], {}), '(CONF_BIRTH_MESSAGE)\n', (6085, 6105), True, 'import voluptuous as vol\n'), ((6139, 6167), 'voluptuous.Optional', 'vol.Optional', (['CONF_DISCOVERY'], {}), '(CONF_DISCOVERY)\n', (6151, 6167), True, 'import voluptuous as vol\n'), ((6338, 6397), 'voluptuous.Optional', 'vol.Optional', (['CONF_DISCOVERY_PREFIX'], {'default': 'DEFAULT_PREFIX'}), '(CONF_DISCOVERY_PREFIX, default=DEFAULT_PREFIX)\n', (6350, 6397), True, 'import voluptuous as vol\n'), ((5464, 5490), 'voluptuous.Any', 'vol.Any', (['"""auto"""', 'cv.isfile'], {}), "('auto', cv.isfile)\n", (5471, 5490), True, 'import voluptuous as vol\n'), ((5820, 5856), 'voluptuous.Any', 'vol.Any', 
(['"""auto"""', '"""1.0"""', '"""1.1"""', '"""1.2"""'], {}), "('auto', '1.0', '1.1', '1.2')\n", (5827, 5856), True, 'import voluptuous as vol\n'), ((5194, 5209), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (5204, 5209), True, 'import voluptuous as vol\n'), ((5211, 5228), 'voluptuous.Range', 'vol.Range', ([], {'min': '(15)'}), '(min=15)\n', (5220, 5228), True, 'import voluptuous as vol\n'), ((5953, 5988), 'voluptuous.In', 'vol.In', (['[PROTOCOL_31, PROTOCOL_311]'], {}), '([PROTOCOL_31, PROTOCOL_311])\n', (5959, 5988), True, 'import voluptuous as vol\n')] |
import aiohttp
import discord
from discord.ext import commands
class Silphroad(commands.Cog):
"""
Commands related to Silphroad.
"""
def __init__(self, bot):
self.bot = bot
@commands.command(
aliases=["Silphcard", "Scard", "scard", "s-card", "S-card", "silph", "Silph", "Silphroad", "silphroad"])
async def silphcard(self, ctx, name: str):
"""
Shows information of a players silphcard.
Example: *!silphcard trnrtipsnick*
"""
await ctx.message.delete()
async with aiohttp.ClientSession() as client_session:
async with client_session.get(f"https://sil.ph/{name}.json") as response:
json = await response.json()
try:
json = json['data']
except:
embed = discord.Embed(title=f"Error", description=f"{json['error']}",
color=discord.Colour.dark_red())
await self.bot.say(embed=embed, delete_after=10)
return
username = json['in_game_username']
title = json['title']
playstyle = json['playstyle']
goal = json['goal']
team = json['team']
trainer_level = json['trainer_level']
nest_migrations = json['nest_migrations']
avatar_url = json['avatar']
joined = json['joined']
total_xp = json['xp']
home_region = json['home_region']
pokedex_count = json['pokedex_count']
raid_average = json['raid_average']
handshakes = json['handshakes']
checkins = len(json['checkins'])
badges = json['badges']
edited = json['modified']
top_6_pokemon_id = json['top_6_pokemon']
top_6_pokemon_name = ""
try:
for pokemon_id in top_6_pokemon_id:
pokemon_name = await self.bot.get_cog("Utils").get_pokemon_name("%03d" % ((pokemon_id),))
top_6_pokemon_name += "► " + pokemon_name + "\n"
# No favorite mons
except:
pass
embed = discord.Embed(title=f"{title} {username} in {home_region}", description=f"{playstyle}, {goal}",
color=discord.Colour.orange())
embed.add_field(name=":iphone: In-Game",
value=f"**► Level:** {trainer_level}\n**► Team:** {team}\n**► Pokedex:** {pokedex_count}\n**► XP:** {total_xp}\n**► Raids:** ~{raid_average} per week\n\u200b",
inline=True)
embed.add_field(name=":trophy: Silphroad",
value=f"**► Badges:** {len(badges)}\n**► Check-Ins:** {checkins}\n**► Handshakes:** {handshakes}\n**► Joined:** {joined[:10]}\n**► Nest-Migrations:** {nest_migrations}\n\u200b",
inline=True)
embed.add_field(name=":heartpulse: Favourite Pokémon", value=f"{top_6_pokemon_name}\n\u200b", inline=True)
embed.add_field(name=":military_medal: Latest Badge", value=f"► {badges[-1]['Badge']['name']}\n\u200b",
inline=False)
embed.set_thumbnail(url=avatar_url)
embed.set_image(url=f"{badges[-1]['Badge']['image']}")
embed.set_footer(text=f"The Silph Road ▪ Last edit {edited}",
icon_url="https://assets.thesilphroad.com/img/snoo_sr_icon.png")
await ctx.message.channel.send(embed=embed)
def setup(bot):
bot.add_cog(Silphroad(bot))
| [
"aiohttp.ClientSession",
"discord.Colour.orange",
"discord.Colour.dark_red",
"discord.ext.commands.command"
] | [((206, 331), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['Silphcard', 'Scard', 'scard', 's-card', 'S-card', 'silph', 'Silph',\n 'Silphroad', 'silphroad']"}), "(aliases=['Silphcard', 'Scard', 'scard', 's-card', 'S-card',\n 'silph', 'Silph', 'Silphroad', 'silphroad'])\n", (222, 331), False, 'from discord.ext import commands\n'), ((556, 579), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (577, 579), False, 'import aiohttp\n'), ((2188, 2211), 'discord.Colour.orange', 'discord.Colour.orange', ([], {}), '()\n', (2209, 2211), False, 'import discord\n'), ((914, 939), 'discord.Colour.dark_red', 'discord.Colour.dark_red', ([], {}), '()\n', (937, 939), False, 'import discord\n')] |
from zope.interface import implementer
from rtmlparse.irtml import ITemplate
from rtmlparse.elements import *
@implementer(ITemplate)
class SimplePhotometry(object):
name = "SimplePhotometry"
type = Setup
def __init__(self, element=None, rtml=None):
# define our elements
self.setup = None
self.Camera = None
self.Filter = None
self.Target = None
self.Exposure = None
self.ExposureConstraint = None
# existing or new?
if element is None:
# do we have rtml?
if rtml is None:
raise ValueError('Need RTML instance for creating new template.')
# create all necessary elements
self.create(rtml)
else:
# store it, just use the first matching elements found
self.setup = element
self.Camera = self.setup.find_first(Camera)
self.Target = self.setup.find_first(Target)
self.Filter = self.setup.find_first(Filter)
self.Exposure = self.setup.find_first(Exposure)
self.ExposureConstraint = self.setup.find_first(ExposureConstraint)
def create(self, rtml):
self.setup = Setup(rtml, name='template:SimplePhotometry')
self.Camera = Camera(self.setup)
self.Filter = Filter(self.setup)
self.Target = Target(self.setup)
self.Exposure = Exposure(self.setup)
self.ExposureConstraint = ExposureConstraint(self.setup)
| [
"zope.interface.implementer"
] | [((113, 135), 'zope.interface.implementer', 'implementer', (['ITemplate'], {}), '(ITemplate)\n', (124, 135), False, 'from zope.interface import implementer\n')] |
"""
某些网络数据在一定周期内不会发生变化。如股票代码列表。如果在
同一个会话中,使用funtools的cache机制,可以避免多次下载同
一数据。但计划任务程序中,由于每次运行会话不同,如果需要使
用网络数据,就需要多次从网络下载。数据代理类使用本地文件持
久化存储,在特点时点自动更新,确保数据时效性,同时减少网络
下载,提高运行效率。
数据代理主要用于计划任务程序。适用于每天变动,但在24小时内,
数据一直为静态的网络数据采集。如股票列表等。
也可用于频繁访问,但单次查询需要较长时间的数据提取。
读取代理数据,直接使用类的`read`方法,少数需要进一步转换。
好处:
1、避免当天重复下载同一网络数据
2、时间点数据统一
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import pickle
import pandas as pd
from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second
from hashlib import md5
from six import iteritems
import logbook
from cnswd.constants import MARKET_START
from cnswd.utils import data_root
logger = logbook.Logger(__name__)
TEMP_DIR = data_root('webcache')
DEFAULT_TIME_STR = '18:00:00' # 网站更新数据时间
DEFAULT_FREQ = 'D'
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(combined.encode('utf-8'))
return hasher.hexdigest()
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
Notes:
------
舍弃秒以下单位的数字
"""
return pd.Timestamp(int(os.path.getmtime(path)), unit='s', tz='Asia/Shanghai')
def next_update_time(last_updated, freq='D', hour=18, minute=0, second=0):
"""计算下次更新时间
说明:
'S':移动到下一秒
'm':移动到下一分钟
'H':移动到下一小时
'D':移动到下一天
'W':移动到下周一
'M':移动到下月第一天
'Q':下一季度的第一天
将时间调整到指定的hour和minute
"""
if pd.isnull(last_updated):
return MARKET_START
if freq == 'S':
off = Second()
return last_updated + off
elif freq == 'm':
off = Minute()
return last_updated + off
elif freq == 'H':
off = Hour()
return last_updated + off
elif freq == 'D':
d = BDay(n=1, normalize=True)
res = last_updated + d
return res.replace(hour=hour, minute=minute, second=second)
elif freq == 'W':
w = Week(normalize=True, weekday=0)
res = last_updated + w
return res.replace(hour=hour, minute=minute, second=second)
elif freq == 'M':
m = MonthBegin(n=1, normalize=True)
res = last_updated + m
return res.replace(hour=hour, minute=minute, second=second)
elif freq == 'Q':
q = QuarterBegin(normalize=True, startingMonth=1)
res = last_updated + q
return res.replace(hour=hour, minute=minute, second=second)
else:
raise TypeError('不能识别的周期类型,仅接受{}'.format(
('S', 'm', 'H', 'D', 'W', 'M', 'Q')))
class DataProxy(object):
def __init__(self, fetch_fun, time_str=None, freq=None):
self._fetch_fun = fetch_fun
if time_str:
self._time_str = time_str
else:
self._time_str = DEFAULT_TIME_STR
if freq:
self._freq = freq
else:
self._freq = DEFAULT_FREQ
# 验证
self._validate()
self._ensure_root_dir()
def _validate(self):
assert isinstance(self._freq, str), 'freq必须是str实例'
assert isinstance(self._time_str, str), 'time_str必须是str实例'
assert ':' in self._time_str, 'time_str要包含":"字符'
parts = self._time_str.split(':')
assert len(parts) == 3, '时间字符串格式为"小时:分钟:秒"'
assert hasattr(self._fetch_fun, '__call__'), '{}必须是函数'.format(
self._fetch_fun)
def _ensure_root_dir(self):
"""确保函数根目录存在"""
subdir = os.path.join(TEMP_DIR, self._fetch_fun.__name__)
if not os.path.exists(subdir):
os.makedirs(subdir)
return subdir
def get_cache_file_path(self, *args, **kwargs):
"""获取本地缓存文件路径"""
name = '{}'.format(hash_args(*args, **kwargs))
subdir = self._ensure_root_dir()
file_path = os.path.join(TEMP_DIR, subdir, name)
return file_path
def need_refresh(self, now, *args, **kwargs):
if now.tz is None:
now = now.tz_localize('Asia/Shanghai')
file_path = self.get_cache_file_path(*args, **kwargs)
if not os.path.exists(file_path):
return True
else:
last_time = last_modified_time(file_path)
next_time = self.expiration
# 如now介于二者之间,则无需要刷新
return not (last_time < now < next_time)
@property
def expiration(self):
"""将时间字符串转换为时间戳"""
parts = self._time_str.split(':')
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2])
now = pd.Timestamp('now', tz='Asia/Shanghai').normalize()
next_time = next_update_time(now, self._freq, hour, minute)
next_time = next_time.replace(hour=hour, minute=minute, second=second)
return next_time
def read(self, *args, **kwargs):
"""读取网页数据。如果存在本地数据,使用缓存;否则从网页下载。"""
now = pd.Timestamp('now', tz='Asia/Shanghai')
file_path = self.get_cache_file_path(*args, **kwargs)
download_from_web = self.need_refresh(now, *args, **kwargs)
if download_from_web:
try:
data = self._fetch_fun(*args, **kwargs)
with open(file_path, 'wb') as f:
pickle.dump(data, f)
except Exception as e:
raise e
with open(file_path, 'rb') as f:
return pickle.load(f)
| [
"pandas.tseries.offsets.Week",
"os.path.exists",
"logbook.Logger",
"pandas.tseries.offsets.Minute",
"hashlib.md5",
"pandas.tseries.offsets.MonthBegin",
"cnswd.utils.data_root",
"pickle.load",
"pandas.tseries.offsets.BDay",
"os.path.getmtime",
"pandas.isnull",
"pickle.dump",
"pandas.tseries.o... | [((745, 769), 'logbook.Logger', 'logbook.Logger', (['__name__'], {}), '(__name__)\n', (759, 769), False, 'import logbook\n'), ((782, 803), 'cnswd.utils.data_root', 'data_root', (['"""webcache"""'], {}), "('webcache')\n", (791, 803), False, 'from cnswd.utils import data_root\n'), ((1211, 1216), 'hashlib.md5', 'md5', ([], {}), '()\n', (1214, 1216), False, 'from hashlib import md5\n'), ((1803, 1826), 'pandas.isnull', 'pd.isnull', (['last_updated'], {}), '(last_updated)\n', (1812, 1826), True, 'import pandas as pd\n'), ((1890, 1898), 'pandas.tseries.offsets.Second', 'Second', ([], {}), '()\n', (1896, 1898), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n'), ((3757, 3805), 'os.path.join', 'os.path.join', (['TEMP_DIR', 'self._fetch_fun.__name__'], {}), '(TEMP_DIR, self._fetch_fun.__name__)\n', (3769, 3805), False, 'import os\n'), ((4093, 4129), 'os.path.join', 'os.path.join', (['TEMP_DIR', 'subdir', 'name'], {}), '(TEMP_DIR, subdir, name)\n', (4105, 4129), False, 'import os\n'), ((5140, 5179), 'pandas.Timestamp', 'pd.Timestamp', (['"""now"""'], {'tz': '"""Asia/Shanghai"""'}), "('now', tz='Asia/Shanghai')\n", (5152, 5179), True, 'import pandas as pd\n'), ((1464, 1486), 'os.path.getmtime', 'os.path.getmtime', (['path'], {}), '(path)\n', (1480, 1486), False, 'import os\n'), ((1969, 1977), 'pandas.tseries.offsets.Minute', 'Minute', ([], {}), '()\n', (1975, 1977), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n'), ((3821, 3843), 'os.path.exists', 'os.path.exists', (['subdir'], {}), '(subdir)\n', (3835, 3843), False, 'import os\n'), ((3857, 3876), 'os.makedirs', 'os.makedirs', (['subdir'], {}), '(subdir)\n', (3868, 3876), False, 'import os\n'), ((4361, 4386), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (4375, 4386), False, 'import os\n'), ((5622, 5636), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5633, 
5636), False, 'import pickle\n'), ((1126, 1143), 'six.iteritems', 'iteritems', (['kwargs'], {}), '(kwargs)\n', (1135, 1143), False, 'from six import iteritems\n'), ((2048, 2054), 'pandas.tseries.offsets.Hour', 'Hour', ([], {}), '()\n', (2052, 2054), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n'), ((4820, 4859), 'pandas.Timestamp', 'pd.Timestamp', (['"""now"""'], {'tz': '"""Asia/Shanghai"""'}), "('now', tz='Asia/Shanghai')\n", (4832, 4859), True, 'import pandas as pd\n'), ((2123, 2148), 'pandas.tseries.offsets.BDay', 'BDay', ([], {'n': '(1)', 'normalize': '(True)'}), '(n=1, normalize=True)\n', (2127, 2148), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n'), ((5482, 5502), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5493, 5502), False, 'import pickle\n'), ((2282, 2313), 'pandas.tseries.offsets.Week', 'Week', ([], {'normalize': '(True)', 'weekday': '(0)'}), '(normalize=True, weekday=0)\n', (2286, 2313), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n'), ((2447, 2478), 'pandas.tseries.offsets.MonthBegin', 'MonthBegin', ([], {'n': '(1)', 'normalize': '(True)'}), '(n=1, normalize=True)\n', (2457, 2478), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n'), ((2612, 2657), 'pandas.tseries.offsets.QuarterBegin', 'QuarterBegin', ([], {'normalize': '(True)', 'startingMonth': '(1)'}), '(normalize=True, startingMonth=1)\n', (2624, 2657), False, 'from pandas.tseries.offsets import BDay, Week, MonthBegin, QuarterBegin, Hour, Minute, Second\n')] |
import setuptools
setuptools.setup(
name="nix-cage",
version="0.1",
description="Sandboxed environments with bwarp and nix-shell",
scripts=["nix-cage"],
)
| [
"setuptools.setup"
] | [((19, 157), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""nix-cage"""', 'version': '"""0.1"""', 'description': '"""Sandboxed environments with bwarp and nix-shell"""', 'scripts': "['nix-cage']"}), "(name='nix-cage', version='0.1', description=\n 'Sandboxed environments with bwarp and nix-shell', scripts=['nix-cage'])\n", (35, 157), False, 'import setuptools\n')] |
"""
Example of a simple genetic algorithm based on DeepNEAT
Miikkulainen, Risto, et al. "Evolving deep neural networks."
Artificial Intelligence in the Age of Neural Networks and Brain Computing.
Academic Press, 2019. 293-312.
"""
import time
import traceback
import numpy as np
import torch.optim
from nord.design.metaheuristics.genetics.neat import Genome, Innovation
from nord.neural_nets import LocalEvaluator
from nord.utils import assure_reproducibility
assure_reproducibility()
# Genetic Algorithm Parameters
add_node_rate = 0.03
add_connection_rate = 0.05
mutation_rate = 0.5
crossover_rate = 0.75
generations = 10
population_sz = 5
tournament_sz = 2
# Evaluation parameters
EPOCHS = 1
dataset = 'cifar10' # can also be 'fashion-mnist'
output_file = '../results/genetic_cifar10.out'
def write_to_file(msg):
with open(output_file, 'a') as f:
f.write(msg)
f.write('\n')
write_to_file('Generation_No, Individual_No, Fitness, Genome')
# no_filters, dropout_rate, weight_scaling, kernel_size, max_pooling
layer_bound_types = [int, float, float, int, bool]
layer_bounds = [[32, 0.0, 0, 1, 0],
[256, 0.7, 2.0, 3, 1]]
evaluator = LocalEvaluator(torch.optim.Adam, {}, False)
cache = dict()
i = Innovation()
i.new_generation()
population = []
# Population initialization
for _ in range(population_sz):
g = Genome(layer_bound_types,
layer_bounds,
add_node_rate, add_connection_rate,
mutation_rate, i)
population.append(g)
for r in range(generations):
t = time.time()
i.new_generation()
# Evaluation
for j in range(len(population)):
g = population[j]
try:
if g not in cache:
print('Evaluating', g)
d = g.to_descriptor(dimensions=2)
loss, fitness, total_time = evaluator.descriptor_evaluate(
d, EPOCHS, data_percentage=1, dataset=dataset)
fitness = fitness['accuracy']
cache[g] = fitness
else:
fitness = cache[g]
g.connections.fitness = fitness
g.nodes.fitness = fitness
write_to_file(str((r, j, fitness, g)))
if fitness == 0:
print(g.__repr__())
except Exception:
traceback.print_exc()
print(g.__repr__())
continue
new_population = []
# Offspring Generation
for _ in range(population_sz//2):
pool_1 = np.random.choice(
population, size=tournament_sz, replace=False)
pool_2 = np.random.choice(
population, size=tournament_sz, replace=False)
parent_1 = np.argmax([f.nodes.fitness for f in pool_1])
parent_2 = np.argmax([f.nodes.fitness for f in pool_2])
parent_1 = pool_1[parent_1]
parent_2 = pool_2[parent_2]
offspring_1 = parent_1.crossover(parent_2)
offspring_2 = parent_2.crossover(parent_1)
new_population.append(offspring_1)
new_population.append(offspring_2)
population = new_population
| [
"nord.neural_nets.LocalEvaluator",
"nord.design.metaheuristics.genetics.neat.Innovation",
"numpy.random.choice",
"numpy.argmax",
"nord.utils.assure_reproducibility",
"nord.design.metaheuristics.genetics.neat.Genome",
"traceback.print_exc",
"time.time"
] | [((503, 527), 'nord.utils.assure_reproducibility', 'assure_reproducibility', ([], {}), '()\n', (525, 527), False, 'from nord.utils import assure_reproducibility\n'), ((1248, 1291), 'nord.neural_nets.LocalEvaluator', 'LocalEvaluator', (['torch.optim.Adam', '{}', '(False)'], {}), '(torch.optim.Adam, {}, False)\n', (1262, 1291), False, 'from nord.neural_nets import LocalEvaluator\n'), ((1313, 1325), 'nord.design.metaheuristics.genetics.neat.Innovation', 'Innovation', ([], {}), '()\n', (1323, 1325), False, 'from nord.design.metaheuristics.genetics.neat import Genome, Innovation\n'), ((1433, 1530), 'nord.design.metaheuristics.genetics.neat.Genome', 'Genome', (['layer_bound_types', 'layer_bounds', 'add_node_rate', 'add_connection_rate', 'mutation_rate', 'i'], {}), '(layer_bound_types, layer_bounds, add_node_rate, add_connection_rate,\n mutation_rate, i)\n', (1439, 1530), False, 'from nord.design.metaheuristics.genetics.neat import Genome, Innovation\n'), ((1646, 1657), 'time.time', 'time.time', ([], {}), '()\n', (1655, 1657), False, 'import time\n'), ((2622, 2685), 'numpy.random.choice', 'np.random.choice', (['population'], {'size': 'tournament_sz', 'replace': '(False)'}), '(population, size=tournament_sz, replace=False)\n', (2638, 2685), True, 'import numpy as np\n'), ((2718, 2781), 'numpy.random.choice', 'np.random.choice', (['population'], {'size': 'tournament_sz', 'replace': '(False)'}), '(population, size=tournament_sz, replace=False)\n', (2734, 2781), True, 'import numpy as np\n'), ((2818, 2862), 'numpy.argmax', 'np.argmax', (['[f.nodes.fitness for f in pool_1]'], {}), '([f.nodes.fitness for f in pool_1])\n', (2827, 2862), True, 'import numpy as np\n'), ((2883, 2927), 'numpy.argmax', 'np.argmax', (['[f.nodes.fitness for f in pool_2]'], {}), '([f.nodes.fitness for f in pool_2])\n', (2892, 2927), True, 'import numpy as np\n'), ((2431, 2452), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2450, 2452), False, 'import traceback\n')] |
import sys
import os
import random
import re
import time
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
from static_model import StaticModel
from dyna_model import DynamicModel
from data_utils import *
def init_command_line(argv):
from argparse import ArgumentParser
usage = "train"
description = ArgumentParser(usage)
description.add_argument("--w2v_file", type=str, default="./data/train_200e.w2v")
description.add_argument("--train_file", type=str, default="./data/train_sessions.txt")
description.add_argument("--max_context_size", type=int, default=9)
description.add_argument("--batch_size", type=int, default=80)
description.add_argument("--hidden_size", type=int, default=1024)
description.add_argument("--max_senten_len", type=int, default=15)
description.add_argument("--type_model", type=int, default=1)
description.add_argument("--lr", type=float, default=0.001)
description.add_argument("--weight_decay", type=float, default=1e-5)
description.add_argument("--dropout", type=float, default=0.5)
description.add_argument("--epochs", type=int, default=10)
description.add_argument("--teach_forcing", type=int, default=1)
description.add_argument("--shuffle", type=int, default=1)
description.add_argument("--print_every", type=int, default=200)
description.add_argument("--save_model", type=int, default=1)
description.add_argument("--weights", type=str, default=None)
return description.parse_args(argv)
opts = init_command_line(sys.argv[1:])
print ("Configure:")
print (" train_file:",opts.train_file)
print (" w2v_file:",opts.w2v_file)
print (" max_context_size:",opts.max_context_size)
print (" batch_size:",opts.batch_size)
print (" hidden_size:",opts.hidden_size)
print (" max_senten_len:",opts.max_senten_len)
if opts.type_model:
print (" static model")
else:
print (" dynamic model")
print (" learning rate:",opts.lr)
print (" weight_decay:",opts.weight_decay)
print (" dropout:",opts.dropout)
print (" epochs:",opts.epochs)
print (" teach_forcing:",opts.teach_forcing)
print (" shuffle:",opts.shuffle)
print (" print_every:",opts.print_every)
print (" save_model:",opts.save_model)
print (" weights:",opts.weights)
print ("")
def train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx):
loss = 0
model_optimizer.zero_grad()
list_pred = model(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,ini_idx)
for idx,reply_tensor in enumerate(reply_tensor_batch):
loss_s = criterion(list_pred[idx],Variable(reply_tensor).cuda())
loss += loss_s
loss.backward()
model_optimizer.step()
return loss.data[0]
def train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,epochs,
batch_size,max_senten_len,max_context_size,print_every,save_model,shuffle):
print ("start training...")
model.train()
state_loss = 10000.0
for ei in range(epochs):
print ("Iteration {}: ".format(ei+1))
epoch_loss = 0
every_loss = 0
t0 = time.time()
pairs_batches,num_batches = buildingPairsBatch(corpus_pairs,batch_size,shuffle=shuffle)
print ("num_batches:",num_batches)
idx_batch = 0
for reply_tensor_batch, contexts_tensor_batch, pad_matrix_batch in getTensorsPairsBatch(word2index,pairs_batches,max_context_size):
loss = train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx)
epoch_loss += loss
every_loss += loss
if (idx_batch+1)%print_every == 0:
every_avg_loss = every_loss/(max_senten_len*(idx_batch+1))
print ("{} batches finished, avg_loss:{}".format(idx_batch+1, every_avg_loss))
idx_batch += 1
epoch_avg_loss = epoch_loss/(max_senten_len*num_batches)
print ("epoch_avg_loss:",epoch_avg_loss)
if save_model and epoch_avg_loss < state_loss:
print ("save model...")
if opts.type_model:
torch.save(model.state_dict(), "./static_parameters_IterEnd")
else:
torch.save(model.state_dict(), "./dyna_parameters_IterEnd")
state_loss = epoch_avg_loss
print ("Iteration time:",time.time()-t0)
print ("=============================================" )
print ("")
if __name__ == '__main__':
ini_char = '</i>'
unk_char = '<unk>'
t0 = time.time()
print ("loading word2vec...")
ctable = W2vCharacterTable(opts.w2v_file,ini_char,unk_char)
print(" dict size:",ctable.getDictSize())
print (" emb size:",ctable.getEmbSize())
print ("")
ctable,corpus_pairs = readingData(ctable,opts.train_file,opts.max_senten_len,opts.max_context_size)
print (time.time()-t0)
print ("")
if opts.type_model:
model = StaticModel(ctable.getDictSize(),ctable.getEmbSize(),opts.hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing).cuda()
else:
model = DynamicModel(ctable.getDictSize(),ctable.getEmbSize(),opts.hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing).cuda()
if opts.weights != None:
print ("load weights...")
model.load_state_dict(torch.load(opts.weights))
else:
model.init_parameters(ctable.getEmbMatrix())
model_optimizer = optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
criterion = nn.NLLLoss()
print ("")
word2index = ctable.getWord2Index()
ini_idx = word2index[ini_char]
train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,opts.epochs,opts.batch_size,
opts.max_senten_len,opts.max_context_size,opts.print_every,opts.save_model,opts.shuffle)
print ("")
| [
"argparse.ArgumentParser",
"torch.load",
"torch.nn.NLLLoss",
"torch.autograd.Variable",
"time.time"
] | [((366, 387), 'argparse.ArgumentParser', 'ArgumentParser', (['usage'], {}), '(usage)\n', (380, 387), False, 'from argparse import ArgumentParser\n'), ((4348, 4359), 'time.time', 'time.time', ([], {}), '()\n', (4357, 4359), False, 'import time\n'), ((5335, 5347), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (5345, 5347), True, 'import torch.nn as nn\n'), ((3099, 3110), 'time.time', 'time.time', ([], {}), '()\n', (3108, 3110), False, 'import time\n'), ((4667, 4678), 'time.time', 'time.time', ([], {}), '()\n', (4676, 4678), False, 'import time\n'), ((5142, 5166), 'torch.load', 'torch.load', (['opts.weights'], {}), '(opts.weights)\n', (5152, 5166), False, 'import torch\n'), ((4180, 4191), 'time.time', 'time.time', ([], {}), '()\n', (4189, 4191), False, 'import time\n'), ((2627, 2649), 'torch.autograd.Variable', 'Variable', (['reply_tensor'], {}), '(reply_tensor)\n', (2635, 2649), False, 'from torch.autograd import Variable\n')] |
"""
Handles the operation of the options menu.
@author: RichardFlanagan - A00193644
@version: 16 April 2014
"""
import pygame
import sys
class OptionsMenu():
def __init__(self, params, debugParam):
"""
Initialize variables.
@param params: The list of parameter objects.
"""
(self.screen, self.FPS, self.clock, self.music, self.sfx) = params
self.DEBUG = debugParam
def run(self):
"""
Runs the options menu screen.
@return: Exit status. Signals what screen comes next.
"""
pygame.mouse.set_visible(True)
background = pygame.Surface(self.screen.get_size())
background.fill((0, 0, 0))
self.screen.blit(background, (0, 0))
# Load button images.
if self.music.playMusic:
musicButton = pygame.image.load("../res/images/b_musicOn.png")
else:
musicButton = pygame.image.load("../res/images/b_musicOff.png")
if self.sfx.playSFX:
sfxButton = pygame.image.load("../res/images/b_soundOn.png")
else:
sfxButton = pygame.image.load("../res/images/b_soundOff.png")
if self.DEBUG:
debugButton = pygame.image.load("../res/images/b_debugOn.png")
else:
debugButton = pygame.image.load("../res/images/b_debugOff.png")
backButton = pygame.image.load("../res/images/b_back.png")
while True:
self.clock.tick(self.FPS)
for event in pygame.event.get():
# Close the window.
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
# Click on screen.
if event.type == pygame.MOUSEBUTTONDOWN:
# Music Play-back.
if (self.testMouse(self.screen.get_width()/2-100, 200, 200, 50)):
if self.music.playMusic == True:
musicButton = pygame.image.load("../res/images/b_musicOff.png")
self.music.toggleMusic()
elif self.music.playMusic == False:
musicButton = pygame.image.load("../res/images/b_musicOn.png")
self.music.toggleMusic()
self.sfx.play(self.sfx.button)
# Sound Effect Play-back.
if (self.testMouse(self.screen.get_width()/2-100, 300, 200, 50)):
if self.sfx.playSFX == True:
sfxButton = pygame.image.load("../res/images/b_soundOff.png")
self.sfx.toggleSFX()
elif self.sfx.playSFX == False:
sfxButton = pygame.image.load("../res/images/b_soundOn.png")
self.sfx.toggleSFX()
self.sfx.play(self.sfx.button)
# Debug mode.
if (self.testMouse(self.screen.get_width()/2-100, 400, 200, 50)):
if self.DEBUG == True:
debugButton = pygame.image.load("../res/images/b_debugOff.png")
self.DEBUG = False
elif self.DEBUG == False:
debugButton = pygame.image.load("../res/images/b_debugOn.png")
self.DEBUG = True
self.sfx.play(self.sfx.button)
# Back to menu.
elif (self.testMouse(self.screen.get_width()-250, 650, 200, 50)):
self.sfx.play(self.sfx.button)
return (0, self.DEBUG)
# Keyboard input.
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit(0)
self.printText()
self.screen.blit(musicButton, (self.screen.get_width()/2-100,200))
self.screen.blit(sfxButton, (self.screen.get_width()/2-100,300))
self.screen.blit(debugButton, (self.screen.get_width()/2-100,400))
self.screen.blit(backButton, (self.screen.get_width()-250, 650))
pygame.display.flip()
return (0, self.DEBUG)
def testMouse(self, x, y, width, height):
"""
Tests to see if the mouse is within the defined box.
@param x: The x position of the top left point of the box.
@param y: The y position of the top left point of the box.
@param width: The width of the box.
@param height: The height of the box.
@return: True if mouse is in box, false if not.
"""
if (pygame.mouse.get_pos()[0] > x
and pygame.mouse.get_pos()[0] < x+width
and pygame.mouse.get_pos()[1] > y
and pygame.mouse.get_pos()[1] < y+height):
return True
else:
return False
def printText(self):
"""
Print text to the screen.
"""
font1 = pygame.font.SysFont("COURIER", 32)
t = ("Options")
self.screen.blit(font1.render(t, 0, (0, 255, 0)), (self.screen.get_width()/2-font1.size(t)[0]/2, 50))
| [
"pygame.quit",
"pygame.event.get",
"pygame.display.flip",
"pygame.mouse.get_pos",
"pygame.mouse.set_visible",
"sys.exit",
"pygame.image.load",
"pygame.font.SysFont"
] | [((580, 610), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (604, 610), False, 'import pygame\n'), ((1383, 1428), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_back.png"""'], {}), "('../res/images/b_back.png')\n", (1400, 1428), False, 'import pygame\n'), ((5113, 5147), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""COURIER"""', '(32)'], {}), "('COURIER', 32)\n", (5132, 5147), False, 'import pygame\n'), ((842, 890), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_musicOn.png"""'], {}), "('../res/images/b_musicOn.png')\n", (859, 890), False, 'import pygame\n'), ((931, 980), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_musicOff.png"""'], {}), "('../res/images/b_musicOff.png')\n", (948, 980), False, 'import pygame\n'), ((1035, 1083), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_soundOn.png"""'], {}), "('../res/images/b_soundOn.png')\n", (1052, 1083), False, 'import pygame\n'), ((1122, 1171), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_soundOff.png"""'], {}), "('../res/images/b_soundOff.png')\n", (1139, 1171), False, 'import pygame\n'), ((1222, 1270), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_debugOn.png"""'], {}), "('../res/images/b_debugOn.png')\n", (1239, 1270), False, 'import pygame\n'), ((1311, 1360), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_debugOff.png"""'], {}), "('../res/images/b_debugOff.png')\n", (1328, 1360), False, 'import pygame\n'), ((1514, 1532), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1530, 1532), False, 'import pygame\n'), ((4278, 4299), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4297, 4299), False, 'import pygame\n'), ((1636, 1649), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1647, 1649), False, 'import pygame\n'), ((1670, 1681), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1678, 1681), False, 'import sys\n'), ((4760, 
4782), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4780, 4782), False, 'import pygame\n'), ((4806, 4828), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4826, 4828), False, 'import pygame\n'), ((4858, 4880), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4878, 4880), False, 'import pygame\n'), ((4904, 4926), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4924, 4926), False, 'import pygame\n'), ((2000, 2049), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_musicOff.png"""'], {}), "('../res/images/b_musicOff.png')\n", (2017, 2049), False, 'import pygame\n'), ((2588, 2637), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_soundOff.png"""'], {}), "('../res/images/b_soundOff.png')\n", (2605, 2637), False, 'import pygame\n'), ((3146, 3195), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_debugOff.png"""'], {}), "('../res/images/b_debugOff.png')\n", (3163, 3195), False, 'import pygame\n'), ((3873, 3886), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3884, 3886), False, 'import pygame\n'), ((3911, 3922), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3919, 3922), False, 'import sys\n'), ((2205, 2253), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_musicOn.png"""'], {}), "('../res/images/b_musicOn.png')\n", (2222, 2253), False, 'import pygame\n'), ((2783, 2831), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_soundOn.png"""'], {}), "('../res/images/b_soundOn.png')\n", (2800, 2831), False, 'import pygame\n'), ((3335, 3383), 'pygame.image.load', 'pygame.image.load', (['"""../res/images/b_debugOn.png"""'], {}), "('../res/images/b_debugOn.png')\n", (3352, 3383), False, 'import pygame\n')] |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
from transformers.data.data_collator import PaddingStrategy
import torch
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
"""
A DataCollator is a function that takes a list of samples from a Dataset and collate them into a batch, as a dictionary
of Tensors.
"""
@dataclass
class DataCollatorForTargetedMasking:
"""
Data collator used for random masking of targeted classes of token.
Useful for learning language models based on masking part-of-speech tokens.
Instead of masking any random token as in MLM, only token that belong to a defined class are masked.
Inputs, labels and masks are dynamically padded to the maximum length of a batch if they are not all of the same length.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
mlm_probability (:obj:`float`, `optional`, defaults to 1.0):
The probability with which to mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.
.. note::
This data collator expects a dataset having items that are dictionaries
with the "special_tokens_mask" and "pos_mask" keys.
"""
tokenizer: PreTrainedTokenizerBase
mlm_probability: float = 1.0
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __post_init__(self):
if self.tokenizer.mask_token_id is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
)
def __call__(self, features) -> Dict[str, torch.Tensor]:
"""
In addition to input_ids, a feature 'tag_mask' needs to be provided to specify which token might be masked.
"""
tag_mask = [feature['tag_mask'] for feature in features] if 'tag_mask' in features[0].keys() else None
if tag_mask is None:
raise ValueError(
"A mask should be provided to indicate which input token class to mask."
)
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of
)
batch['tag_mask'] = tag_mask
sequence_length = len(batch["input_ids"][0])
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["tag_mask"] = [
x + [0] * (sequence_length - len(x)) for x in batch["tag_mask"]
]
else:
batch["tag_mask"] = [
[0] * (sequence_length - len(x)) + x for x in batch["tag_mask"]
]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
batch["input_ids"], batch["labels"] = self.tag_mask_tokens(batch["input_ids"], batch["tag_mask"])
batch.pop("tag_mask")
return batch
def tag_mask_tokens(self, inputs: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Masks the input as specified by the tag mask prepared by the loader"""
targets = inputs.clone()
# create and initialize the probability matric for masking to zeros
probability_matrix = torch.zeros_like(targets, dtype=torch.float64)
# update in-place probability to the set mlm_probability value where mask is true
probability_matrix.masked_fill_(mask.bool(), value=self.mlm_probability)
# use the probability at each position to randomly mask or not
masked_indices = torch.bernoulli(probability_matrix).bool()
# reolace input_ids by the mask token id at position that need to be masked
inputs[masked_indices] = self.tokenizer.mask_token_id
# we train to only predict the masked position
targets[~masked_indices] = -100 # We only compute loss on masked tokens
return inputs, targets
| [
"torch.tensor",
"torch.zeros_like",
"torch.bernoulli"
] | [((4070, 4116), 'torch.zeros_like', 'torch.zeros_like', (['targets'], {'dtype': 'torch.float64'}), '(targets, dtype=torch.float64)\n', (4086, 4116), False, 'import torch\n'), ((3520, 3554), 'torch.tensor', 'torch.tensor', (['v'], {'dtype': 'torch.int64'}), '(v, dtype=torch.int64)\n', (3532, 3554), False, 'import torch\n'), ((4384, 4419), 'torch.bernoulli', 'torch.bernoulli', (['probability_matrix'], {}), '(probability_matrix)\n', (4399, 4419), False, 'import torch\n')] |
#!flask/bin/python
import flask.ext.whooshalchemy
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_appconfig import AppConfig
from flask.ext.sqlalchemy import SQLAlchemy
from app.config import SQLALCHEMY_DATABASE_URI, SECRET_KEY, WHOOSH_BASE
app = Flask(__name__)
AppConfig(app)
Bootstrap(app)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
app.config['WHOOSH_BASE'] = WHOOSH_BASE
db = SQLAlchemy(app)
from app import views, models
flask.ext.whooshalchemy.whoosh_index(app, models.Produs) | [
"flask_bootstrap.Bootstrap",
"flask.ext.sqlalchemy.SQLAlchemy",
"flask_appconfig.AppConfig",
"flask.Flask"
] | [((278, 293), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'from flask import Flask\n'), ((294, 308), 'flask_appconfig.AppConfig', 'AppConfig', (['app'], {}), '(app)\n', (303, 308), False, 'from flask_appconfig import AppConfig\n'), ((309, 323), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['app'], {}), '(app)\n', (318, 323), False, 'from flask_bootstrap import Bootstrap\n'), ((471, 486), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (481, 486), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-15 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0047_auto_20160614_1201'),
]
operations = [
migrations.AlterModelOptions(
name='projecttemplate',
options={'ordering': ['order', 'name'], 'verbose_name': 'project template', 'verbose_name_plural': 'project templates'},
),
migrations.AddField(
model_name='projecttemplate',
name='order',
field=models.IntegerField(default=10000, verbose_name='user order'),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.IntegerField"
] | [((300, 482), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""projecttemplate"""', 'options': "{'ordering': ['order', 'name'], 'verbose_name': 'project template',\n 'verbose_name_plural': 'project templates'}"}), "(name='projecttemplate', options={'ordering': [\n 'order', 'name'], 'verbose_name': 'project template',\n 'verbose_name_plural': 'project templates'})\n", (328, 482), False, 'from django.db import migrations, models\n'), ((625, 686), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(10000)', 'verbose_name': '"""user order"""'}), "(default=10000, verbose_name='user order')\n", (644, 686), False, 'from django.db import migrations, models\n')] |
import os
import sys
from copy import copy
from contextlib import contextmanager
from subprocess import Popen, PIPE
@contextmanager
def cd(path):
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_dir)
@contextmanager
def sysargs(args):
sys_argv = copy(sys.argv)
sys.argv[1:] = args
try:
yield
finally:
sys.argv = sys_argv
def generate_doxygen(doxygen_input):
    """
    Run ``doxygen``, feeding *doxygen_input* on stdin — unless it is the
    literal string ``"Doxyfile"``, in which case doxygen reads its default
    configuration file from disk.

    Borrowed from exhale.

    Returns ``None`` on success, or an error-message string on failure
    (wrong input type, doxygen missing, or a non-zero doxygen exit code).
    """
    if not isinstance(doxygen_input, str):
        return "Error: the `doxygen_input` variable must be of type `str`."

    doxyfile = doxygen_input == "Doxyfile"
    try:
        # Setup the arguments to launch doxygen
        if doxyfile:
            args = ["doxygen"]
            kwargs = {}
        else:
            args = ["doxygen", "-"]
            kwargs = {"stdin": PIPE}

        # Note: overload of args / kwargs, Popen is expecting a list as the
        # first parameter (aka no *args, just args)!
        doxygen_proc = Popen(args, **kwargs)

        # Communicate can only be called once, arrange whether or not stdin has
        # value
        if not doxyfile:
            # In Py3, make sure we are communicating a bytes-like object which
            # is no longer interchangeable with strings (as was the case in Py2).
            # Fixed: the original compared sys.version[0] == "3", which is a
            # fragile first-character string test; version_info is the idiom.
            if sys.version_info[0] >= 3:
                doxygen_input = bytes(doxygen_input, "utf-8")
            comm_kwargs = {"input": doxygen_input}
        else:
            comm_kwargs = {}

        # Waits until doxygen has completed
        doxygen_proc.communicate(**comm_kwargs)

        # Make sure we had a valid execution of doxygen
        exit_code = doxygen_proc.returncode
        if exit_code != 0:
            raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code))
    except Exception as e:
        return "Unable to execute 'doxygen': {0}".format(e)

    # returning None signals _success_
    return None
| [
"os.chdir",
"subprocess.Popen",
"copy.copy",
"os.getcwd"
] | [((171, 182), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (180, 182), False, 'import os\n'), ((188, 202), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (196, 202), False, 'import os\n'), ((326, 340), 'copy.copy', 'copy', (['sys.argv'], {}), '(sys.argv)\n', (330, 340), False, 'from copy import copy\n'), ((251, 268), 'os.chdir', 'os.chdir', (['old_dir'], {}), '(old_dir)\n', (259, 268), False, 'import os\n'), ((1074, 1095), 'subprocess.Popen', 'Popen', (['args'], {}), '(args, **kwargs)\n', (1079, 1095), False, 'from subprocess import Popen, PIPE\n')] |
import pymysql
import pandas as pd
import pickle
import numpy as np
import databaseInfo as db
# NOTE(review): these two constants appear unused in this module — the query
# functions below read all credentials from databaseInfo (db.*). Confirm no
# other module imports them before removing.
databaseName = 'root'
databasePasswd = '<PASSWORD>'
def user_info_query(user_id):
    """Fetch (UserID, Gender, Age, JobID) for *user_id* from the User table.

    Gender is re-encoded to 0/1 ('F'/'M') and Age to a dense integer index,
    mirroring the training-time preprocessing of the recommender model.

    Falls back to the first row when *user_id* is not present (original
    behaviour preserved).
    """
    conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName,
                           password=db.databasePasswd, database=db.databaseName)
    try:
        users = pd.read_sql("select * from User", conn)
    finally:
        # Fixed: the connection previously leaked if read_sql raised.
        conn.close()

    users = users.filter(regex='UserID|Gender|Age|JobID')
    # Re-encode gender and age exactly as done for the model's inputs.
    gender_map = {'F': 0, 'M': 1}
    users['Gender'] = users['Gender'].map(gender_map)
    age_map = {val: ii for ii, val in enumerate(set(users['Age']))}
    users['Age'] = users['Age'].map(age_map)
    users_list = users.values

    # Locate the row whose UserID matches; defaults to row 0 when absent.
    num_line = 0
    for index in range(len(users_list)):
        if int(users_list[index][0]) == user_id:
            num_line = index
            break
    row = users_list[num_line]
    return row[0], row[1], row[2], row[3]
# print(user_info_query(4))
def movie_info_query(user_id):
    """Build model-ready feature triples for each movie in *user_id*'s watch history.

    Returns a list of [movie_id, movie_categories, movie_titles] entries; the
    category and title vectors are shaped for the network inputs using the
    pickled preprocessing artefacts from 'preprocess.p'.
    """
    conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)
    cur = conn.cursor()
    # Load the artefacts produced by the training-time preprocessing step.
    title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(
        open('preprocess.p', mode='rb'))
    sentences_size = title_count
    # Map movie ID -> row index; dataset IDs are not aligned with row numbers
    # (e.g. the movie in row 5 does not necessarily have ID 5).
    movieid2idx = {val[0]: i for i, val in enumerate(movies.values)}
    movies_id_list = pd.read_sql("select MovieID from watch_history where UserID={}".format(user_id), conn)
    # print(movies.head())
    movies_id_list = movies_id_list.values
    # print(movies_id_list)
    history_movie_feature_list = []
    for i in range(len(movies_id_list)):
        movie_feature = []
        movie_id_val = movies_id_list[i][0]
        # print(movie_id_val)
        # NOTE(review): 18 presumably matches the number of genre slots used
        # at training time — confirm against the preprocessing code.
        categories = np.zeros([1, 18])
        categories[0] = movies.values[movieid2idx[movie_id_val]][2]
        titles = np.zeros([1, sentences_size])
        titles[0] = movies.values[movieid2idx[movie_id_val]][1]
        movie_id = np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1])
        movie_categories = categories
        movie_titles = titles
        movie_feature.append(movie_id)
        movie_feature.append(movie_categories)
        movie_feature.append(movie_titles)
        history_movie_feature_list.append(movie_feature)
    cur.close()  # release resources
    conn.close()
    return history_movie_feature_list
# print(movie_info_query(1))
| [
"pandas.read_sql",
"numpy.zeros",
"pymysql.connect",
"numpy.reshape"
] | [((189, 314), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'db.databaseAddress', 'user': 'db.databaseLoginName', 'password': 'db.databasePasswd', 'database': 'db.databaseName'}), '(host=db.databaseAddress, user=db.databaseLoginName,\n password=db.databasePasswd, database=db.databaseName)\n', (204, 314), False, 'import pymysql\n'), ((347, 386), 'pandas.read_sql', 'pd.read_sql', (['"""select * from User"""', 'conn'], {}), "('select * from User', conn)\n", (358, 386), True, 'import pandas as pd\n'), ((1211, 1336), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'db.databaseAddress', 'user': 'db.databaseLoginName', 'password': 'db.databasePasswd', 'database': 'db.databaseName'}), '(host=db.databaseAddress, user=db.databaseLoginName,\n password=db.databasePasswd, database=db.databaseName)\n', (1226, 1336), False, 'import pymysql\n'), ((2091, 2108), 'numpy.zeros', 'np.zeros', (['[1, 18]'], {}), '([1, 18])\n', (2099, 2108), True, 'import numpy as np\n'), ((2194, 2223), 'numpy.zeros', 'np.zeros', (['[1, sentences_size]'], {}), '([1, sentences_size])\n', (2202, 2223), True, 'import numpy as np\n'), ((2307, 2367), 'numpy.reshape', 'np.reshape', (['movies.values[movieid2idx[movie_id_val]][0]', '[1]'], {}), '(movies.values[movieid2idx[movie_id_val]][0], [1])\n', (2317, 2367), True, 'import numpy as np\n')] |
"""
This file downloads the latest data for Myanmar
"""
import pandas as pd
from autumn.settings import INPUT_DATA_PATH
from pathlib import Path
# Normalise the configured input directory to a pathlib.Path.
INPUT_DATA_PATH = Path(INPUT_DATA_PATH)
# Local cache file for the downloaded case sheet.
COVID_MMR_TESTING_CSV = INPUT_DATA_PATH / "covid_mmr" / "cases.csv"
# Public Google Sheets CSV export of the Myanmar case data.
URL = "https://docs.google.com/spreadsheets/d/1VeUof9_-s0bsndo8tLsCwnAhkUUZgsdV-r980gumMPA/export?format=csv&id=1VeUof9_-s0bsndo8tLsCwnAhkUUZgsdV-r980gumMPA"
def fetch_covid_mmr_data():
    """Download the latest Myanmar COVID-19 case sheet and save it to the local cache CSV."""
    frame = pd.read_csv(URL)
    frame.to_csv(COVID_MMR_TESTING_CSV)
| [
"pandas.read_csv",
"pathlib.Path"
] | [((164, 185), 'pathlib.Path', 'Path', (['INPUT_DATA_PATH'], {}), '(INPUT_DATA_PATH)\n', (168, 185), False, 'from pathlib import Path\n'), ((458, 474), 'pandas.read_csv', 'pd.read_csv', (['URL'], {}), '(URL)\n', (469, 474), True, 'import pandas as pd\n')] |
"""
Dual Doppler Lobe Utility
------------------------------------------------------
Example for using the utily to plot up dual doppler lobes.
Can easily be used on cartopy maps using
code-block:: python
tiler = Stamen('terrain-background')
mercator = tiler.crs
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=mercator)
ax.add_image(tiler, 8)
ax.coastlines('10m')
"""
import radtraq
import matplotlib.pyplot as plt
# Two radar sites (lat/lon in degrees) for which to compute dual-Doppler lobes.
d = {'Cullman': {'lat': 34.26274649951493, 'lon': -86.85874523934974},
     'Courtland': {'lat': 34.658302981847655, 'lon': -87.34389529761859}}

# Compute the lobe outlines; returns per-lobe lat/lon point arrays.
data = radtraq.utils.calculate_dual_dop_lobes(d)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)

# Plot each lobe as black dots.
for dat in data.keys():
    ax.plot(data[dat]['lon'], data[dat]['lat'], 'k.')

# Mark and label the radar sites themselves.
for s in d:
    ax.plot(d[s]['lon'], d[s]['lat'], 'k*', ms=14)
    plt.text(d[s]['lon'], d[s]['lat'], s)
plt.show()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"radtraq.utils.calculate_dual_dop_lobes",
"matplotlib.pyplot.show"
] | [((611, 652), 'radtraq.utils.calculate_dual_dop_lobes', 'radtraq.utils.calculate_dual_dop_lobes', (['d'], {}), '(d)\n', (649, 652), False, 'import radtraq\n'), ((660, 672), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (670, 672), True, 'import matplotlib.pyplot as plt\n'), ((887, 897), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (895, 897), True, 'import matplotlib.pyplot as plt\n'), ((848, 885), 'matplotlib.pyplot.text', 'plt.text', (["d[s]['lon']", "d[s]['lat']", 's'], {}), "(d[s]['lon'], d[s]['lat'], s)\n", (856, 885), True, 'import matplotlib.pyplot as plt\n')] |
import json
import os
import time
def get_cache_path():
    """Return the path of the package-list cache file inside the user's home directory."""
    return os.path.expanduser("~") + '/package_list.cdncache'
def time_has_passed(last_time, time_now):
    """Return True when more than *time_now* seconds have elapsed since *last_time*.

    Either argument being None also returns True (forces a cache refresh).
    NOTE(review): despite its name, *time_now* is used here as a maximum
    cache age in seconds — confirm against callers.

    Fixed: removed leftover debug print() calls that polluted stdout.
    """
    if time_now is None or last_time is None:
        return True
    # Seconds elapsed since the cached timestamp.
    time_difference = int(time.time()) - int(last_time)
    return time_difference > int(time_now)
def get_package_list(path):
    """Read and return the cached package list (JSON) stored at *path*."""
    with open(path, 'r') as handle:
        contents = handle.read()
    return json.loads(contents)
def set_package_list(path, packageList):
    """Serialise *packageList* as JSON and write it to *path* (overwriting)."""
    serialized = json.dumps(packageList)
    with open(path, 'w') as out:
        out.write(serialized)
"json.dumps",
"time.time",
"os.path.expanduser"
] | [((68, 91), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (86, 91), False, 'import os\n'), ((314, 325), 'time.time', 'time.time', ([], {}), '()\n', (323, 325), False, 'import time\n'), ((722, 745), 'json.dumps', 'json.dumps', (['packageList'], {}), '(packageList)\n', (732, 745), False, 'import json\n')] |
#!/usr/bin/python
# Classification (U)
"""Program: gitmerge_get_untracked.py
Description: Unit testing of gitmerge.get_untracked in git_class.py.
Usage:
test/unit/git_class/gitmerge_get_untracked.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import collections
# Local
sys.path.append(os.getcwd())
import git_class
import version
__version__ = version.__version__
class Index(object):

    """Class: Index

    Description: Stub standing in for git.gitrepo.index in unit tests.

    Methods:
        __init__

    """

    def __init__(self):

        """Function: __init__

        Description: No-op initializer for the stub.

        Arguments:

        """
class Diff(Index):

    """Class: Diff

    Description: Stub standing in for git.gitrepo.index.diff in unit tests.

    Methods:
        __init__
        add
        commit

    """

    def __init__(self):

        """Function: __init__

        Description: Initialize the recorded-call attributes to None.

        Arguments:

        """

        super(Diff, self).__init__()
        self.new_files = None
        self.msg = None

    def add(self, new_files):

        """Method: add

        Description: Record *new_files*, mimicking git.gitrepo.index.add().

        Arguments:

        """

        self.new_files = new_files

        return True

    def commit(self, msg):

        """Method: commit

        Description: Record *msg*, mimicking git.gitrepo.index.commit().

        Arguments:

        """

        self.msg = msg

        return True
class UnitTest(unittest.TestCase):

    """Class: UnitTest

    Description: Class which is a representation of a unit testing.

    Methods:
        setUp
        test_process_data_list
        test_process_empty_list

    """

    def setUp(self):

        """Function: setUp

        Description: Initialization for unit testing.

        Arguments:

        """

        # Fixture values only; no real git repository is touched.
        self.repo_name = "Repo_name"
        self.git_dir = "/directory/git"
        self.url = "URL"
        self.branch = "Remote_branch"
        self.mod_branch = "Mod_branch"
        self.gitr = git_class.GitMerge(self.repo_name, self.git_dir, self.url,
                                       self.branch, self.mod_branch)
        self.new_list1 = []
        self.new_list2 = ["file1"]

    def test_process_data_list(self):

        """Function: test_process_data_list

        Description: Test with data in list set.

        Arguments:

        """

        # Fake gitrepo exposing .index and .untracked_files, as GitMerge expects.
        giti = collections.namedtuple('GIT', 'index untracked_files')
        diff = Diff()
        self.gitr.gitrepo = giti(diff, self.new_list2)
        self.gitr.get_untracked()

        # get_untracked() should copy untracked_files into new_files.
        self.assertEqual(self.gitr.new_files, self.new_list2)

    def test_process_empty_list(self):

        """Function: test_process_empty_list

        Description: Test with empty list set.

        Arguments:

        """

        giti = collections.namedtuple('GIT', 'index untracked_files')
        diff = Diff()
        self.gitr.gitrepo = giti(diff, self.new_list1)
        self.gitr.get_untracked()

        self.assertEqual(self.gitr.new_files, self.new_list1)


if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"collections.namedtuple",
"git_class.GitMerge",
"os.getcwd"
] | [((457, 468), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (466, 468), False, 'import os\n'), ((3309, 3324), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3322, 3324), False, 'import unittest\n'), ((2254, 2346), 'git_class.GitMerge', 'git_class.GitMerge', (['self.repo_name', 'self.git_dir', 'self.url', 'self.branch', 'self.mod_branch'], {}), '(self.repo_name, self.git_dir, self.url, self.branch,\n self.mod_branch)\n', (2272, 2346), False, 'import git_class\n'), ((2631, 2685), 'collections.namedtuple', 'collections.namedtuple', (['"""GIT"""', '"""index untracked_files"""'], {}), "('GIT', 'index untracked_files')\n", (2653, 2685), False, 'import collections\n'), ((3046, 3100), 'collections.namedtuple', 'collections.namedtuple', (['"""GIT"""', '"""index untracked_files"""'], {}), "('GIT', 'index untracked_files')\n", (3068, 3100), False, 'import collections\n')] |
#
# locally sensitive hashing code
#
from collections import defaultdict
import numpy as np
import xxhash
import sys
import pyximport
pyximport.install()
sys.path.insert(0, 'tools')
import simcore as csimcore
# k-shingles: every contiguous length-k substring, in order
def shingle(s, k=2):
    """Yield all substrings of *s* of length *k* (clamped to len(s))."""
    width = min(len(s), k)
    for start in range(len(s) - width + 1):
        yield s[start:start + width]
# split on whitespace into words
def tokenize(s):
    """Return the whitespace-delimited tokens of *s*."""
    tokens = s.split()
    return tokens
def murmur(x):
    """Hash *x* with xxhash's 64-bit digest and return it as np.uint64."""
    digest = xxhash.xxh64_intdigest(x)
    return np.uint64(digest)
# compute actual simhash
class Simhash:
    """Pure-Python 64-bit simhash over a list of hashable features."""

    def __init__(self):
        # Bit width of the signature.
        self.dim = 64
        # 0..63 as np.uint64 (used both as shift amounts and loop constants).
        self.unums = list(map(np.uint64,range(self.dim)))
        # masks[n] == 1 << n, one single-bit mask per signature position.
        self.masks = [self.unums[1] << n for n in self.unums]

    def simhash(self, features, weights=None):
        """Return the np.uint64 simhash of *features* (optionally weighted).

        Each feature votes +w/-w on every bit position depending on its
        murmur hash; bits with a non-negative total are set in the result.
        """
        if weights is None:
            weights = [1.0]*len(features)
        hashish = [murmur(f) for f in features]
        v = [0.0]*self.dim
        for h, w in zip(hashish, weights):
            for i in range(self.dim):
                v[i] += w if h & self.masks[i] else -w
        ans = self.unums[0]
        for i in range(self.dim):
            if v[i] >= 0:
                ans |= self.masks[i]
        return ans
# compute actual simhash with C - only 64 width
class CSimhash():
    """Simhash wrapper delegating the bit-voting loop to the Cython simcore module."""

    def __init__(self):
        # Compiled (pyximport) implementation of the inner loop.
        self.simcore = csimcore.simcore

    def simhash(self, features, weights=None):
        """Return the np.uint64 simhash of *features*, computed in C."""
        if weights is None:
            weights = [1.0]*len(features)
        hashish = [murmur(f) for f in features]
        ret = np.uint64(self.simcore(hashish, weights))
        return ret
class Cluster:
    """LSH-style clustering: a simhash is split into k bands and items
    sharing more than *thresh* bands are linked in self.unions."""

    # dim is the simhash width, k is the tolerance
    def __init__(self, dim=64, k=4, thresh=1):
        self.dim = dim
        self.k = k
        self.thresh = thresh
        # Accumulated (label, other_label) pairs considered near-duplicates.
        self.unions = []
        # One key -> [labels] map per band.
        self.hashmaps = [defaultdict(list) for _ in range(k)] # defaultdict(list)
        # Bit offset where each band starts (dim//k bits per band).
        self.offsets = [np.uint64(dim//k*i) for i in range(k)]
        # Per-band extraction mask; the last band absorbs any remainder bits.
        # All arithmetic stays in np.uint64 to match the simhash type.
        self.bin_masks = [np.uint64(2**(dim-offset)-1) if (i == len(self.offsets)-1) else np.uint64(2**(self.offsets[i+1]-offset)-1) for i, offset in enumerate(self.offsets)]
        self.csim = CSimhash()
        self.hasher = self.csim.simhash

    # add item to the cluster
    def add(self, features, label, weights=None):
        # get subkeys
        sign = self.hasher(features, weights)
        keyvec = self.get_keys(sign)

        # Unite labels with the same keys in the same band
        matches = defaultdict(int)
        for idx, key in enumerate(keyvec):
            others = self.hashmaps[idx][key]
            for l in others:
                matches[l] += 1
            others.append(label)
        # Link to every previously-seen label sharing > thresh band keys.
        for out, val in matches.items():
            if val > self.thresh:
                self.unions.append((label, out))

    # bin simhash into chunks
    def get_keys(self, simhash):
        return [simhash >> offset & mask for offset, mask in zip(self.offsets, self.bin_masks)]
| [
"sys.path.insert",
"xxhash.xxh64_intdigest",
"pyximport.install",
"numpy.uint64",
"collections.defaultdict"
] | [((136, 155), 'pyximport.install', 'pyximport.install', ([], {}), '()\n', (153, 155), False, 'import pyximport\n'), ((156, 183), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""tools"""'], {}), "(0, 'tools')\n", (171, 183), False, 'import sys\n'), ((469, 494), 'xxhash.xxh64_intdigest', 'xxhash.xxh64_intdigest', (['x'], {}), '(x)\n', (491, 494), False, 'import xxhash\n'), ((2415, 2431), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2426, 2431), False, 'from collections import defaultdict\n'), ((1784, 1801), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1795, 1801), False, 'from collections import defaultdict\n'), ((1865, 1888), 'numpy.uint64', 'np.uint64', (['(dim // k * i)'], {}), '(dim // k * i)\n', (1874, 1888), True, 'import numpy as np\n'), ((1930, 1964), 'numpy.uint64', 'np.uint64', (['(2 ** (dim - offset) - 1)'], {}), '(2 ** (dim - offset) - 1)\n', (1939, 1964), True, 'import numpy as np\n'), ((1994, 2044), 'numpy.uint64', 'np.uint64', (['(2 ** (self.offsets[i + 1] - offset) - 1)'], {}), '(2 ** (self.offsets[i + 1] - offset) - 1)\n', (2003, 2044), True, 'import numpy as np\n')] |
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This module defines Blender Preferences for the addon.
"""
import os
import logging
import random
import bpy
from mixer.bl_panels import draw_preferences_ui, update_panels_category
from mixer.broadcaster import common
from mixer.broadcaster.common import ClientAttributes
from mixer.os_utils import getuser
from mixer.share_data import share_data
from mixer.local_data import get_data_directory
logger = logging.getLogger(__name__)
def gen_random_color():
    """Return a random RGB color as a list of three floats in [0, 1)."""
    return [random.random() for _ in range(3)]
def set_log_level(self, value):
    # Property setter for MixerPreferences.log_level: applies *value* to the
    # add-on's package logger, then logs the change at the new level so it
    # is always visible.
    logging.getLogger(__package__).setLevel(value)
    logger.log(value, "Logging level changed")
class SharedFolderItem(bpy.types.PropertyGroup):
    # One entry of MixerPreferences.shared_folders: a directory path.
    shared_folder: bpy.props.StringProperty(default="", subtype="DIR_PATH", name="Shared Folder")
class MixerPreferences(bpy.types.AddonPreferences):
    """
    Preferences class, store persistent properties and options.

    Note for developers using blender-vscode - when an addon is disabled, its preferences are erased, so you will
    loose them regularly while developing with hot-reload.
    A possible solution is to make the addon fully reloadable like described here https://developer.blender.org/T67387#982929
    and avoid using hot-reload of blender-vscode.

    A task exists to support keeping preferences of disabled add-ons: https://developer.blender.org/T71486
    """

    bl_idname = __package__

    def on_user_changed(self, context):
        # Push the new user name to the server when connected.
        client = share_data.client
        if client and client.is_connected():
            client.set_client_attributes({ClientAttributes.USERNAME: self.user})

    def on_user_color_changed(self, context):
        # Push the new user color to the server when connected.
        client = share_data.client
        if client and client.is_connected():
            client.set_client_attributes({ClientAttributes.USERCOLOR: list(self.color)})

    # UI tab name for the Mixer panel; overridable via MIXER_CATEGORY.
    category: bpy.props.StringProperty(
        name="Tab Category",
        description="Choose a name for the category of the panel.",
        default=os.environ.get("MIXER_CATEGORY", "Mixer"),
        update=update_panels_category,
    )

    # UI tab name for the VRtist panel; overridable via VRTIST_CATEGORY.
    vrtist_category: bpy.props.StringProperty(
        name="Tab Category",
        description="VRtist Panel.",
        default=os.environ.get("VRTIST_CATEGORY", "VRtist"),
        update=update_panels_category,
    )

    # NOTE(review): "VRtit" below looks like a typo for "VRtist" in the UI label.
    display_mixer_vrtist_panels: bpy.props.EnumProperty(
        name="Display Mixer and VRtist Panels",
        description="Control which panel is displayed between Mixer and VRtist",
        items=[
            ("MIXER", "Mixer", ""),
            ("VRTIST", "VRtit", ""),
            ("MIXER_AND_VRTIST", "Mixer And VRtist", ""),
        ],
        default="MIXER",
    )

    # Server connection settings; env vars take precedence over hard defaults.
    host: bpy.props.StringProperty(
        name="Host", description="Server Host Name", default=os.environ.get("VRTIST_HOST", common.DEFAULT_HOST)
    )

    port: bpy.props.IntProperty(
        name="Port",
        description="Port to use to connect the server host",
        default=int(os.environ.get("VRTIST_PORT", common.DEFAULT_PORT)),
    )

    room: bpy.props.StringProperty(
        name="Room", description="Name of the session room", default="RM_" + os.environ.get("VRTIST_ROOM", getuser())
    )

    # User name as displayed in peers user list
    user: bpy.props.StringProperty(
        name="User Name",
        description="Name by which the other users will identify you during\na cooperative session",
        default=getuser(),
        update=on_user_changed,
    )

    color: bpy.props.FloatVectorProperty(
        name="User Color",
        subtype="COLOR",
        size=3,
        min=0.0,
        max=1.0,
        precision=2,
        description="Color used in the viewport of the cooperative session\nto differenciate you from the other users",
        default=gen_random_color(),
        update=on_user_color_changed,
    )

    display_selected_room_properties: bpy.props.BoolProperty(
        name="Room Properties", description="Display the properties of the selected room", default=False
    )

    users_list_panel_opened: bpy.props.BoolProperty(
        name="Users List", description="Display the list of the users in the selected room", default=True
    )

    def get_log_level(self):
        # Property getter paired with the module-level set_log_level setter.
        return logging.getLogger(__package__).level

    log_level: bpy.props.EnumProperty(
        name="Log Level",
        description="Logging level to use",
        items=[
            ("ERROR", "Error", "", logging.ERROR),
            ("WARNING", "Warning", "", logging.WARNING),
            ("INFO", "Info", "", logging.INFO),
            ("DEBUG", "Debug", "", logging.DEBUG),
        ],
        set=set_log_level,
        get=get_log_level,
    )

    vrtist_protocol: bpy.props.BoolProperty(
        name="VRtist Protocol", default=os.environ.get("MIXER_VRTIST_PROTOCOL") == "0"
    )

    ignore_version_check: bpy.props.BoolProperty(default=False, name="Ignore Room Version Check")

    show_server_console: bpy.props.BoolProperty(name="Show Server Console", default=False)

    # Path to the VRtist executable used to launch the VR client.
    VRtist: bpy.props.StringProperty(
        name="VRtist", default=os.environ.get("VRTIST_EXE", "D:/unity/VRtist/Build/VRtist.exe"), subtype="FILE_PATH"
    )

    VRtist_suffix: bpy.props.StringProperty(name="VRtist_suffix", default="_VRtist")

    data_directory: bpy.props.StringProperty(
        name="Data Directory", default=os.environ.get("MIXER_DATA_DIR", get_data_directory()), subtype="FILE_PATH"
    )

    shared_folders: bpy.props.CollectionProperty(name="Shared Folders", type=SharedFolderItem)

    # Developer option to avoid sending scene content to server at the first connexion
    # Allow to quickly iterate debugging/test on large scenes with only one client in room
    # Main usage: optimization of client timers to check if updates are required
    no_send_scene_content: bpy.props.BoolProperty(name="Do Not Send Scene Content", default=False)
    no_start_server: bpy.props.BoolProperty(
        name="Do Not Start Server on Connect", default=os.environ.get("MIXER_NO_START_SERVER") is not None
    )
    send_base_meshes: bpy.props.BoolProperty(default=True)
    send_baked_meshes: bpy.props.BoolProperty(default=True)

    # Viewport gizmo toggles for showing other participants' state.
    display_own_gizmos: bpy.props.BoolProperty(default=False, name="Display Own Gizmos")
    display_ids_gizmos: bpy.props.BoolProperty(default=False, name="Display ID Gizmos")
    display_debugging_tools: bpy.props.BoolProperty(default=False, name="Display Debugging Tools")

    display_frustums_gizmos: bpy.props.BoolProperty(default=True, name="Display Frustums Gizmos")
    display_frustums_names_gizmos: bpy.props.BoolProperty(default=True, name="Display Frustums User Names")
    display_selections_gizmos: bpy.props.BoolProperty(default=True, name="Display Selection Gizmos")
    display_selections_names_gizmos: bpy.props.BoolProperty(default=True, name="Display Selection User Names")

    # Debug throttle: extra delay (seconds) between command emissions.
    commands_send_interval: bpy.props.FloatProperty(
        name="Command Send Interval",
        description="Debug tool to specify a number of seconds to wait between each command emission toward the server.",
        default=0,
    )

    def draw(self, context):
        # Delegate UI drawing to the panels module.
        draw_preferences_ui(self, context)
# Classes registered with Blender when the add-on is enabled.
classes = (
    SharedFolderItem,
    MixerPreferences,
)

# Standard Blender pattern: the factory returns paired register/unregister callables.
register_factory, unregister_factory = bpy.utils.register_classes_factory(classes)


def register():
    register_factory()


def unregister():
    unregister_factory()
| [
"logging.getLogger",
"bpy.utils.register_classes_factory",
"bpy.props.StringProperty",
"bpy.props.BoolProperty",
"bpy.props.CollectionProperty",
"bpy.props.FloatProperty",
"os.environ.get",
"bpy.props.EnumProperty",
"mixer.bl_panels.draw_preferences_ui",
"mixer.local_data.get_data_directory",
"r... | [((1097, 1124), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1114, 1124), False, 'import logging\n'), ((7963, 8006), 'bpy.utils.register_classes_factory', 'bpy.utils.register_classes_factory', (['classes'], {}), '(classes)\n', (7997, 8006), False, 'import bpy\n'), ((1159, 1174), 'random.random', 'random.random', ([], {}), '()\n', (1172, 1174), False, 'import random\n'), ((1183, 1198), 'random.random', 'random.random', ([], {}), '()\n', (1196, 1198), False, 'import random\n'), ((1207, 1222), 'random.random', 'random.random', ([], {}), '()\n', (1220, 1222), False, 'import random\n'), ((1446, 1524), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'default': '""""""', 'subtype': '"""DIR_PATH"""', 'name': '"""Shared Folder"""'}), "(default='', subtype='DIR_PATH', name='Shared Folder')\n", (1470, 1524), False, 'import bpy\n'), ((3059, 3328), 'bpy.props.EnumProperty', 'bpy.props.EnumProperty', ([], {'name': '"""Display Mixer and VRtist Panels"""', 'description': '"""Control which panel is displayed between Mixer and VRtist"""', 'items': "[('MIXER', 'Mixer', ''), ('VRTIST', 'VRtit', ''), ('MIXER_AND_VRTIST',\n 'Mixer And VRtist', '')]", 'default': '"""MIXER"""'}), "(name='Display Mixer and VRtist Panels', description=\n 'Control which panel is displayed between Mixer and VRtist', items=[(\n 'MIXER', 'Mixer', ''), ('VRTIST', 'VRtit', ''), ('MIXER_AND_VRTIST',\n 'Mixer And VRtist', '')], default='MIXER')\n", (3081, 3328), False, 'import bpy\n'), ((4592, 4717), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'name': '"""Room Properties"""', 'description': '"""Display the properties of the selected room"""', 'default': '(False)'}), "(name='Room Properties', description=\n 'Display the properties of the selected room', default=False)\n", (4614, 4717), False, 'import bpy\n'), ((4756, 4882), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'name': '"""Users List"""', 'description': '"""Display the list of 
the users in the selected room"""', 'default': '(True)'}), "(name='Users List', description=\n 'Display the list of the users in the selected room', default=True)\n", (4778, 4882), False, 'import bpy\n'), ((4990, 5283), 'bpy.props.EnumProperty', 'bpy.props.EnumProperty', ([], {'name': '"""Log Level"""', 'description': '"""Logging level to use"""', 'items': "[('ERROR', 'Error', '', logging.ERROR), ('WARNING', 'Warning', '', logging.\n WARNING), ('INFO', 'Info', '', logging.INFO), ('DEBUG', 'Debug', '',\n logging.DEBUG)]", 'set': 'set_log_level', 'get': 'get_log_level'}), "(name='Log Level', description='Logging level to use',\n items=[('ERROR', 'Error', '', logging.ERROR), ('WARNING', 'Warning', '',\n logging.WARNING), ('INFO', 'Info', '', logging.INFO), ('DEBUG', 'Debug',\n '', logging.DEBUG)], set=set_log_level, get=get_log_level)\n", (5012, 5283), False, 'import bpy\n'), ((5544, 5615), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(False)', 'name': '"""Ignore Room Version Check"""'}), "(default=False, name='Ignore Room Version Check')\n", (5566, 5615), False, 'import bpy\n'), ((5642, 5707), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'name': '"""Show Server Console"""', 'default': '(False)'}), "(name='Show Server Console', default=False)\n", (5664, 5707), False, 'import bpy\n'), ((5889, 5954), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'name': '"""VRtist_suffix"""', 'default': '"""_VRtist"""'}), "(name='VRtist_suffix', default='_VRtist')\n", (5913, 5954), False, 'import bpy\n'), ((6144, 6218), 'bpy.props.CollectionProperty', 'bpy.props.CollectionProperty', ([], {'name': '"""Shared Folders"""', 'type': 'SharedFolderItem'}), "(name='Shared Folders', type=SharedFolderItem)\n", (6172, 6218), False, 'import bpy\n'), ((6506, 6577), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'name': '"""Do Not Send Scene Content"""', 'default': '(False)'}), "(name='Do Not Send Scene Content', default=False)\n", 
(6528, 6577), False, 'import bpy\n'), ((6758, 6794), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(True)'}), '(default=True)\n', (6780, 6794), False, 'import bpy\n'), ((6818, 6854), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(True)'}), '(default=True)\n', (6840, 6854), False, 'import bpy\n'), ((6880, 6944), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(False)', 'name': '"""Display Own Gizmos"""'}), "(default=False, name='Display Own Gizmos')\n", (6902, 6944), False, 'import bpy\n'), ((6969, 7032), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(False)', 'name': '"""Display ID Gizmos"""'}), "(default=False, name='Display ID Gizmos')\n", (6991, 7032), False, 'import bpy\n'), ((7062, 7131), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(False)', 'name': '"""Display Debugging Tools"""'}), "(default=False, name='Display Debugging Tools')\n", (7084, 7131), False, 'import bpy\n'), ((7162, 7230), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(True)', 'name': '"""Display Frustums Gizmos"""'}), "(default=True, name='Display Frustums Gizmos')\n", (7184, 7230), False, 'import bpy\n'), ((7266, 7338), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(True)', 'name': '"""Display Frustums User Names"""'}), "(default=True, name='Display Frustums User Names')\n", (7288, 7338), False, 'import bpy\n'), ((7370, 7439), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(True)', 'name': '"""Display Selection Gizmos"""'}), "(default=True, name='Display Selection Gizmos')\n", (7392, 7439), False, 'import bpy\n'), ((7477, 7550), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'default': '(True)', 'name': '"""Display Selection User Names"""'}), "(default=True, name='Display Selection User Names')\n", (7499, 7550), False, 'import bpy\n'), ((7580, 7768), 'bpy.props.FloatProperty', 
'bpy.props.FloatProperty', ([], {'name': '"""Command Send Interval"""', 'description': '"""Debug tool to specify a number of seconds to wait between each command emission toward the server."""', 'default': '(0)'}), "(name='Command Send Interval', description=\n 'Debug tool to specify a number of seconds to wait between each command emission toward the server.'\n , default=0)\n", (7603, 7768), False, 'import bpy\n'), ((7828, 7862), 'mixer.bl_panels.draw_preferences_ui', 'draw_preferences_ui', (['self', 'context'], {}), '(self, context)\n', (7847, 7862), False, 'from mixer.bl_panels import draw_preferences_ui, update_panels_category\n'), ((1282, 1312), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (1299, 1312), False, 'import logging\n'), ((2717, 2758), 'os.environ.get', 'os.environ.get', (['"""MIXER_CATEGORY"""', '"""Mixer"""'], {}), "('MIXER_CATEGORY', 'Mixer')\n", (2731, 2758), False, 'import os\n'), ((2935, 2978), 'os.environ.get', 'os.environ.get', (['"""VRTIST_CATEGORY"""', '"""VRtist"""'], {}), "('VRTIST_CATEGORY', 'VRtist')\n", (2949, 2978), False, 'import os\n'), ((3499, 3549), 'os.environ.get', 'os.environ.get', (['"""VRTIST_HOST"""', 'common.DEFAULT_HOST'], {}), "('VRTIST_HOST', common.DEFAULT_HOST)\n", (3513, 3549), False, 'import os\n'), ((4139, 4148), 'mixer.os_utils.getuser', 'getuser', ([], {}), '()\n', (4146, 4148), False, 'from mixer.os_utils import getuser\n'), ((4937, 4967), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (4954, 4967), False, 'import logging\n'), ((5778, 5842), 'os.environ.get', 'os.environ.get', (['"""VRTIST_EXE"""', '"""D:/unity/VRtist/Build/VRtist.exe"""'], {}), "('VRTIST_EXE', 'D:/unity/VRtist/Build/VRtist.exe')\n", (5792, 5842), False, 'import os\n'), ((3692, 3742), 'os.environ.get', 'os.environ.get', (['"""VRTIST_PORT"""', 'common.DEFAULT_PORT'], {}), "('VRTIST_PORT', common.DEFAULT_PORT)\n", (3706, 3742), False, 'import os\n'), ((5464, 5503), 
'os.environ.get', 'os.environ.get', (['"""MIXER_VRTIST_PROTOCOL"""'], {}), "('MIXER_VRTIST_PROTOCOL')\n", (5478, 5503), False, 'import os\n'), ((6074, 6094), 'mixer.local_data.get_data_directory', 'get_data_directory', ([], {}), '()\n', (6092, 6094), False, 'from mixer.local_data import get_data_directory\n'), ((6678, 6717), 'os.environ.get', 'os.environ.get', (['"""MIXER_NO_START_SERVER"""'], {}), "('MIXER_NO_START_SERVER')\n", (6692, 6717), False, 'import os\n'), ((3894, 3903), 'mixer.os_utils.getuser', 'getuser', ([], {}), '()\n', (3901, 3903), False, 'from mixer.os_utils import getuser\n')] |
#!/usr/bin/env python
import colorsys
import math
import time
import unicornhathd
print("""Ubercorn rainbow 2x1
An example of how to use a 2-wide by 1-tall pair of Ubercorn matrices.
Press Ctrl+C to exit!
""")
unicornhathd.brightness(0.6)
# Enable addressing for Ubercorn matrices
unicornhathd.enable_addressing()
# Set up buffer shape to be 32 wide and 16 tall
unicornhathd.setup_buffer(32, 16)
# Set up display 0 on left, and display 1 on right
unicornhathd.setup_display(0, 0, 0, 0)
unicornhathd.setup_display(1, 16, 0, 0)
step = 0
try:
while True:
step += 1
for x in range(0, 32):
for y in range(0, 16):
dx = 7
dy = 7
dx = (math.sin(step / 20.0) * 15.0) + 7.0
dy = (math.cos(step / 15.0) * 15.0) + 7.0
sc = (math.cos(step / 10.0) * 10.0) + 16.0
h = math.sqrt(math.pow(x - dx, 2) + math.pow(y - dy, 2)) / sc
r, g, b = colorsys.hsv_to_rgb(h, 1, 1)
r *= 255.0
g *= 255.0
b *= 255.0
unicornhathd.set_pixel(x, y, r, g, b)
unicornhathd.show()
time.sleep(1.0 / 60)
except KeyboardInterrupt:
unicornhathd.off()
| [
"unicornhathd.show",
"unicornhathd.set_pixel",
"unicornhathd.brightness",
"math.pow",
"time.sleep",
"unicornhathd.enable_addressing",
"colorsys.hsv_to_rgb",
"math.sin",
"math.cos",
"unicornhathd.setup_buffer",
"unicornhathd.off",
"unicornhathd.setup_display"
] | [((218, 246), 'unicornhathd.brightness', 'unicornhathd.brightness', (['(0.6)'], {}), '(0.6)\n', (241, 246), False, 'import unicornhathd\n'), ((290, 322), 'unicornhathd.enable_addressing', 'unicornhathd.enable_addressing', ([], {}), '()\n', (320, 322), False, 'import unicornhathd\n'), ((372, 405), 'unicornhathd.setup_buffer', 'unicornhathd.setup_buffer', (['(32)', '(16)'], {}), '(32, 16)\n', (397, 405), False, 'import unicornhathd\n'), ((458, 496), 'unicornhathd.setup_display', 'unicornhathd.setup_display', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (484, 496), False, 'import unicornhathd\n'), ((497, 536), 'unicornhathd.setup_display', 'unicornhathd.setup_display', (['(1)', '(16)', '(0)', '(0)'], {}), '(1, 16, 0, 0)\n', (523, 536), False, 'import unicornhathd\n'), ((1156, 1175), 'unicornhathd.show', 'unicornhathd.show', ([], {}), '()\n', (1173, 1175), False, 'import unicornhathd\n'), ((1184, 1204), 'time.sleep', 'time.sleep', (['(1.0 / 60)'], {}), '(1.0 / 60)\n', (1194, 1204), False, 'import time\n'), ((1236, 1254), 'unicornhathd.off', 'unicornhathd.off', ([], {}), '()\n', (1252, 1254), False, 'import unicornhathd\n'), ((981, 1009), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['h', '(1)', '(1)'], {}), '(h, 1, 1)\n', (1000, 1009), False, 'import colorsys\n'), ((1109, 1146), 'unicornhathd.set_pixel', 'unicornhathd.set_pixel', (['x', 'y', 'r', 'g', 'b'], {}), '(x, y, r, g, b)\n', (1131, 1146), False, 'import unicornhathd\n'), ((722, 743), 'math.sin', 'math.sin', (['(step / 20.0)'], {}), '(step / 20.0)\n', (730, 743), False, 'import math\n'), ((780, 801), 'math.cos', 'math.cos', (['(step / 15.0)'], {}), '(step / 15.0)\n', (788, 801), False, 'import math\n'), ((838, 859), 'math.cos', 'math.cos', (['(step / 10.0)'], {}), '(step / 10.0)\n', (846, 859), False, 'import math\n'), ((906, 925), 'math.pow', 'math.pow', (['(x - dx)', '(2)'], {}), '(x - dx, 2)\n', (914, 925), False, 'import math\n'), ((928, 947), 'math.pow', 'math.pow', (['(y - dy)', '(2)'], {}), 
'(y - dy, 2)\n', (936, 947), False, 'import math\n')] |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class Bindings(NewOpenCVTests):
def test_inheritance(self):
bm = cv.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from SteroMatcher
boost = cv.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel
def test_redirectError(self):
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as e:
pass
handler_called = [False]
def test_error_handler(status, func_name, err_msg, file_name, line):
handler_called[0] = True
cv.redirectError(test_error_handler)
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as e:
self.assertEqual(handler_called[0], True)
pass
cv.redirectError(None)
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as e:
pass
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
| [
"cv2.StereoBM_create",
"cv2.redirectError",
"tests_common.NewOpenCVTests.bootstrap",
"cv2.imshow",
"cv2.ml.Boost_create"
] | [((1341, 1367), 'tests_common.NewOpenCVTests.bootstrap', 'NewOpenCVTests.bootstrap', ([], {}), '()\n', (1365, 1367), False, 'from tests_common import NewOpenCVTests\n'), ((217, 237), 'cv2.StereoBM_create', 'cv.StereoBM_create', ([], {}), '()\n', (235, 237), True, 'import cv2 as cv\n'), ((346, 366), 'cv2.ml.Boost_create', 'cv.ml.Boost_create', ([], {}), '()\n', (364, 366), True, 'import cv2 as cv\n'), ((863, 899), 'cv2.redirectError', 'cv.redirectError', (['test_error_handler'], {}), '(test_error_handler)\n', (879, 899), True, 'import cv2 as cv\n'), ((1124, 1146), 'cv2.redirectError', 'cv.redirectError', (['None'], {}), '(None)\n', (1140, 1146), True, 'import cv2 as cv\n'), ((570, 589), 'cv2.imshow', 'cv.imshow', (['""""""', 'None'], {}), "('', None)\n", (579, 589), True, 'import cv2 as cv\n'), ((925, 944), 'cv2.imshow', 'cv.imshow', (['""""""', 'None'], {}), "('', None)\n", (934, 944), True, 'import cv2 as cv\n'), ((1172, 1191), 'cv2.imshow', 'cv.imshow', (['""""""', 'None'], {}), "('', None)\n", (1181, 1191), True, 'import cv2 as cv\n')] |
import json
import re
import requests
import urlparse
class Investigate(object):
BASE_URL = 'https://investigate.api.opendns.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
def __init__(self, api_key):
self.api_key = api_key
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"tags": "domains/{}/latest_tags",
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return requests.get(urlparse.urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return requests.post(
urlparse.urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urlparse.urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://sgraph.opendns.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://sgraph.opendns.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://sgraph.opendns.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://sgraph.opendns.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def domain_tags(self, domain):
'''Get the domain tagging dates for the given domain.
For details, see https://sgraph.opendns.com/docs/api#latest_tags
'''
uri = self._uris["tags"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://sgraph.opendns.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in d.iteritems() if key == 'name' ]
| [
"urlparse.urljoin",
"json.dumps",
"re.compile"
] | [((261, 299), 're.compile', 're.compile', (['"""(\\\\d{1,3}\\\\.){3}\\\\d{1,3}"""'], {}), "('(\\\\d{1,3}\\\\.){3}\\\\d{1,3}')\n", (271, 299), False, 'import re\n'), ((2566, 2620), 'urlparse.urljoin', 'urlparse.urljoin', (["self._uris['categorization']", 'domain'], {}), "(self._uris['categorization'], domain)\n", (2582, 2620), False, 'import urlparse\n'), ((1381, 1424), 'urlparse.urljoin', 'urlparse.urljoin', (['Investigate.BASE_URL', 'uri'], {}), '(Investigate.BASE_URL, uri)\n', (1397, 1424), False, 'import urlparse\n'), ((1696, 1739), 'urlparse.urljoin', 'urlparse.urljoin', (['Investigate.BASE_URL', 'uri'], {}), '(Investigate.BASE_URL, uri)\n', (1712, 1739), False, 'import urlparse\n'), ((2911, 2930), 'json.dumps', 'json.dumps', (['domains'], {}), '(domains)\n', (2921, 2930), False, 'import json\n')] |
from .models.type_validator import TypeValidator
from .models.typed_list import TypedList
from utils.star_class_map import spectral_class_map, oddity_map
from config import NMSConfig
class StarClass(object):
config = TypeValidator(dict)
spectral_class_str = TypeValidator(str)
spectral_class = TypeValidator(str)
brightness = TypeValidator(int)
oddities = TypedList(str)
def __init__(self, spectral_class):
self.config = NMSConfig()
self.spectral_class_str = spectral_class.upper()
self.spectral_class = self.spectral_class_str[0]
self.oddities = []
if 2 > len(self.spectral_class_str) > 4:
raise ValueError('Spectral class input must be between 2 and 4 characters')
elif self.spectral_class not in spectral_class_map:
raise ValueError(f'Spectral class {self.spectral_class_str[0].upper()} does not exist')
self.brightness_index = int(self.spectral_class_str[1]) // 2
self.deity = spectral_class_map[self.spectral_class][self.brightness_index]
if len(self.spectral_class_str) > 2:
for char in self.spectral_class_str[2:]:
if char.upper() not in oddity_map:
print(f'"{char.upper()}" not a recognized oddity code; ignoring...')
elif char.upper() not in self.oddities:
self.oddities.append(char.upper())
def generate_names(self, region, number=10, min_len=4):
prefix = f'{oddity_map[self.oddities[0]]}-' if len(self.oddities) == 2 else ''
suffix = f'-{oddity_map[self.oddities[-1]]}' if len(self.oddities) >= 1 else ''
return {
f'{prefix}{name}{suffix}'
for name in self.config.generator.get_prospects(
input_words=[self.deity, region],
number=number,
min_len=min_len
)
}
| [
"config.NMSConfig"
] | [((457, 468), 'config.NMSConfig', 'NMSConfig', ([], {}), '()\n', (466, 468), False, 'from config import NMSConfig\n')] |
"""Classes related to the ancient-auth authenticator module."""
import calendar
from datetime import datetime, timedelta
from ancientsolutions.crypttools import rsa, x509
from Crypto.PublicKey import RSA
from os.path import exists
try:
from urlparse import urlparse, urljoin, parse_qs
except Exception as e:
from urllib.parse import urlparse, urljoin, parse_qs
try:
from urllib import urlencode
except Exception as e:
from urllib.parse import urlencode
import token_cookie
import token_pb2
import logging
class Authenticator(object):
"""Authentification client for the Ancient Login Service."""
def __init__(self, app_name, cert=None, key=None, ca_bundle=None,
authserver="login.ancient-solutions.com"):
"""Set up the authentication client so it can be used lateron.
Args:
app_name: The name the login server shall display to the user.
cert: Path to the certificate to use for signing the
requests, or the contents of the certificate.
key: Path to the private key to use for signing the
requests, or the contents of the key.
ca_bundle: path to a CA bundle file to use for authenticating
the server.
"""
if key is not None and type(key) == str and exists(key):
self._rsa_key = rsa.UnwrapRSAKey(key)
elif key is not None and isinstance(key, RSA._RSAobj):
self._rsa_key = key
elif key is not None:
self._rsa_key = RSA.importKey(key)
else:
self._rsa_key = None
if cert is not None and type(cert) == str and exists(cert):
self._cert = x509.parse_certificate_file(cert)
f = open(cert)
self._plain_cert = f.read()
f.close()
elif cert is not None and isinstance(cert, x509.Certificate):
self._cert = cert
# TODO: We'll need the plaintext certificate here...
elif cert is not None:
self._cert = x509.parse_certificate(cert)
self._plain_cert = cert
else:
self._cert = None
self._plain_cert = None
if ca_bundle is not None and type(ca_bundle) == str and exists(ca_bundle):
self._ca = x509.parse_certificate_file(ca_bundle)
elif ca_bundle is not None and isinstance(ca_bundle, x509.Certificate):
self._ca = ca_bundle
elif ca_bundle is not None:
self._ca = x509.parse_certificate(ca_bundle)
else:
self._ca = None
self._app_name = app_name
self._authserver = authserver
def get_authenticated_user(self, auth_cookie):
"""Determine the name of the currently logged-in user, if any.
Parses the authentication cookie passsed in as a string, verifies
the signatures and, if all checks succeed, returns the name of
the authenticated user.
Args:
auth_cookie: value of the cookie used for authentication.
Returns:
Name of the authenticated user, or an empty string if the user
authentication cannot be verified or is empty.
"""
if len(auth_cookie) == 0:
return ""
tc = token_pb2.TokenCookie()
tcc = token_cookie.TokenCookieCodec(tc,
pubkey=self._cert.get_pub_key())
try:
tcc.decode(auth_cookie)
except token_cookie.SignatureException as e:
return ""
except token_cookie.TokenExpiredException as e:
return ""
return tc.basic_creds.user_name
def get_authenticated_scopes(self, auth_cookie):
"""Determines the scopes the authenticated user belongs to.
Returns a list of the names of all scopes the user is in. If no user
is authenticated, an empty list is returned.
Args:
auth_cookie: value of the cookie used for authentication.
Returns:
List of all scopes the user is authorized for.
"""
tc = token_pb2.TokenCookie()
tcc = token_cookie.TokenCookieCodec(tc,
pubkey=self._cert.get_pub_key())
tcc.decode(auth_cookie)
return tc.basic_creds.scope
def is_authenticated_scope(self, auth_cookie, scope):
"""Determines if the currently authenticated user is a member of the
given "scope". Returns true if an user is authenticated and is a
member of the scope.
Please note that if this is False but get_authenticated_user returns
a nonempty string, running request_authorization() won't help since
the user is already authenticated but doesn't have the requested
authorization.
Args:
auth_cookie: value of the cookie used for authentication.
scope: name of the scope we are interested in.
Returns:
True if the user is a member of the scope, False otherwise.
"""
scopes = self.get_authenticated_scopes(auth_cookie)
return scope in scopes
def request_authorization(self, destination_url):
"""Generates an authentication request URL for the destination_url.
Generates an URL which the web user needs to go to in order to log in
and/or provide their authorization token to the current web
application. After authentication has succeeded (if at all), the
user will eventually be sent back to destination_url.
Args:
destination_url: URL which we would like to return to after a
authenticating the user successfully.
Returns: URL the user should be redirected to in order to commence
authentication.
"""
atr = token_pb2.AuthTokenRequest()
returnuri = urlparse(urljoin(destination_url, '/login'))
atr.app_name = self._app_name
atr.certificate = self._plain_cert
atr.return_uri = returnuri.geturl()
atr.original_uri = destination_url
if not self._rsa_key:
raise token_cookie.NoKeyException()
atrc = token_cookie.AuthTokenRequestCodec(atr,
privkey=self._rsa_key, pubkey=self._cert.get_pub_key())
atrdata = atrc.encode()
params = {
"client_id": atrdata,
"redirect_uri": destination_url,
"response_type": "token",
"debug": str(atr),
}
newurl = ("https://" + self._authserver +
"/?" + urlencode(params))
return newurl
def login_handler(self, access_token):
"""Handle a login response from the login server.
This should be invoked when an HTTP post from the login server
occurs. This method will return any local cookies to set up and
redirects the user back to the URL requested with the login
operation.
Please note that when the /login handler is invoked, the peer
will not be the login server, but the user.
Args:
access_token: the content of the access_token HTTP parameter
sent by the login server. This will essentially be a signed,
base64 encoded token with user information.
Returns:
Tuple with the cookie value to set the authentication token,
and the URL to redirect the user to.
"""
atr = token_pb2.AuthTokenResponse()
atrc = token_cookie.AuthTokenResponseCodec(atr,
cacert=self._ca)
atrc.decode(access_token)
expiry = datetime.now() + timedelta(1)
tc = token_pb2.TokenCookie()
tc.basic_creds.user_name = atr.basic_creds.user_name
tc.basic_creds.scope.extend(atr.basic_creds.scope)
tc.basic_creds.expires = calendar.timegm(expiry.utctimetuple())
tcc = token_cookie.TokenCookieCodec(tc, privkey=self._rsa_key)
cookiedata = tcc.encode()
return (cookiedata, atr.original_uri)
| [
"os.path.exists",
"ancientsolutions.crypttools.rsa.UnwrapRSAKey",
"token_pb2.TokenCookie",
"token_cookie.TokenCookieCodec",
"datetime.timedelta",
"datetime.datetime.now",
"ancientsolutions.crypttools.x509.parse_certificate",
"token_cookie.NoKeyException",
"token_cookie.AuthTokenResponseCodec",
"ur... | [((3228, 3251), 'token_pb2.TokenCookie', 'token_pb2.TokenCookie', ([], {}), '()\n', (3249, 3251), False, 'import token_pb2\n'), ((4021, 4044), 'token_pb2.TokenCookie', 'token_pb2.TokenCookie', ([], {}), '()\n', (4042, 4044), False, 'import token_pb2\n'), ((5690, 5718), 'token_pb2.AuthTokenRequest', 'token_pb2.AuthTokenRequest', ([], {}), '()\n', (5716, 5718), False, 'import token_pb2\n'), ((7295, 7324), 'token_pb2.AuthTokenResponse', 'token_pb2.AuthTokenResponse', ([], {}), '()\n', (7322, 7324), False, 'import token_pb2\n'), ((7340, 7397), 'token_cookie.AuthTokenResponseCodec', 'token_cookie.AuthTokenResponseCodec', (['atr'], {'cacert': 'self._ca'}), '(atr, cacert=self._ca)\n', (7375, 7397), False, 'import token_cookie\n'), ((7505, 7528), 'token_pb2.TokenCookie', 'token_pb2.TokenCookie', ([], {}), '()\n', (7526, 7528), False, 'import token_pb2\n'), ((7736, 7792), 'token_cookie.TokenCookieCodec', 'token_cookie.TokenCookieCodec', (['tc'], {'privkey': 'self._rsa_key'}), '(tc, privkey=self._rsa_key)\n', (7765, 7792), False, 'import token_cookie\n'), ((1287, 1298), 'os.path.exists', 'exists', (['key'], {}), '(key)\n', (1293, 1298), False, 'from os.path import exists\n'), ((1328, 1349), 'ancientsolutions.crypttools.rsa.UnwrapRSAKey', 'rsa.UnwrapRSAKey', (['key'], {}), '(key)\n', (1344, 1349), False, 'from ancientsolutions.crypttools import rsa, x509\n'), ((1624, 1636), 'os.path.exists', 'exists', (['cert'], {}), '(cert)\n', (1630, 1636), False, 'from os.path import exists\n'), ((1663, 1696), 'ancientsolutions.crypttools.x509.parse_certificate_file', 'x509.parse_certificate_file', (['cert'], {}), '(cert)\n', (1690, 1696), False, 'from ancientsolutions.crypttools import rsa, x509\n'), ((2217, 2234), 'os.path.exists', 'exists', (['ca_bundle'], {}), '(ca_bundle)\n', (2223, 2234), False, 'from os.path import exists\n'), ((2259, 2297), 'ancientsolutions.crypttools.x509.parse_certificate_file', 'x509.parse_certificate_file', (['ca_bundle'], {}), '(ca_bundle)\n', (2286, 
2297), False, 'from ancientsolutions.crypttools import rsa, x509\n'), ((5748, 5782), 'urllib.parse.urljoin', 'urljoin', (['destination_url', '"""/login"""'], {}), "(destination_url, '/login')\n", (5755, 5782), False, 'from urllib.parse import urlparse, urljoin, parse_qs\n'), ((6002, 6031), 'token_cookie.NoKeyException', 'token_cookie.NoKeyException', ([], {}), '()\n', (6029, 6031), False, 'import token_cookie\n'), ((6435, 6452), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (6444, 6452), False, 'from urllib.parse import urlencode\n'), ((7462, 7476), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7474, 7476), False, 'from datetime import datetime, timedelta\n'), ((7479, 7491), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (7488, 7491), False, 'from datetime import datetime, timedelta\n'), ((1503, 1521), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', (['key'], {}), '(key)\n', (1516, 1521), False, 'from Crypto.PublicKey import RSA\n'), ((2007, 2035), 'ancientsolutions.crypttools.x509.parse_certificate', 'x509.parse_certificate', (['cert'], {}), '(cert)\n', (2029, 2035), False, 'from ancientsolutions.crypttools import rsa, x509\n'), ((2470, 2503), 'ancientsolutions.crypttools.x509.parse_certificate', 'x509.parse_certificate', (['ca_bundle'], {}), '(ca_bundle)\n', (2492, 2503), False, 'from ancientsolutions.crypttools import rsa, x509\n')] |
"""
File: sample_generator.py
Author: Nrupatunga
Email: <EMAIL>
Github: https://github.com/nrupatunga
Description: Generating samples from single frame
"""
import sys
import cv2
import numpy as np
from loguru import logger
try:
from goturn.helper.BoundingBox import BoundingBox
from goturn.helper.image_proc import cropPadImage
from goturn.helper.vis_utils import Visualizer
from goturn.helper import image_io
from goturn.helper.draw_util import draw
except ImportError:
logger.error('Please run $source settings.sh from root directory')
sys.exit(1)
class bbParams:
"""Docstring for bbParams. """
def __init__(self, lamda_shift, lamda_scale, min_scale, max_scale):
"""parameters for generating synthetic data"""
self.lamda_shift = lamda_shift
self.lamda_scale = lamda_scale
self.min_scale = min_scale
self.max_scale = max_scale
def __repr__(self):
return str({'lamda_shift': self.lamda_shift, 'lamda_scale':
self.lamda_scale, 'min_scale': self.min_scale,
'max_scale': self.max_scale})
class sample_generator:
"""Generate samples from single frame"""
def __init__(self, lamda_shift, lamda_scale, min_scale, max_scale,
dbg=False, env='sample_generator'):
"""set parameters """
self._lamda_shift = lamda_shift
self._lamda_scale = lamda_scale
self._min_scale = min_scale
self._max_scale = max_scale
self._kSamplesPerImage = 10 # number of synthetic samples per image
self._viz = None
if dbg:
self._env = env
self._viz = Visualizer(env=self._env)
self._dbg = dbg
def make_true_sample(self):
"""Generate true target:search_region pair"""
curr_prior_tight = self.bbox_prev_gt_
target_pad = self.target_pad_
# To find out the region in which we need to search in the
# current frame, we use the previous frame bbox to get the
# region in which we can make the search
output = cropPadImage(curr_prior_tight, self.img_curr_,
self._dbg, self._viz)
curr_search_region, curr_search_location, edge_spacing_x, edge_spacing_y = output
bbox_curr_gt = self.bbox_curr_gt_
bbox_curr_gt_recentered = BoundingBox(0, 0, 0, 0)
bbox_curr_gt_recentered = bbox_curr_gt.recenter(curr_search_location, edge_spacing_x, edge_spacing_y, bbox_curr_gt_recentered)
if self._dbg:
env = self._env + '_make_true_sample'
search_dbg = draw.bbox(self.img_curr_, curr_search_location)
search_dbg = draw.bbox(search_dbg, bbox_curr_gt, color=(255, 255, 0))
self._viz.plot_image_opencv(search_dbg, 'search_region', env=env)
recentered_img = draw.bbox(curr_search_region,
bbox_curr_gt_recentered,
color=(255, 255, 0))
self._viz.plot_image_opencv(recentered_img,
'cropped_search_region', env=env)
del recentered_img
del search_dbg
bbox_curr_gt_recentered.scale(curr_search_region)
return curr_search_region, target_pad, bbox_curr_gt_recentered
def make_training_samples(self, num_samples, images, targets, bbox_gt_scales):
"""
@num_samples: number of samples
@images: set of num_samples appended to images list
@target: set of num_samples targets appended to targets list
@bbox_gt_scales: bounding box to be regressed (scaled version)
"""
for i in range(num_samples):
image_rand_focus, target_pad, bbox_gt_scaled = self.make_training_sample_BBShift()
images.append(image_rand_focus)
targets.append(target_pad)
bbox_gt_scales.append(bbox_gt_scaled)
if self._dbg:
self.visualize(image_rand_focus, target_pad, bbox_gt_scaled, i)
return images, targets, bbox_gt_scales
def visualize(self, image, target, bbox, idx):
"""
sample generator prepares image and the respective targets (with
bounding box). This function helps you to visualize it.
The visualization is based on the Visdom server, please
initialize the visdom server by running the command
$ python -m visdom.server
open http://localhost:8097 in your browser to visualize the
images
"""
if image_io._is_pil_image(image):
image = np.asarray(image)
if image_io._is_pil_image(target):
target = np.asarray(target)
target = cv2.resize(target, (227, 227))
target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (227, 227))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
bbox.unscale(image)
bbox.x1, bbox.x2, bbox.y1, bbox.y2 = int(bbox.x1), int(bbox.x2), int(bbox.y1), int(bbox.y2)
image_bb = draw.bbox(image, bbox)
out = np.concatenate((target[np.newaxis, ...], image_bb[np.newaxis, ...]), axis=0)
out = np.transpose(out, [0, 3, 1, 2])
self._viz.plot_images_np(out, title='sample_{}'.format(idx),
env=self._env + '_train')
def get_default_bb_params(self):
"""default bb parameters"""
default_params = bbParams(self._lamda_shift, self._lamda_scale,
self._min_scale, self._max_scale)
return default_params
def make_training_sample_BBShift_(self, bbParams, dbg=False):
"""generate training samples based on bbparams"""
bbox_curr_gt = self.bbox_curr_gt_
bbox_curr_shift = BoundingBox(0, 0, 0, 0)
bbox_curr_shift = bbox_curr_gt.shift(self.img_curr_, bbParams.lamda_scale, bbParams.lamda_shift, bbParams.min_scale, bbParams.max_scale, True, bbox_curr_shift)
rand_search_region, rand_search_location, edge_spacing_x, edge_spacing_y = cropPadImage(bbox_curr_shift, self.img_curr_,
dbg=self._dbg, viz=self._viz)
bbox_curr_gt = self.bbox_curr_gt_
bbox_gt_recentered = BoundingBox(0, 0, 0, 0)
bbox_gt_recentered = bbox_curr_gt.recenter(rand_search_location, edge_spacing_x, edge_spacing_y, bbox_gt_recentered)
if dbg:
env = self._env + '_make_training_sample_bbshift'
viz = self._viz
curr_img_bbox = draw.bbox(self.img_curr_,
bbox_curr_gt)
recentered_img = draw.bbox(rand_search_region,
bbox_gt_recentered)
viz.plot_image_opencv(curr_img_bbox, 'curr shifted bbox', env=env)
viz.plot_image_opencv(recentered_img, 'recentered shifted bbox', env=env)
bbox_gt_recentered.scale(rand_search_region)
bbox_gt_scaled = bbox_gt_recentered
return rand_search_region, self.target_pad_, bbox_gt_scaled
def make_training_sample_BBShift(self):
"""
bb_params consists of shift, scale, min-max scale for shifting
the current bounding box
"""
default_bb_params = self.get_default_bb_params()
image_rand_focus, target_pad, bbox_gt_scaled = self.make_training_sample_BBShift_(default_bb_params, self._dbg)
return image_rand_focus, target_pad, bbox_gt_scaled
def reset(self, bbox_curr, bbox_prev, img_curr, img_prev):
"""This prepares the target image with enough context (search
region)
@bbox_curr: current frame bounding box
@bbox_prev: previous frame bounding box
@img_curr: current frame
@img_prev: previous frame
"""
target_pad, pad_image_location, _, _ = cropPadImage(bbox_prev,
img_prev, dbg=self._dbg, viz=self._viz)
self.img_curr_ = img_curr
self.bbox_curr_gt_ = bbox_curr
self.bbox_prev_gt_ = bbox_prev
self.target_pad_ = target_pad # crop kContextFactor * bbox_curr copied
if self._dbg:
env = self._env + '_targetpad'
search_dbg = draw.bbox(img_prev, bbox_prev, color=(0, 0, 255))
search_dbg = draw.bbox(search_dbg, pad_image_location)
self._viz.plot_image_opencv(search_dbg, 'target_region', env=env)
self._viz.plot_image_opencv(target_pad,
'cropped_target_region', env=env)
del search_dbg
| [
"goturn.helper.image_io._is_pil_image",
"goturn.helper.draw_util.draw.bbox",
"goturn.helper.image_proc.cropPadImage",
"goturn.helper.BoundingBox.BoundingBox",
"numpy.asarray",
"loguru.logger.error",
"goturn.helper.vis_utils.Visualizer",
"cv2.cvtColor",
"sys.exit",
"numpy.concatenate",
"cv2.resiz... | [((498, 564), 'loguru.logger.error', 'logger.error', (['"""Please run $source settings.sh from root directory"""'], {}), "('Please run $source settings.sh from root directory')\n", (510, 564), False, 'from loguru import logger\n'), ((569, 580), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (577, 580), False, 'import sys\n'), ((2095, 2163), 'goturn.helper.image_proc.cropPadImage', 'cropPadImage', (['curr_prior_tight', 'self.img_curr_', 'self._dbg', 'self._viz'], {}), '(curr_prior_tight, self.img_curr_, self._dbg, self._viz)\n', (2107, 2163), False, 'from goturn.helper.image_proc import cropPadImage\n'), ((2361, 2384), 'goturn.helper.BoundingBox.BoundingBox', 'BoundingBox', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (2372, 2384), False, 'from goturn.helper.BoundingBox import BoundingBox\n'), ((4563, 4592), 'goturn.helper.image_io._is_pil_image', 'image_io._is_pil_image', (['image'], {}), '(image)\n', (4585, 4592), False, 'from goturn.helper import image_io\n'), ((4644, 4674), 'goturn.helper.image_io._is_pil_image', 'image_io._is_pil_image', (['target'], {}), '(target)\n', (4666, 4674), False, 'from goturn.helper import image_io\n'), ((4734, 4764), 'cv2.resize', 'cv2.resize', (['target', '(227, 227)'], {}), '(target, (227, 227))\n', (4744, 4764), False, 'import cv2\n'), ((4782, 4821), 'cv2.cvtColor', 'cv2.cvtColor', (['target', 'cv2.COLOR_BGR2RGB'], {}), '(target, cv2.COLOR_BGR2RGB)\n', (4794, 4821), False, 'import cv2\n'), ((4838, 4867), 'cv2.resize', 'cv2.resize', (['image', '(227, 227)'], {}), '(image, (227, 227))\n', (4848, 4867), False, 'import cv2\n'), ((4884, 4922), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4896, 4922), False, 'import cv2\n'), ((5072, 5094), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['image', 'bbox'], {}), '(image, bbox)\n', (5081, 5094), False, 'from goturn.helper.draw_util import draw\n'), ((5109, 5185), 'numpy.concatenate', 'np.concatenate', 
(['(target[np.newaxis, ...], image_bb[np.newaxis, ...])'], {'axis': '(0)'}), '((target[np.newaxis, ...], image_bb[np.newaxis, ...]), axis=0)\n', (5123, 5185), True, 'import numpy as np\n'), ((5200, 5231), 'numpy.transpose', 'np.transpose', (['out', '[0, 3, 1, 2]'], {}), '(out, [0, 3, 1, 2])\n', (5212, 5231), True, 'import numpy as np\n'), ((5798, 5821), 'goturn.helper.BoundingBox.BoundingBox', 'BoundingBox', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (5809, 5821), False, 'from goturn.helper.BoundingBox import BoundingBox\n'), ((6073, 6148), 'goturn.helper.image_proc.cropPadImage', 'cropPadImage', (['bbox_curr_shift', 'self.img_curr_'], {'dbg': 'self._dbg', 'viz': 'self._viz'}), '(bbox_curr_shift, self.img_curr_, dbg=self._dbg, viz=self._viz)\n', (6085, 6148), False, 'from goturn.helper.image_proc import cropPadImage\n'), ((6317, 6340), 'goturn.helper.BoundingBox.BoundingBox', 'BoundingBox', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (6328, 6340), False, 'from goturn.helper.BoundingBox import BoundingBox\n'), ((7913, 7976), 'goturn.helper.image_proc.cropPadImage', 'cropPadImage', (['bbox_prev', 'img_prev'], {'dbg': 'self._dbg', 'viz': 'self._viz'}), '(bbox_prev, img_prev, dbg=self._dbg, viz=self._viz)\n', (7925, 7976), False, 'from goturn.helper.image_proc import cropPadImage\n'), ((1672, 1697), 'goturn.helper.vis_utils.Visualizer', 'Visualizer', ([], {'env': 'self._env'}), '(env=self._env)\n', (1682, 1697), False, 'from goturn.helper.vis_utils import Visualizer\n'), ((2618, 2665), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['self.img_curr_', 'curr_search_location'], {}), '(self.img_curr_, curr_search_location)\n', (2627, 2665), False, 'from goturn.helper.draw_util import draw\n'), ((2691, 2747), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['search_dbg', 'bbox_curr_gt'], {'color': '(255, 255, 0)'}), '(search_dbg, bbox_curr_gt, color=(255, 255, 0))\n', (2700, 2747), False, 'from goturn.helper.draw_util import draw\n'), ((2856, 
2931), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['curr_search_region', 'bbox_curr_gt_recentered'], {'color': '(255, 255, 0)'}), '(curr_search_region, bbox_curr_gt_recentered, color=(255, 255, 0))\n', (2865, 2931), False, 'from goturn.helper.draw_util import draw\n'), ((4614, 4631), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (4624, 4631), True, 'import numpy as np\n'), ((4697, 4715), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (4707, 4715), True, 'import numpy as np\n'), ((6601, 6640), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['self.img_curr_', 'bbox_curr_gt'], {}), '(self.img_curr_, bbox_curr_gt)\n', (6610, 6640), False, 'from goturn.helper.draw_util import draw\n'), ((6708, 6757), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['rand_search_region', 'bbox_gt_recentered'], {}), '(rand_search_region, bbox_gt_recentered)\n', (6717, 6757), False, 'from goturn.helper.draw_util import draw\n'), ((8320, 8369), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['img_prev', 'bbox_prev'], {'color': '(0, 0, 255)'}), '(img_prev, bbox_prev, color=(0, 0, 255))\n', (8329, 8369), False, 'from goturn.helper.draw_util import draw\n'), ((8395, 8436), 'goturn.helper.draw_util.draw.bbox', 'draw.bbox', (['search_dbg', 'pad_image_location'], {}), '(search_dbg, pad_image_location)\n', (8404, 8436), False, 'from goturn.helper.draw_util import draw\n')] |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function, division, absolute_import, unicode_literals
import os.path as op
def hcp_workflow(name='Evaluation_HCP', settings={},
                 map_metric=False, compute_fmb=False):
    """
    The regseg evaluation workflow for the human connectome project (HCP).

    :param str name: name of the top-level nipype workflow.
    :param dict settings: must provide ``data_dir``, ``subject_id``
        (iterable of subjects), ``out_csv`` and ``nthreads``.
        NOTE: the ``{}`` default is only read, never mutated, so the
        mutable-default pitfall does not apply here.
    :param bool map_metric: if True, append the energy-mapping
        sub-workflow.
    :param bool compute_fmb: if True, also evaluate the fieldmap-based
        (FMB) correction alongside regseg and T2B.
    :return: the assembled nipype Workflow.
    """
    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as niu
    from nipype.interfaces import io as nio
    from nipype.algorithms.mesh import ComputeMeshWarp, WarpPoints
    from nipype.algorithms.misc import AddCSVRow
    from nipype.workflows.dmri.fsl.artifacts import sdc_fmb

    from .. import data
    from ..interfaces.utility import (ExportSlices, TileSlicesGrid,
                                      SlicesGridplot)
    from .registration import regseg_wf, sdc_t2b
    from .preprocess import preprocess
    from .fieldmap import process_vsm
    from .dti import mrtrix_dti
    import evaluation as ev

    wf = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['subject_id', 'data_dir']), name='inputnode')
    inputnode.inputs.data_dir = settings['data_dir']
    inputnode.iterables = [('subject_id', settings['subject_id'])]

    # Generate the distorted set, including surfaces, and fit DTI models
    # on both the reference and the warped (distorted) DWI.
    pre = preprocess()

    rdti = mrtrix_dti('ReferenceDTI')
    wdti = mrtrix_dti('WarpedDTI')
    mdti = pe.Node(niu.Merge(2), name='MergeDTI')

    wf.connect([
        (inputnode, pre, [('subject_id', 'inputnode.subject_id'),
                          ('data_dir', 'inputnode.data_dir')]),
        (pre, rdti, [('outputnode.dwi', 'inputnode.in_dwi'),
                     ('outputnode.dwi_mask', 'inputnode.in_mask'),
                     ('outputnode.bvec', 'inputnode.in_bvec'),
                     ('outputnode.bval', 'inputnode.in_bval')]),
        (pre, wdti, [('outputnode.warped_dwi', 'inputnode.in_dwi'),
                     ('outputnode.warped_msk', 'inputnode.in_mask'),
                     ('outputnode.bvec', 'inputnode.in_bvec'),
                     ('outputnode.bval', 'inputnode.in_bval')]),
        (wdti, mdti, [('outputnode.fa', 'in1'),
                      ('outputnode.md', 'in2')]),
    ])

    # --- regseg evaluation branch ------------------------------------
    regseg = regseg_wf(usemask=True)
    regseg.inputs.inputnode.options = data.get('regseg_hcp')
    exprs = pe.Node(ExportSlices(slices=[38, 48, 57, 67, 76, 86],
                                 axis=['axial', 'sagittal']), name='ExportREGSEG')
    gridrs = pe.Node(SlicesGridplot(
        label=['regseg', 'regseg'], slices=[38, 48, 57, 67, 76, 86],
        view=['axial', 'sagittal']), name='GridPlotREGSEG')
    meshrs = pe.MapNode(ComputeMeshWarp(),
                          iterfield=['surface1', 'surface2'],
                          name='REGSEGSurfDistance')
    csvrs = pe.Node(AddCSVRow(in_file=settings['out_csv']),
                     name="REGSEGAddRow")
    csvrs.inputs.method = 'REGSEG'

    wf.connect([
        (mdti, regseg, [('out', 'inputnode.in_fixed')]),
        (pre, regseg, [('outputnode.surf', 'inputnode.in_surf'),
                       ('outputnode.warped_msk', 'inputnode.in_mask')]),
        (pre, exprs, [('outputnode.warped_surf', 'sgreen')]),
        (regseg, exprs, [('outputnode.out_surf', 'syellow')]),
        (wdti, exprs, [('outputnode.fa', 'reference')]),
        (exprs, gridrs, [('out_files', 'in_files')]),
        (pre, meshrs, [('outputnode.warped_surf', 'surface1')]),
        (regseg, meshrs, [('outputnode.out_surf', 'surface2')]),
        (inputnode, csvrs, [('subject_id', 'subject_id')]),
        (meshrs, csvrs, [('distance', 'surf_dist')])
    ])

    # --- optional FMB (fieldmap-based) correction branch --------------
    if compute_fmb:
        cmethod0 = sdc_fmb()
        selbmap = pe.Node(niu.Split(splits=[1, 1], squeeze=True),
                          name='SelectBmap')
        dfm = process_vsm()
        dfm.inputs.inputnode.scaling = 1.0
        dfm.inputs.inputnode.enc_dir = 'y-'
        wrpsurf = pe.MapNode(WarpPoints(), iterfield=['points'],
                             name='UnwarpSurfs')
        export0 = pe.Node(ExportSlices(slices=[38, 48, 57, 67, 76, 86],
                                       axis=['axial', 'sagittal']), name='ExportFMB')
        mesh0 = pe.MapNode(ComputeMeshWarp(),
                            iterfield=['surface1', 'surface2'],
                            name='FMBSurfDistance')
        grid0 = pe.Node(SlicesGridplot(
            label=['FMB']*2, slices=[38, 48, 57, 67, 76, 86],
            view=['axial', 'sagittal']), name='GridPlotFMB')
        csv0 = pe.Node(AddCSVRow(in_file=settings['out_csv']),
                       name="FMBAddRow")
        csv0.inputs.method = 'FMB'

        wf.connect([
            (pre, cmethod0, [
                ('outputnode.warped_dwi', 'inputnode.in_file'),
                ('outputnode.warped_msk', 'inputnode.in_mask'),
                ('outputnode.bval', 'inputnode.in_bval'),
                ('outputnode.mr_param', 'inputnode.settings')]),
            (pre, selbmap, [('outputnode.bmap_wrapped', 'inlist')]),
            (selbmap, cmethod0, [('out1', 'inputnode.bmap_mag'),
                                 ('out2', 'inputnode.bmap_pha')]),
            (cmethod0, dfm, [('outputnode.out_vsm', 'inputnode.vsm')]),
            (pre, dfm, [
                ('outputnode.warped_msk', 'inputnode.reference')]),
            (dfm, wrpsurf, [('outputnode.dfm', 'warp')]),
            # BUG FIX: the original lacked the trailing comma on the next
            # entry, which made Python *call* that tuple with the
            # following tuples as arguments -> TypeError at runtime.
            (pre, wrpsurf, [('outputnode.surf', 'points')]),
            (wrpsurf, export0, [('out_points', 'syellow')]),
            (pre, export0, [('outputnode.warped_surf', 'sgreen')]),
            (wdti, export0, [('outputnode.fa', 'reference')]),
            (export0, grid0, [('out_files', 'in_files')]),
            (pre, mesh0, [('outputnode.warped_surf', 'surface1')]),
            (wrpsurf, mesh0, [('out_points', 'surface2')]),
            (inputnode, csv0, [('subject_id', 'subject_id')]),
            (mesh0, csv0, [('distance', 'surf_dist')])
        ])

    # --- T2B (T2-registration-based) correction branch ----------------
    cmethod1 = sdc_t2b(num_threads=settings['nthreads'])
    export1 = pe.Node(ExportSlices(slices=[38, 48, 57, 67, 76, 86],
                                   axis=['axial', 'sagittal']), name='ExportT2B')
    grid1 = pe.Node(SlicesGridplot(
        label=['T2B']*2, slices=[38, 48, 57, 67, 76, 86],
        view=['axial', 'sagittal']), name='GridPlotT2B')
    mesh1 = pe.MapNode(ComputeMeshWarp(),
                        iterfield=['surface1', 'surface2'],
                        name='T2BSurfDistance')
    csv1 = pe.Node(AddCSVRow(in_file=settings['out_csv']),
                   name="T2BAddRow")
    csv1.inputs.method = 'T2B'

    wf.connect([
        (pre, cmethod1, [
            ('outputnode.warped_dwi', 'inputnode.in_dwi'),
            ('outputnode.warped_msk', 'inputnode.dwi_mask'),
            ('outputnode.t2w_brain', 'inputnode.in_t2w'),
            ('outputnode.t1w_mask', 'inputnode.t2w_mask'),
            ('outputnode.surf', 'inputnode.in_surf'),
            ('outputnode.bval', 'inputnode.in_bval'),
            ('outputnode.mr_param', 'inputnode.in_param')]),
        (cmethod1, export1, [('outputnode.out_surf', 'syellow')]),
        (pre, export1, [('outputnode.warped_surf', 'sgreen')]),
        (wdti, export1, [('outputnode.fa', 'reference')]),
        (export1, grid1, [('out_files', 'in_files')]),
        (pre, mesh1, [('outputnode.warped_surf', 'surface1')]),
        (cmethod1, mesh1, [('outputnode.out_surf', 'surface2')]),
        (inputnode, csv1, [('subject_id', 'subject_id')]),
        (mesh1, csv1, [('distance', 'surf_dist')])
    ])

    # --- tile the per-method grid plots and record them in a CSV ------
    tile = pe.Node(TileSlicesGrid(), name='TileGridplots')
    csvtile = pe.Node(AddCSVRow(
        in_file=op.join(op.dirname(settings['out_csv']), 'tiles.csv')),
        name="TileAddRow")

    wf.connect([
        (inputnode, tile, [('subject_id', 'out_file')]),
        (gridrs, tile, [('out_file', 'in_reference')]),
        (grid1, tile, [('out_file', 'in_competing')]),
        (tile, csvtile, [('out_file', 'names')])
    ])

    if map_metric:
        out_csv = op.abspath(op.join(name, 'energiesmapping.csv'))
        mapen = ev.map_energy(out_csv=out_csv)
        wf.connect([
            (inputnode, mapen, [('subject_id', 'inputnode.subject_id')]),
            (regseg, mapen, [('outputnode.out_enh', 'inputnode.reference'),
                             ('outputnode.reg_msk', 'inputnode.in_mask')]),
            (pre, mapen, [
                ('outputnode.warped_surf', 'inputnode.surfaces0'),
                ('outputnode.surf', 'inputnode.surfaces1')])
        ])

    return wf
| [
"nipype.algorithms.mesh.ComputeMeshWarp",
"nipype.pipeline.engine.Workflow",
"nipype.interfaces.utility.Merge",
"nipype.algorithms.misc.AddCSVRow",
"nipype.interfaces.utility.Split",
"os.path.join",
"nipype.workflows.dmri.fsl.artifacts.sdc_fmb",
"evaluation.map_energy",
"os.path.dirname",
"nipype.... | [((1099, 1121), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (1110, 1121), True, 'from nipype.pipeline import engine as pe\n'), ((1147, 1203), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['subject_id', 'data_dir']"}), "(fields=['subject_id', 'data_dir'])\n", (1168, 1203), True, 'from nipype.interfaces import utility as niu\n'), ((1521, 1533), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (1530, 1533), True, 'from nipype.interfaces import utility as niu\n'), ((2799, 2816), 'nipype.algorithms.mesh.ComputeMeshWarp', 'ComputeMeshWarp', ([], {}), '()\n', (2814, 2816), False, 'from nipype.algorithms.mesh import ComputeMeshWarp, WarpPoints\n'), ((2949, 2987), 'nipype.algorithms.misc.AddCSVRow', 'AddCSVRow', ([], {'in_file': "settings['out_csv']"}), "(in_file=settings['out_csv'])\n", (2958, 2987), False, 'from nipype.algorithms.misc import AddCSVRow\n'), ((3856, 3865), 'nipype.workflows.dmri.fsl.artifacts.sdc_fmb', 'sdc_fmb', ([], {}), '()\n', (3863, 3865), False, 'from nipype.workflows.dmri.fsl.artifacts import sdc_fmb\n'), ((6572, 6589), 'nipype.algorithms.mesh.ComputeMeshWarp', 'ComputeMeshWarp', ([], {}), '()\n', (6587, 6589), False, 'from nipype.algorithms.mesh import ComputeMeshWarp, WarpPoints\n'), ((6716, 6754), 'nipype.algorithms.misc.AddCSVRow', 'AddCSVRow', ([], {'in_file': "settings['out_csv']"}), "(in_file=settings['out_csv'])\n", (6725, 6754), False, 'from nipype.algorithms.misc import AddCSVRow\n'), ((8379, 8409), 'evaluation.map_energy', 'ev.map_energy', ([], {'out_csv': 'out_csv'}), '(out_csv=out_csv)\n', (8392, 8409), True, 'import evaluation as ev\n'), ((3892, 3930), 'nipype.interfaces.utility.Split', 'niu.Split', ([], {'splits': '[1, 1]', 'squeeze': '(True)'}), '(splits=[1, 1], squeeze=True)\n', (3901, 3930), True, 'from nipype.interfaces import utility as niu\n'), ((4121, 4133), 'nipype.algorithms.mesh.WarpPoints', 
'WarpPoints', ([], {}), '()\n', (4131, 4133), False, 'from nipype.algorithms.mesh import ComputeMeshWarp, WarpPoints\n'), ((4378, 4395), 'nipype.algorithms.mesh.ComputeMeshWarp', 'ComputeMeshWarp', ([], {}), '()\n', (4393, 4395), False, 'from nipype.algorithms.mesh import ComputeMeshWarp, WarpPoints\n'), ((4697, 4735), 'nipype.algorithms.misc.AddCSVRow', 'AddCSVRow', ([], {'in_file': "settings['out_csv']"}), "(in_file=settings['out_csv'])\n", (4706, 4735), False, 'from nipype.algorithms.misc import AddCSVRow\n'), ((8325, 8361), 'os.path.join', 'op.join', (['name', '"""energiesmapping.csv"""'], {}), "(name, 'energiesmapping.csv')\n", (8332, 8361), True, 'import os.path as op\n'), ((7934, 7965), 'os.path.dirname', 'op.dirname', (["settings['out_csv']"], {}), "(settings['out_csv'])\n", (7944, 7965), True, 'import os.path as op\n')] |
from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler
from config import settings, command as cmd
from src import bot
import logging
updater = Updater(token=settings.BOT_TOKEN)
dispatcher = updater.dispatcher
# Rebind the module name to the bot instance (as the original did).
bot = bot.ScheduleBot()

# Handlers are registered in order; earlier entries are matched first.
_handlers = [
    # ********* BASE DISPATCH *********
    CommandHandler(cmd.START, bot.menu_before_register),
    RegexHandler(cmd.MAIN_MENU, bot.menu_after_register),
    # ********* AFTER REGISTER DISPATCH *********
    RegexHandler(cmd.TODAY, bot.get_today),
    RegexHandler(cmd.TOMORROW, bot.get_tomorrow),
    RegexHandler(cmd.DAY_AFTER_TOMORROW, bot.get_day_after_tomorrow),
    RegexHandler(cmd.WEEK, bot.get_week),
    RegexHandler(cmd.TWO_WEEK, bot.get_two_week),
    # ********* BEFORE REGISTER DISPATCH *********
    RegexHandler(cmd.ABOUT_BOT, bot.about),
    CallbackQueryHandler(bot.init_search_field),
    MessageHandler(Filters.text, bot.register),
    # ********* ADMIN DISPATCH *********
    CommandHandler(cmd.ADMIN_PANEL, bot.admin_panel),
    CommandHandler(cmd.GET_MY_ID, bot.get_my_id),
]
for _handler in _handlers:
    dispatcher.add_handler(_handler)

# ========== LOGGING ==========
if settings.LOGGING_ENABLE:
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)

    def error(bot, update, error):
        """Log any update that raised inside a handler."""
        logger.warning('Update "%s" caused error "%s"', update, error)

    dispatcher.add_error_handler(error)
| [
"logging.basicConfig",
"logging.getLogger",
"telegram.ext.RegexHandler",
"src.bot.ScheduleBot",
"telegram.ext.MessageHandler",
"telegram.ext.CallbackQueryHandler",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((200, 233), 'telegram.ext.Updater', 'Updater', ([], {'token': 'settings.BOT_TOKEN'}), '(token=settings.BOT_TOKEN)\n', (207, 233), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((272, 289), 'src.bot.ScheduleBot', 'bot.ScheduleBot', ([], {}), '()\n', (287, 289), False, 'from src import bot\n'), ((341, 392), 'telegram.ext.CommandHandler', 'CommandHandler', (['cmd.START', 'bot.menu_before_register'], {}), '(cmd.START, bot.menu_before_register)\n', (355, 392), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((446, 498), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.MAIN_MENU', 'bot.menu_after_register'], {}), '(cmd.MAIN_MENU, bot.menu_after_register)\n', (458, 498), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((599, 637), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.TODAY', 'bot.get_today'], {}), '(cmd.TODAY, bot.get_today)\n', (611, 637), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((690, 734), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.TOMORROW', 'bot.get_tomorrow'], {}), '(cmd.TOMORROW, bot.get_tomorrow)\n', (702, 734), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((789, 853), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.DAY_AFTER_TOMORROW', 'bot.get_day_after_tomorrow'], {}), '(cmd.DAY_AFTER_TOMORROW, bot.get_day_after_tomorrow)\n', (801, 853), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((904, 940), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.WEEK', 'bot.get_week'], {}), '(cmd.WEEK, bot.get_week)\n', (916, 940), False, 'from telegram.ext import Updater, 
CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((992, 1036), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.TWO_WEEK', 'bot.get_two_week'], {}), '(cmd.TWO_WEEK, bot.get_two_week)\n', (1004, 1036), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1141, 1179), 'telegram.ext.RegexHandler', 'RegexHandler', (['cmd.ABOUT_BOT', 'bot.about'], {}), '(cmd.ABOUT_BOT, bot.about)\n', (1153, 1179), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1240, 1283), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['bot.init_search_field'], {}), '(bot.init_search_field)\n', (1260, 1283), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1343, 1385), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'bot.register'], {}), '(Filters.text, bot.register)\n', (1357, 1385), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1476, 1524), 'telegram.ext.CommandHandler', 'CommandHandler', (['cmd.ADMIN_PANEL', 'bot.admin_panel'], {}), '(cmd.ADMIN_PANEL, bot.admin_panel)\n', (1490, 1524), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1573, 1617), 'telegram.ext.CommandHandler', 'CommandHandler', (['cmd.GET_MY_ID', 'bot.get_my_id'], {}), '(cmd.GET_MY_ID, bot.get_my_id)\n', (1587, 1617), False, 'from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler\n'), ((1717, 1824), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", 
(1736, 1824), False, 'import logging\n'), ((1853, 1880), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1870, 1880), False, 'import logging\n')] |
import argparse
import os
from ..common_util.misc import makedirs
from ..video_inpainting import create_padded_masked_video_dataset
def main(frames_dataset_path, masks_dataset_path, final_dataset_path):
    """Extract each padded/masked video into its own subdirectory.

    :param frames_dataset_path: root of the frame dataset.
    :param masks_dataset_path: root of the mask dataset.
    :param final_dataset_path: destination root; one directory per video.
    """
    dataset = create_padded_masked_video_dataset(frames_dataset_path, masks_dataset_path)
    for index in range(len(dataset)):
        name = dataset.get_video_name(index)
        target_dir = os.path.join(final_dataset_path, name)
        makedirs(target_dir)
        dataset.extract_masked_video(index, target_dir)
dataset.extract_masked_video(i, extract_video_path)
if __name__ == '__main__':
    # CLI entry point: three positional dataset paths, forwarded to main().
    parser = argparse.ArgumentParser()
    for positional in ('frames_dataset_path', 'masks_dataset_path',
                       'final_dataset_path'):
        parser.add_argument(positional, type=str)
    parsed = parser.parse_args()
    main(**vars(parsed))
| [
"os.path.join",
"argparse.ArgumentParser"
] | [((590, 615), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (613, 615), False, 'import argparse\n'), ((406, 450), 'os.path.join', 'os.path.join', (['final_dataset_path', 'video_name'], {}), '(final_dataset_path, video_name)\n', (418, 450), False, 'import os\n')] |
import os
import h5py
import numpy as np
import pandas as pd
import multiprocessing as mp
class Scaler:
    """Standard-score (z-score) normalizer.

    Holds a global mean and standard deviation and maps data between the
    raw and the normalized domain.
    """

    def __init__(self, mean=None, std=None):
        # Either supply the statistics up front or call fit() later.
        self.mean = mean
        self.std = std

    def fit(self, data):
        """Estimate mean and std over all elements of *data*."""
        self.mean = np.mean(data)
        self.std = np.std(data)

    def set_mean(self, mean):
        """Override the stored mean."""
        self.mean = mean

    def set_std(self, std):
        """Override the stored standard deviation."""
        self.std = std

    def transform(self, data):
        """Map raw values to z-scores."""
        mu, sigma = self.mean, self.std
        return (data - mu) / sigma

    def inverse_transform(self, data):
        """Map z-scores back to raw values."""
        mu, sigma = self.mean, self.std
        return data * sigma + mu
def load_h5(filename, keywords):
    """Read the named datasets from an HDF5 file.

    Returns a single array when *keywords* has one entry, otherwise a
    list of arrays in the same order as *keywords*.
    """
    handle = h5py.File(filename, 'r')
    arrays = [np.array(handle[key]) for key in keywords]
    handle.close()
    if len(arrays) == 1:
        return arrays[0]
    return arrays
def write_h5(filename, d):
    """Write every key/value pair of dict *d* as a dataset in a new HDF5 file."""
    handle = h5py.File(filename, 'w')
    for name, array in d.items():
        handle.create_dataset(name, data=array)
    handle.close()
def from_str_to_np(s):
    """Parse a comma-separated string of integers into an (n, 3) int32 array.

    Each consecutive triple of values becomes one row (presumably one
    point/record per trajectory frame).
    """
    # np.fromstring(..., sep=',') is deprecated for text parsing; parse the
    # tokens explicitly instead. int() tolerates surrounding whitespace
    # (e.g. a trailing newline), and empty tokens (trailing comma) are
    # skipped, matching the old fromstring behavior.
    values = [int(tok) for tok in s.split(',') if tok.strip()]
    arr = np.array(values, dtype=np.int32)
    return arr.reshape(-1, 3)
def load_trajectory(filename):
    """Load a trajectory file (one comma-separated line per frame).

    Returns a list of (n, 3) int32 arrays, one per line, parsed in
    parallel with a process pool.
    """
    with open(filename) as f:
        lines = f.readlines()
    # Use the pool as a context manager so the worker processes are
    # terminated and reaped; the original never closed/joined the pool,
    # leaking processes on every call.
    with mp.Pool() as pool:
        trajectory = pool.map(from_str_to_np, lines)
    return trajectory
def fill_missing(data):
    """Fill NaNs in a (T, N, D) array along the time axis.

    Each (node, feature) column is forward-filled, then back-filled; any
    column that is entirely NaN is set to 0. Returns a new (T, N, D)
    array.
    """
    T, N, D = data.shape
    frame = pd.DataFrame(np.reshape(data, (T, N * D)))
    # ffill()/bfill() replace the deprecated fillna(method='pad'/'bfill').
    frame = frame.ffill()
    frame = frame.bfill()
    filled = np.reshape(frame.values, (T, N, D))
    filled[np.isnan(filled)] = 0
    return filled
"numpy.mean",
"numpy.reshape",
"h5py.File",
"numpy.array",
"numpy.isnan",
"multiprocessing.Pool",
"numpy.std",
"pandas.DataFrame",
"numpy.fromstring"
] | [((562, 586), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (571, 586), False, 'import h5py\n'), ((758, 782), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (767, 782), False, 'import h5py\n'), ((897, 938), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': 'np.int32', 'sep': '""","""'}), "(s, dtype=np.int32, sep=',')\n", (910, 938), True, 'import numpy as np\n'), ((1078, 1087), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (1085, 1087), True, 'import multiprocessing as mp\n'), ((1216, 1244), 'numpy.reshape', 'np.reshape', (['data', '(T, N * D)'], {}), '(data, (T, N * D))\n', (1226, 1244), True, 'import numpy as np\n'), ((1252, 1270), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1264, 1270), True, 'import pandas as pd\n'), ((1363, 1390), 'numpy.reshape', 'np.reshape', (['data', '(T, N, D)'], {}), '(data, (T, N, D))\n', (1373, 1390), True, 'import numpy as np\n'), ((232, 245), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (239, 245), True, 'import numpy as np\n'), ((260, 272), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (266, 272), True, 'import numpy as np\n'), ((1398, 1412), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (1406, 1412), True, 'import numpy as np\n'), ((638, 655), 'numpy.array', 'np.array', (['f[name]'], {}), '(f[name])\n', (646, 655), True, 'import numpy as np\n')] |
from django.conf import settings
from sklearn.svm import SVC
from systems.plugins.index import BaseProvider
from systems.remote_ai.tfidf_trainer import TfidfTrainer
class Provider(BaseProvider('remote_ai_model', 'tdidf_svc')):
    # NOTE(review): the plugin key 'tdidf_svc' looks like a typo for
    # 'tfidf_svc', but it is a registered name — do not "fix" the spelling.

    def tfidf_processor_class(self):
        """Return the trainer class used to vectorize text predictors."""
        return TfidfTrainer

    def init_model(self):
        """Instantiate the TF-IDF trainer bound to this provider."""
        self.tfidf_trainer = self.tfidf_processor_class()(self)

    def build_model(self):
        """Create the SVC classifier (probability outputs enabled)."""
        return SVC(probability=True,
                   kernel=self.field_kernel,
                   random_state=self.field_random_state)

    def train_model(self, predictors, targets):
        """Fit the TF-IDF vectorizer, then the classifier on its vectors."""
        self.command.notice("Fitting TFIDF trainer")
        self.tfidf_trainer.fit(predictors)

        self.command.notice("Fitting classification model")
        vectors = self.tfidf_trainer.transform(predictors)
        self.model.fit(vectors, targets)

    def predict_model(self, data):
        """Return the positive-class probability for each input row."""
        vectors = self.tfidf_trainer.transform(data)
        return self.model.predict_proba(vectors)[:, 1]

    def normalize_predictions(self, predictions):
        """Binarize probabilities against the configured threshold."""
        above = predictions > settings.REMOTE_AI_PREDICTION_THRESHOLD
        return above.astype(int)

    def classify_prediction(self, prediction):
        """Map a probability to a confidence label, or None if inconclusive."""
        threshold = settings.REMOTE_AI_PREDICTION_THRESHOLD
        high = settings.REMOTE_AI_PREDICTION_HIGH_CONFIDENCE

        if prediction >= threshold:
            if prediction >= high:
                return 'ELIGIBLE high confidence'
            return 'ELIGIBLE medium confidence'
        if 1 - prediction >= threshold:
            if 1 - prediction >= high:
                return 'INELIGIBLE high confidence'
            return 'INELIGIBLE medium confidence'
        return None
| [
"sklearn.svm.SVC",
"systems.plugins.index.BaseProvider"
] | [((183, 227), 'systems.plugins.index.BaseProvider', 'BaseProvider', (['"""remote_ai_model"""', '"""tdidf_svc"""'], {}), "('remote_ai_model', 'tdidf_svc')\n", (195, 227), False, 'from systems.plugins.index import BaseProvider\n'), ((431, 521), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)', 'kernel': 'self.field_kernel', 'random_state': 'self.field_random_state'}), '(probability=True, kernel=self.field_kernel, random_state=self.\n field_random_state)\n', (434, 521), False, 'from sklearn.svm import SVC\n')] |
# -*- coding: utf-8 -*-
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
### Alias : cx_Freeze & PyInstaller builder & Last Modded : 2022.01.10. ###
Coded with Python 3.10 Grammar by IRACK000
# PyInstaller 3.10.0 빌드 오류 해결 관련
# https://github.com/pyinstaller/pyinstaller/issues/6301
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import os
import platform
if __name__ == "__main__":
tool = "cx_Freeze"
if input("1. cx_Freeze\n2. PyInstaller\n빌드에 사용할 툴을 선택하세요. : ") == "2":
tool = "PyInstaller"
if platform.system() == "Windows":
os.system(f"TITLE {tool} builder")
elif platform.system() == "Linux":
os.system("sudo apt-get install patchelf")
from src.main.cli.apis import check_py_version # 상대 경로 import; 파일 위치에 따라 코드가 수정 되어야 함.
check_py_version()
version = input("\n1. 컴퓨터 기본 파이썬 버전(python)\n2. 파이썬 3.8.10(python3.8, Windows 7용)\n빌드에 사용할 파이썬 버전을 선택하세요. : ")
py = "python"
if version == "1":
opt = input("\n1. python\n2. python3\n3. 직접 입력\n환경변수에 등록된 python3의 호출 키워드를 선택하세요. : ")
py += ("3" if opt == "2" else "" if opt == "1" else input("버전을 입력하세요(python3.?) : ").replace("python", ""))
if "python3.8" in py:
raise Exception("Do not use python3.8 with build option 1.")
elif version == "2":
py += "3.8"
print("\n\nWindows 7 호환성을 위해 Pyside버전을 2버전으로 변경합니다. 스크립트가 정상 종료되지 못한 경우 수동으로 src/main/qt_core.py 파일을 원래대로 되돌려주세요.\n\n")
os.rename("src/main/gui/qt_core.py", "src/main/gui/qt_core.py.bak")
with open("src/main/gui/qt_core.py.bak", "rt", encoding='utf-8') as ori, \
open("src/main/gui/qt_core.py", "wt", encoding='utf-8') as new:
for line in ori.readlines():
if "SUPPORT_WINDOWS_7" in line:
print(line)
line = line.replace("False", "True")
print(">> " + line, end="\n\n\n")
new.write(line)
print("선택된 파이썬 버전 : ", end='', flush=True)
if 1 == os.system(f"{py} --version"):
raise Exception(f"파이썬 버전({py})을 찾을 수 없습니다.")
os.system(f"{py} -m pip install wheel")
os.system(f"{py} -m pip install --upgrade pip")
os.system(f"{py} -m pip install tinyaes cryptography " + ("PySide2" if version == "2" else "PySide6"))
if tool == "cx_Freeze":
print("cx_Freeze 설치")
os.system(f"{py} -m pip install cx_Freeze pywin32")
opt = input("\n1. 빌드만\n2. MSI 만들기(실제 배포시에는 다른 패키징 방식을 사용하세요!)\n작업을 선택하세요. : ")
os.system(f"{py} ./setup.py " + ("bdist_msi " if opt == "2" else "build " + py))
else:
if input("PyInstaller의 버전을 변경할까요? (y to yes) : ") == "y":
os.system(f"{py} -m pip uninstall PyInstaller")
if input("PyInstaller의 dev 버전을 사용해서 빌드할까요? (y to Yes) : ") == "y":
os.system(f"{py} -m pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip")
else:
os.system(f"{py} -m pip install pyinstaller")
else:
os.system(f"{py} -m pip install pyinstaller")
# 새로운 setup.spec 생성하려면 주석 처리된 것을 이용하세요.
# os.system(f"{py} -m PyInstaller -n newName --icon=icon.ico --hidden-import PySide6.QtSvg src/main/main.py --clean")
os.system(f"{py} -m PyInstaller setup.spec --noconfirm")
if version == "2":
os.remove("src/main/gui/qt_core.py")
os.rename("src/main/gui/qt_core.py.bak", "src/main/gui/qt_core.py")
print("\nqt_core.py 파일을 원래대로 되돌렸습니다.")
input("\n작업이 종료되었습니다. 오류 로그가 있는지 확인하세요. 아무 키나 눌러서 종료합니다. ")
| [
"os.rename",
"platform.system",
"src.main.cli.apis.check_py_version",
"os.system",
"os.remove"
] | [((842, 860), 'src.main.cli.apis.check_py_version', 'check_py_version', ([], {}), '()\n', (858, 860), False, 'from src.main.cli.apis import check_py_version\n'), ((2155, 2194), 'os.system', 'os.system', (['f"""{py} -m pip install wheel"""'], {}), "(f'{py} -m pip install wheel')\n", (2164, 2194), False, 'import os\n'), ((2199, 2246), 'os.system', 'os.system', (['f"""{py} -m pip install --upgrade pip"""'], {}), "(f'{py} -m pip install --upgrade pip')\n", (2208, 2246), False, 'import os\n'), ((2251, 2358), 'os.system', 'os.system', (["(f'{py} -m pip install tinyaes cryptography ' + ('PySide2' if version ==\n '2' else 'PySide6'))"], {}), "(f'{py} -m pip install tinyaes cryptography ' + ('PySide2' if \n version == '2' else 'PySide6'))\n", (2260, 2358), False, 'import os\n'), ((580, 597), 'platform.system', 'platform.system', ([], {}), '()\n', (595, 597), False, 'import platform\n'), ((620, 654), 'os.system', 'os.system', (['f"""TITLE {tool} builder"""'], {}), "(f'TITLE {tool} builder')\n", (629, 654), False, 'import os\n'), ((2067, 2095), 'os.system', 'os.system', (['f"""{py} --version"""'], {}), "(f'{py} --version')\n", (2076, 2095), False, 'import os\n'), ((2420, 2471), 'os.system', 'os.system', (['f"""{py} -m pip install cx_Freeze pywin32"""'], {}), "(f'{py} -m pip install cx_Freeze pywin32')\n", (2429, 2471), False, 'import os\n'), ((2568, 2653), 'os.system', 'os.system', (["(f'{py} ./setup.py ' + ('bdist_msi ' if opt == '2' else 'build ' + py))"], {}), "(f'{py} ./setup.py ' + ('bdist_msi ' if opt == '2' else 'build ' + py)\n )\n", (2577, 2653), False, 'import os\n'), ((3311, 3367), 'os.system', 'os.system', (['f"""{py} -m PyInstaller setup.spec --noconfirm"""'], {}), "(f'{py} -m PyInstaller setup.spec --noconfirm')\n", (3320, 3367), False, 'import os\n'), ((3400, 3436), 'os.remove', 'os.remove', (['"""src/main/gui/qt_core.py"""'], {}), "('src/main/gui/qt_core.py')\n", (3409, 3436), False, 'import os\n'), ((3445, 3512), 'os.rename', 'os.rename', 
(['"""src/main/gui/qt_core.py.bak"""', '"""src/main/gui/qt_core.py"""'], {}), "('src/main/gui/qt_core.py.bak', 'src/main/gui/qt_core.py')\n", (3454, 3512), False, 'import os\n'), ((664, 681), 'platform.system', 'platform.system', ([], {}), '()\n', (679, 681), False, 'import platform\n'), ((702, 744), 'os.system', 'os.system', (['"""sudo apt-get install patchelf"""'], {}), "('sudo apt-get install patchelf')\n", (711, 744), False, 'import os\n'), ((1513, 1580), 'os.rename', 'os.rename', (['"""src/main/gui/qt_core.py"""', '"""src/main/gui/qt_core.py.bak"""'], {}), "('src/main/gui/qt_core.py', 'src/main/gui/qt_core.py.bak')\n", (1522, 1580), False, 'import os\n'), ((2737, 2784), 'os.system', 'os.system', (['f"""{py} -m pip uninstall PyInstaller"""'], {}), "(f'{py} -m pip uninstall PyInstaller')\n", (2746, 2784), False, 'import os\n'), ((3083, 3128), 'os.system', 'os.system', (['f"""{py} -m pip install pyinstaller"""'], {}), "(f'{py} -m pip install pyinstaller')\n", (3092, 3128), False, 'import os\n'), ((2880, 2986), 'os.system', 'os.system', (['f"""{py} -m pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip"""'], {}), "(\n f'{py} -m pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip'\n )\n", (2889, 2986), False, 'import os\n'), ((3011, 3056), 'os.system', 'os.system', (['f"""{py} -m pip install pyinstaller"""'], {}), "(f'{py} -m pip install pyinstaller')\n", (3020, 3056), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
lantz.drivers.aeroflex.a2023a
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the drivers for an signal generator.
Sources::
- Aeroflex 2023a Manual.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import enum
from lantz.core import Feat, Action, MessageBasedDriver
from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat
class A2023a(MessageBasedDriver):
    """Aeroflex Test Solutions 2023A 9 kHz to 1.2 GHz Signal Generator.
    """

    #: Serial defaults. NOTE(review): read_termination chr(256) is a non-ASCII
    #: sentinel -- confirm against the instrument's actual terminator.
    DEFAULTS = {'ASRL': {'write_termination': '\n',
                         'read_termination': chr(256)}}

    #: Carrier frequency.
    frequency = QuantityFeat(('CFRQ?', ':CFRQ:VALUE {0:f};{_}'),
                              'CFRQ:VALUE {0:f}HZ', units='Hz')

    #: RF amplitude.
    amplitude = QuantityFeat(('RFLV?', ':RFLV:UNITS {_};TYPE {_};VALUE {0:f};INC {_};<status>'),
                              'RFLV:VALUE {0:f}V', units='V')

    #: Offset amplitude.
    offset = QuantityFeat(('RFLV:OFFS?', ':RFLV:OFFS:VALUE {0:f};{_}'),
                          'RFLV:OFFS:VALUE {0:f}', units='V')

    #: Enable or disable the RF output
    output_enabled = BoolFeat('OUTPUT?', 'OUTPUT:{}', 'ENABLED', 'DISABLED')

    #: Phase offset
    phase = QuantityFeat(('CFRQ?', ':CFRQ:VALUE {:f}; INC {_};MODE {_}'), 'CFRQ:PHASE {}', units='degree')

    #: Available internal/external frequency standard sources.
    #  BUG FIX: the original subclassed the ``enum`` *module*, which raises a
    #  TypeError when the class body is executed; ``enum.Enum`` is the intended base.
    class FREQUENCY_STANDARD(enum.Enum):
        INT = 'INT'
        EXT10DIR = 'EXT10DIR'
        EXTIND = 'EXTIND'
        EXT10IND = 'EXT10IND'
        INT10OUT = 'INT10OUT'

    #: Set RF output level max.
    rflimit = QuantityFeat('RFLV:LIMIT?', 'RFLV:LIMIT {}')

    def remote(self, value):
        """Switch between remote (True) and local (False) control."""
        if value:
            self.write('^A')
        else:
            self.write('^D')

    @Action(units='ms')
    def expose(self, exposure_time=1):
        """Trigger an exposure of ``exposure_time`` milliseconds."""
        self.write('EXPOSE {}'.format(exposure_time))

    @Feat(values={True: 'on', False: 'off'})
    def time(self):
        # TODO: ?? (the command string sent here is empty)
        self.write('')
        return self.read()

    @time.setter
    def time(self, value):
        # FIXME: the format string has no '{}' placeholder, so ``value`` is
        # never transmitted -- confirm the intended instrument command.
        self.write("vlal ".format(value))

    def local_lockout(self, value):
        """Enable (True) or disable (False) the front-panel local lockout."""
        if value:
            self.write('^R')
        else:
            self.write('^P')

    def software_handshake(self, value):
        """Enable (True) or disable (False) software handshaking."""
        if value:
            self.write('^Q')
        else:
            self.write('^S')
if __name__ == '__main__':
    import argparse
    import lantz.log

    # NOTE(review): the description says 'Kentech HRI' but this file drives an
    # Aeroflex 2023A -- looks like a copy/paste leftover; confirm before release.
    parser = argparse.ArgumentParser(description='Test Kentech HRI')
    parser.add_argument('-i', '--interactive', action='store_true',
                        default=False, help='Show interactive GUI')
    parser.add_argument('-p', '--port', type=str, default='17',
                        help='Serial port to connect to')
    args = parser.parse_args()
    # Stream lantz debug output to the logging socket.
    lantz.log.log_to_socket(lantz.log.DEBUG)
    with A2023a.from_serial_port(args.port) as inst:
        if args.interactive:
            from lantz.ui.app import start_test_app
            start_test_app(inst)
        else:
            # Smoke test against the live instrument: frequency standard,
            # carrier frequency, RF level and phase attributes.
            print(inst.idn)
            inst.fstd = "EXT10DIR"
            print(inst.fstd)
            print(inst.freq)
            inst.freq = 41.006
            print(inst.rflevel)
            inst.rflevel = -13
            inst.phase=0
            print(inst.phase)
            inst.phase=30
            print(inst.phase)
            inst.phase=60
            print(inst.phase)
| [
"lantz.core.Feat",
"argparse.ArgumentParser",
"lantz.ui.app.start_test_app",
"lantz.core.mfeats.QuantityFeat",
"lantz.core.Action",
"lantz.core.mfeats.BoolFeat"
] | [((741, 827), 'lantz.core.mfeats.QuantityFeat', 'QuantityFeat', (["('CFRQ?', ':CFRQ:VALUE {0:f};{_}')", '"""CFRQ:VALUE {0:f}HZ"""'], {'units': '"""Hz"""'}), "(('CFRQ?', ':CFRQ:VALUE {0:f};{_}'), 'CFRQ:VALUE {0:f}HZ',\n units='Hz')\n", (753, 827), False, 'from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat\n'), ((891, 1011), 'lantz.core.mfeats.QuantityFeat', 'QuantityFeat', (["('RFLV?', ':RFLV:UNITS {_};TYPE {_};VALUE {0:f};INC {_};<status>')", '"""RFLV:VALUE {0:f}V"""'], {'units': '"""V"""'}), "(('RFLV?',\n ':RFLV:UNITS {_};TYPE {_};VALUE {0:f};INC {_};<status>'),\n 'RFLV:VALUE {0:f}V', units='V')\n", (903, 1011), False, 'from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat\n'), ((1072, 1170), 'lantz.core.mfeats.QuantityFeat', 'QuantityFeat', (["('RFLV:OFFS?', ':RFLV:OFFS:VALUE {0:f};{_}')", '"""RFLV:OFFS:VALUE {0:f}"""'], {'units': '"""V"""'}), "(('RFLV:OFFS?', ':RFLV:OFFS:VALUE {0:f};{_}'),\n 'RFLV:OFFS:VALUE {0:f}', units='V')\n", (1084, 1170), False, 'from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat\n'), ((1254, 1309), 'lantz.core.mfeats.BoolFeat', 'BoolFeat', (['"""OUTPUT?"""', '"""OUTPUT:{}"""', '"""ENABLED"""', '"""DISABLED"""'], {}), "('OUTPUT?', 'OUTPUT:{}', 'ENABLED', 'DISABLED')\n", (1262, 1309), False, 'from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat\n'), ((1343, 1441), 'lantz.core.mfeats.QuantityFeat', 'QuantityFeat', (["('CFRQ?', ':CFRQ:VALUE {:f}; INC {_};MODE {_}')", '"""CFRQ:PHASE {}"""'], {'units': '"""degree"""'}), "(('CFRQ?', ':CFRQ:VALUE {:f}; INC {_};MODE {_}'),\n 'CFRQ:PHASE {}', units='degree')\n", (1355, 1441), False, 'from lantz.core.mfeats import BoolFeat, QuantityFeat, QuantityDictFeat, EnumFeat\n'), ((1710, 1754), 'lantz.core.mfeats.QuantityFeat', 'QuantityFeat', (['"""RFLV:LIMIT?"""', '"""RFLV:LIMIT {}"""'], {}), "('RFLV:LIMIT?', 'RFLV:LIMIT {}')\n", (1722, 1754), False, 'from lantz.core.mfeats import BoolFeat, 
QuantityFeat, QuantityDictFeat, EnumFeat\n'), ((1881, 1899), 'lantz.core.Action', 'Action', ([], {'units': '"""ms"""'}), "(units='ms')\n", (1887, 1899), False, 'from lantz.core import Feat, Action, MessageBasedDriver\n'), ((1999, 2042), 'lantz.core.Feat', 'Feat', ([], {'values': "{(True): 'on', (False): 'off'}"}), "(values={(True): 'on', (False): 'off'})\n", (2003, 2042), False, 'from lantz.core import Feat, Action, MessageBasedDriver\n'), ((2558, 2613), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test Kentech HRI"""'}), "(description='Test Kentech HRI')\n", (2581, 2613), False, 'import argparse\n'), ((3095, 3115), 'lantz.ui.app.start_test_app', 'start_test_app', (['inst'], {}), '(inst)\n', (3109, 3115), False, 'from lantz.ui.app import start_test_app\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#AUTHOR: <NAME>
#DATE: Wed Apr 3 16:15:13 2019
#VERSION:
#PYTHON_VERSION: 3.6
'''
DESCRIPTION
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from neural_network import neural_network
# Six random stimuli in an 11-dimensional space; the last two columns are
# used as (x, y) plot coordinates below.
stimuli = np.random.rand(6, 11)
c = neural_network(stimuli=stimuli, threshold=0.05)
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches((12, 5))
# Left panel: stimuli positions (red) and the moving agent (blue).
ax1.scatter(stimuli[:, -2], stimuli[:, -1], s=10, c='r')
agent = ax1.scatter(0, 0, s=10, c='b')
ax1.legend(labels=['Stimuli', 'Agent'])
# Right panel: bar chart of the agent's current sensory inputs.
xlabels = ['Odor\n1', 'Odor\n2', 'Odor\n3', 'Odor\n4', 'Odor\n5',
           'Odor\n6', 'Taste\n1', 'Taste\n2', 'Taste\n3']
sensors = ax2.bar(xlabels, c.get_sensory_inputs())
ax2.set_title('Agent\'s sensory inputs')
def update(pos):
    """Animation callback: move the agent to *pos* and refresh the sensor bars."""
    agent.set_offsets(pos)
    readings = c.jump(pos)
    ax2.set_ylim([0, readings.max() + 1])
    for rect, value in zip(sensors, readings):
        rect.set_height(value)
    return agent, sensors
if __name__ == '__main__':
    # Animate the agent moving along the diagonal from (0.01, 0.01) to (0.99, 0.99).
    anim = FuncAnimation(fig, update, interval=1, repeat_delay=10,
                         frames=[(i/100, i/100) for i in range(1, 100)])
    anim.save('animation.gif', dpi=100, writer='pillow')
    plt.show()
| [
"matplotlib.pyplot.subplots",
"neural_network.neural_network",
"numpy.random.rand",
"matplotlib.pyplot.show"
] | [((298, 319), 'numpy.random.rand', 'np.random.rand', (['(6)', '(11)'], {}), '(6, 11)\n', (312, 319), True, 'import numpy as np\n'), ((324, 371), 'neural_network.neural_network', 'neural_network', ([], {'stimuli': 'stimuli', 'threshold': '(0.05)'}), '(stimuli=stimuli, threshold=0.05)\n', (338, 371), False, 'from neural_network import neural_network\n'), ((391, 409), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (403, 409), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1276, 1278), True, 'import matplotlib.pyplot as plt\n')] |
import pkg_resources
import sys
import logging
import click
import confight
from docker_volume_nest.defaults import DEFAULTS
from docker_volume_nest.util import exec_command
from .service import serve_app
logger = logging.getLogger(__name__)
def get_version():
    """Return the version string of the installed docker_volume_nest package."""
    distribution = pkg_resources.get_distribution('docker_volume_nest')
    return distribution.version
def cb_load_config(ctx, param, value):
    """Click callback: load the app config named *value*, falling back to DEFAULTS."""
    loaded = confight.load_app(value)
    if loaded:
        return loaded
    return DEFAULTS
# Shared --app-name option: the callback resolves the app name into a loaded
# config dict before the command body runs (parameter is named "config").
app_name_option = click.option(
    "--app-name", "config",
    default="docker_volume_nest", callback=cb_load_config
)
# Shared positional VOLNAME argument used by the volume commands below.
volname = click.argument("volname", nargs=1)
@click.group()
def cli():
    """Root command group for the docker-volume-nest CLI."""


@cli.command()
@app_name_option
def serve(config):
    """Serve a unix socket exposing the volume-plugin service API.

    The app name allows reusing the driver more than once on a machine.
    """
    serve_app(config)


@cli.command()
@app_name_option
def init(config):
    """Initialise the real (backing) storage."""
    exec_command(config, "init")


@cli.command()
@app_name_option
@volname
def create(config, volname):
    """Create volume VOLNAME, exportable via NFS."""
    exec_command(config, "create", volname)


@cli.command()
@app_name_option
def list(config):
    """Print the list of volumes created.

    NOTE(review): shadows the builtin ``list`` at module level; kept because
    the click command name is derived from the function name.
    """
    exec_command(config, "list")


@cli.command()
@app_name_option
@volname
def path(config, volname):
    """Print the NFS mount point of VOLNAME."""
    exec_command(config, "path", volname)
@cli.command()
@app_name_option
@volname
def remove(config, volname):
    """Destroy VOLNAME and its mounts."""
    exec_command(config, "remove", volname)


@cli.command()
@app_name_option
@volname
def mount(config, volname):
    """Mount the NFS mountpoint of VOLNAME."""
    exec_command(config, "mount", volname)


@cli.command()
@app_name_option
@volname
def unmount(config, volname):
    """Unmount the NFS mountpoint of VOLNAME."""
    exec_command(config, "unmount", volname)


@cli.command()
@app_name_option
def scope(config):
    """Run the driver's "scope" action."""
    exec_command(config, "scope")


@cli.command()
def version():
    """Print the program version."""
    click.echo(get_version())
def main():
    """CLI entry point: run the command group, logging any uncaught error."""
    try:
        cli(auto_envvar_prefix="NEST")
    except Exception as e:
        logger.exception("Uncatched exception")
        # BUG FIX: ``color=`` is echo's ANSI on/off switch (a bool), not the
        # text colour; red output requires ``fg="red"``.
        click.secho(str(e), fg="red")
        sys.exit(1)
| [
"logging.getLogger",
"click.argument",
"click.option",
"click.group",
"confight.load_app",
"sys.exit",
"docker_volume_nest.util.exec_command",
"pkg_resources.get_distribution"
] | [((218, 245), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (235, 245), False, 'import logging\n'), ((448, 544), 'click.option', 'click.option', (['"""--app-name"""', '"""config"""'], {'default': '"""docker_volume_nest"""', 'callback': 'cb_load_config'}), "('--app-name', 'config', default='docker_volume_nest', callback\n =cb_load_config)\n", (460, 544), False, 'import click\n'), ((561, 595), 'click.argument', 'click.argument', (['"""volname"""'], {'nargs': '(1)'}), "('volname', nargs=1)\n", (575, 595), False, 'import click\n'), ((599, 612), 'click.group', 'click.group', ([], {}), '()\n', (610, 612), False, 'import click\n'), ((1014, 1042), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""init"""'], {}), "(config, 'init')\n", (1026, 1042), False, 'from docker_volume_nest.util import exec_command\n'), ((1208, 1247), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""create"""', 'volname'], {}), "(config, 'create', volname)\n", (1220, 1247), False, 'from docker_volume_nest.util import exec_command\n'), ((1370, 1398), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""list"""'], {}), "(config, 'list')\n", (1382, 1398), False, 'from docker_volume_nest.util import exec_command\n'), ((1550, 1587), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""path"""', 'volname'], {}), "(config, 'path', volname)\n", (1562, 1587), False, 'from docker_volume_nest.util import exec_command\n'), ((1750, 1789), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""remove"""', 'volname'], {}), "(config, 'remove', volname)\n", (1762, 1789), False, 'from docker_volume_nest.util import exec_command\n'), ((1951, 1989), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""mount"""', 'volname'], {}), "(config, 'mount', volname)\n", (1963, 1989), False, 'from docker_volume_nest.util import exec_command\n'), ((2155, 2195), 
'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""unmount"""', 'volname'], {}), "(config, 'unmount', volname)\n", (2167, 2195), False, 'from docker_volume_nest.util import exec_command\n'), ((2253, 2282), 'docker_volume_nest.util.exec_command', 'exec_command', (['config', '"""scope"""'], {}), "(config, 'scope')\n", (2265, 2282), False, 'from docker_volume_nest.util import exec_command\n'), ((278, 330), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""docker_volume_nest"""'], {}), "('docker_volume_nest')\n", (308, 330), False, 'import pkg_resources\n'), ((391, 415), 'confight.load_app', 'confight.load_app', (['value'], {}), '(value)\n', (408, 415), False, 'import confight\n'), ((2587, 2598), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2595, 2598), False, 'import sys\n')] |
import pytest
from numerous.engine.variables import _VariableFactory, VariableType, VariableDescription
from tests.test_equations import *
@pytest.fixture
def constant():
    """An unbound CONSTANT-typed variable with initial value 0."""
    # BUG FIX: the tag said 'test_derivative' although the type is CONSTANT
    # (the tags of this fixture and `derivative` were swapped).
    var_desc = VariableDescription(tag='test_constant', initial_value=0,
                                   type=VariableType.CONSTANT)
    return _VariableFactory._create_from_variable_desc_unbound(variable_description=var_desc, initial_value=0)
@pytest.fixture
def derivative():
    """An unbound DERIVATIVE-typed variable with initial value 0."""
    # BUG FIX: the tag said 'test_constant' although the type is DERIVATIVE
    # (the tags of this fixture and `constant` were swapped).
    var_desc = VariableDescription(tag='test_derivative', initial_value=0,
                                   type=VariableType.DERIVATIVE)
    return _VariableFactory._create_from_variable_desc_unbound(variable_description=var_desc, initial_value=0)
def test_error_on_nonnumeric_state(constant):
    # TestEq_dictState (star-imported from tests.test_equations) declares a
    # dict-valued state, which must be rejected with ValueError.
    # NOTE(review): the `constant` fixture is requested but never used --
    # confirm whether it is needed for setup side effects.
    with pytest.raises(ValueError, match=r".*State must be float or integer.*"):
        TestEq_dictState()
| [
"numerous.engine.variables.VariableDescription",
"numerous.engine.variables._VariableFactory._create_from_variable_desc_unbound",
"pytest.raises"
] | [((190, 282), 'numerous.engine.variables.VariableDescription', 'VariableDescription', ([], {'tag': '"""test_derivative"""', 'initial_value': '(0)', 'type': 'VariableType.CONSTANT'}), "(tag='test_derivative', initial_value=0, type=\n VariableType.CONSTANT)\n", (209, 282), False, 'from numerous.engine.variables import _VariableFactory, VariableType, VariableDescription\n'), ((324, 428), 'numerous.engine.variables._VariableFactory._create_from_variable_desc_unbound', '_VariableFactory._create_from_variable_desc_unbound', ([], {'variable_description': 'var_desc', 'initial_value': '(0)'}), '(variable_description=\n var_desc, initial_value=0)\n', (375, 428), False, 'from numerous.engine.variables import _VariableFactory, VariableType, VariableDescription\n'), ((475, 567), 'numerous.engine.variables.VariableDescription', 'VariableDescription', ([], {'tag': '"""test_constant"""', 'initial_value': '(0)', 'type': 'VariableType.DERIVATIVE'}), "(tag='test_constant', initial_value=0, type=VariableType\n .DERIVATIVE)\n", (494, 567), False, 'from numerous.engine.variables import _VariableFactory, VariableType, VariableDescription\n'), ((609, 713), 'numerous.engine.variables._VariableFactory._create_from_variable_desc_unbound', '_VariableFactory._create_from_variable_desc_unbound', ([], {'variable_description': 'var_desc', 'initial_value': '(0)'}), '(variable_description=\n var_desc, initial_value=0)\n', (660, 713), False, 'from numerous.engine.variables import _VariableFactory, VariableType, VariableDescription\n'), ((766, 835), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*State must be float or integer.*"""'}), "(ValueError, match='.*State must be float or integer.*')\n", (779, 835), False, 'import pytest\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import mkt.webapps.models
import mkt.translations.models
import mkt.translations.fields
class Migration(migrations.Migration):
    """Initial migration: create the ``inapp_products`` table for InAppProduct."""

    dependencies = [
        ('translations', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='InAppProduct',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('active', models.BooleanField(default=True, db_index=True)),
                ('guid', models.CharField(max_length=255, unique=True, null=True, blank=True)),
                ('default_locale', models.CharField(default=b'en-us', max_length=10)),
                ('logo_url', models.URLField(max_length=1024, null=True, blank=True)),
                ('simulate', models.CharField(max_length=100, null=True, blank=True)),
                ('stub', models.BooleanField(default=False, db_index=True)),
                # translated product name stored via the translations app
                ('name', mkt.translations.fields.TranslatedField(related_name='InAppProduct_name_set+', null=True, on_delete=django.db.models.deletion.SET_NULL, db_column=b'name', to_field=b'id', blank=True, to=mkt.translations.models.Translation, short=True, require_locale=False, unique=True)),
            ],
            options={
                'db_table': 'inapp_products',
            },
            bases=(mkt.webapps.models.UUIDModelMixin, models.Model),
        ),
    ]
| [
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((464, 557), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (480, 557), False, 'from django.db import models, migrations\n'), ((584, 623), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (604, 623), False, 'from django.db import models, migrations\n'), ((655, 690), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (675, 690), False, 'from django.db import models, migrations\n'), ((720, 768), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'db_index': '(True)'}), '(default=True, db_index=True)\n', (739, 768), False, 'from django.db import models, migrations\n'), ((796, 864), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, unique=True, null=True, blank=True)\n', (812, 864), False, 'from django.db import models, migrations\n'), ((902, 951), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'en-us'", 'max_length': '(10)'}), "(default=b'en-us', max_length=10)\n", (918, 951), False, 'from django.db import models, migrations\n'), ((983, 1038), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(1024)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1024, null=True, blank=True)\n', (998, 1038), False, 'from django.db import models, migrations\n'), ((1070, 1125), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1086, 1125), False, 'from django.db import models, migrations\n'), ((1153, 1202), 'django.db.models.BooleanField', 'models.BooleanField', ([], 
{'default': '(False)', 'db_index': '(True)'}), '(default=False, db_index=True)\n', (1172, 1202), False, 'from django.db import models, migrations\n')] |
# Copyright 2019 <NAME>, <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains a collection of functions used to gather basic information
that could be commonly used by many different scripts.
"""
import re
from first import first
# unicon is part of genie framework
from unicon.core.errors import SubCommandFailure
__all__ = [
'find_os_name',
'find_cdp_neighbor',
'find_ipaddr_by_arp',
'find_macaddr_by_arp',
'find_macaddr_via_iface',
'find_portchan_members',
'ping'
]
def ping(device, target):
    """Return True when *device* answers a ping to *target*, else False."""
    try:
        device.ping(target)
    except SubCommandFailure:
        return False
    else:
        return True
def find_macaddr_by_arp(dev, ipaddr):
    """Return {'macaddr', 'interface'} for *ipaddr* from the ARP table, or None.

    Expected CLI line: '10.9.2.171  00:13:37  0050.abcd.de17  Vlan18'
    """
    cli_text = dev.execute(f'show ip arp {ipaddr} | inc {ipaddr}')
    if not cli_text or 'Invalid' in cli_text:
        return None

    # BUG FIX: re.split(r'\s+', ...) produced a leading empty field (and a
    # ValueError on unpack) when the CLI output starts with whitespace;
    # str.split() ignores leading/trailing whitespace.
    found_ipaddr, timestamp, macaddr, ifname = cli_text.split()

    return {
        'macaddr': macaddr,
        'interface': ifname
    }
def find_ipaddr_by_arp(dev, macaddr):
    """Return {'ipaddr', 'interface'} for *macaddr* from the ARP table, or None.

    Expected CLI line: '10.9.2.171  00:13:37  0050.abcd.de17  Vlan18'
    """
    cli_text = dev.execute(f'show ip arp | inc {macaddr}')
    if not cli_text or 'Invalid' in cli_text:
        return None

    # BUG FIX: re.split(r'\s+', ...) produced a leading empty field (and a
    # ValueError on unpack) when the CLI output starts with whitespace;
    # str.split() ignores leading/trailing whitespace.
    ipaddr, timestamp, macaddr, ifname = cli_text.split()

    return {
        'ipaddr': ipaddr,
        'interface': ifname
    }
def find_macaddr_via_iface(dev, macaddr):
    """Return the interface that learned *macaddr*, or None when not found.

    Sample CLI line: '* 17 0050.abcd.de17 dynamic 0 F F Po1'
    """
    output = dev.execute(f'show mac address-table | inc {macaddr}')
    if output:
        # the interface name is the final whitespace-separated column
        *_, ifname = output.split()
        return ifname
    return None
def find_portchan_members(dev, ifname):
    """Return the member interface names of port-channel *ifname*, or None.

    Sample CLI line: '1 Po1(SU) Eth LACP Eth2/1(P) Eth2/2(P)'
    """
    output = dev.execute(f'show port-channel summary interface {ifname} | inc {ifname}')
    if not output:
        return None
    # columns 0-3 are group/name/type/protocol; the rest are members like 'Eth2/1(P)'
    member_fields = re.split(r'\s+', output)[4:]
    return [field.partition('(')[0] for field in member_fields]
def find_os_name(dev=None, content=None):
    """Return the genie OS name ('iosxe', 'nxos' or 'ios') or None.

    Either pass pre-captured *content* ('show version' / CDP detail output)
    or a *dev* on which 'show version' will be executed.
    """
    if not content:
        content = dev.execute('show version')

    # look for specific Cisco OS names. If one is not found, it means that the
    # CDP neighbor is not a recognized device, and return None. If it is
    # recognized then the re will return a list, for which we need to extract
    # the actual found NOS name; thus using the first() function twice.
    # (IOSXE is listed before IOS so the substring 'IOS' cannot shadow it.)
    os_name = first(re.findall('(IOSXE)|(NX-OS)|(IOS)', content, re.M))
    if not os_name:
        return None

    os_name = first(os_name)

    # convert OS name from show output to os name required by genie; the key
    # is guaranteed present since it is one of the three matched groups
    return {'IOSXE': 'iosxe', 'NX-OS': 'nxos', 'IOS': 'ios'}[os_name]
def find_cdp_neighbor(dev, ifname):
    """Return {'device', 'platform', 'os_name'} for the CDP neighbor on *ifname*.

    Returns None when there is no neighbor. NX-OS and IOS use different CLI
    syntax and different "no neighbor" indications, handled below.
    """
    if dev.os == 'nxos':
        cli_text = dev.execute(f'show cdp neighbor interface {ifname} detail')
        if not cli_text or 'Invalid' in cli_text:
            return None
    else:
        cli_text = dev.execute(f'show cdp neighbors {ifname} detail')
        if "Total cdp entries displayed : 0" in cli_text:
            return None

    device = first(re.findall('Device ID:(.*)$', cli_text, re.M))
    # strip the domain part of an FQDN device id
    if device and '.' in device:
        device = first(device.split('.'))

    platform = first(re.findall('Platform: (.*),', cli_text, re.M))
    os_name = find_os_name(content=cli_text)

    return {
        'device': device,
        'platform': platform,
        'os_name': os_name
    }
| [
"first.first",
"re.split",
"re.findall"
] | [((1456, 1482), 're.split', 're.split', (['"""\\\\s+"""', 'cli_text'], {}), "('\\\\s+', cli_text)\n", (1464, 1482), False, 'import re\n'), ((1824, 1850), 're.split', 're.split', (['"""\\\\s+"""', 'cli_text'], {}), "('\\\\s+', cli_text)\n", (1832, 1850), False, 'import re\n'), ((3123, 3137), 'first.first', 'first', (['os_name'], {}), '(os_name)\n', (3128, 3137), False, 'from first import first\n'), ((2493, 2519), 're.split', 're.split', (['"""\\\\s+"""', 'cli_text'], {}), "('\\\\s+', cli_text)\n", (2501, 2519), False, 'import re\n'), ((3016, 3066), 're.findall', 're.findall', (['"""(IOSXE)|(NX-OS)|(IOS)"""', 'content', 're.M'], {}), "('(IOSXE)|(NX-OS)|(IOS)', content, re.M)\n", (3026, 3066), False, 'import re\n'), ((3725, 3770), 're.findall', 're.findall', (['"""Device ID:(.*)$"""', 'cli_text', 're.M'], {}), "('Device ID:(.*)$', cli_text, re.M)\n", (3735, 3770), False, 'import re\n'), ((3869, 3914), 're.findall', 're.findall', (['"""Platform: (.*),"""', 'cli_text', 're.M'], {}), "('Platform: (.*),', cli_text, re.M)\n", (3879, 3914), False, 'import re\n')] |
# encoding: UTF-8
"""
定时服务,可无人值守运行,实现每日自动下载更新历史行情数据到数据库中。
"""
from DataService.tushareData import *
from datetime import datetime
if __name__ == '__main__':
    taskCompletedDate = None

    # Daily task time (17:30). NOTE(review): the original (Chinese) comment
    # said this should be a *random* time to avoid all users hitting the data
    # server at once, but the code uses a fixed time -- confirm intent.
    taskTime = datetime.now().replace(hour=17, minute=30, second=0)

    # Main loop: poll every 10 minutes, run the downloads once per day.
    while True:
        t = datetime.now()

        # Once past the task time, and not yet completed today, download data.
        if t.time() > taskTime.time() and (taskCompletedDate is None or t.date() != taskCompletedDate):
            downloadTradeCalendar()
            downloadAllStock()

            # first run after startup backfills 5 days; afterwards only 1 day
            if (taskCompletedDate is None):
                downloadTradeDataDaily(5)
            else:
                downloadTradeDataDaily(1)

            downloadTradeDataTick(2)
            downloadTradeDataRealtimeQuotes()

            # record the date on which the task completed
            taskCompletedDate = t.date()

        # sleep/download helpers come from the star import of DataService.tushareData
        sleep(600)
        #break
| [
"datetime.datetime.now"
] | [((345, 359), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (357, 359), False, 'from datetime import datetime\n'), ((251, 265), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (263, 265), False, 'from datetime import datetime\n')] |
"""System utilities."""
import socket
import sys
import os
import csv
import yaml
import torch
import torchvision
import random
import numpy as np
import datetime
import hydra
from omegaconf import OmegaConf, open_dict
import logging
def system_startup(process_idx, local_group_size, cfg):
    """Decide and print GPU / CPU / hostname info. Generate local distributed setting if running in distr. mode."""
    log = get_log(cfg)
    torch.backends.cudnn.benchmark = cfg.case.impl.benchmark
    torch.multiprocessing.set_sharing_strategy(cfg.case.impl.sharing_strategy)
    # NOTE(review): huggingface_offline_mode is neither defined nor imported in
    # the visible part of this module -- presumably provided elsewhere; verify.
    huggingface_offline_mode(cfg.case.impl.enable_huggingface_offline_mode)
    # 100% reproducibility?
    if cfg.case.impl.deterministic:
        set_deterministic()
    if cfg.seed is not None:
        # offset the seed per process so each rank draws a different stream
        set_random_seed(cfg.seed + 10 * process_idx)

    # e.g. cfg.case.impl.dtype == 'float32' resolves to torch.float32
    dtype = getattr(torch, cfg.case.impl.dtype)  # :> dont mess this up
    device = torch.device(f"cuda:{process_idx}") if torch.cuda.is_available() else torch.device("cpu")
    setup = dict(device=device, dtype=dtype)
    python_version = sys.version.split(" (")[0]
    log.info(f"Platform: {sys.platform}, Python: {python_version}, PyTorch: {torch.__version__}")
    log.info(f"CPUs: {torch.get_num_threads()}, GPUs: {torch.cuda.device_count()} on {socket.gethostname()}.")

    if torch.cuda.is_available():
        # pin this process to its own GPU
        torch.cuda.set_device(process_idx)
        log.info(f"GPU : {torch.cuda.get_device_name(device=device)}")

    # if not torch.cuda.is_available() and not cfg.dryrun:
    #     raise ValueError('No GPU allocated to this process. Running in CPU-mode is likely a bad idea. Complain to your admin.')
    return setup
def is_main_process():
    """True when torch.distributed is not initialized, or when this is rank 0."""
    if not torch.distributed.is_initialized():
        return True
    return torch.distributed.get_rank() == 0
def get_log(cfg, name=os.path.basename(__file__)):
    """Solution via https://github.com/facebookresearch/hydra/issues/1126#issuecomment-727826513

    On the main process this configures logging from cfg.job_logging_cfg and
    returns a real logger; other ranks get a no-op callable whose ``.info``
    swallows everything.
    """
    if is_main_process():
        # BUG FIX: ``import logging`` alone does not load the ``logging.config``
        # submodule; import it explicitly before using dictConfig.
        import logging.config
        logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg, resolve=True))
        logger = logging.getLogger(name)
    else:

        def logger(*args, **kwargs):
            pass

        logger.info = logger
    return logger
def initialize_multiprocess_log(cfg):
    """Stash the resolved hydra logging config and the original cwd on *cfg*.

    Spawned worker processes cannot query hydra directly, so these values are
    copied into the (unlocked) config for them to read.
    """
    with open_dict(cfg):
        # manually save log config to cfg
        log_config = hydra.core.hydra_config.HydraConfig.get().job_logging
        # but resolve any filenames
        cfg.job_logging_cfg = OmegaConf.to_container(log_config, resolve=True)
        cfg.original_cwd = hydra.utils.get_original_cwd()
def save_summary(cfg, metrics, stats, local_time, original_cwd=True, table_name="breach"):
    """Save two summary tables. A detailed table of iterations/loss+acc and a summary of the end results."""
    # 1) detailed table:
    for step in range(len(stats["train_loss"])):
        iteration = dict()
        for key in stats:
            # stat lists shorter than train_loss are padded with None
            iteration[key] = stats[key][step] if step < len(stats[key]) else None
        save_to_table(".", f"{cfg.attack.type}_convergence_results", dryrun=cfg.dryrun, **iteration)

    try:
        # relative run folder when executing inside hydra's outputs/ tree
        local_folder = os.getcwd().split("outputs/")[1]
    except IndexError:
        local_folder = ""

    # 2) save a reduced summary
    summary = dict(
        name=cfg.name,
        usecase=cfg.case.name,
        model=cfg.case.model,
        datapoints=cfg.case.user.num_data_points,
        model_state=cfg.case.server.model_state,
        attack=cfg.attack.type,
        attacktype=cfg.attack.attack_type,
        **{k: v for k, v in metrics.items() if k != "order"},
        score=stats["opt_value"],
        # strip commas so long durations don't break the table layout
        total_time=str(datetime.timedelta(seconds=local_time)).replace(",", ""),
        user_type=cfg.case.user.user_type,
        gradient_noise=cfg.case.user.local_diff_privacy.gradient_noise,
        seed=cfg.seed,
        # dump extra values from here:
        **{f"ATK_{k}": v for k, v in cfg.attack.items()},
        **{k: v for k, v in cfg.case.items() if k not in ["name", "model"]},
        folder=local_folder,
    )

    location = os.path.join(cfg.original_cwd, "tables") if original_cwd else "tables"
    save_to_table(location, f"{table_name}_{cfg.case.name}_{cfg.case.data.name}_reports", dryrun=cfg.dryrun, **summary)
def save_to_table(out_dir, table_name, dryrun, **kwargs):
    """Save keys to .csv files. Function adapted from Micah.

    Appends one tab-separated row per call to ``table_{table_name}.csv``.
    With ``dryrun`` set, nothing is written.
    """
    # Check for file
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    fname = os.path.join(out_dir, f"table_{table_name}.csv")
    fieldnames = list(kwargs.keys())

    # Read or write header
    try:
        with open(fname, "r") as f:
            reader = csv.reader(f, delimiter="\t")
            header = next(reader)  # noqa # this line is testing the header
        # assert header == fieldnames[:len(header)] # new columns are ok, but old columns need to be consistent
        # dont test, always write when in doubt to prevent erroneous table rewrites
    except Exception as e:  # noqa
        # file missing/unreadable -> (re)create it with a fresh header
        if not dryrun:
            # print('Creating a new .csv table...')
            with open(fname, "w") as f:
                writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
                writer.writeheader()
        else:
            pass
            # print(f'Would create new .csv table {fname}.')

    # Write a new row
    if not dryrun:
        # Add row for this experiment
        with open(fname, "a") as f:
            writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
            writer.writerow(kwargs)
        # print('\nResults saved to ' + fname + '.')
    else:
        pass
        # print(f'Would save results to {fname}.')
def set_random_seed(seed=233):
    """Seed every RNG in play (torch CPU/CUDA, numpy, stdlib random) with fixed offsets of *seed*."""
    seeders_with_offsets = (
        (torch.manual_seed, 1),
        (torch.cuda.manual_seed, 2),
        (torch.cuda.manual_seed_all, 3),
        (np.random.seed, 4),
        (torch.cuda.manual_seed_all, 5),  # seeded twice, as in the original
        (random.seed, 6),
    )
    for seeder, offset in seeders_with_offsets:
        seeder(seed + offset)
def set_deterministic():
    """Switch pytorch into a deterministic computation mode."""
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.use_deterministic_algorithms(True)
    # required by CUDA >= 10.2 for deterministic cuBLAS kernels
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
def avg_n_dicts(dicts):
    """Key-wise average of a list of dicts sharing the same schema.

    Scalar values become the mean over all dicts; list values are averaged
    element-wise. (https://github.com/wronnyhuang/metapoison/blob/master/utils.py)
    """
    n = len(dicts)
    means = {}
    for record in dicts:
        for key, value in record.items():
            if isinstance(value, list):
                if key not in means:
                    means[key] = [0] * len(value)
                for idx, entry in enumerate(value):
                    means[key][idx] += entry / n
            else:
                means[key] = means.get(key, 0) + value / n
    return means
def get_base_cwd():
    """Return hydra's original working directory, or os.getcwd() when hydra is uninitialized."""
    try:
        cwd = hydra.utils.get_original_cwd()
    except ValueError:  # raised when hydra has not been initialized
        cwd = os.getcwd()
    return cwd
def overview(server, user, attacker):
    """Print a short overview of model size, data ratio and the case actors."""
    num_params, num_buffers = (
        sum([p.numel() for p in user.model.parameters()]),
        sum([b.numel() for b in user.model.buffers()]),
    )
    # total number of scalar entries in the user's data (the attack target)
    target_information = user.num_data_points * torch.as_tensor(server.cfg_data.shape).prod()
    print(f"Model architecture {user.model.name} loaded with {num_params:,} parameters and {num_buffers:,} buffers.")
    print(
        f"Overall this is a data ratio of {server.num_queries * num_params / target_information:7.0f}:1 "
        f"for target shape {[user.num_data_points, *server.cfg_data.shape]} given that num_queries={server.num_queries}."
    )
    print(user)
    print(server)
    print(attacker)
def save_reconstruction(
    reconstructed_user_data, server_payload, true_user_data, cfg, side_by_side=True, target_indx=None
):
    """If target_indx is not None, only the datapoints at target_indx will be saved to file.

    Text cases are decoded and written to a .txt (optionally followed by the
    ground truth); image cases are de-normalized and written to a .png
    (optionally stacked reconstruction/ground-truth).
    """
    os.makedirs("reconstructions", exist_ok=True)
    metadata = server_payload[0]["metadata"]
    if metadata["modality"] == "text":
        from breaching.cases.data.datasets_text import _get_tokenizer

        tokenizer = _get_tokenizer(
            server_payload[0]["metadata"]["tokenizer"],
            server_payload[0]["metadata"]["vocab_size"],
            cache_dir=cfg.case.data.path,
        )
        text_rec = tokenizer.batch_decode(reconstructed_user_data["data"])
        text_ref = tokenizer.batch_decode(true_user_data["data"])
        if target_indx is not None:
            text_rec = text_rec[target_indx]
            text_ref = text_ref[target_indx]
        filepath = os.path.join(
            "reconstructions", f"text_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.txt"
        )
        with open(filepath, "w") as f:
            f.writelines(text_rec)
            if side_by_side:
                f.write("\n")
                f.write("========== GROUND TRUTH TEXT ===========")
                f.write("\n")
                f.writelines(text_ref)
    else:
        # NOTE(review): `metadata` is indexed like a dict above but probed with
        # hasattr here; on a plain dict hasattr(metadata, "mean") is always
        # False -- confirm metadata is an attribute-style container (OmegaConf).
        if hasattr(metadata, "mean"):
            dm = torch.as_tensor(metadata.mean)[None, :, None, None]
            ds = torch.as_tensor(metadata.std)[None, :, None, None]
        else:
            dm, ds = torch.tensor(0,), torch.tensor(1)
        # undo normalization and clamp into the displayable [0, 1] range
        rec_denormalized = torch.clamp(reconstructed_user_data["data"].cpu() * ds + dm, 0, 1)
        ground_truth_denormalized = torch.clamp(true_user_data["data"].cpu() * ds + dm, 0, 1)
        if target_indx is not None:
            rec_denormalized = rec_denormalized[target_indx]
            ground_truth_denormalized = ground_truth_denormalized[target_indx]
        filepath = os.path.join(
            "reconstructions", f"img_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.png",
        )
        if not side_by_side:
            torchvision.utils.save_image(rec_denormalized, filepath)
        else:
            torchvision.utils.save_image(torch.cat([rec_denormalized, ground_truth_denormalized]), filepath)
def dump_metrics(cfg, metrics):
    """Write all metric values into a YAML file named after the experiment setup."""
    filepath = f"metrics_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.yaml"
    sanitized_metrics = {}
    for name, value in metrics.items():
        try:
            # Scalars (or 1-element arrays) become plain Python numbers.
            sanitized_metrics[name] = np.asarray(value).item()
        except ValueError:
            # Multi-element values are stored as plain lists instead.
            sanitized_metrics[name] = np.asarray(value).tolist()
    with open(filepath, "w") as yaml_file:
        yaml.dump(sanitized_metrics, yaml_file, default_flow_style=False)
def huggingface_offline_mode(huggingface_offline_mode):
    """Force the huggingface libraries into offline (cache-only) mode if requested."""
    if not huggingface_offline_mode:
        return
    os.environ["HF_DATASETS_OFFLINE"] = "1"
    os.environ["TRANSFORMERS_OFFLINE"] = "1"
| [
"logging.getLogger",
"omegaconf.open_dict",
"csv.DictWriter",
"torch.as_tensor",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.distributed.get_rank",
"sys.version.split",
"datetime.timedelta",
"torchvision.utils.save_image",
"hydra.utils.get_original_cwd",
"numpy.asarray",
"os... | [((502, 576), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['cfg.case.impl.sharing_strategy'], {}), '(cfg.case.impl.sharing_strategy)\n', (544, 576), False, 'import torch\n'), ((1314, 1339), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1337, 1339), False, 'import torch\n'), ((1799, 1825), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1815, 1825), False, 'import os\n'), ((4446, 4494), 'os.path.join', 'os.path.join', (['out_dir', 'f"""table_{table_name}.csv"""'], {}), "(out_dir, f'table_{table_name}.csv')\n", (4458, 4494), False, 'import os\n'), ((5705, 5732), 'torch.manual_seed', 'torch.manual_seed', (['(seed + 1)'], {}), '(seed + 1)\n', (5722, 5732), False, 'import torch\n'), ((5737, 5769), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(seed + 2)'], {}), '(seed + 2)\n', (5759, 5769), False, 'import torch\n'), ((5774, 5810), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(seed + 3)'], {}), '(seed + 3)\n', (5800, 5810), False, 'import torch\n'), ((5815, 5839), 'numpy.random.seed', 'np.random.seed', (['(seed + 4)'], {}), '(seed + 4)\n', (5829, 5839), True, 'import numpy as np\n'), ((5844, 5880), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(seed + 5)'], {}), '(seed + 5)\n', (5870, 5880), False, 'import torch\n'), ((5885, 5906), 'random.seed', 'random.seed', (['(seed + 6)'], {}), '(seed + 6)\n', (5896, 5906), False, 'import random\n'), ((6121, 6161), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(True)'], {}), '(True)\n', (6155, 6161), False, 'import torch\n'), ((8064, 8109), 'os.makedirs', 'os.makedirs', (['"""reconstructions"""'], {'exist_ok': '(True)'}), "('reconstructions', exist_ok=True)\n", (8075, 8109), False, 'import os\n'), ((953, 978), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (976, 978), False, 'import torch\n'), ((914, 949), 
'torch.device', 'torch.device', (['f"""cuda:{process_idx}"""'], {}), "(f'cuda:{process_idx}')\n", (926, 949), False, 'import torch\n'), ((984, 1003), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (996, 1003), False, 'import torch\n'), ((1070, 1093), 'sys.version.split', 'sys.version.split', (['""" ("""'], {}), "(' (')\n", (1087, 1093), False, 'import sys\n'), ((1349, 1383), 'torch.cuda.set_device', 'torch.cuda.set_device', (['process_idx'], {}), '(process_idx)\n', (1370, 1383), False, 'import torch\n'), ((2064, 2087), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (2081, 2087), False, 'import logging\n'), ((2250, 2264), 'omegaconf.open_dict', 'open_dict', (['cfg'], {}), '(cfg)\n', (2259, 2264), False, 'from omegaconf import OmegaConf, open_dict\n'), ((2449, 2497), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['log_config'], {'resolve': '(True)'}), '(log_config, resolve=True)\n', (2471, 2497), False, 'from omegaconf import OmegaConf, open_dict\n'), ((2525, 2555), 'hydra.utils.get_original_cwd', 'hydra.utils.get_original_cwd', ([], {}), '()\n', (2553, 2555), False, 'import hydra\n'), ((4034, 4074), 'os.path.join', 'os.path.join', (['cfg.original_cwd', '"""tables"""'], {}), "(cfg.original_cwd, 'tables')\n", (4046, 4074), False, 'import os\n'), ((4381, 4403), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (4394, 4403), False, 'import os\n'), ((4413, 4433), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4424, 4433), False, 'import os\n'), ((7021, 7051), 'hydra.utils.get_original_cwd', 'hydra.utils.get_original_cwd', ([], {}), '()\n', (7049, 7051), False, 'import hydra\n'), ((8285, 8423), 'breaching.cases.data.datasets_text._get_tokenizer', '_get_tokenizer', (["server_payload[0]['metadata']['tokenizer']", "server_payload[0]['metadata']['vocab_size']"], {'cache_dir': 'cfg.case.data.path'}), "(server_payload[0]['metadata']['tokenizer'], server_payload[0\n 
]['metadata']['vocab_size'], cache_dir=cfg.case.data.path)\n", (8299, 8423), False, 'from breaching.cases.data.datasets_text import _get_tokenizer\n'), ((8753, 8877), 'os.path.join', 'os.path.join', (['"""reconstructions"""', 'f"""text_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.txt"""'], {}), "('reconstructions',\n f'text_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.txt'\n )\n", (8765, 8877), False, 'import os\n'), ((9801, 9924), 'os.path.join', 'os.path.join', (['"""reconstructions"""', 'f"""img_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.png"""'], {}), "('reconstructions',\n f'img_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.png'\n )\n", (9813, 9924), False, 'import os\n'), ((10629, 10694), 'yaml.dump', 'yaml.dump', (['sanitized_metrics', 'yaml_file'], {'default_flow_style': '(False)'}), '(sanitized_metrics, yaml_file, default_flow_style=False)\n', (10638, 10694), False, 'import yaml\n'), ((1703, 1737), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1735, 1737), False, 'import torch\n'), ((1741, 1769), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1767, 1769), False, 'import torch\n'), ((1988, 2045), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg.job_logging_cfg'], {'resolve': '(True)'}), '(cfg.job_logging_cfg, resolve=True)\n', (2010, 2045), False, 'from omegaconf import OmegaConf, open_dict\n'), ((2329, 2370), 'hydra.core.hydra_config.HydraConfig.get', 'hydra.core.hydra_config.HydraConfig.get', ([], {}), '()\n', (2368, 2370), False, 'import hydra\n'), ((4626, 4655), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (4636, 4655), False, 'import csv\n'), ((5436, 5492), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'delimiter': '"""\t"""', 'fieldnames': 'fieldnames'}), "(f, delimiter='\\t', fieldnames=fieldnames)\n", (5450, 
5492), False, 'import csv\n'), ((7116, 7127), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7125, 7127), False, 'import os\n'), ((9981, 10037), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['rec_denormalized', 'filepath'], {}), '(rec_denormalized, filepath)\n', (10009, 10037), False, 'import torchvision\n'), ((1217, 1240), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (1238, 1240), False, 'import torch\n'), ((1250, 1275), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1273, 1275), False, 'import torch\n'), ((1281, 1301), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1299, 1301), False, 'import socket\n'), ((7369, 7407), 'torch.as_tensor', 'torch.as_tensor', (['server.cfg_data.shape'], {}), '(server.cfg_data.shape)\n', (7384, 7407), False, 'import torch\n'), ((9227, 9257), 'torch.as_tensor', 'torch.as_tensor', (['metadata.mean'], {}), '(metadata.mean)\n', (9242, 9257), False, 'import torch\n'), ((9296, 9325), 'torch.as_tensor', 'torch.as_tensor', (['metadata.std'], {}), '(metadata.std)\n', (9311, 9325), False, 'import torch\n'), ((9382, 9397), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (9394, 9397), False, 'import torch\n'), ((9400, 9415), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (9412, 9415), False, 'import torch\n'), ((10093, 10149), 'torch.cat', 'torch.cat', (['[rec_denormalized, ground_truth_denormalized]'], {}), '([rec_denormalized, ground_truth_denormalized])\n', (10102, 10149), False, 'import torch\n'), ((1410, 1451), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {'device': 'device'}), '(device=device)\n', (1436, 1451), False, 'import torch\n'), ((3101, 3112), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3110, 3112), False, 'import os\n'), ((5113, 5169), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'delimiter': '"""\t"""', 'fieldnames': 'fieldnames'}), "(f, delimiter='\\t', fieldnames=fieldnames)\n", (5127, 5169), False, 'import 
csv\n'), ((10463, 10478), 'numpy.asarray', 'np.asarray', (['val'], {}), '(val)\n', (10473, 10478), True, 'import numpy as np\n'), ((3613, 3651), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'local_time'}), '(seconds=local_time)\n', (3631, 3651), False, 'import datetime\n'), ((10553, 10568), 'numpy.asarray', 'np.asarray', (['val'], {}), '(val)\n', (10563, 10568), True, 'import numpy as np\n')] |
import pkg_resources
import os.path
from PIL import ImageFont
def available():
    """
    Returns list of available font names.
    """
    # A font is identified by its '.pil' metadata file shipped in the package.
    font_names = [
        os.path.splitext(os.path.basename(entry))[0]
        for entry in pkg_resources.resource_listdir('ev3dev.fonts', '')
        if os.path.splitext(os.path.basename(entry))[1] == '.pil'
    ]
    return sorted(font_names)
def load(name):
    """
    Loads the font specified by name and returns it as an instance of
    `PIL.ImageFont <http://pillow.readthedocs.io/en/latest/reference/ImageFont.html>`_
    class.
    """
    try:
        pil_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pil'.format(name))
        # NOTE(review): pbm_file looks unused, but resource_filename presumably
        # extracts the bitmap next to the .pil metadata so PIL can find it — confirm.
        pbm_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pbm'.format(name))
        return ImageFont.load(pil_file)
    except FileNotFoundError:
        # Re-raise with a friendlier hint pointing at available().
        raise Exception('Failed to load font "{}". '.format(name) +
                        'Check ev3dev.fonts.available() for the list of available fonts')
| [
"PIL.ImageFont.load",
"pkg_resources.resource_listdir"
] | [((166, 216), 'pkg_resources.resource_listdir', 'pkg_resources.resource_listdir', (['"""ev3dev.fonts"""', '""""""'], {}), "('ev3dev.fonts', '')\n", (196, 216), False, 'import pkg_resources\n'), ((763, 787), 'PIL.ImageFont.load', 'ImageFont.load', (['pil_file'], {}), '(pil_file)\n', (777, 787), False, 'from PIL import ImageFont\n')] |
#!flask/bin/python
import os, pymongo
import models
from flask import Flask, jsonify, abort, request, make_response, url_for
# Single Flask application object; every route below attaches to it.
app = Flask(__name__)
# Error Handler for 400: Bad Request
@app.errorhandler(400)
def bad_request(error):
    """Render 400 errors as JSON.

    Renamed from `not_found`: the file defined two module-level functions with
    that name, so the 404 handler silently rebound it (flake8 F811). Flask keeps
    both registrations either way, so behavior is unchanged.
    """
    return make_response(jsonify( { 'error': 'Bad request' } ), 400)
# Error Handler for 404: Not Found
@app.errorhandler(404)
def not_found(error):
    """Render 404 errors as JSON instead of Flask's default HTML page."""
    return make_response(jsonify( { 'error': 'Not found' } ), 404)
# Service Call for retrieving list of documents
@app.route('/api/v1.0/documents', methods = ['GET'])
def get_documents():
    """Return every stored document as JSON.

    NOTE(review): replies with 201 (Created) on a GET; 200 looks intended,
    but clients may rely on it — confirm before changing.
    """
    return jsonify( { 'documents': models.get_documents() } ), 201
# Service Call for retrieving a single document's details using mongo unique index id
@app.route('/api/v1.0/doco/<string:d_id>', methods = ['GET'])
def get_doco(d_id):
    """Look up one document by its mongo id; 404 when it does not exist."""
    doco = models.get_doco(d_id)
    if doco:
        return jsonify( { 'doco': doco })
    # Nothing stored under that id.
    abort(404)
# Service Call for creating a new document
@app.route('/api/v1.0/doco', methods = ['POST'])
def create_doco():
    """Create a new document from the posted JSON body.

    Requires a JSON payload with at least a 'name' field; 'doc_id' is
    expected to be unique. Returns the stored document plus its new id.
    """
    # Check for JSON input, plus:
    # Mandatory document name, doc_id must be unique
    if not request.json or not 'name' in request.json:
        abort(400)
    # Bug fix: the original called .strip() directly on the default None when
    # 'handler_id' was absent, raising AttributeError (HTTP 500).
    handler_id = request.json.get('handler_id', None)
    if handler_id is not None:
        handler_id = handler_id.strip().lower()
    doco = {
        'doc_id': str(request.json['doc_id']).strip().lower(),
        'doco_type': str(request.json['doco_type']).strip().upper(),
        'name': str(request.json['name']).strip().capitalize(),
        'status': request.json.get('status', "").strip().upper(),
        'handler_id': handler_id,
        'dog_id': str(request.json.get('dog_id', "")).strip().lower()
    }
    new_id = models.new_doco(doco)
    doco['id'] = new_id
    return jsonify( { 'doco': doco } ), 201
# Service Call for updating a document
@app.route('/api/v1.0/doco/<string:d_id>', methods = ['PUT'])
def update_doco(d_id):
    """Update an existing document from the JSON body; unknown id -> 404.

    Every provided field must be a string (the `unicode` checks imply this
    module targets a Python 2 runtime); fields that are absent keep their
    current value. All values are re-normalized before saving.
    """
    doco = models.get_doco(d_id)
    if len(doco) == 0:
        abort(404)
    if not request.json:
        abort(400)
    # Type-validate every optional field before touching the record.
    if 'name' in request.json and type(request.json['name']) is not unicode:
        abort(400)
    if 'status' in request.json and type(request.json['status']) is not unicode:
        abort(400)
    if 'doc_id' in request.json and type(request.json['doc_id']) is not unicode:
        abort(400)
    if 'doco_type' in request.json and type(request.json['doco_type']) is not unicode:
        abort(400)
    if 'handler_id' in request.json and type(request.json['handler_id']) is not unicode:
        abort(400)
    if 'dog_id' in request.json and type(request.json['dog_id']) is not unicode:
        abort(400)
    # Merge: request value wins, otherwise keep the stored value; re-normalize either way.
    doco['name'] = str(request.json.get('name', doco['name'])).strip().capitalize()
    doco['doc_id'] = str(request.json.get('doc_id', doco['doc_id'])).strip().lower()
    doco['doco_type'] = str(request.json.get('doco_type', doco['doco_type'])).strip().upper()
    doco['status'] = str(request.json.get('status', doco['status'])).strip().upper()
    doco['handler_id'] = str(request.json.get('handler_id', doco['handler_id'])).strip().lower()
    doco['dog_id'] = str(request.json.get('dog_id', doco['dog_id'])).strip().lower()
    models.update_doco(doco)
    return jsonify( { 'doco': doco } )
### Service Call for deleting a document
##@app.route('/api/v1.0/doco/<string:d_id>', methods = ['DELETE'])
##def delete_doco(d_id):
## doco = models.get_doco(d_id)
## if doco is None:
## abort(404)
## models.delete_doco(d_id)
## return jsonify( { 'result': True } )
# Service Call for search by criteria (similar to Update Handler method)
# Accepts a single field or multiple fields which are then AND'ed
@app.route('/api/v1.0/search', methods = ['PUT'])
def search():
    """Build a criteria dict from the JSON body and run the search.

    Each recognised field is normalised the same way it is stored
    (case/whitespace), so lookups match the canonical form. A `None`
    handler_id/dog_id is passed through to allow searching for
    unassigned records.
    """
    if not request.json:
        abort(400)
    criteria = {}
    if 'doc_id' in request.json:
        criteria['doc_id'] = str(request.json['doc_id']).strip().lower()
    if 'doco_type' in request.json:
        criteria['doco_type'] = str(request.json['doco_type']).strip().upper()
    if 'name' in request.json:
        criteria['name'] = str(request.json['name']).strip().capitalize()
    if 'handler_id' in request.json:
        handler_id = request.json['handler_id']
        # Bug fix: the normalised value used to be discarded — the result of
        # str(...).strip().lower() was computed but never assigned back.
        if handler_id is not None:
            handler_id = str(handler_id).strip().lower()
        criteria['handler_id'] = handler_id
    if 'dog_id' in request.json:
        dog_id = request.json['dog_id']
        # Bug fix: same discarded-normalisation issue as handler_id.
        if dog_id is not None:
            dog_id = str(dog_id).strip().lower()
        criteria['dog_id'] = dog_id
    if 'status' in request.json:
        criteria['status'] = str(request.json['status']).strip().upper()
    documents = models.search(criteria)
    return jsonify( {'documents': documents} )
# Initialise DB before starting web service
models.init_db()
# Entry point: bind to all interfaces; port comes from $PORT (default 5050).
if __name__ == '__main__':
    app.run(debug=False, host='0.0.0.0', port=int(os.getenv('PORT', '5050')), threaded=True)
| [
"models.get_doco",
"os.getenv",
"flask.Flask",
"models.init_db",
"models.get_documents",
"models.new_doco",
"models.update_doco",
"models.search",
"flask.request.json.get",
"flask.abort",
"flask.jsonify"
] | [((141, 156), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((5001, 5017), 'models.init_db', 'models.init_db', ([], {}), '()\n', (5015, 5017), False, 'import models\n'), ((616, 638), 'models.get_documents', 'models.get_documents', ([], {}), '()\n', (636, 638), False, 'import models\n'), ((879, 900), 'models.get_doco', 'models.get_doco', (['d_id'], {}), '(d_id)\n', (894, 900), False, 'import models\n'), ((992, 1015), 'flask.jsonify', 'jsonify', (["{'doco': doco}"], {}), "({'doco': doco})\n", (999, 1015), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((1745, 1766), 'models.new_doco', 'models.new_doco', (['doco'], {}), '(doco)\n', (1760, 1766), False, 'import models\n'), ((1976, 1997), 'models.get_doco', 'models.get_doco', (['d_id'], {}), '(d_id)\n', (1991, 1997), False, 'import models\n'), ((3251, 3275), 'models.update_doco', 'models.update_doco', (['doco'], {}), '(doco)\n', (3269, 3275), False, 'import models\n'), ((3290, 3313), 'flask.jsonify', 'jsonify', (["{'doco': doco}"], {}), "({'doco': doco})\n", (3297, 3313), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((4881, 4904), 'models.search', 'models.search', (['criteria'], {}), '(criteria)\n', (4894, 4904), False, 'import models\n'), ((4917, 4950), 'flask.jsonify', 'jsonify', (["{'documents': documents}"], {}), "({'documents': documents})\n", (4924, 4950), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((270, 303), 'flask.jsonify', 'jsonify', (["{'error': 'Bad request'}"], {}), "({'error': 'Bad request'})\n", (277, 303), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((426, 457), 'flask.jsonify', 'jsonify', (["{'error': 'Not found'}"], {}), "({'error': 'Not found'})\n", (433, 457), False, 'from flask import Flask, jsonify, abort, request, 
make_response, url_for\n'), ((651, 684), 'flask.jsonify', 'jsonify', (["{'documents': documents}"], {}), "({'documents': documents})\n", (658, 684), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((967, 977), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (972, 977), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((1289, 1299), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1294, 1299), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((1802, 1825), 'flask.jsonify', 'jsonify', (["{'doco': doco}"], {}), "({'doco': doco})\n", (1809, 1825), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2031, 2041), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2036, 2041), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2077, 2087), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2082, 2087), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2175, 2185), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2180, 2185), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2277, 2287), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2282, 2287), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2379, 2389), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2384, 2389), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2487, 2497), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2492, 2497), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2597, 2607), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2602, 2607), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2699, 2709), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2704, 
2709), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((3858, 3868), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (3863, 3868), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((5097, 5122), 'os.getenv', 'os.getenv', (['"""PORT"""', '"""5050"""'], {}), "('PORT', '5050')\n", (5106, 5122), False, 'import os, pymongo\n'), ((1532, 1562), 'flask.request.json.get', 'request.json.get', (['"""status"""', '""""""'], {}), "('status', '')\n", (1548, 1562), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((1603, 1639), 'flask.request.json.get', 'request.json.get', (['"""handler_id"""', 'None'], {}), "('handler_id', None)\n", (1619, 1639), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2734, 2772), 'flask.request.json.get', 'request.json.get', (['"""name"""', "doco['name']"], {}), "('name', doco['name'])\n", (2750, 2772), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2821, 2863), 'flask.request.json.get', 'request.json.get', (['"""doc_id"""', "doco['doc_id']"], {}), "('doc_id', doco['doc_id'])\n", (2837, 2863), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((2910, 2958), 'flask.request.json.get', 'request.json.get', (['"""doco_type"""', "doco['doco_type']"], {}), "('doco_type', doco['doco_type'])\n", (2926, 2958), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((3002, 3044), 'flask.request.json.get', 'request.json.get', (['"""status"""', "doco['status']"], {}), "('status', doco['status'])\n", (3018, 3044), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((3092, 3142), 'flask.request.json.get', 'request.json.get', (['"""handler_id"""', "doco['handler_id']"], {}), "('handler_id', doco['handler_id'])\n", (3108, 3142), False, 'from flask import Flask, jsonify, 
abort, request, make_response, url_for\n'), ((3186, 3228), 'flask.request.json.get', 'request.json.get', (['"""dog_id"""', "doco['dog_id']"], {}), "('dog_id', doco['dog_id'])\n", (3202, 3228), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n'), ((1680, 1710), 'flask.request.json.get', 'request.json.get', (['"""dog_id"""', '""""""'], {}), "('dog_id', '')\n", (1696, 1710), False, 'from flask import Flask, jsonify, abort, request, make_response, url_for\n')] |
import os
import sys
from keystone import *
# Make the shared util/script helpers (the `shellcode` module) importable.
sys.path.append("./../../util/script/")
import shellcode
def gen_oepinit_code32():
    """Assemble (keystone, x86 32-bit) the OEP-init bootstrap stub and return its bytes.

    The trailing ``label: nop;nop;nop;nop`` lines are 4-byte data slots that the
    stub reads through ``[ebx + label]``; presumably the caller patches them with
    real RVAs / function pointers before injection — confirm at the call site.
    """
    ks = Ks(KS_ARCH_X86, KS_MODE_32)
    code_str = f"""
    // for relative address, get the base of addr
    push ebx;
    call getip;
    lea ebx, [eax-6];
    // get the imagebase
    mov eax, 0x30; // to avoid relative addressing
    mov edi, dword ptr fs:[eax]; //peb
    mov edi, [edi + 0ch]; //ldr
    mov edi, [edi + 14h]; //InMemoryOrderLoadList, this
    mov edi, [edi -8h + 18h]; //this.DllBase
    // get loadlibrarya, getprocaddress
    mov eax, [ebx + findloadlibrarya];
    add eax, edi;
    call eax;
    mov [ebx + findloadlibrarya], eax;
    mov eax, [ebx + findgetprocaddress];
    add eax, edi;
    call eax;
    mov [ebx + findgetprocaddress], eax;
    // reloc
    mov eax, [ebx + dllrva];
    add eax, edi;
    push eax;
    push eax;
    mov eax, [ebx + memrelocrva];
    add eax, edi;
    call eax;
    // bind iat
    mov eax, [ebx + findgetprocaddress];
    push eax; // arg3, getprocaddress
    mov eax, [ebx + findloadlibrarya];
    push eax; // arg2, loadlibraryas
    mov eax, [ebx + dllrva];
    add eax, edi;
    push eax; // arg1, dllbase value
    mov eax, [ebx + membindiatrva];
    add eax, edi
    call eax;
    // bind tls
    xor eax, eax;
    inc eax;
    push eax; // arg2, reason for tls
    mov eax, [ebx + dllrva]
    add eax, edi;
    push eax; // arg1, dllbase
    mov eax, [ebx + membindtlsrva];
    add eax, edi;
    call eax;
    // call dll oep, for dll entry
    xor eax, eax;
    push eax; // lpvReserved
    inc eax;
    push eax; // fdwReason, DLL_PROCESS_ATTACH
    mov eax, [ebx + dllrva];
    add eax, edi;
    push eax; // hinstDLL
    mov eax, [ebx + dlloeprva];
    add eax, edi;
    call eax;
    // jmp to origin oep
    mov eax, [ebx + exeoeprva];
    add eax, edi;
    pop ebx;
    jmp eax;
    getip:
    mov eax, [esp]
    ret
    exeoeprva: nop;nop;nop;nop;
    dllrva: nop;nop;nop;nop;
    dlloeprva: nop;nop;nop;nop;
    memrelocrva: nop;nop;nop;nop;
    membindiatrva: nop;nop;nop;nop;
    membindtlsrva: nop;nop;nop;nop;
    findloadlibrarya: nop;nop;nop;nop;
    findgetprocaddress: nop;nop;nop;nop;
    """
    # print("gen_oepinit_code32", code_str)
    payload, _ = ks.asm(code_str)
    # print("payload: ", [hex(x) for x in payload])
    return payload
def gen_oepinit_code64():
    """Assemble (keystone, x86-64) the OEP-init bootstrap stub and return its bytes.

    Mirrors gen_oepinit_code32 with the x64 calling convention (rcx/rdx/r8) and
    8-byte ``label: nop;...`` data slots read via ``[rbx + label]``; presumably
    patched by the caller before injection — confirm at the call site.
    """
    ks = Ks(KS_ARCH_X86, KS_MODE_64)
    code_str = f"""
    // for relative address, get the base of addr
    call getip;
    lea rbx, [rax-5];
    push rcx;
    push rdx;
    push r8;
    push r9;
    sub rsp, 0x28; // this is for memory 0x10 align
    // get the imagebase
    mov rax, 0x60; // to avoid relative addressing
    mov rdi, qword ptr gs:[rax]; //peb
    mov rdi, [rdi + 18h]; //ldr
    mov rdi, [rdi + 20h]; //InMemoryOrderLoadList, this
    mov rdi, [rdi -10h + 30h]; //this.DllBase
    // get loadlibrarya, getprocaddress
    mov rax, [rbx + findloadlibrarya];
    add rax, rdi;
    call rax;
    mov [rbx + findloadlibrarya], rax;
    mov rax, [rbx + findgetprocaddress];
    add rax, rdi;
    call rax;
    mov [rbx + findgetprocaddress], rax;
    // reloc
    mov rcx, [rbx + dllrva];
    add rcx, rdi;
    mov rdx, rcx;
    mov rax, [rbx + memrelocrva];
    add rax, rdi;
    call rax;
    // bind iat
    mov r8, [rbx + findgetprocaddress]; // arg3, getprocaddress
    mov rdx, [rbx + findloadlibrarya]; // arg2, loadlibraryas
    mov rcx, [rbx + dllrva];
    add rcx, rdi; // arg1, dllbase value
    mov rax, [rbx + membindiatrva];
    add rax, rdi
    call rax;
    // bind tls
    xor rdx, rdx;
    inc rdx; // argc, reason for tls
    mov rcx, [rbx + dllrva]
    add rcx, rdi; // arg1, dllbase
    mov rax, [rbx + membindtlsrva];
    add rax, rdi;
    call rax;
    // call dll oep, for dll entry
    xor r8, r8; // lpvReserved
    xor rdx, rdx;
    inc rdx; // fdwReason, DLL_PROCESS_ATTACH
    mov rcx, [rbx + dllrva];
    add rcx, rdi; // hinstDLL
    mov rax, [rbx + dlloeprva];
    add rax, rdi;
    call rax;
    // jmp to origin oep
    add rsp, 0x28;
    pop r9;
    pop r8;
    pop rdx;
    pop rcx;
    mov rax, [rbx+exeoeprva];
    add rax, rdi;
    jmp rax;
    getip:
    mov rax, [rsp]
    ret
    exeoeprva: nop;nop;nop;nop;nop;nop;nop;nop;
    dllrva: nop;nop;nop;nop;nop;nop;nop;nop;
    dlloeprva: nop;nop;nop;nop;nop;nop;nop;nop;
    memrelocrva: nop;nop;nop;nop;nop;nop;nop;nop;
    membindiatrva: nop;nop;nop;nop;nop;nop;nop;nop;
    membindtlsrva: nop;nop;nop;nop;nop;nop;nop;nop;
    findloadlibrarya: nop;nop;nop;nop;nop;nop;nop;nop;
    findgetprocaddress: nop;nop;nop;nop;nop;nop;nop;nop;
    """
    # print("gen_oepinit_code64", code_str)
    payload, _ = ks.asm(code_str)
    # print("payload: ", [hex(x) for x in payload])
    return payload
def gen_oepinitstatic_code32():
    """Assemble (keystone, x86 32-bit) the static-inject stub: LoadLibraryA(dllname), then jump on.

    ``retva``/``dllnameva``/``loadlibraryva`` are 4-byte data slots read via
    ``[ebx + label]``; presumably patched by the caller — confirm at the call site.
    """
    ks = Ks(KS_ARCH_X86, KS_MODE_32)
    code_str = f"""
    push eax
    push ebx
    call getip;
    lea ebx, [eax-7];
    mov eax, [ebx + dllnameva];
    push eax;
    mov eax, [ebx + loadlibraryva]
    call eax;
    mov eax, [ebx + retva];
    mov edi, eax;
    pop ebx;
    pop eax;
    jmp edi;
    getip:
    mov eax, [esp]
    ret
    retva:nop;nop;nop;nop;
    dllnameva:nop;nop;nop;nop;
    loadlibraryva:nop;nop;nop;nop;
    """
    payload, _ = ks.asm(code_str)
    return payload
def gen_oepinitstatic_code64():
    """Assemble (keystone, x86-64) the static-inject stub: LoadLibraryA(dllname), then jump on.

    ``retva``/``dllnameva``/``loadlibraryva`` are 8-byte data slots read via
    ``[rbx + label]``; presumably patched by the caller — confirm at the call site.
    """
    ks = Ks(KS_ARCH_X86, KS_MODE_64)
    code_str = f"""
    push rax;
    push rbx;
    push rcx;
    push rdx;
    call getip;
    lea rbx, [rax-9];
    sub rsp, 0x28;
    mov rcx, [rbx + dllnameva];
    mov rax, [rbx + loadlibraryva]
    call rax;
    add rsp, 0x28;
    mov rax, [rbx + retva];
    mov r15, rax;
    pop rdx;
    pop rcx;
    pop rbx;
    pop rax;
    jmp r15;
    getip:
    mov rax, [rsp];
    ret;
    retva:nop;nop;nop;nop;nop;nop;nop;nop;
    dllnameva:nop;nop;nop;nop;nop;nop;nop;nop;
    loadlibraryva:nop;nop;nop;nop;nop;nop;nop;nop;
    """
    payload, _ = ks.asm(code_str)
    return payload
def make_winpe_shellcode(libwinpepath, postfix):
    """Collect all shellcode blobs for one bitness into a name -> bytes dict.

    libwinpepath: path of the compiled winpe COFF object to extract.
    postfix: '32' or '64'; selects the matching oepinit generators and is
    appended to every emitted symbol name.
    """
    codes = dict()
    libwinpe = shellcode.extract_coff(libwinpepath)
    # Explicit dispatch instead of eval() on a constructed string: clearer,
    # and an unknown postfix now fails with a KeyError instead of NameError.
    oepinit_generators = {'32': gen_oepinit_code32, '64': gen_oepinit_code64}
    oepinitstatic_generators = {'32': gen_oepinitstatic_code32, '64': gen_oepinitstatic_code64}
    # for static inject dll into exe oepinit code
    codes[f'g_oepinit_code{postfix}'] = oepinit_generators[postfix]()
    # for dynamic inject dll into exe oepint code
    codes[f'g_oepinitstatic_code{postfix}'] = oepinitstatic_generators[postfix]()
    for name, code in libwinpe.items():
        newname = f"g_{name.replace('winpe_', '').lower()}_code{postfix}"
        codes[newname] = code
    return codes
def debug():
    """Ad-hoc manual test helper: exercise the generators against a local .obj."""
    gen_oepinitstatic_code64()
    codes = shellcode.extract_coff("./bin/winpe_shellcode32.obj")
    pass
def main():
    """CLI entry: argv[1]=32-bit COFF, argv[2]=64-bit COFF, argv[3]=output header path."""
    codes = {}
    for obj_path, bits in ((sys.argv[1], '32'), (sys.argv[2], '64')):
        codes.update(make_winpe_shellcode(obj_path, bits))
    shellcode.write_shellcode_header(codes, outpath=sys.argv[3])
# Script entry; the commented-out debug() is a manual smoke test.
if __name__ == '__main__':
    # debug()
    main()
    pass
"sys.path.append",
"shellcode.extract_coff",
"shellcode.write_shellcode_header"
] | [((49, 88), 'sys.path.append', 'sys.path.append', (['"""./../../util/script/"""'], {}), "('./../../util/script/')\n", (64, 88), False, 'import sys\n'), ((6363, 6399), 'shellcode.extract_coff', 'shellcode.extract_coff', (['libwinpepath'], {}), '(libwinpepath)\n', (6385, 6399), False, 'import shellcode\n'), ((6897, 6950), 'shellcode.extract_coff', 'shellcode.extract_coff', (['"""./bin/winpe_shellcode32.obj"""'], {}), "('./bin/winpe_shellcode32.obj')\n", (6919, 6950), False, 'import shellcode\n'), ((7120, 7180), 'shellcode.write_shellcode_header', 'shellcode.write_shellcode_header', (['codes'], {'outpath': 'sys.argv[3]'}), '(codes, outpath=sys.argv[3])\n', (7152, 7180), False, 'import shellcode\n')] |
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
from scipy.special import erf # pylint: disable=E0611
from ._op import OpRunUnaryNum
class Erf(OpRunUnaryNum):
    """Runtime for the ONNX ``Erf`` operator (element-wise Gauss error function)."""

    def __init__(self, onnx_node, desc=None, **options):
        OpRunUnaryNum.__init__(self, onnx_node, desc=desc, **options)

    def _run(self, x):  # pylint: disable=W0221
        # In-place variant is used when input 0 may be overwritten.
        inplace_allowed = self.inplaces.get(0, False)
        return self._run_inplace(x) if inplace_allowed else (erf(x), )

    def _run_inplace(self, x):
        return (erf(x, out=x), )

    def to_python(self, inputs):
        return ('from scipy.special import erf',
                "return erf(%s)" % inputs[0])
| [
"scipy.special.erf"
] | [((519, 525), 'scipy.special.erf', 'erf', (['x'], {}), '(x)\n', (522, 525), False, 'from scipy.special import erf\n'), ((577, 590), 'scipy.special.erf', 'erf', (['x'], {'out': 'x'}), '(x, out=x)\n', (580, 590), False, 'from scipy.special import erf\n')] |
from gstat_classroom.index import app
# Launch the Dash development server with hot-reload; not for production use.
app.run_server(debug=True)
"gstat_classroom.index.app.run_server"
] | [((39, 65), 'gstat_classroom.index.app.run_server', 'app.run_server', ([], {'debug': '(True)'}), '(debug=True)\n', (53, 65), False, 'from gstat_classroom.index import app\n')] |
import pprint
import os
class ServerProps:
    """Parser/updater for a Java-style ``key=value`` properties file.

    Comment lines (starting with '#') are preserved in a local ``.header``
    scratch file (relative to the cwd) so ``save()`` can re-emit them at the
    top of the rewritten file.
    """

    def __init__(self, filepath):
        # Path of the properties file this instance manages.
        self.filepath = filepath
        # Mapping of property name -> value (both plain strings).
        self.props = self._parse()

    def _parse(self):
        """Loads and parses the file specified in self.filepath."""
        if os.path.exists(".header"):
            os.remove(".header")
        props = {}
        with open(self.filepath) as fp:
            for line in fp:
                if line.startswith('#'):
                    # Preserve comment lines so save() can re-emit them.
                    with open(".header", "a+") as h:
                        h.write(line)
                    continue
                key, sep, value = line.partition('=')
                if not sep:
                    # Skip blank / malformed lines (the original stored junk
                    # entries for these, and truncated values at the first
                    # backslash due to a buggy slice).
                    continue
                props[key] = value.rstrip('\n')
        return props

    def print(self):
        """Prints the properties dictionary (using pprint)."""
        pprint.pprint(self.props)

    def get(self):
        """Returns the properties dictionary"""
        return self.props

    def update(self, key, val):
        """Updates property in the properties dictionary [ update("pvp", "true") ] and returns boolean condition"""
        if key in self.props:
            self.props[key] = val
            return True
        return False

    def save(self):
        """Rewrite self.filepath: preserved header comments first, then key=value lines."""
        with open(self.filepath, "w") as f:
            # Bug fix: the original unconditionally opened '.header' and crashed
            # with FileNotFoundError when the source file had no comment lines.
            if os.path.exists(".header"):
                with open(".header") as header:
                    for line in header:
                        f.write(line)
            for key, value in self.props.items():
                f.write(key + "=" + value + "\n")
        if os.path.exists(".header"):
            os.remove(".header")
| [
"os.path.exists",
"pprint.pprint",
"os.remove"
] | [((992, 1017), 'pprint.pprint', 'pprint.pprint', (['self.props'], {}), '(self.props)\n', (1005, 1017), False, 'import pprint\n'), ((1866, 1891), 'os.path.exists', 'os.path.exists', (['""".header"""'], {}), "('.header')\n", (1880, 1891), False, 'import os\n'), ((343, 368), 'os.path.exists', 'os.path.exists', (['""".header"""'], {}), "('.header')\n", (357, 368), False, 'import os\n'), ((1909, 1929), 'os.remove', 'os.remove', (['""".header"""'], {}), "('.header')\n", (1918, 1929), False, 'import os\n'), ((386, 406), 'os.remove', 'os.remove', (['""".header"""'], {}), "('.header')\n", (395, 406), False, 'import os\n')] |
import numpy as np
# laser = [0.1, 0.2, 0.3, 0.4, 0.5]
# laser_arr = np.array(laser)
# result = np.count_nonzero(laser_arr >= 0.2)
import math
radian = math.radians(90)
con_d = math.degrees(radian)
print(radian,':::',con_d) | [
"math.degrees",
"math.radians"
] | [((155, 171), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (167, 171), False, 'import math\n'), ((181, 201), 'math.degrees', 'math.degrees', (['radian'], {}), '(radian)\n', (193, 201), False, 'import math\n')] |
from morle.utils.files import full_path
import morle.shared as shared
from collections import defaultdict
import hfst
import os.path
import sys
import tqdm
import types
def seq_to_transducer(alignment, weight=0.0, type=None, alphabet=None):
if type is None:
type=shared.config['FST'].getint('transducer_type')
tr = hfst.HfstBasicTransducer()
if alphabet is None:
alphabet = ()
alphabet = tuple(sorted(set(alphabet) | set(sum(alignment, ()))))
tr.add_symbols_to_alphabet(alphabet)
last_state_id = 0
for (x, y) in alignment:
state_id = tr.add_state()
if (x, y) == (hfst.IDENTITY, hfst.IDENTITY):
tr.add_transition(last_state_id,
hfst.HfstBasicTransition(state_id,
hfst.IDENTITY,
hfst.IDENTITY,
0.0))
tr.add_transition(state_id,
hfst.HfstBasicTransition(state_id,
hfst.IDENTITY,
hfst.IDENTITY,
0.0))
for a in tr.get_alphabet():
if not a.startswith('@_'):
tr.add_transition(last_state_id, hfst.HfstBasicTransition(state_id, a, a, 0.0))
tr.add_transition(state_id, hfst.HfstBasicTransition(state_id, a, a, 0.0))
else:
tr.add_transition(last_state_id,
hfst.HfstBasicTransition(state_id, x, y, 0.0))
last_state_id = state_id
tr.set_final_weight(last_state_id, weight)
return hfst.HfstTransducer(tr, type)
def binary_disjunct(transducers, print_progress=False):
iterator, progressbar = None, None
if isinstance(transducers, list):
iterator = iter(transducers)
if print_progress:
progressbar = tqdm.tqdm(total=len(transducers))
elif isinstance(transducers, types.GeneratorType):
iterator = transducers
else:
raise TypeError('\'transducers\' must be a list or a generator!')
stack, sizes = [], []
count = 0
while True:
if len(sizes) >= 2 and sizes[-1] == sizes[-2]:
# disjunct the two top transducers from the stack
first, first_size = stack.pop(), sizes.pop()
second, second_size = stack.pop(), sizes.pop()
# print('merge', first_size, second_size)
first.disjunct(second)
stack.append(first)
sizes.append(first_size + second_size)
stack[-1].minimize()
else:
# push a new transducer to the stack
try:
# print('push')
stack.append(next(iterator))
sizes.append(1)
count += 1
if print_progress and progressbar is not None:
progressbar.update()
except StopIteration:
break
# disjunct the remaining transducers and minimize the result
# print('final merge')
t = stack.pop()
while stack:
t.disjunct(stack.pop())
t.determinize()
t.minimize()
# t.push_weights(hfst.TO_INITIAL_STATE)
t.push_weights_to_end()
if print_progress and progressbar is not None:
progressbar.close()
return t
# TODO deprecated
# A_TO_Z = tuple('abcdefghijklmnoprstuvwxyz')
#
# def generate_id(id_num):
# result = A_TO_Z[id_num % len(A_TO_Z)]
# while id_num > len(A_TO_Z):
# id_num //= len(A_TO_Z)
# result = A_TO_Z[id_num % len(A_TO_Z)-1] + result
# return result
#
# def id_generator():
# tr = hfst.HfstBasicTransducer()
# tr.add_symbols_to_alphabet(A_TO_Z + ('$',))
# tr.add_transition(0,
# hfst.HfstBasicTransition(1, '$', '$', 0.0))
# for c in A_TO_Z:
# tr.add_transition(1,
# hfst.HfstBasicTransition(1, c, c, 0.0))
# tr.set_final_weight(1, 0.0)
# return hfst.HfstTransducer(tr, settings.TRANSDUCER_TYPE)
def number_of_paths(transducer):
# in n-th iteration paths_for_state[s] contains the number of paths
# of length n terminating in state s
# terminates if maximum n is reached, i.e. paths_for_state > 0
# only for states without outgoing transitions
t = hfst.HfstBasicTransducer(transducer)
paths_for_state = [1] + [0] * (len(t.states())-1)
result = 0
changed = True
while changed:
changed = False
new_paths_for_state = [0] * len(t.states())
for state in t.states():
if paths_for_state[state] > 0:
for tr in t.transitions(state):
new_paths_for_state[tr.get_target_state()] +=\
paths_for_state[state]
changed = True
for state in t.states():
if t.is_final_state(state):
result += new_paths_for_state[state]
paths_for_state = new_paths_for_state
return result
def delenv(alphabet, max_affix_size, max_infix_size, max_infix_slots,
deletion_symbol='@_DEL_@', deletion_slot_symbol='@_DELSLOT_@'):
def add_deletion_chain(tr, alphabet, state, length):
tr.add_transition(state,
hfst.HfstBasicTransition(
state+1, hfst.EPSILON, deletion_slot_symbol, 0.0))
for i in range(1, length+1):
for c in alphabet:
if c not in (hfst.EPSILON, hfst.IDENTITY, hfst.UNKNOWN):
tr.add_transition(state+i,
hfst.HfstBasicTransition(
state+i+1,
c, deletion_symbol, 0.0))
last_state = state + length + 1
for i in range(length+1):
tr.add_transition(state+i,
hfst.HfstBasicTransition(
last_state,
hfst.EPSILON, hfst.EPSILON, 0.0))
return last_state
def add_identity_loop(tr, alphabet, state):
for c in alphabet:
if c not in (hfst.EPSILON, hfst.IDENTITY, hfst.UNKNOWN):
tr.add_transition(state,
hfst.HfstBasicTransition(state+1, c, c, 0.0))
tr.add_transition(state+1,
hfst.HfstBasicTransition(state+1, c, c, 0.0))
return state+1
tr = hfst.HfstBasicTransducer()
# prefix
state = add_deletion_chain(tr, alphabet, 0, max_affix_size)
state = add_identity_loop(tr, alphabet, state)
# infixes
for i in range(max_infix_slots):
state = add_deletion_chain(tr, alphabet, state, max_infix_size)
state = add_identity_loop(tr, alphabet, state)
# suffix
state = add_deletion_chain(tr, alphabet, state, max_affix_size)
tr.set_final_weight(state, 0.0)
tr_c = hfst.HfstTransducer(tr)
tr_c.remove_epsilons()
tr_c.minimize()
return tr_c
# TODO similar_words():
# lookup word to find out substrings,
# lookup each substring, sum and remove duplicates
def delfilter(alphabet, length, deletion_symbol='@_DEL_@',
deletion_slot_symbol='@_DELSLOT_@'):
tr = hfst.HfstBasicTransducer()
tr.set_final_weight(0, 0.0)
tr.add_transition(0,
hfst.HfstBasicTransition(
0, deletion_slot_symbol, deletion_slot_symbol, 0.0))
printable_chars = set(alphabet) -\
{ hfst.EPSILON, hfst.IDENTITY, hfst.UNKNOWN,
deletion_symbol }
for i in range(length):
for c in printable_chars:
tr.add_transition(i,
hfst.HfstBasicTransition(i+1, c, c, 0.0))
tr.add_transition(i+1,
hfst.HfstBasicTransition(
i, deletion_symbol, hfst.EPSILON, 0.0))
tr.add_transition(i+1,
hfst.HfstBasicTransition(
i+1, deletion_slot_symbol, deletion_slot_symbol, 0.0))
tr.set_final_weight(i+1, 0.0)
first_negative_state = length+1
tr.add_transition(0, hfst.HfstBasicTransition(
first_negative_state, deletion_symbol,
hfst.EPSILON, 0.0))
for c in printable_chars:
tr.add_transition(first_negative_state,
hfst.HfstBasicTransition(0, c, c, 0.0))
for i in range(length-1):
tr.add_transition(first_negative_state+i,
hfst.HfstBasicTransition(
first_negative_state+i+1,
deletion_symbol, hfst.EPSILON, 0.0))
tr.add_transition(first_negative_state+i+1,
hfst.HfstBasicTransition(
first_negative_state+i+1, deletion_slot_symbol, deletion_slot_symbol, 0.0))
for c in printable_chars:
tr.add_transition(first_negative_state+i+1,
hfst.HfstBasicTransition(
first_negative_state+i, c, c, 0.0))
tr_c = hfst.HfstTransducer(tr)
return tr_c
def rootgen_transducer(rootdist):
# create an automaton for word generation
if shared.config['Features'].getint('rootdist_n') != 1:
raise NotImplementedError('Not implemented for rootdist_n != 1')
weights = rootdist.features[0].log_probs
tr = hfst.HfstBasicTransducer()
tr.set_final_weight(0, weights[('#',)])
for char, weight in weights.items():
if char != ('#',):
tr.add_transition(0,
hfst.HfstBasicTransition(0, char[0], char[0], weight))
return hfst.HfstTransducer(tr)
def tag_absorber(alphabet):
tr = hfst.HfstBasicTransducer()
for c in alphabet:
if shared.compiled_patterns['symbol'].match(c):
tr.add_transition(0,
hfst.HfstBasicTransition(0, c, c, 0.0))
elif shared.compiled_patterns['tag'].match(c):
tr.add_transition(0,
hfst.HfstBasicTransition(1, c, hfst.EPSILON, 0.0))
tr.add_transition(1,
hfst.HfstBasicTransition(1, c, hfst.EPSILON, 0.0))
tr.set_final_weight(0, 0.0)
tr.set_final_weight(1, 0.0)
return hfst.HfstTransducer(tr)
def tag_acceptor(tag, alphabet):
tr = hfst.HfstBasicTransducer()
for c in alphabet:
if shared.compiled_patterns['symbol'].match(c):
tr.add_transition(0,
hfst.HfstBasicTransition(0, c, c, 0.0))
tr.set_final_weight(0, 0.0)
tr_c = hfst.HfstTransducer(tr)
tr_c.concatenate(seq_to_transducer(tuple(zip(tag, tag))))
return tr_c
def generator(seqs):
def _generator_for_seq(seq):
tr = hfst.HfstBasicTransducer()
for i, c in enumerate(seq):
tr.add_transition(i, hfst.HfstBasicTransition(i+1, c, c, 0.0))
tr.set_final_weight(len(seq), 0.0)
return hfst.HfstTransducer(tr)
transducers = [_generator_for_seq(seq) for seq in seqs]
return binary_disjunct(transducers)
def load_transducer(filename):
path = os.path.join(shared.options['working_dir'], filename)
istr = hfst.HfstInputStream(path)
transducer = istr.read()
istr.close()
return transducer
def load_cascade(filename):
transducers = []
istr = hfst.HfstInputStream(full_path(filename))
while not istr.is_eof():
transducers.append(istr.read())
istr.close()
return tuple(transducers)
def save_transducer(transducer, filename):
path = os.path.join(shared.options['working_dir'], filename)
ostr = hfst.HfstOutputStream(filename=path, type=transducer.get_type())
ostr.write(transducer)
ostr.flush()
ostr.close()
def save_cascade(transducers, filename, type=None):
path = os.path.join(shared.options['working_dir'], filename)
ostr = hfst.HfstOutputStream(filename=path, type=transducers[0].get_type())
for t in transducers:
ostr.write(t)
ostr.flush()
ostr.close()
| [
"hfst.HfstTransducer",
"morle.utils.files.full_path",
"hfst.HfstInputStream",
"hfst.HfstBasicTransition",
"hfst.HfstBasicTransducer"
] | [((333, 359), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (357, 359), False, 'import hfst\n'), ((1799, 1828), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr', 'type'], {}), '(tr, type)\n', (1818, 1828), False, 'import hfst\n'), ((4477, 4513), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', (['transducer'], {}), '(transducer)\n', (4501, 4513), False, 'import hfst\n'), ((6639, 6665), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (6663, 6665), False, 'import hfst\n'), ((7100, 7123), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr'], {}), '(tr)\n', (7119, 7123), False, 'import hfst\n'), ((7434, 7460), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (7458, 7460), False, 'import hfst\n'), ((9361, 9384), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr'], {}), '(tr)\n', (9380, 9384), False, 'import hfst\n'), ((9721, 9747), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (9745, 9747), False, 'import hfst\n'), ((9976, 9999), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr'], {}), '(tr)\n', (9995, 9999), False, 'import hfst\n'), ((10038, 10064), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (10062, 10064), False, 'import hfst\n'), ((10563, 10586), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr'], {}), '(tr)\n', (10582, 10586), False, 'import hfst\n'), ((10630, 10656), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (10654, 10656), False, 'import hfst\n'), ((10868, 10891), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr'], {}), '(tr)\n', (10887, 10891), False, 'import hfst\n'), ((11472, 11498), 'hfst.HfstInputStream', 'hfst.HfstInputStream', (['path'], {}), '(path)\n', (11492, 11498), False, 'import hfst\n'), ((7540, 7616), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(0)', 'deletion_slot_symbol', 'deletion_slot_symbol', '(0.0)'], {}), '(0, deletion_slot_symbol, 
deletion_slot_symbol, 0.0)\n', (7564, 7616), False, 'import hfst\n'), ((8380, 8467), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['first_negative_state', 'deletion_symbol', 'hfst.EPSILON', '(0.0)'], {}), '(first_negative_state, deletion_symbol, hfst.\n EPSILON, 0.0)\n', (8404, 8467), False, 'import hfst\n'), ((11043, 11069), 'hfst.HfstBasicTransducer', 'hfst.HfstBasicTransducer', ([], {}), '()\n', (11067, 11069), False, 'import hfst\n'), ((11239, 11262), 'hfst.HfstTransducer', 'hfst.HfstTransducer', (['tr'], {}), '(tr)\n', (11258, 11262), False, 'import hfst\n'), ((11649, 11668), 'morle.utils.files.full_path', 'full_path', (['filename'], {}), '(filename)\n', (11658, 11668), False, 'from morle.utils.files import full_path\n'), ((5427, 5503), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(state + 1)', 'hfst.EPSILON', 'deletion_slot_symbol', '(0.0)'], {}), '(state + 1, hfst.EPSILON, deletion_slot_symbol, 0.0)\n', (5451, 5503), False, 'import hfst\n'), ((8017, 8080), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['i', 'deletion_symbol', 'hfst.EPSILON', '(0.0)'], {}), '(i, deletion_symbol, hfst.EPSILON, 0.0)\n', (8041, 8080), False, 'import hfst\n'), ((8170, 8255), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(i + 1)', 'deletion_slot_symbol', 'deletion_slot_symbol', '(0.0)'], {}), '(i + 1, deletion_slot_symbol, deletion_slot_symbol, 0.0\n )\n', (8194, 8255), False, 'import hfst\n'), ((8628, 8666), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(0)', 'c', 'c', '(0.0)'], {}), '(0, c, c, 0.0)\n', (8652, 8666), False, 'import hfst\n'), ((8774, 8868), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(first_negative_state + i + 1)', 'deletion_symbol', 'hfst.EPSILON', '(0.0)'], {}), '(first_negative_state + i + 1, deletion_symbol,\n hfst.EPSILON, 0.0)\n', (8798, 8868), False, 'import hfst\n'), ((9002, 9109), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(first_negative_state + i + 1)', 
'deletion_slot_symbol', 'deletion_slot_symbol', '(0.0)'], {}), '(first_negative_state + i + 1, deletion_slot_symbol,\n deletion_slot_symbol, 0.0)\n', (9026, 9109), False, 'import hfst\n'), ((732, 801), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['state_id', 'hfst.IDENTITY', 'hfst.IDENTITY', '(0.0)'], {}), '(state_id, hfst.IDENTITY, hfst.IDENTITY, 0.0)\n', (756, 801), False, 'import hfst\n'), ((1048, 1117), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['state_id', 'hfst.IDENTITY', 'hfst.IDENTITY', '(0.0)'], {}), '(state_id, hfst.IDENTITY, hfst.IDENTITY, 0.0)\n', (1072, 1117), False, 'import hfst\n'), ((1661, 1706), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['state_id', 'x', 'y', '(0.0)'], {}), '(state_id, x, y, 0.0)\n', (1685, 1706), False, 'import hfst\n'), ((6051, 6120), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['last_state', 'hfst.EPSILON', 'hfst.EPSILON', '(0.0)'], {}), '(last_state, hfst.EPSILON, hfst.EPSILON, 0.0)\n', (6075, 6120), False, 'import hfst\n'), ((7918, 7960), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(i + 1)', 'c', 'c', '(0.0)'], {}), '(i + 1, c, c, 0.0)\n', (7942, 7960), False, 'import hfst\n'), ((9254, 9315), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(first_negative_state + i)', 'c', 'c', '(0.0)'], {}), '(first_negative_state + i, c, c, 0.0)\n', (9278, 9315), False, 'import hfst\n'), ((9910, 9963), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(0)', 'char[0]', 'char[0]', 'weight'], {}), '(0, char[0], char[0], weight)\n', (9934, 9963), False, 'import hfst\n'), ((10193, 10231), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(0)', 'c', 'c', '(0.0)'], {}), '(0, c, c, 0.0)\n', (10217, 10231), False, 'import hfst\n'), ((10785, 10823), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(0)', 'c', 'c', '(0.0)'], {}), '(0, c, c, 0.0)\n', (10809, 10823), False, 'import hfst\n'), ((11139, 11181), 'hfst.HfstBasicTransition', 
'hfst.HfstBasicTransition', (['(i + 1)', 'c', 'c', '(0.0)'], {}), '(i + 1, c, c, 0.0)\n', (11163, 11181), False, 'import hfst\n'), ((6437, 6483), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(state + 1)', 'c', 'c', '(0.0)'], {}), '(state + 1, c, c, 0.0)\n', (6461, 6483), False, 'import hfst\n'), ((6560, 6606), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(state + 1)', 'c', 'c', '(0.0)'], {}), '(state + 1, c, c, 0.0)\n', (6584, 6606), False, 'import hfst\n'), ((10337, 10386), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(1)', 'c', 'hfst.EPSILON', '(0.0)'], {}), '(1, c, hfst.EPSILON, 0.0)\n', (10361, 10386), False, 'import hfst\n'), ((10437, 10486), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(1)', 'c', 'hfst.EPSILON', '(0.0)'], {}), '(1, c, hfst.EPSILON, 0.0)\n', (10461, 10486), False, 'import hfst\n'), ((1429, 1474), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['state_id', 'a', 'a', '(0.0)'], {}), '(state_id, a, a, 0.0)\n', (1453, 1474), False, 'import hfst\n'), ((1524, 1569), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['state_id', 'a', 'a', '(0.0)'], {}), '(state_id, a, a, 0.0)\n', (1548, 1569), False, 'import hfst\n'), ((5760, 5824), 'hfst.HfstBasicTransition', 'hfst.HfstBasicTransition', (['(state + i + 1)', 'c', 'deletion_symbol', '(0.0)'], {}), '(state + i + 1, c, deletion_symbol, 0.0)\n', (5784, 5824), False, 'import hfst\n')] |
"""Tests for the array sorting algorithms in array_sorts"""
import pytest
from src.array_sorts import bubble_sort, selection_sort, insertion_sort
@pytest.mark.parametrize("func", [
bubble_sort,
selection_sort,
insertion_sort
])
@pytest.mark.parametrize("array_to_sort,expected_sorted_array", [
([], []),
([1], [1]),
([1, 2], [1, 2]),
([2, 1], [1, 2]),
([8, 3, 4, 2], [2, 3, 4, 8]),
([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]),
([5, 4, 3, 2, 1], [1, 2, 3, 4, 5]),
([65, 11, 92, 14, 8, 777, 21, 11, 786], [8, 11, 11, 14, 21, 65, 92, 777, 786]),
([-7, 8, -5, 0, 9, -5, -10, 1], [-10, -7, -5, -5, 0, 1, 8, 9])
])
def test_sorter_functions(func, array_to_sort, expected_sorted_array):
"""Test each array sorting algorithm implementation against the same set
of test parameters"""
assert func(array_to_sort) == expected_sorted_array
| [
"pytest.mark.parametrize"
] | [((148, 226), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[bubble_sort, selection_sort, insertion_sort]'], {}), "('func', [bubble_sort, selection_sort, insertion_sort])\n", (171, 226), False, 'import pytest\n'), ((242, 632), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""array_to_sort,expected_sorted_array"""', '[([], []), ([1], [1]), ([1, 2], [1, 2]), ([2, 1], [1, 2]), ([8, 3, 4, 2], [\n 2, 3, 4, 8]), ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]), ([5, 4, 3, 2, 1], [1,\n 2, 3, 4, 5]), ([65, 11, 92, 14, 8, 777, 21, 11, 786], [8, 11, 11, 14, \n 21, 65, 92, 777, 786]), ([-7, 8, -5, 0, 9, -5, -10, 1], [-10, -7, -5, -\n 5, 0, 1, 8, 9])]'], {}), "('array_to_sort,expected_sorted_array', [([], []), (\n [1], [1]), ([1, 2], [1, 2]), ([2, 1], [1, 2]), ([8, 3, 4, 2], [2, 3, 4,\n 8]), ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]), ([5, 4, 3, 2, 1], [1, 2, 3, 4,\n 5]), ([65, 11, 92, 14, 8, 777, 21, 11, 786], [8, 11, 11, 14, 21, 65, 92,\n 777, 786]), ([-7, 8, -5, 0, 9, -5, -10, 1], [-10, -7, -5, -5, 0, 1, 8, 9])]\n )\n", (265, 632), False, 'import pytest\n')] |
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
import os
import json
from util import build_path
def read_json(filename):
with open(filename) as json_file:
return json.load(json_file)
def write_json(filename, data):
with open(filename, 'w') as outfile:
json.dump(data, outfile)
current_data_file = os.path.join(build_path(), "bench.json")
all_data_file = "gh-pages/data.json" # Includes all benchmark data.
recent_data_file = "gh-pages/recent.json" # Includes recent 20 benchmark data.
assert os.path.exists(current_data_file)
assert os.path.exists(all_data_file)
new_data = read_json(current_data_file)
all_data = read_json(all_data_file)
all_data.append(new_data)
write_json(all_data_file, all_data)
write_json(recent_data_file, all_data[-20:])
| [
"json.load",
"os.path.exists",
"util.build_path",
"json.dump"
] | [((575, 608), 'os.path.exists', 'os.path.exists', (['current_data_file'], {}), '(current_data_file)\n', (589, 608), False, 'import os\n'), ((616, 645), 'os.path.exists', 'os.path.exists', (['all_data_file'], {}), '(all_data_file)\n', (630, 645), False, 'import os\n'), ((390, 402), 'util.build_path', 'build_path', ([], {}), '()\n', (400, 402), False, 'from util import build_path\n'), ((226, 246), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (235, 246), False, 'import json\n'), ((330, 354), 'json.dump', 'json.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (339, 354), False, 'import json\n')] |
"""
Modified from https://github.com/microsoft/Swin-Transformer/blob/main/main.py
"""
import os
import time
import argparse
import datetime
import numpy as np
import oneflow as flow
import oneflow.backends.cudnn as cudnn
from flowvision.loss.cross_entropy import (
LabelSmoothingCrossEntropy,
SoftTargetCrossEntropy,
)
from flowvision.utils.metrics import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import (
load_checkpoint,
save_checkpoint,
get_grad_norm,
auto_resume_helper,
reduce_tensor,
)
def parse_option():
parser = argparse.ArgumentParser(
"Flowvision image classification training and evaluation script", add_help=False
)
parser.add_argument(
"--model_arch",
type=str,
required=True,
default="swin_tiny_patch4_window7_224",
help="model for training",
)
parser.add_argument(
"--cfg", type=str, required=True, metavar="FILE", help="path to config file",
)
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs="+",
)
# easy config modification
parser.add_argument(
"--batch-size", type=int, default=8, help="batch size for single GPU"
)
parser.add_argument("--data-path", type=str, help="path to dataset")
parser.add_argument(
"--zip",
action="store_true",
help="use zipped dataset instead of folder dataset",
)
parser.add_argument(
"--cache-mode",
type=str,
default="part",
choices=["no", "full", "part"],
help="no: no cache, "
"full: cache all data, "
"part: sharding the dataset into nonoverlapping pieces and only cache one piece",
)
parser.add_argument("--resume", help="resume from checkpoint")
parser.add_argument(
"--accumulation-steps", type=int, help="gradient accumulation steps"
)
parser.add_argument(
"--use-checkpoint",
action="store_true",
help="whether to use gradient checkpointing to save memory",
)
parser.add_argument(
"--output",
default="output",
type=str,
metavar="PATH",
help="root of output folder, the full path is <output>/<model_name>/<tag> (default: output)",
)
parser.add_argument("--tag", help="tag of experiment")
parser.add_argument("--eval", action="store_true", help="Perform evaluation only")
parser.add_argument(
"--throughput", action="store_true", help="Test throughput only"
)
# distributed training
parser.add_argument(
"--local_rank",
type=int,
default=0,
required=False,
help="local rank for DistributedDataParallel",
)
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
def main(config):
(
dataset_train,
dataset_val,
data_loader_train,
data_loader_val,
mixup_fn,
) = build_loader(config)
logger.info(f"Creating model:{config.MODEL.ARCH}")
model = build_model(config)
model.cuda()
logger.info(str(model))
optimizer = build_optimizer(config, model)
model = flow.nn.parallel.DistributedDataParallel(model, broadcast_buffers=False)
# FIXME: model with DDP wrapper doesn't have model.module
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, "flops"):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))
if config.AUG.MIXUP > 0.0:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.0:
criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = flow.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
if resume_file:
if config.MODEL.RESUME:
logger.warning(
f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}"
)
config.defrost()
config.MODEL.RESUME = resume_file
config.freeze()
logger.info(f"auto resuming from {resume_file}")
else:
logger.info(f"no checkpoint found in {config.OUTPUT}, ignoring auto resume")
if config.MODEL.RESUME:
max_accuracy = load_checkpoint(
config, model_without_ddp, optimizer, lr_scheduler, logger
)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%"
)
if config.EVAL_MODE:
return
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
logger.info("Start training")
start_time = time.time()
for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):
data_loader_train.sampler.set_epoch(epoch)
train_one_epoch(
config,
model,
criterion,
data_loader_train,
optimizer,
epoch,
mixup_fn,
lr_scheduler,
)
if flow.env.get_rank() == 0 and (
epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)
):
save_checkpoint(
config,
epoch,
model_without_ddp,
max_accuracy,
optimizer,
lr_scheduler,
logger,
)
# no validate
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%"
)
max_accuracy = max(max_accuracy, acc1)
logger.info(f"Max accuracy: {max_accuracy:.2f}%")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info("Training time {}".format(total_time_str))
def train_one_epoch(
config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler
):
model.train()
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader):
samples = samples.cuda()
targets = targets.cuda()
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
outputs = model(samples)
if config.TRAIN.ACCUMULATION_STEPS > 1:
loss = criterion(outputs, targets)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = flow.nn.utils.clip_grad_norm_(
model.parameters(), config.TRAIN.CLIP_GRAD
)
else:
grad_norm = get_grad_norm(model.parameters())
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step_update(epoch * num_steps + idx)
else:
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = flow.nn.utils.clip_grad_norm_(
model.parameters(), config.TRAIN.CLIP_GRAD
)
else:
grad_norm = get_grad_norm(model.parameters())
optimizer.step()
lr_scheduler.step_update(epoch * num_steps + idx)
loss_meter.update(loss.item(), targets.size(0))
norm_meter.update(grad_norm)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]["lr"]
etas = batch_time.avg * (num_steps - idx)
logger.info(
f"Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t"
f"eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t"
f"time {batch_time.val:.4f} ({batch_time.avg:.4f})\t"
f"loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t"
f"grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t"
)
epoch_time = time.time() - start
logger.info(
f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}"
)
@flow.no_grad()
def validate(config, data_loader, model):
criterion = flow.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images = images.cuda()
target = target.cuda()
# compute output
output = model(images)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
logger.info(
f"Test: [{idx}/{len(data_loader)}]\t"
f"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
f"Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t"
f"Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t"
f"Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t"
)
logger.info(f" * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}")
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@flow.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for idx, (images, _) in enumerate(data_loader):
images = images.cuda()
batch_size = images.shape[0]
for i in range(50):
model(images)
# TODO: add flow.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
tic2 = time.time()
logger.info(
f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}"
)
return
if __name__ == "__main__":
    # Parse CLI args and the merged YAML config (frozen yacs-style node).
    _, config = parse_option()
    # Distributed launchers export RANK / WORLD_SIZE; fall back to -1 when
    # running single-process.
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        rank = flow.env.get_rank()
        world_size = flow.env.get_world_size()
        print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
    else:
        rank = -1
        world_size = -1
    # Seed every process differently (base seed + rank) for data diversity
    # while keeping runs reproducible.
    seed = config.SEED + flow.env.get_rank()
    flow.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # Linear LR scaling rule: scale the configured rates by the global batch
    # size (per-process batch * world size) relative to a reference of 512.
    linear_scaled_lr = (
        config.TRAIN.BASE_LR
        * config.DATA.BATCH_SIZE
        * flow.env.get_world_size()
        / 512.0
    )
    linear_scaled_warmup_lr = (
        config.TRAIN.WARMUP_LR
        * config.DATA.BATCH_SIZE
        * flow.env.get_world_size()
        / 512.0
    )
    linear_scaled_min_lr = (
        config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * flow.env.get_world_size() / 512.0
    )
    # gradient accumulation also need to scale the learning rate
    if config.TRAIN.ACCUMULATION_STEPS > 1:
        linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
        linear_scaled_warmup_lr = (
            linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
        )
        linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
    # Config is frozen by default; unfreeze, write back the scaled rates,
    # then freeze again so later code cannot mutate it accidentally.
    config.defrost()
    config.TRAIN.BASE_LR = linear_scaled_lr
    config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
    config.TRAIN.MIN_LR = linear_scaled_min_lr
    config.freeze()
    os.makedirs(config.OUTPUT, exist_ok=True)
    logger = create_logger(
        output_dir=config.OUTPUT,
        dist_rank=flow.env.get_rank(),
        name=f"{config.MODEL.ARCH}",
    )
    # Only rank 0 persists the fully-resolved config to disk.
    if flow.env.get_rank() == 0:
        path = os.path.join(config.OUTPUT, "config.json")
        with open(path, "w") as f:
            f.write(config.dump())
        logger.info(f"Full config saved to {path}")
    # print config
    logger.info(config.dump())
    main(config)
| [
"flowvision.loss.cross_entropy.SoftTargetCrossEntropy",
"utils.reduce_tensor",
"flowvision.utils.metrics.accuracy",
"argparse.ArgumentParser",
"oneflow.env.get_rank",
"flowvision.utils.metrics.AverageMeter",
"oneflow.nn.CrossEntropyLoss",
"models.build_model",
"data.build_loader",
"oneflow.no_grad... | [((9189, 9203), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (9201, 9203), True, 'import oneflow as flow\n'), ((10709, 10723), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (10721, 10723), True, 'import oneflow as flow\n'), ((754, 868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Flowvision image classification training and evaluation script"""'], {'add_help': '(False)'}), "(\n 'Flowvision image classification training and evaluation script',\n add_help=False)\n", (777, 868), False, 'import argparse\n'), ((3034, 3050), 'config.get_config', 'get_config', (['args'], {}), '(args)\n', (3044, 3050), False, 'from config import get_config\n'), ((3224, 3244), 'data.build_loader', 'build_loader', (['config'], {}), '(config)\n', (3236, 3244), False, 'from data import build_loader\n'), ((3313, 3332), 'models.build_model', 'build_model', (['config'], {}), '(config)\n', (3324, 3332), False, 'from models import build_model\n'), ((3395, 3425), 'optimizer.build_optimizer', 'build_optimizer', (['config', 'model'], {}), '(config, model)\n', (3410, 3425), False, 'from optimizer import build_optimizer\n'), ((3438, 3510), 'oneflow.nn.parallel.DistributedDataParallel', 'flow.nn.parallel.DistributedDataParallel', (['model'], {'broadcast_buffers': '(False)'}), '(model, broadcast_buffers=False)\n', (3478, 3510), True, 'import oneflow as flow\n'), ((5411, 5422), 'time.time', 'time.time', ([], {}), '()\n', (5420, 5422), False, 'import time\n'), ((6827, 6841), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6839, 6841), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((6859, 6873), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6871, 6873), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((6891, 6905), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6903, 6905), False, 'from flowvision.utils.metrics import 
accuracy, AverageMeter\n'), ((6919, 6930), 'time.time', 'time.time', ([], {}), '()\n', (6928, 6930), False, 'import time\n'), ((6941, 6952), 'time.time', 'time.time', ([], {}), '()\n', (6950, 6952), False, 'import time\n'), ((9262, 9288), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (9286, 9288), True, 'import oneflow as flow\n'), ((9324, 9338), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9336, 9338), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9356, 9370), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9368, 9370), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9388, 9402), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9400, 9402), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9420, 9434), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9432, 9434), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9446, 9457), 'time.time', 'time.time', ([], {}), '()\n', (9455, 9457), False, 'import time\n'), ((11676, 11698), 'oneflow.manual_seed', 'flow.manual_seed', (['seed'], {}), '(seed)\n', (11692, 11698), True, 'import oneflow as flow\n'), ((11703, 11723), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11717, 11723), True, 'import numpy as np\n'), ((12755, 12796), 'os.makedirs', 'os.makedirs', (['config.OUTPUT'], {'exist_ok': '(True)'}), '(config.OUTPUT, exist_ok=True)\n', (12766, 12796), False, 'import os\n'), ((4069, 4093), 'flowvision.loss.cross_entropy.SoftTargetCrossEntropy', 'SoftTargetCrossEntropy', ([], {}), '()\n', (4091, 4093), False, 'from flowvision.loss.cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\n'), ((4363, 4396), 'utils.auto_resume_helper', 'auto_resume_helper', (['config.OUTPUT'], {}), '(config.OUTPUT)\n', (4381, 4396), False, 'from utils import 
load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((4926, 5001), 'utils.load_checkpoint', 'load_checkpoint', (['config', 'model_without_ddp', 'optimizer', 'lr_scheduler', 'logger'], {}), '(config, model_without_ddp, optimizer, lr_scheduler, logger)\n', (4941, 5001), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((6468, 6479), 'time.time', 'time.time', ([], {}), '()\n', (6477, 6479), False, 'import time\n'), ((8473, 8484), 'time.time', 'time.time', ([], {}), '()\n', (8482, 8484), False, 'import time\n'), ((9057, 9068), 'time.time', 'time.time', ([], {}), '()\n', (9066, 9068), False, 'import time\n'), ((9740, 9777), 'flowvision.utils.metrics.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (9748, 9777), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9794, 9813), 'utils.reduce_tensor', 'reduce_tensor', (['acc1'], {}), '(acc1)\n', (9807, 9813), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((9829, 9848), 'utils.reduce_tensor', 'reduce_tensor', (['acc5'], {}), '(acc5)\n', (9842, 9848), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((9864, 9883), 'utils.reduce_tensor', 'reduce_tensor', (['loss'], {}), '(loss)\n', (9877, 9883), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((10141, 10152), 'time.time', 'time.time', ([], {}), '()\n', (10150, 10152), False, 'import time\n'), ((11077, 11088), 'time.time', 'time.time', ([], {}), '()\n', (11086, 11088), False, 'import time\n'), ((11159, 11170), 'time.time', 'time.time', ([], {}), '()\n', (11168, 11170), False, 'import time\n'), ((11437, 11456), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (11454, 11456), True, 'import oneflow 
as flow\n'), ((11478, 11503), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (11501, 11503), True, 'import oneflow as flow\n'), ((11652, 11671), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (11669, 11671), True, 'import oneflow as flow\n'), ((12949, 12968), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (12966, 12968), True, 'import oneflow as flow\n'), ((12990, 13032), 'os.path.join', 'os.path.join', (['config.OUTPUT', '"""config.json"""'], {}), "(config.OUTPUT, 'config.json')\n", (13002, 13032), False, 'import os\n'), ((4159, 4225), 'flowvision.loss.cross_entropy.LabelSmoothingCrossEntropy', 'LabelSmoothingCrossEntropy', ([], {'smoothing': 'config.MODEL.LABEL_SMOOTHING'}), '(smoothing=config.MODEL.LABEL_SMOOTHING)\n', (4185, 4225), False, 'from flowvision.loss.cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\n'), ((4256, 4282), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (4280, 4282), True, 'import oneflow as flow\n'), ((5909, 6009), 'utils.save_checkpoint', 'save_checkpoint', (['config', 'epoch', 'model_without_ddp', 'max_accuracy', 'optimizer', 'lr_scheduler', 'logger'], {}), '(config, epoch, model_without_ddp, max_accuracy, optimizer,\n lr_scheduler, logger)\n', (5924, 6009), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((11849, 11874), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (11872, 11874), True, 'import oneflow as flow\n'), ((12003, 12028), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (12026, 12028), True, 'import oneflow as flow\n'), ((12135, 12160), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (12158, 12160), True, 'import oneflow as flow\n'), ((12877, 12896), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (12894, 12896), True, 'import oneflow as flow\n'), ((5775, 
5794), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (5792, 5794), True, 'import oneflow as flow\n'), ((8440, 8451), 'time.time', 'time.time', ([], {}), '()\n', (8449, 8451), False, 'import time\n'), ((10108, 10119), 'time.time', 'time.time', ([], {}), '()\n', (10117, 10119), False, 'import time\n')] |
# Generated by Django 2.2.26 on 2022-01-17 03:54
from django.conf import settings
from django.db import migrations
from tahoe_sites import zd_helpers
class Migration(migrations.Migration):
    """Alter the unique-together constraint on ``userorganizationmapping``.

    ``zd_helpers.get_unique_together`` wraps the (user, organization) pair so
    the constraint definition can be adapted centrally by the helper module.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('organizations', '0011_historicalorganization_edx_uuid'),
        ('tahoe_sites', '0001_initial'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='userorganizationmapping',
            unique_together={
                # One (user, organization) pair per mapping row.
                zd_helpers.get_unique_together(('user', 'organization')),
            },
        ),
    ]
| [
"django.db.migrations.swappable_dependency",
"tahoe_sites.zd_helpers.get_unique_together"
] | [((223, 280), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (254, 280), False, 'from django.db import migrations\n'), ((546, 602), 'tahoe_sites.zd_helpers.get_unique_together', 'zd_helpers.get_unique_together', (["('user', 'organization')"], {}), "(('user', 'organization'))\n", (576, 602), False, 'from tahoe_sites import zd_helpers\n')] |
from fairseq.models.transformer_lm import *
from torch.nn import CrossEntropyLoss
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
class TransformerLanguageModelWrapper(TransformerLanguageModel):
    """``TransformerLanguageModel`` variant whose decoder is a
    ``TransformerDecoderWrapper`` (adds a ``predict`` method that accepts
    precomputed input embeddings)."""

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance.

        Mirrors fairseq's ``TransformerLanguageModel.build_model`` but
        instantiates ``TransformerDecoderWrapper`` instead of the stock
        decoder.  Note that ``args`` is mutated in place (layer count,
        max_target_positions) before construction.
        """
        # make sure all arguments are present in older models
        base_lm_architecture(args)
        if args.decoder_layers_to_keep:
            args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
        if getattr(args, "max_target_positions", None) is None:
            args.max_target_positions = getattr(
                args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
            )
        # Three mutually exclusive embedding strategies: character CNN,
        # adaptive input, or a plain embedding table.
        if args.character_embeddings:
            embed_tokens = CharacterTokenEmbedder(
                task.source_dictionary,
                eval(args.character_filters),
                args.character_embedding_dim,
                args.decoder_embed_dim,
                args.char_embedder_highway_layers,
            )
        elif args.adaptive_input:
            embed_tokens = AdaptiveInput(
                len(task.source_dictionary),
                task.source_dictionary.pad(),
                args.decoder_input_dim,
                args.adaptive_input_factor,
                args.decoder_embed_dim,
                options.eval_str_list(args.adaptive_input_cutoff, type=int),
                args.quant_noise_pq,
                args.quant_noise_pq_block_size,
            )
        else:
            embed_tokens = cls.build_embedding(
                args, task.source_dictionary, args.decoder_input_dim
            )
        # Tying adaptive input and adaptive softmax weights requires their
        # factors/cutoffs and the input/output dims to line up exactly.
        if args.tie_adaptive_weights:
            assert args.adaptive_input
            assert args.adaptive_input_factor == args.adaptive_softmax_factor
            assert (
                args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
            ), "{} != {}".format(
                args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
            )
            assert args.decoder_input_dim == args.decoder_output_dim
        # Decoder-only LM: no encoder attention.
        decoder = TransformerDecoderWrapper(
            args, task.target_dictionary, embed_tokens, no_encoder_attn=True
        )
        return cls(decoder)
class TransformerDecoderWrapper(TransformerDecoder):
    """fairseq ``TransformerDecoder`` extended with :meth:`predict`, which
    runs the decoder on externally supplied input embeddings and optionally
    computes a shifted language-modeling loss."""

    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super(TransformerDecoderWrapper, self).__init__(args, dictionary, embed_tokens, no_encoder_attn)
        # Flag for an (unimplemented here) parallel execution path.
        self.use_parallel = False

    def predict(self, prev_output_tokens, inputs_embeds, attention_mask, labels,
                encoder_out=None, incremental_state=None, full_context_alignment=False,
                alignment_layer=None, alignment_heads=None):
        """Decode from precomputed embeddings and return ``(lm_logits, loss)``.

        ``loss`` is a token-level cross entropy over labels shifted by one
        position, or ``None`` when ``labels`` is ``None``.  ``attention_mask``
        is accepted but unused; padding is derived from ``prev_output_tokens``.
        NOTE(review): the device is hard-coded to "cuda:0" throughout, and
        ``prev_output_tokens`` is moved to that device twice -- the second
        ``.to`` below is redundant.
        """
        prev_output_tokens = prev_output_tokens.to("cuda:0")
        inputs_embeds = inputs_embeds.to("cuda:0")
        # embed positions (incremental_state deliberately not forwarded here)
        positions = (
            self.embed_positions(
                prev_output_tokens, incremental_state=None
            )
            if self.embed_positions is not None
            else None
        )
        prev_output_tokens = prev_output_tokens.to("cuda:0")
        # Scale the provided embeddings instead of embedding token ids.
        x = self.embed_scale * inputs_embeds
        if self.quant_noise is not None:
            x = self.quant_noise(x)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions.to("cuda:0")
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        # B x T x C -> T x B x C (fairseq layers expect time-major input)
        x = x.transpose(0, 1)
        # Padding mask comes from the token ids, not from attention_mask.
        self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
        # decoder layers
        attn: Optional[Tensor] = None
        #inner_states: List[Optional[Tensor]] = [x]
        for idx, layer in enumerate(self.layers):
            # Causal mask unless doing incremental decoding or full-context
            # alignment.
            if incremental_state is None and not full_context_alignment:
                self_attn_mask = self.buffered_future_mask(x)
            else:
                self_attn_mask = None
            x, layer_attn, _ = layer(
                x,
                encoder_out.encoder_out if encoder_out is not None else None,
                encoder_out.encoder_padding_mask if encoder_out is not None else None,
                incremental_state,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                need_attn=bool((idx == alignment_layer)),
                need_head_weights=bool((idx == alignment_layer)),
            )
            #inner_states.append(x)
            # Keep only the attention weights of the requested alignment layer.
            if layer_attn is not None and idx == alignment_layer:
                attn = layer_attn.float().to(x)
        if attn is not None:
            if alignment_heads is not None:
                attn = attn[:alignment_heads]
            # average probabilities over heads
            attn = attn.mean(dim=0)
        x = x.to("cuda:0")
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        lm_logits = self.output_layer(x)
        # Shift so that tokens < n predict n
        shift_logits = lm_logits[..., :-1, :].contiguous()
        if labels is not None:
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            # return loss, lm_logits
            return lm_logits, loss
        else:
            return lm_logits, None
| [
"torch.nn.CrossEntropyLoss"
] | [((5478, 5496), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (5494, 5496), False, 'from torch.nn import CrossEntropyLoss\n')] |
#!/usr/bin/env python3 -B
try: import mido
except: quit("missing module: mido")
import sys
import math
class MidiToData:
    """Convert the note_on events of one MIDI track into assembly ``db`` lines.

    Usage: midi2data <filename> <format> <columns> [<offset> = 0] [<track> = 1]

    ``format`` is ``"midi"`` (raw pitch numbers) or ``"const"`` (note-name
    constants such as ``C_4``); ``columns`` is how many values go on one
    ``db`` line; ``offset`` is added to every pitch; ``track`` selects the
    1-based MIDI track to export.
    """

    def main(self):
        """Entry point: parse CLI arguments, then emit the selected track."""
        self.parseArgs()
        self.processFile()

    def parseArgs(self):
        """Populate midiData/format/cols/offset/selectedTrackNumber from argv.

        Aborts via quit() with a usage/error message on invalid input.
        """
        if len(sys.argv) < 2:
            quit("midi2data <filename> <format> <columns> [<offset> = 0] [<track> = 1]")
        try: self.midiData = mido.MidiFile(sys.argv[1])
        except Exception: quit("error loading MIDI file")
        self.format = sys.argv[2]
        if not (self.format == "midi" or self.format == "const"):
            quit("format must be \"midi\" or \"const\"")
        # <columns> is mandatory: abort instead of leaving self.cols unset.
        # (The original only printed a message and then crashed later with
        # an AttributeError when self.cols was first read.)
        try: self.cols = int(sys.argv[3])
        except (IndexError, ValueError): quit("missing column count")
        # Optional pitch offset, default 0.  Bug fix: the original bound a
        # *local* name (`offset = 0`) in the except branch, so self.offset
        # was undefined whenever the argument was omitted.
        try: self.offset = int(sys.argv[4])
        except (IndexError, ValueError): self.offset = 0
        # Optional 1-based track number, default 1.
        try: self.selectedTrackNumber = int(sys.argv[5])
        except (IndexError, ValueError): self.selectedTrackNumber = 1

    def processFile(self):
        """Emit the selected track, or abort if the track number is invalid."""
        isAnyTrackProcessed = False
        actualTrackNumber = 0
        for track in self.midiData.tracks:
            actualTrackNumber += 1
            if self.selectedTrackNumber == actualTrackNumber:
                isAnyTrackProcessed = True
                self.processTrack(track)
        if not isAnyTrackProcessed:
            quit("invalid track number")

    def processTrack(self, track):
        """Print a header plus data rows for every note_on message in *track*."""
        self.prepareData(track.name)
        for message in track:
            self.processMessage(message)
        self.postProcessTrack()

    def prepareData(self, trackName):
        """Reset the column counter and print the track-name banner comment."""
        self.columnCounter = 0
        print("\t;---- " + trackName + " ----")

    def processMessage(self, message):
        """Print one pitch value, wrapping to a new ``db`` line every self.cols items.

        Non-note_on messages are ignored.
        """
        if message.type != "note_on": return
        if self.columnCounter == 0:
            print("\tdb ", end="")
        else:
            print(",", end="")
        pitch = message.note + self.offset
        if self.format == "midi":
            print(pitch, end="")
        else:
            print(self.renderConst(pitch), end="")
        self.columnCounter += 1
        if self.columnCounter == self.cols:
            self.columnCounter = 0
            print("")

    def postProcessTrack(self):
        """Terminate the final, partially filled line (if any)."""
        if self.columnCounter != 0:
            print("")

    def renderConst(self, pitch):
        """Return a note constant such as "C_4" or "Fs5" for MIDI *pitch*.

        Uses German-style naming where H is the natural seventh degree.
        """
        octave = str(math.floor(pitch / 12))
        note = (
            "C_", "Cs", "D_", "Ds", "E_", "F_",
            "Fs", "G_", "Gs", "A_", "As", "H_"
        )[pitch % 12]
        return note + octave
if __name__ == "__main__":
    # Script entry point: build the converter and run it.
    MidiToData().main()
"mido.MidiFile",
"math.floor"
] | [((336, 362), 'mido.MidiFile', 'mido.MidiFile', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (349, 362), False, 'import mido\n'), ((1901, 1923), 'math.floor', 'math.floor', (['(pitch / 12)'], {}), '(pitch / 12)\n', (1911, 1923), False, 'import math\n')] |
"""Tests for the logix_driver.py file.
The Logix Driver is beholden to the CIPDriver interface. Only tests
which bind it to that interface should be allowed here. Tests binding
to another interface such as Socket are an anti-pattern.
There are quite a few methods in the LogixDriver which are difficult to
read or test due to both code clarity issues and it being inconvenient.
Also the vast majority of methods are private, I think that private
methods should not be tested directly, but rather, their effects on
public methods should be tested.
pytest --cov=pycomm3 --cov-branch tests/offline/
----------- coverage: platform linux, python 3.8.1-final-0 -----------
Name Stmts Miss Branch BrPart Cover
----------------------------------------------------------------
pycomm3/logix_driver.py 798 718 346 0 7%
We're currently at 7% test coverage, I would like to increase that to >=50%
and then continue to do so for the rest of the modules.
"""
from unittest import mock
import pytest
from pycomm3.cip_driver import CIPDriver
from pycomm3.const import MICRO800_PREFIX, SUCCESS
from pycomm3.exceptions import CommError, PycommError, RequestError
from pycomm3.logix_driver import LogixDriver, encode_value
from pycomm3.packets import RequestPacket, ResponsePacket
from pycomm3.socket_ import Socket
from pycomm3.tag import Tag
from pycomm3.custom_types import ModuleIdentityObject
# CIP path used by every test: IP address / backplane slot.
CONNECT_PATH = '192.168.1.100/1'

# Canned identity payloads for the ControlLogix firmware versions around the
# v21 boundary (instance-id addressing is only supported from v21 up).
IDENTITY_CLX_V20 = {'vendor': 'Rockwell Automation/Allen-Bradley',
                    'product_type': 'Programmable Logic Controller', 'product_code': 0,
                    'revision': {'major': 20, 'minor': 0},
                    'status': b'00', 'serial': '00000000',
                    'product_name': '1756-L55'}
IDENTITY_CLX_V21 = {'vendor': 'Rockwell Automation/Allen-Bradley',
                    'product_type': 'Programmable Logic Controller', 'product_code': 0,
                    'revision': {'major': 21, 'minor': 0},
                    'status': b'00', 'serial': '00000000',
                    'product_name': '1756-L62'}
IDENTITY_CLX_V32 = {'vendor': 'Rockwell Automation/Allen-Bradley',
                    'product_type': 'Programmable Logic Controller', 'product_code': 0,
                    'revision': {'major': 32, 'minor': 0},
                    'status': b'00', 'serial': '00000000',
                    'product_name': '1756-L85'}
# Micro800-family identity (product name starts with the 2080- prefix).
IDENTITY_M8000 = {'encap_protocol_version': 1,
                  'ip_address': '192.168.1.124',
                  'product_code': 259,
                  'product_name': '2080-LC50-48QWBS',
                  'product_type': 'Programmable Logic Controller',
                  'revision': {'major': 12, 'minor': 11},
                  'serial': '12345678',
                  'state': 2,
                  'status': b'4\x00',
                  'vendor': 'Rockwell Automation/Allen-Bradley'}
def test_open_call_init_driver_open():
    """``LogixDriver.open()`` must invoke both the base-class open and the
    driver-initialization step."""
    with mock.patch.object(CIPDriver, 'open') as base_open, \
            mock.patch.object(LogixDriver, '_initialize_driver') as init_driver:
        plc = LogixDriver(CONNECT_PATH)
        plc.open()
    assert base_open.called
    assert init_driver.called
def test_open_call_init_driver_with():
    """Entering the driver as a context manager must also run driver
    initialization (the with-statement path goes through ``open()``)."""
    with mock.patch.object(CIPDriver, 'open') as base_open, \
            mock.patch.object(LogixDriver, '_initialize_driver') as init_driver:
        with LogixDriver(CONNECT_PATH):
            pass
    assert base_open.called
    assert init_driver.called
@pytest.mark.parametrize('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32])
def test_logix_init_for_version_support_instance_ids_large_connection(identity):
    """ControlLogix targets: instance-id addressing turns on at firmware v21+,
    and the Micro800 flag stays off."""
    with mock.patch.object(LogixDriver, '_list_identity') as list_identity, \
            mock.patch.object(LogixDriver, 'get_plc_info') as get_info, \
            mock.patch.object(LogixDriver, 'get_plc_name') as get_name:
        list_identity.return_value = identity
        # get_plc_info normally returns module identity, not the ListIdentity
        # response, but the payload carries every field this test reads.
        get_info.return_value = identity
        plc = LogixDriver(CONNECT_PATH)
        plc._initialize_driver(False, False)

        expect_instance_ids = identity['revision']['major'] >= 21
        assert plc._micro800 is False
        assert plc._cfg['use_instance_ids'] == expect_instance_ids
        assert get_info.called
        assert get_name.called
@pytest.mark.parametrize('identity', [IDENTITY_M8000, ])
def test_logix_init_micro800(identity):
    """A Micro800 identity must disable instance ids, skip the PLC-name
    lookup, and leave the CIP path empty."""
    with mock.patch.object(LogixDriver, '_list_identity') as list_identity, \
            mock.patch.object(LogixDriver, 'get_plc_info') as get_info, \
            mock.patch.object(LogixDriver, 'get_plc_name') as get_name:
        list_identity.return_value = identity
        get_info.return_value = identity
        plc = LogixDriver(CONNECT_PATH)
        plc._initialize_driver(False, False)

        assert plc._micro800 is True
        assert plc._cfg['use_instance_ids'] is False
        assert get_info.called
        assert not get_name.called
        assert not plc._cfg['cip_path']
@pytest.mark.parametrize('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32, IDENTITY_M8000])
def test_logix_init_calls_get_tag_list_if_init_tags(identity):
    """open() with ``init_tags=True`` must upload the tag list for every
    supported PLC family."""
    with mock.patch.object(LogixDriver, '_list_identity') as list_identity, \
            mock.patch.object(LogixDriver, 'get_plc_info') as get_info, \
            mock.patch.object(LogixDriver, 'get_plc_name'), \
            mock.patch.object(CIPDriver, 'open'), \
            mock.patch.object(LogixDriver, 'get_tag_list') as get_tags:
        list_identity.return_value = identity
        get_info.return_value = identity
        plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=True)
        plc._target_is_connected = True
        plc.open()
    assert get_tags.called
def test_logix_context_manager_calls_open_and_close():
    """The with-statement must delegate to open() on entry and close() on exit."""
    with mock.patch.object(LogixDriver, 'open') as opened, \
            mock.patch.object(LogixDriver, 'close') as closed:
        with LogixDriver(CONNECT_PATH, init_info=False, init_tags=False):
            pass
    assert opened.called
    assert closed.called
def test__exit__returns_false_on_commerror():
    # NOTE(review): the function name says "returns_false_on_commerror" but
    # the body never forces a CommError (close() is not mocked) and asserts
    # True -- the name/comment appear stale.  Confirm intent before renaming.
    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    assert ld.__exit__(None, None, None) is True  # Exit with no exception
def test__exit__returns_true_on_no_error_and_no_exc_type():
    """__exit__ reports success when close() works and no exception is active."""
    with mock.patch.object(LogixDriver, 'close'):
        plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        assert plc.__exit__(None, None, None) is True
def test__exit__returns_false_on_no_error_and_exc_type():
    """__exit__ must not swallow an active exception: with an exc_type set it
    returns False so the exception propagates."""
    with mock.patch.object(LogixDriver, 'close'):
        plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        assert plc.__exit__('Some Exc Type', None, None) is False
def test__repr___ret_str():
    """repr() of a driver must yield a non-empty string."""
    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    _repr = repr(ld)
    # Bug fix: the original asserted the *builtin* `repr` (always truthy)
    # instead of the computed `_repr`, so the non-empty check never ran.
    assert _repr
    assert isinstance(_repr, str)
def test_default_logix_tags_are_empty_dict():
    """A driver built without tag upload starts with an empty tag cache."""
    plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    assert plc.tags == {}
def test_logix_connected_false_on_init_with_false_init_params():
    """``connected`` must be False before open() has ever been called."""
    plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    assert plc.connected is False
def test_clx_get_plc_time_sends_packet():
    """get_plc_time() must issue a request through the driver's send()."""
    with mock.patch.object(LogixDriver, 'send') as send, \
            mock.patch('pycomm3.cip_driver.with_forward_open'):
        plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        plc.get_plc_time()
    assert send.called
def test_clx_set_plc_time_sends_packet():
    """set_plc_time() must issue a request through the driver's send()."""
    with mock.patch.object(LogixDriver, 'send') as send, \
            mock.patch('pycomm3.cip_driver.with_forward_open'):
        plc = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        plc.set_plc_time()
    assert send.called
# TODO: all of the tag list associated tests
@pytest.mark.skip(reason="""tag parsing is extremely complex, and it's \
nearly impossible to test this without also reverse-engineering it""")
def test__get_tag_list_returns_expected_user_tags():
    # Skipped: the expected-tag fixture below is a sketch of the wire layout,
    # not a verified parse result -- confirm against the real parser before
    # un-skipping.
    EXPECTED_USER_TAGS = [{
        'tag_type': 'struct', # bit 15 is a 1
        'instance_id': 1,
        'tag_name': b"\x00\x01",
        'symbol_type': "",
        'symbol_address': "",
        'symbol_object_address': "",
        'software_control': "",
        'external_access': "",
        'dimensions': ["", "", ""]
    }]

    TEST_RESPONSE = ResponsePacket()
    # Byte layout of one instance-attribute-list entry:
    # 0 -> 4 are the 'instance', dint
    # 4 -> 6 is the 'tag_length', uint, used internally
    # 8 -> 'tag_length' is 'tag_name'
    # 8+tag_length -> 10+tag_length is 'symbol_type' uint
    # 10+tag_length -> 14+tag_length is 'symbol_address' udint
    # 14+tag_length -> 18+tag_length is 'symbol_object_address' udint
    # 18+tag_length -> 22+tag_length is 'software_control' udint
    # 'dim1', 'dim2' and 'dim3' are the next 12 bytes, udint
    TEST_RESPONSE.data = \
        b"\x00\x00\x00\x01" + \
        b"\x00\x01" + \
        b"\x00\x01" + \
        b"\x00\x00\x00\x00\x00\x10"
    TEST_RESPONSE.command = "Something"
    TEST_RESPONSE.command_status = SUCCESS

    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)

    # Stub out the network layer and attribute parsing so get_tag_list()
    # consumes only the canned response above.
    with mock.patch.object(RequestPacket, 'send') as mock_send, \
         mock.patch.object(CIPDriver, '_forward_open'), \
         mock.patch.object(LogixDriver, '_parse_instance_attribute_list'):
        mock_send.return_value = TEST_RESPONSE
        actual_tags = ld.get_tag_list()
        assert EXPECTED_USER_TAGS == actual_tags
| [
"pycomm3.logix_driver.LogixDriver",
"pycomm3.packets.ResponsePacket",
"pytest.mark.skip",
"pytest.mark.parametrize",
"unittest.mock.patch.object",
"unittest.mock.patch"
] | [((3633, 3728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""identity"""', '[IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32]'], {}), "('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21,\n IDENTITY_CLX_V32])\n", (3656, 3728), False, 'import pytest\n'), ((4622, 4675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""identity"""', '[IDENTITY_M8000]'], {}), "('identity', [IDENTITY_M8000])\n", (4645, 4675), False, 'import pytest\n'), ((5335, 5446), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""identity"""', '[IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32, IDENTITY_M8000]'], {}), "('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21,\n IDENTITY_CLX_V32, IDENTITY_M8000])\n", (5358, 5446), False, 'import pytest\n'), ((8325, 8471), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""tag parsing is extremely complex, and it\'s nearly impossible to test this without also reverse-engineering it"""'}), '(reason=\n "tag parsing is extremely complex, and it\'s nearly impossible to test this without also reverse-engineering it"\n )\n', (8341, 8471), False, 'import pytest\n'), ((6500, 6559), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (6511, 6559), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((7160, 7219), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (7171, 7219), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((7412, 7471), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (7423, 7471), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((7577, 7636), 
'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (7588, 7636), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((8875, 8891), 'pycomm3.packets.ResponsePacket', 'ResponsePacket', ([], {}), '()\n', (8889, 8891), False, 'from pycomm3.packets import RequestPacket, ResponsePacket\n'), ((9577, 9636), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (9588, 9636), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((2932, 2968), 'unittest.mock.patch.object', 'mock.patch.object', (['CIPDriver', '"""open"""'], {}), "(CIPDriver, 'open')\n", (2949, 2968), False, 'from unittest import mock\n'), ((2997, 3049), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""_initialize_driver"""'], {}), "(LogixDriver, '_initialize_driver')\n", (3014, 3049), False, 'from unittest import mock\n'), ((3081, 3106), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {}), '(CONNECT_PATH)\n', (3092, 3106), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((3380, 3416), 'unittest.mock.patch.object', 'mock.patch.object', (['CIPDriver', '"""open"""'], {}), "(CIPDriver, 'open')\n", (3397, 3416), False, 'from unittest import mock\n'), ((3442, 3494), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""_initialize_driver"""'], {}), "(LogixDriver, '_initialize_driver')\n", (3459, 3494), False, 'from unittest import mock\n'), ((3815, 3863), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""_list_identity"""'], {}), "(LogixDriver, '_list_identity')\n", (3832, 3863), False, 'from unittest import mock\n'), ((3893, 3939), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_plc_info"""'], {}), 
"(LogixDriver, 'get_plc_info')\n", (3910, 3939), False, 'from unittest import mock\n'), ((3969, 4015), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_plc_name"""'], {}), "(LogixDriver, 'get_plc_name')\n", (3986, 4015), False, 'from unittest import mock\n'), ((4352, 4377), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {}), '(CONNECT_PATH)\n', (4363, 4377), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((4727, 4775), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""_list_identity"""'], {}), "(LogixDriver, '_list_identity')\n", (4744, 4775), False, 'from unittest import mock\n'), ((4805, 4851), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_plc_info"""'], {}), "(LogixDriver, 'get_plc_info')\n", (4822, 4851), False, 'from unittest import mock\n'), ((4881, 4927), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_plc_name"""'], {}), "(LogixDriver, 'get_plc_name')\n", (4898, 4927), False, 'from unittest import mock\n'), ((5054, 5079), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {}), '(CONNECT_PATH)\n', (5065, 5079), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((5515, 5563), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""_list_identity"""'], {}), "(LogixDriver, '_list_identity')\n", (5532, 5563), False, 'from unittest import mock\n'), ((5593, 5639), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_plc_info"""'], {}), "(LogixDriver, 'get_plc_info')\n", (5610, 5639), False, 'from unittest import mock\n'), ((5669, 5715), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_plc_name"""'], {}), "(LogixDriver, 'get_plc_name')\n", (5686, 5715), False, 'from unittest import mock\n'), ((5728, 5764), 'unittest.mock.patch.object', 'mock.patch.object', (['CIPDriver', '"""open"""'], {}), "(CIPDriver, 
'open')\n", (5745, 5764), False, 'from unittest import mock\n'), ((5777, 5823), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""get_tag_list"""'], {}), "(LogixDriver, 'get_tag_list')\n", (5794, 5823), False, 'from unittest import mock\n'), ((5947, 6005), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(True)'}), '(CONNECT_PATH, init_info=False, init_tags=True)\n', (5958, 6005), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((6164, 6202), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""open"""'], {}), "(LogixDriver, 'open')\n", (6181, 6202), False, 'from unittest import mock\n'), ((6231, 6270), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""close"""'], {}), "(LogixDriver, 'close')\n", (6248, 6270), False, 'from unittest import mock\n'), ((6706, 6745), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""close"""'], {}), "(LogixDriver, 'close')\n", (6723, 6745), False, 'from unittest import mock\n'), ((6760, 6819), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (6771, 6819), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((6942, 6981), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""close"""'], {}), "(LogixDriver, 'close')\n", (6959, 6981), False, 'from unittest import mock\n'), ((6996, 7055), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (7007, 7055), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((7723, 7761), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""send"""'], {}), "(LogixDriver, 'send')\n", (7740, 7761), False, 'from unittest 
import mock\n'), ((7790, 7840), 'unittest.mock.patch', 'mock.patch', (['"""pycomm3.cip_driver.with_forward_open"""'], {}), "('pycomm3.cip_driver.with_forward_open')\n", (7800, 7840), False, 'from unittest import mock\n'), ((7855, 7914), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (7866, 7914), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((8026, 8064), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""send"""'], {}), "(LogixDriver, 'send')\n", (8043, 8064), False, 'from unittest import mock\n'), ((8093, 8143), 'unittest.mock.patch', 'mock.patch', (['"""pycomm3.cip_driver.with_forward_open"""'], {}), "('pycomm3.cip_driver.with_forward_open')\n", (8103, 8143), False, 'from unittest import mock\n'), ((8158, 8217), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (8169, 8217), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((9646, 9686), 'unittest.mock.patch.object', 'mock.patch.object', (['RequestPacket', '"""send"""'], {}), "(RequestPacket, 'send')\n", (9663, 9686), False, 'from unittest import mock\n'), ((9715, 9760), 'unittest.mock.patch.object', 'mock.patch.object', (['CIPDriver', '"""_forward_open"""'], {}), "(CIPDriver, '_forward_open')\n", (9732, 9760), False, 'from unittest import mock\n'), ((9776, 9840), 'unittest.mock.patch.object', 'mock.patch.object', (['LogixDriver', '"""_parse_instance_attribute_list"""'], {}), "(LogixDriver, '_parse_instance_attribute_list')\n", (9793, 9840), False, 'from unittest import mock\n'), ((3523, 3548), 'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {}), '(CONNECT_PATH)\n', (3534, 3548), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n'), ((6299, 6358), 
'pycomm3.logix_driver.LogixDriver', 'LogixDriver', (['CONNECT_PATH'], {'init_info': '(False)', 'init_tags': '(False)'}), '(CONNECT_PATH, init_info=False, init_tags=False)\n', (6310, 6358), False, 'from pycomm3.logix_driver import LogixDriver, encode_value\n')] |
#
import json
import copy
import itertools
import multiprocessing
import os
import sys
import time
from functools import partial
import numpy as np
from numpy import linalg
from tqdm import tqdm
import rmsd
import quantum
import clockwork
import merge
import similarity_fchl19 as sim
from chemhelp import cheminfo
from rdkit import Chem
from rdkit.Chem import AllChem, ChemicalForceFields
from rdkit.Chem import rdmolfiles
from communication import rediscomm
import joblib
# Set local cache
cachedir = '.pycache'
memory = joblib.Memory(cachedir, verbose=0)
DEFAULT_DECIMALS = 5
# DEFAULT_DECIMALS = 12
def correct_userpath(filepath):
    """Expand a leading '~' in *filepath* to the user's home directory."""
    expanded = os.path.expanduser(filepath)
    return expanded
def get_forcefield(molobj):
    """Build the MMFF properties and force field for *molobj*.

    Returns (properties, forcefield). Callers elsewhere in this file
    treat a None force field as a failed MMFF setup.
    """
    props = ChemicalForceFields.MMFFGetMoleculeProperties(molobj)
    # ~0.01 overhead per call (see original note)
    field = ChemicalForceFields.MMFFGetMoleculeForceField(molobj, props)
    return props, field
def run_forcefield(ff, steps, energy=1e-2, force=1e-3):
    """Minimize force field *ff* for at most *steps* iterations.

    Returns the minimizer status code; a RuntimeError raised by the
    minimizer is mapped to status 1 (failure).
    """
    try:
        return ff.Minimize(maxIts=steps, energyTol=energy, forceTol=force)
    except RuntimeError:
        return 1
def run_forcefield_prime(ff, steps, energy=1e-2, force=1e-3):
    """Minimize *ff*; identical contract to run_forcefield.

    RuntimeError from the minimizer is reported as status 1.
    """
    try:
        result = ff.Minimize(maxIts=steps, energyTol=energy, forceTol=force)
    except RuntimeError:
        result = 1
    return result
@memory.cache
def generate_torsion_combinations(total_torsions, n_tor):
    """Disk-cached list of all n_tor-sized torsion-index combinations."""
    return list(clockwork.generate_torsion_combinations(total_torsions, n_tor))
def generate_torsions(total_torsions,
        min_cost=0, max_cost=15, prefix="0"):
    """Print one job string per clockwork combination.

    Job format: "<prefix>,<space-separated torsion idxs>,<resolution>".
    Combinations are taken from the clockwork cost list between
    *min_cost* and *max_cost*.
    """
    cost_input, cost_cost = clockwork.generate_costlist(total_torsions=total_torsions)
    window = zip(cost_input[min_cost:max_cost], cost_cost[min_cost:max_cost])
    for (n_tor, resolution), cost in window:
        for combination in generate_torsion_combinations(total_torsions, n_tor):
            torsion_field = " ".join(str(t) for t in combination)
            print(",".join([prefix, torsion_field, str(resolution)]))
    return
def generate_torsions_specific(total_torsions, n_tor, resolution, prefix="0"):
    """Print job strings for one fixed (n_tor, resolution) pair.

    Job format: "<prefix>,<space-separated torsion idxs>,<resolution>".
    """
    for combination in generate_torsion_combinations(total_torsions, n_tor):
        torsion_field = " ".join(str(t) for t in combination)
        print(",".join([prefix, torsion_field, str(resolution)]))
    return
def generate_jobs(molobjs, args, tordb=None,
        min_cost=0, max_cost=15):
    """Print job strings for a window of molecules.

    If ``args.jobcombos`` is given, each entry is "n_torsions,resolution"
    and only those combinations are emitted; otherwise the clockwork
    cost list between *min_cost* and *max_cost* is used.

    Cleanup: removed a large commented-out duplicate of generate_torsions
    and the unused ``molobj`` local.
    """
    combos = args.jobcombos
    n_molecules = len(molobjs)
    if tordb is None:
        tordb = [cheminfo.get_torsions(molobj) for molobj in molobjs]
    # NOTE(review): hard-coded molecule window [20:100] — TODO make configurable
    for i in range(n_molecules)[20:100]:
        total_torsions = len(tordb[i])
        prefix = str(i)
        if combos is None:
            generate_torsions(total_torsions, prefix=prefix,
                min_cost=min_cost, max_cost=max_cost)
        else:
            for combo in combos:
                parts = [int(x) for x in combo.split(",")]
                generate_torsions_specific(total_torsions, parts[0], parts[1], prefix=prefix)
    return
def converge_clockwork(molobj, tordb, max_cost=2):
    """
    Debug driver: grow a conformer pool over increasing clockwork cost.

    NOTE(review): the *max_cost* parameter is shadowed by hard-coded
    locals below, the merge step is unreachable (both branches of the
    if/else `continue` first), and quit() aborts after the first cost
    window — this function looks like development scaffolding.

    molobj -- rdkit molecule
    tordb -- list of torsion index quadruplets
    max_cost -- ignored (overwritten locally)
    """
    atoms, xyz = cheminfo.molobj_to_xyz(molobj)
    total_torsions = len(tordb)
    print("total torsions", total_torsions)
    # TODO Cache this
    cost_input, cost_cost = clockwork.generate_costlist(total_torsions=total_torsions)
    # TODO cost_cost and costfunc
    # NOTE(review): hard-coded overrides; the parameter max_cost is ignored
    offset = 6
    max_cost = 1
    offset = 1
    max_cost = 7
    # offset = 7
    # max_cost = 1
    for (n_tor, resolution), cost in zip(cost_input[offset:offset+max_cost], cost_cost[offset:offset+max_cost]):
        start = time.time()
        # Iterate over torsion combinations
        combinations = clockwork.generate_torsion_combinations(total_torsions, n_tor)
        cost_result_energies = []
        cost_result_coordinates = []
        C = 0
        for combination in combinations:
            # TODO Move this to function
            com_start = time.time()
            torsions = [tordb[i] for i in combination]
            result_energies, result_coordinates = get_clockwork_conformations(molobj, torsions, resolution)
            n_results = len(result_energies)
            result_cost = [cost]*n_results
            com_end = time.time()
            # print("new confs", len(result_energies), "{:6.2f}".format(com_end-com_start))
            # Merge
            if len(cost_result_energies) == 0:
                # First batch seeds the pool unconditionally
                cost_result_energies += list(result_energies)
                cost_result_coordinates += list(result_coordinates)
                continue
            else:
                start_merge = time.time()
                # TODO Move this to function
                continue
            # NOTE(review): everything below is unreachable — both branches
            # above `continue` before the merge can run.
            idxs = merge.merge_asymmetric(atoms,
                    result_energies,
                    cost_result_energies,
                    result_coordinates,
                    cost_result_coordinates, decimals=2, debug=True)
            for i, idx in enumerate(idxs):
                C += 1
                if len(idx) == 0:
                    cost_result_energies.append(result_energies[i])
                    cost_result_coordinates.append(result_coordinates[i])
            end_merge = time.time()
            print("total confs", len(cost_result_energies), "{:10.2f}".format(end_merge-start_merge))
            continue
        end = time.time()
        print("conv", n_tor, resolution, cost, len(cost_result_energies), "tot: {:5.2f}".format(end-start), "per sec: {:5.2f}".format(cost/(end-start)))
    # NOTE(review): quit() stops the whole process after the first run
    quit()
    return
def get_clockwork_conformations(molobj, torsions, resolution,
    atoms=None,
    debug=False,
    timings=False):
    """
    Generate all unique conformations for one clockwork cost
    (defined by the torsion set and the resolution).

    For every resolution combination: scan the torsions, keep converged
    geometries, deduplicate by energy + FCHL19 similarity, then merge
    asymmetrically into the running collection.

    Returns (energies, coordinates) as parallel lists.
    """
    n_torsions = len(torsions)
    if atoms is None:
        atoms, xyz = cheminfo.molobj_to_xyz(molobj, atom_type="int")
        del xyz
    combinations = clockwork.generate_clockwork_combinations(resolution, n_torsions)
    # Collect energies and coordinates
    end_energies = []
    end_coordinates = []
    end_representations = []
    first = True
    for resolutions in combinations:
        time_start = time.time()
        # Get all conformations
        c_energies, c_coordinates, c_states = get_conformations(molobj, torsions, resolutions)
        N = len(c_energies)
        # Filter unconverged (state == 0 means converged)
        success = np.argwhere(c_states == 0)
        success = success.flatten()
        c_energies = c_energies[success]
        c_coordinates = c_coordinates[success]
        N2 = len(c_energies)  # NOTE(review): unused except for debugging
        # Calculate representations
        c_representations = [sim.get_representation(atoms, coordinates) for coordinates in c_coordinates]
        c_representations = np.asarray(c_representations)
        # Clean all new conformers for energies and similarity
        idxs = clean_representations(atoms, c_energies, c_representations)
        c_energies = c_energies[idxs]
        c_coordinates = c_coordinates[idxs]
        c_representations = c_representations[idxs]
        if first:
            # First batch seeds the collection unconditionally
            first = False
            end_energies += list(c_energies)
            end_coordinates += list(c_coordinates)
            end_representations += list(c_representations)
            continue
        # Asymmetrically add new conformers
        idxs = merge.merge_asymmetric(atoms,
            c_energies,
            end_energies,
            c_representations,
            end_representations)
        # Add new unique conformation to return collection
        for i, idx in enumerate(idxs):
            # if conformation already exists, continue
            if len(idx) > 0: continue
            # Add new unique conformation to collection
            end_energies.append(c_energies[i])
            end_coordinates.append(c_coordinates[i])
            end_representations.append(c_representations[i])
        time_end = time.time()
        if timings:
            timing = time_end - time_start
            print("res time {:8.2f} cnf/sec - {:8.2f} tot sec".format(N/timing, timing))
        continue
    return end_energies, end_coordinates
def clean_representations(atoms, energies, representations):
    """Return indices of unique conformers after the energy/similarity merge.

    Thin wrapper around merge.merge; all conformers here share the same
    cost, so the merge keeps one representative per duplicate group.

    Cleanup: removed the unused ``N`` local and a dead commented-out
    asymmetric-merge variant.
    """
    idxs = merge.merge(atoms,
            energies,
            representations)
    return idxs
def clean_conformers(atoms, energies, coordinates, states=None):
    """Filter to converged, unique conformers and return kept indices.

    If *states* is given, only conformers with state == 0 (converged)
    are considered; the returned indices refer to the filtered arrays.

    Cleanup: removed the unused ``N`` total-count local.
    """
    if states is not None:
        # Keep only converged states; discard the rest
        success = np.argwhere(states == 0)
        success = success.flatten()
        energies = energies[success]
        coordinates = coordinates[success]
    # TODO what about failed states?
    # TODO Check high energies
    # TODO change to asymetric merge (cleaner code)
    # Keep index for only unique
    idxs = merge.merge(atoms, energies, coordinates)
    # Here all cost is the same, so just take the first conformer
    idxs = [idx[0] for idx in idxs]
    return idxs
def get_conformations(molobj, torsions, resolutions, method="sqm", debug=False):
    """Scan every clockwork angle combination for the given torsions.

    Works on a deep copy of *molobj*. For method == "ff" the constrained
    MMFF path is used; otherwise ("sqm") the MOPAC path with a SMILES
    connectivity check. Returns (energies, coordinates, states) arrays;
    empty lists if MMFF setup fails.
    """
    molobj = copy.deepcopy(molobj)
    n_torsions = len(torsions)
    # init energy
    energies = []
    states = []
    coordinates = []
    # no constraints
    ffprop, forcefield = get_forcefield(molobj)
    # Forcefield generation failed
    if forcefield is None:
        return [], [], []
    # Get conformer and origin
    conformer = molobj.GetConformer()
    origin = conformer.GetPositions()
    # Origin angle
    origin_angles = []
    # HACK rdkit requires int type for index
    torsions = [[int(y) for y in x] for x in torsions]
    for idxs in torsions:
        angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, *idxs)
        origin_angles.append(angle)
    # Get resolution angles
    angle_iterator = clockwork.generate_angles(resolutions, n_torsions)
    # set calculate func
    if method == "ff":
        # rdkit mmff
        calculate_method = calculate_forcefield
        cal_kwargs = {
            "ffprop": ffprop,
            "ff": forcefield
        }
    else:
        atoms = cheminfo.molobj_to_atoms(molobj)
        atoms_str = [cheminfo.convert_atom(atom) for atom in atoms]  # NOTE(review): unused
        smiles = quantum.get_smiles(atoms, origin)
        calculate_method = calculate_mopac
        cal_kwargs = {
            "ffprop": ffprop,
            "atoms": atoms,
            "reference_smiles": smiles
        }
    for angle in angle_iterator:
        # reset coordinates
        set_coordinates(conformer, origin)
        # Minimze with torsion angle constraint
        # energy, pos, status = calculate_forcefield(molobj, conformer, torsions, origin_angles, angle,
        #     ffprop=ffprop,
        #     ff=forcefield)
        if debug:
            start = time.time()
        energy, pos, status = calculate_method(molobj, conformer, torsions, origin_angles, angle, **cal_kwargs)
        if debug:
            end = time.time()
            print("{:6.5f}s".format(end-start), "{:6.2f}".format(energy), status)
        # collect
        energies += [energy]
        coordinates += [pos]
        states += [status]
    return np.asarray(energies), np.asarray(coordinates), np.asarray(states)
def get_energy(molobj):
    """Return the MMFF energy of *molobj* at its current geometry."""
    _, forcefield = get_forcefield(molobj)
    return forcefield.CalcEnergy()
def get_energies(molobj, coordinates,
        ffprop=None,
        ff=None):
    """Return the MMFF energy for each geometry in *coordinates*.

    Fix: the original computed every energy but discarded them all and
    returned None; the energies are now collected and returned as a list.
    """
    if ffprop is None or ff is None:
        ffprop, ff = get_forcefield(molobj)
    # Get conformer, overwrite its positions per candidate geometry
    conformer = molobj.GetConformer()
    energies = []
    for coordinate in coordinates:
        set_coordinates(conformer, coordinate)
        energies.append(ff.CalcEnergy())
    return energies
def get_sdfcontent(sdffile, rtn_atoms=False):
    """Read all molecules from *sdffile*; return their energies and geometries.

    With rtn_atoms=True, additionally return the first molobj and the
    atom list of the last molecule read.
    """
    molobjs = list(cheminfo.read_sdffile(sdffile))
    atoms = ""
    energies = []
    coordinates = []
    for molobj in molobjs:
        atoms, xyz = cheminfo.molobj_to_xyz(molobj)
        coordinates.append(xyz)
        energies.append(get_energy(molobj))
    if rtn_atoms:
        return molobjs[0], atoms, energies, coordinates
    return energies, coordinates
def calculate_mopac(molobj, conformer, torsions, origin_angles, delta_angles,
        delta=10**-7,
        coord_decimals=6,
        atoms=None,
        ffprop=None,
        reference_smiles=None):
    """Constrained MMFF pre-minimization followed by a MOPAC optimization.

    Each torsion is set to origin + delta angle on a deep copy, minimized
    under narrow torsion constraints, then optimized by the quantum
    backend. Status codes: 0 ok, 4 quantum failure, 5 connectivity
    changed (SMILES mismatch vs *reference_smiles*).

    Fixes: both bare ``except:`` clauses narrowed to ``except Exception``
    so KeyboardInterrupt/SystemExit are no longer swallowed; removed the
    dead commented-out xyz-dump block and the unused ``smiles`` local.
    """
    sdfstr = cheminfo.molobj_to_sdfstr(molobj)
    molobj_prime, status = cheminfo.sdfstr_to_molobj(sdfstr)
    conformer_prime = molobj_prime.GetConformer()
    # Setup constrained forcefield on the copy
    ffc = ChemicalForceFields.MMFFGetMoleculeForceField(molobj_prime, ffprop)
    # Set angles and constrains for all torsions
    for i, angle in enumerate(delta_angles):
        set_angle = origin_angles[i] + angle
        # Set clockwork angle; rdkit can raise for some torsions — best effort
        try:
            Chem.rdMolTransforms.SetDihedralDeg(conformer_prime, *torsions[i], set_angle)
        except Exception:
            pass
        # Set forcefield constrain
        ffc.MMFFAddTorsionConstraint(*torsions[i], False,
            set_angle-delta, set_angle+delta, 1.0e10)
    # minimize constrains
    status = run_forcefield(ffc, 500)
    # Set result
    coordinates = conformer_prime.GetPositions()
    coordinates = np.round(coordinates, coord_decimals)  # rdkit hack, see calculate_forcefield docstring
    try:
        energy, ocoordinates = quantum.optmize_conformation(atoms, coordinates)
        status = 0
        coordinates = ocoordinates
        if reference_smiles is not None:
            # Reject geometries whose connectivity changed
            if quantum.get_smiles(atoms, coordinates) != reference_smiles:
                status = 5
    except Exception:
        # Quantum backend failure -> flag with status 4, energy 0
        energy = 0.0
        status = 4
    return energy, coordinates, status
def calculate_forcefield(molobj, conformer, torsions, origin_angles, delta_angles,
        ffprop=None,
        ff=None,
        delta=10**-7,
        coord_decimals=6,
        grad_threshold=100):
    """Constrained MMFF minimization followed by a free global minimization.

    Disclaimer (original author): lots of hacks.

    Note: there is an artifact where, if delta < 10**-16, the FF finds an
    *extremely* local minimum with very high (un-physical) energy.
    Keeping delta around 10**-6 (numerical noise) avoids this.

    Note: rdkit's restrained optimization can land in a very local,
    unphysical minimum the global optimizer cannot escape. Truncating the
    coordinates to *coord_decimals* digits is a crude but reproducible
    way to nudge the molecule out of it.

    Fixes: the bare ``except:`` around SetDihedralDeg narrowed to
    ``except Exception``; removed the dead debug-dump block that was
    guarded by a hard-coded ``debug = False`` flag (it was unreachable
    and called quit()).
    """
    if ffprop is None or ff is None:
        ffprop, ff = get_forcefield(molobj)
    sdfstr = cheminfo.molobj_to_sdfstr(molobj)
    molobj_prime, status = cheminfo.sdfstr_to_molobj(sdfstr)
    conformer_prime = molobj_prime.GetConformer()
    # Setup constrained forcefield on the deep copy
    ffc = ChemicalForceFields.MMFFGetMoleculeForceField(molobj_prime, ffprop)
    # Set angles and constrains for all torsions
    for i, angle in enumerate(delta_angles):
        set_angle = origin_angles[i] + angle
        # Set clockwork angle; rdkit can raise for some torsions — best effort
        try:
            Chem.rdMolTransforms.SetDihedralDeg(conformer_prime, *torsions[i], set_angle)
        except Exception:
            pass
        # Set forcefield constrain
        ffc.MMFFAddTorsionConstraint(*torsions[i], False,
            set_angle-delta, set_angle+delta, 1.0e10)
    # minimize constrains
    status = run_forcefield(ffc, 500)
    # Set result back on the caller's conformer
    coordinates = conformer_prime.GetPositions()
    coordinates = np.round(coordinates, coord_decimals)  # rdkit hack, read docstring
    cheminfo.conformer_set_coordinates(conformer, coordinates)
    # minimize global
    status = run_forcefield_prime(ff, 700, force=1e-4)
    # Get current energy
    energy = ff.CalcEnergy()
    if status == 0:
        # Large residual gradient => not actually converged
        grad = np.array(ff.CalcGrad())
        grad_norm = linalg.norm(grad)
        if grad_norm > grad_threshold:
            status = 4
    # Get current positions
    pos = conformer.GetPositions()
    return energy, pos, status
def set_coordinates(conformer, coordinates):
    """Copy *coordinates* onto *conformer*, atom by atom."""
    for index, position in enumerate(coordinates):
        conformer.SetAtomPosition(index, position)
def run_job(molobj, tordb, jobstr):
    """Run a single 'molid,<torsion idxs>,resolution' job string.

    The torsion field holds whitespace-separated indices into *tordb*.
    Returns (energies, coordinates) from the clockwork scan.
    """
    molid_field, torsion_field, resolution_field = jobstr.split(",")
    int(molid_field)  # validated but not used here
    resolution = int(resolution_field)
    torsions = [tordb[int(idx)] for idx in torsion_field.split()]
    return get_clockwork_conformations(molobj, torsions, resolution)
###
def run_jobfile(molobjs, tordbs, filename, threads=0):
    """Run every job line in *filename*, threaded if threads > 0."""
    # Cache the original geometry of every molecule
    origins = [cheminfo.molobj_to_xyz(molobj)[1] for molobj in molobjs]
    with open(filename, 'r') as handle:
        lines = [line.strip() for line in handle.readlines()]
    if threads > 0:
        run_joblines_threads(origins, molobjs, tordbs, lines, threads=threads, dump=False)
    else:
        run_joblines(origins, molobjs, tordbs, lines, dump=False)
    return True
def run_joblines_threads(origins, molobjs, tordbs, lines, threads=1, show_bar=True, dump=False):
    """Process job *lines* in a multiprocessing pool of *threads* workers.

    Fix: the pool was created but never closed/joined (worker-process
    leak); it is now managed by a ``with`` block.
    """
    # TODO Collect the conformers and return them
    worker = partial(run_jobline, origins, molobjs, tordbs, dump=dump)
    with multiprocessing.Pool(threads) as pool:
        if not show_bar:
            pool.map(worker, lines)
        else:
            pbar = tqdm(total=len(lines))
            for _ in pool.imap_unordered(worker, lines):
                pbar.update()
            pbar.close()
    return True
def run_joblines(origins, molobjs, tordbs, lines, dump=False):
    """Run each job line sequentially with a progress bar.

    Note: per-job energies/coordinates are not collected here; results
    only reach disk when dump=True inside run_jobline.

    Cleanup: removed the dead ``lines_energies``/``lines_coordinates``
    accumulators that were never appended to or returned.
    """
    for i, line in enumerate(tqdm(lines)):
        run_jobline(origins, molobjs, tordbs, line, prefix=i, dump=dump)
    return True
def run_jobline(origins, molobjs, tordbs, line,
        prefix=None,
        debug=False,
        dump=False):
    """Run one 'molid,<torsion idxs>,resolution' job line.

    Resets the molecule to its original geometry, runs the clockwork
    job and, when dump=True, writes all resulting conformers to
    _tmp_data/<prefix>.sdf. Returns (energies, coordinates).

    Fixes: ``filename`` was referenced in the debug print even when
    dump=False (UnboundLocalError); the output SDF handle was never
    closed — both now handled.
    """
    # TODO multiple molobjs
    line = line.strip()
    # Locate molobj and its torsion table
    molid = int(line.split(",")[0])
    tordb = tordbs[molid]
    # deep copy so the cached molecule is never mutated
    molobj = copy.deepcopy(molobjs[molid])
    cheminfo.molobj_set_coordinates(molobj, origins[molid])
    filename = None
    if dump:
        if prefix is None:
            prefix = line.replace(" ", "_").replace(",", ".")
        filename = "_tmp_data/{:}.sdf".format(prefix)
    job_start = time.time()
    job_energies, job_coordinates = run_job(molobj, tordb, line)
    job_end = time.time()
    if debug:
        print(line, "-", len(job_energies), "{:5.2f}".format(job_end-job_start), filename)
    if dump:
        if debug:
            print("saving {:} confs to".format(len(job_energies)), filename)
        with open(filename, 'w') as fsdf:
            for energy, coordinates in zip(job_energies, job_coordinates):
                fsdf.write(cheminfo.save_molobj(molobj, coordinates))
    return job_energies, job_coordinates
#####
def read_tordb(filename):
    """Parse a torsion-index file.

    Each line looks like "id: i j k l, i j k l, ..."; the id before the
    colon is ignored. Returns one integer array of shape (n_torsions, 4)
    per line.
    """
    with open(filename) as handle:
        content = handle.readlines()
    tordb = []
    for line in content:
        torsion_field = line.split(":")[1]
        rows = [np.array(chunk.split(), dtype=int) for chunk in torsion_field.split(",")]
        tordb.append(np.asarray(rows, dtype=int))
    return tordb
def main_redis(args):
    """Connect to the redis task queue and serve clockwork jobs.

    Fix: the connection-string branch read the nonexistent attribute
    ``args.redis_connection_str``; argparse stores both --redis-connect
    and --redis-connect-str under ``args.redis_connect``, so taking that
    branch raised AttributeError.
    """
    redis_task = args.redis_task
    if args.redis_connect is not None:
        redis_connection = args.redis_connect
    else:
        if not os.path.exists(args.redis_connect_file):
            print("error: redis connection not set and file does not exists")
            print("error: path", args.redis_connect_file)
            quit()
        with open(args.redis_connect_file, 'r') as f:
            redis_connection = f.read().strip()
    if args.debug:
        print("redis: connecting to", redis_connection)
    tasks = rediscomm.Taskqueue(redis_connection, redis_task)
    # Prepare moldb
    molecules = [molobj for molobj in cheminfo.read_sdffile(args.sdf)]
    # Prepare tordb
    if args.sdftor is None:
        tordb = [cheminfo.get_torsions(molobj) for molobj in molecules]
    else:
        tordb = read_tordb(args.sdftor)
    # Original coordinates per molecule
    origins = [cheminfo.molobj_get_coordinates(molobj) for molobj in molecules]
    # TODO if threads is > 0 then make more redis_workers
    do_work = lambda x: redis_worker(origins, molecules, tordb, x, debug=args.debug)
    tasks.main_loop(do_work)
    return
def redis_worker(origins, moldb, tordb, lines, debug=False):
    """Process one redis work package (a single job line).

    Returns (payload, status, storestring); storestring encodes the
    molecule id, the torsion count and the resolution.
    """
    # TODO Prepare for multiple lines
    line = lines
    t0 = time.time()
    energies, coordinates = run_jobline(origins, moldb, tordb, line, debug=debug)
    # Prepare dump
    results = prepare_redis_dump(energies, coordinates)
    t1 = time.time()
    print("workpackage {:} - {:5.3f}s".format(line, t1 - t0))
    # Replace the torsion-index field by its length: "Results_mol_ntor_res"
    fields = line.split(",")
    fields[1] = str(len(fields[1].split(" ")))
    storestring = "Results_" + "_".join(fields)
    # Only errors are logged; an empty status is stored as None
    status = None
    return results, status, storestring
def prepare_redis_dump(energies, coordinates, coord_decimals=DEFAULT_DECIMALS):
    """Serialize (energy, coordinates) pairs to newline-separated JSON.

    Coordinates are rounded to *coord_decimals* digits and flattened;
    spaces are stripped to keep the payload compact.
    """
    dumped = []
    for energy, coord in zip(energies, coordinates):
        flat = np.round(coord, coord_decimals).flatten().tolist()
        payload = json.dumps([energy, flat]).replace(" ", "")
        dumped.append(payload)
    return "\n".join(dumped)
def main_file(args):
    """File-mode entry point: run a job file or print job strings."""
    molobjs = [molobj for molobj in cheminfo.read_sdffile(args.sdf)]
    if args.sdftor:
        tordb = read_tordb(args.sdftor)
    else:
        tordb = [cheminfo.get_torsions(molobj) for molobj in molobjs]
    if args.jobfile:
        run_jobfile(molobjs, tordb, args.jobfile, threads=args.threads)
    else:
        # TODO Base on tordb
        generate_jobs(molobjs, args, tordb=tordb)
    return
def main():
    """Command-line entry point: dispatch to file mode or redis mode."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--version', action='version', version="1.0")
    parser.add_argument('--sdf', type=str, help='SDF file', metavar='file', default="~/db/qm9s.sdf.gz")
    parser.add_argument('--sdftor', type=str, help='Torsion indexes for the SDF file', metavar='file', default=None)
    parser.add_argument('-j', '--threads', type=int, default=0)
    parser.add_argument('--jobcombos', nargs="+", help="", metavar="str")
    # OR
    parser.add_argument('--jobfile', type=str, help='txt of jobs', metavar='file')
    # OR
    parser.add_argument('--redis-task', help="redis task name", default=None)
    parser.add_argument('--redis-connect', '--redis-connect-str', help="connection to str redis server", default=None)
    parser.add_argument('--redis-connect-file', help="connection to redis server", default="~/db/redis_connection")
    parser.add_argument('--debug', action="store_true", help="", default=False)
    args = parser.parse_args()
    if "~" in args.sdf:
        args.sdf = correct_userpath(args.sdf)
    # Redis mode wins when a task name is given; otherwise file mode
    if args.redis_task is not None:
        if "~" in args.redis_connect_file:
            args.redis_connect_file = correct_userpath(args.redis_connect_file)
        main_redis(args)
    else:
        main_file(args)
    return
if __name__ == '__main__':
    # Script entry point: parse CLI args and dispatch (file or redis mode)
    main()
| [
"clockwork.generate_torsion_combinations",
"similarity_fchl19.get_representation",
"quantum.optmize_conformation",
"chemhelp.cheminfo.convert_atom",
"communication.rediscomm.Taskqueue",
"chemhelp.cheminfo.sdfstr_to_molobj",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"rdkit.Chem.rdMolTran... | [((528, 562), 'joblib.Memory', 'joblib.Memory', (['cachedir'], {'verbose': '(0)'}), '(cachedir, verbose=0)\n', (541, 562), False, 'import joblib\n'), ((653, 681), 'os.path.expanduser', 'os.path.expanduser', (['filepath'], {}), '(filepath)\n', (671, 681), False, 'import os\n'), ((726, 779), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties', 'ChemicalForceFields.MMFFGetMoleculeProperties', (['molobj'], {}), '(molobj)\n', (771, 779), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((797, 858), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['molobj', 'ffprop'], {}), '(molobj, ffprop)\n', (842, 858), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((1434, 1496), 'clockwork.generate_torsion_combinations', 'clockwork.generate_torsion_combinations', (['total_torsions', 'n_tor'], {}), '(total_torsions, n_tor)\n', (1473, 1496), False, 'import clockwork\n'), ((1671, 1729), 'clockwork.generate_costlist', 'clockwork.generate_costlist', ([], {'total_torsions': 'total_torsions'}), '(total_torsions=total_torsions)\n', (1698, 1729), False, 'import clockwork\n'), ((4236, 4266), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (4258, 4266), False, 'from chemhelp import cheminfo\n'), ((4395, 4453), 'clockwork.generate_costlist', 'clockwork.generate_costlist', ([], {'total_torsions': 'total_torsions'}), '(total_torsions=total_torsions)\n', (4422, 4453), False, 'import clockwork\n'), ((7074, 7139), 'clockwork.generate_clockwork_combinations', 'clockwork.generate_clockwork_combinations', (['resolution', 'n_torsions'], {}), '(resolution, n_torsions)\n', (7115, 7139), False, 'import clockwork\n'), ((9573, 9618), 'merge.merge', 'merge.merge', (['atoms', 'energies', 'representations'], {}), '(atoms, energies, representations)\n', (9584, 9618), False, 'import merge\n'), ((10343, 10384), 'merge.merge', 'merge.merge', 
(['atoms', 'energies', 'coordinates'], {}), '(atoms, energies, coordinates)\n', (10354, 10384), False, 'import merge\n'), ((10602, 10623), 'copy.deepcopy', 'copy.deepcopy', (['molobj'], {}), '(molobj)\n', (10615, 10623), False, 'import copy\n'), ((11324, 11374), 'clockwork.generate_angles', 'clockwork.generate_angles', (['resolutions', 'n_torsions'], {}), '(resolutions, n_torsions)\n', (11349, 11374), False, 'import clockwork\n'), ((13356, 13386), 'chemhelp.cheminfo.read_sdffile', 'cheminfo.read_sdffile', (['sdffile'], {}), '(sdffile)\n', (13377, 13386), False, 'from chemhelp import cheminfo\n'), ((13945, 13978), 'chemhelp.cheminfo.molobj_to_sdfstr', 'cheminfo.molobj_to_sdfstr', (['molobj'], {}), '(molobj)\n', (13970, 13978), False, 'from chemhelp import cheminfo\n'), ((14006, 14039), 'chemhelp.cheminfo.sdfstr_to_molobj', 'cheminfo.sdfstr_to_molobj', (['sdfstr'], {}), '(sdfstr)\n', (14031, 14039), False, 'from chemhelp import cheminfo\n'), ((14191, 14258), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['molobj_prime', 'ffprop'], {}), '(molobj_prime, ffprop)\n', (14236, 14258), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((14841, 14878), 'numpy.round', 'np.round', (['coordinates', 'coord_decimals'], {}), '(coordinates, coord_decimals)\n', (14849, 14878), True, 'import numpy as np\n'), ((16666, 16699), 'chemhelp.cheminfo.molobj_to_sdfstr', 'cheminfo.molobj_to_sdfstr', (['molobj'], {}), '(molobj)\n', (16691, 16699), False, 'from chemhelp import cheminfo\n'), ((16727, 16760), 'chemhelp.cheminfo.sdfstr_to_molobj', 'cheminfo.sdfstr_to_molobj', (['sdfstr'], {}), '(sdfstr)\n', (16752, 16760), False, 'from chemhelp import cheminfo\n'), ((16912, 16979), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['molobj_prime', 'ffprop'], {}), '(molobj_prime, ffprop)\n', (16957, 16979), False, 'from rdkit.Chem import AllChem, 
ChemicalForceFields\n'), ((17562, 17599), 'numpy.round', 'np.round', (['coordinates', 'coord_decimals'], {}), '(coordinates, coord_decimals)\n', (17570, 17599), True, 'import numpy as np\n'), ((17635, 17693), 'chemhelp.cheminfo.conformer_set_coordinates', 'cheminfo.conformer_set_coordinates', (['conformer', 'coordinates'], {}), '(conformer, coordinates)\n', (17669, 17693), False, 'from chemhelp import cheminfo\n'), ((20606, 20635), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {}), '(threads)\n', (20626, 20635), False, 'import multiprocessing\n'), ((21588, 21609), 'copy.deepcopy', 'copy.deepcopy', (['molobj'], {}), '(molobj)\n', (21601, 21609), False, 'import copy\n'), ((21614, 21669), 'chemhelp.cheminfo.molobj_set_coordinates', 'cheminfo.molobj_set_coordinates', (['molobj', 'origins[molid]'], {}), '(molobj, origins[molid])\n', (21645, 21669), False, 'from chemhelp import cheminfo\n'), ((21912, 21923), 'time.time', 'time.time', ([], {}), '()\n', (21921, 21923), False, 'import time\n'), ((22005, 22016), 'time.time', 'time.time', ([], {}), '()\n', (22014, 22016), False, 'import time\n'), ((23452, 23501), 'communication.rediscomm.Taskqueue', 'rediscomm.Taskqueue', (['redis_connection', 'redis_task'], {}), '(redis_connection, redis_task)\n', (23471, 23501), False, 'from communication import rediscomm\n'), ((23540, 23571), 'chemhelp.cheminfo.read_sdffile', 'cheminfo.read_sdffile', (['args.sdf'], {}), '(args.sdf)\n', (23561, 23571), False, 'from chemhelp import cheminfo\n'), ((24412, 24423), 'time.time', 'time.time', ([], {}), '()\n', (24421, 24423), False, 'import time\n'), ((24597, 24608), 'time.time', 'time.time', ([], {}), '()\n', (24606, 24608), False, 'import time\n'), ((25464, 25495), 'chemhelp.cheminfo.read_sdffile', 'cheminfo.read_sdffile', (['args.sdf'], {}), '(args.sdf)\n', (25485, 25495), False, 'from chemhelp import cheminfo\n'), ((25924, 25949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (25947, 25949), False, 
'import argparse\n'), ((4721, 4732), 'time.time', 'time.time', ([], {}), '()\n', (4730, 4732), False, 'import time\n'), ((4801, 4863), 'clockwork.generate_torsion_combinations', 'clockwork.generate_torsion_combinations', (['total_torsions', 'n_tor'], {}), '(total_torsions, n_tor)\n', (4840, 4863), False, 'import clockwork\n'), ((6498, 6509), 'time.time', 'time.time', ([], {}), '()\n', (6507, 6509), False, 'import time\n'), ((6989, 7036), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {'atom_type': '"""int"""'}), "(molobj, atom_type='int')\n", (7011, 7036), False, 'from chemhelp import cheminfo\n'), ((7334, 7345), 'time.time', 'time.time', ([], {}), '()\n', (7343, 7345), False, 'import time\n'), ((7551, 7577), 'numpy.argwhere', 'np.argwhere', (['(c_states == 0)'], {}), '(c_states == 0)\n', (7562, 7577), True, 'import numpy as np\n'), ((7903, 7932), 'numpy.asarray', 'np.asarray', (['c_representations'], {}), '(c_representations)\n', (7913, 7932), True, 'import numpy as np\n'), ((8488, 8587), 'merge.merge_asymmetric', 'merge.merge_asymmetric', (['atoms', 'c_energies', 'end_energies', 'c_representations', 'end_representations'], {}), '(atoms, c_energies, end_energies, c_representations,\n end_representations)\n', (8510, 8587), False, 'import merge\n'), ((9064, 9075), 'time.time', 'time.time', ([], {}), '()\n', (9073, 9075), False, 'import time\n'), ((9978, 10002), 'numpy.argwhere', 'np.argwhere', (['(states == 0)'], {}), '(states == 0)\n', (9989, 10002), True, 'import numpy as np\n'), ((11184, 11237), 'rdkit.Chem.rdMolTransforms.GetDihedralDeg', 'Chem.rdMolTransforms.GetDihedralDeg', (['conformer', '*idxs'], {}), '(conformer, *idxs)\n', (11219, 11237), False, 'from rdkit import Chem\n'), ((11612, 11644), 'chemhelp.cheminfo.molobj_to_atoms', 'cheminfo.molobj_to_atoms', (['molobj'], {}), '(molobj)\n', (11636, 11644), False, 'from chemhelp import cheminfo\n'), ((11730, 11763), 'quantum.get_smiles', 'quantum.get_smiles', (['atoms', 'origin'], 
{}), '(atoms, origin)\n', (11748, 11763), False, 'import quantum\n'), ((12674, 12694), 'numpy.asarray', 'np.asarray', (['energies'], {}), '(energies)\n', (12684, 12694), True, 'import numpy as np\n'), ((12696, 12719), 'numpy.asarray', 'np.asarray', (['coordinates'], {}), '(coordinates)\n', (12706, 12719), True, 'import numpy as np\n'), ((12721, 12739), 'numpy.asarray', 'np.asarray', (['states'], {}), '(states)\n', (12731, 12739), True, 'import numpy as np\n'), ((13502, 13532), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (13524, 13532), False, 'from chemhelp import cheminfo\n'), ((14968, 15016), 'quantum.optmize_conformation', 'quantum.optmize_conformation', (['atoms', 'coordinates'], {}), '(atoms, coordinates)\n', (14996, 15016), False, 'import quantum\n'), ((17893, 17907), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (17901, 17907), True, 'import numpy as np\n'), ((17928, 17945), 'numpy.linalg.norm', 'linalg.norm', (['grad'], {}), '(grad)\n', (17939, 17945), False, 'from numpy import linalg\n'), ((18254, 18287), 'chemhelp.cheminfo.sdfstr_to_molobj', 'cheminfo.sdfstr_to_molobj', (['sdfstr'], {}), '(sdfstr)\n', (18279, 18287), False, 'from chemhelp import cheminfo\n'), ((18343, 18400), 'chemhelp.cheminfo.molobj_set_coordinates', 'cheminfo.molobj_set_coordinates', (['molobj_test', 'coordinates'], {}), '(molobj_test, coordinates)\n', (18374, 18400), False, 'from chemhelp import cheminfo\n'), ((18711, 18739), 'chemhelp.cheminfo.save_molobj', 'cheminfo.save_molobj', (['molobj'], {}), '(molobj)\n', (18731, 18739), False, 'from chemhelp import cheminfo\n'), ((19082, 19115), 'chemhelp.cheminfo.save_molobj', 'cheminfo.save_molobj', (['molobj_test'], {}), '(molobj_test)\n', (19102, 19115), False, 'from chemhelp import cheminfo\n'), ((20039, 20069), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (20061, 20069), False, 'from chemhelp import cheminfo\n'), ((21119, 21130), 
'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (21123, 21130), False, 'from tqdm import tqdm\n'), ((22805, 22836), 'numpy.asarray', 'np.asarray', (['torsions'], {'dtype': 'int'}), '(torsions, dtype=int)\n', (22815, 22836), True, 'import numpy as np\n'), ((23873, 23912), 'chemhelp.cheminfo.molobj_get_coordinates', 'cheminfo.molobj_get_coordinates', (['molobj'], {}), '(molobj)\n', (23904, 23912), False, 'from chemhelp import cheminfo\n'), ((25283, 25301), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (25293, 25301), False, 'import json\n'), ((2775, 2804), 'chemhelp.cheminfo.get_torsions', 'cheminfo.get_torsions', (['molobj'], {}), '(molobj)\n', (2796, 2804), False, 'from chemhelp import cheminfo\n'), ((5060, 5071), 'time.time', 'time.time', ([], {}), '()\n', (5069, 5071), False, 'import time\n'), ((5348, 5359), 'time.time', 'time.time', ([], {}), '()\n', (5357, 5359), False, 'import time\n'), ((7798, 7840), 'similarity_fchl19.get_representation', 'sim.get_representation', (['atoms', 'coordinates'], {}), '(atoms, coordinates)\n', (7820, 7840), True, 'import similarity_fchl19 as sim\n'), ((11666, 11693), 'chemhelp.cheminfo.convert_atom', 'cheminfo.convert_atom', (['atom'], {}), '(atom)\n', (11687, 11693), False, 'from chemhelp import cheminfo\n'), ((12302, 12313), 'time.time', 'time.time', ([], {}), '()\n', (12311, 12313), False, 'import time\n'), ((12464, 12475), 'time.time', 'time.time', ([], {}), '()\n', (12473, 12475), False, 'import time\n'), ((14444, 14521), 'rdkit.Chem.rdMolTransforms.SetDihedralDeg', 'Chem.rdMolTransforms.SetDihedralDeg', (['conformer_prime', '*torsions[i]', 'set_angle'], {}), '(conformer_prime, *torsions[i], set_angle)\n', (14479, 14521), False, 'from rdkit import Chem\n'), ((15138, 15176), 'quantum.get_smiles', 'quantum.get_smiles', (['atoms', 'coordinates'], {}), '(atoms, coordinates)\n', (15156, 15176), False, 'import quantum\n'), ((17165, 17242), 'rdkit.Chem.rdMolTransforms.SetDihedralDeg', 
'Chem.rdMolTransforms.SetDihedralDeg', (['conformer_prime', '*torsions[i]', 'set_angle'], {}), '(conformer_prime, *torsions[i], set_angle)\n', (17200, 17242), False, 'from rdkit import Chem\n'), ((18563, 18616), 'rdkit.Chem.rdMolTransforms.GetDihedralDeg', 'Chem.rdMolTransforms.GetDihedralDeg', (['conformer', '*idxs'], {}), '(conformer, *idxs)\n', (18598, 18616), False, 'from rdkit import Chem\n'), ((18947, 19000), 'rdkit.Chem.rdMolTransforms.GetDihedralDeg', 'Chem.rdMolTransforms.GetDihedralDeg', (['conformer', '*idxs'], {}), '(conformer, *idxs)\n', (18982, 19000), False, 'from rdkit import Chem\n'), ((20675, 20732), 'functools.partial', 'partial', (['run_jobline', 'origins', 'molobjs', 'tordbs'], {'dump': 'dump'}), '(run_jobline, origins, molobjs, tordbs, dump=dump)\n', (20682, 20732), False, 'from functools import partial\n'), ((22347, 22388), 'chemhelp.cheminfo.save_molobj', 'cheminfo.save_molobj', (['molobj', 'coordinates'], {}), '(molobj, coordinates)\n', (22367, 22388), False, 'from chemhelp import cheminfo\n'), ((23064, 23103), 'os.path.exists', 'os.path.exists', (['args.redis_connect_file'], {}), '(args.redis_connect_file)\n', (23078, 23103), False, 'import os\n'), ((23687, 23716), 'chemhelp.cheminfo.get_torsions', 'cheminfo.get_torsions', (['molobj'], {}), '(molobj)\n', (23708, 23716), False, 'from chemhelp import cheminfo\n'), ((25627, 25656), 'chemhelp.cheminfo.get_torsions', 'cheminfo.get_torsions', (['molobj'], {}), '(molobj)\n', (25648, 25656), False, 'from chemhelp import cheminfo\n'), ((5727, 5738), 'time.time', 'time.time', ([], {}), '()\n', (5736, 5738), False, 'import time\n'), ((5835, 5976), 'merge.merge_asymmetric', 'merge.merge_asymmetric', (['atoms', 'result_energies', 'cost_result_energies', 'result_coordinates', 'cost_result_coordinates'], {'decimals': '(2)', 'debug': '(True)'}), '(atoms, result_energies, cost_result_energies,\n result_coordinates, cost_result_coordinates, decimals=2, debug=True)\n', (5857, 5976), False, 'import merge\n'), 
((6347, 6358), 'time.time', 'time.time', ([], {}), '()\n', (6356, 6358), False, 'import time\n'), ((20840, 20897), 'functools.partial', 'partial', (['run_jobline', 'origins', 'molobjs', 'tordbs'], {'dump': 'dump'}), '(run_jobline, origins, molobjs, tordbs, dump=dump)\n', (20847, 20897), False, 'from functools import partial\n'), ((25182, 25213), 'numpy.round', 'np.round', (['coord', 'coord_decimals'], {}), '(coord, coord_decimals)\n', (25190, 25213), True, 'import numpy as np\n')] |
from sys import argv
from os.path import exists

# Copy the contents of one file to another, pausing for user confirmation.
# Usage: python copy.py <from_file> <to_file>
script, from_file, to_file = argv

print("Copying from %s to %s" % (from_file, to_file))

# BUG FIX: the original bound the source file object to the name `input`,
# which shadowed the builtin input(); the later "hit return" pause then
# called the file object and crashed with
# "TypeError: '_io.TextIOWrapper' object is not callable".
# The file object is now named `in_file`, and `with` guarantees both
# files are closed even if an error occurs mid-copy.
with open(from_file) as in_file:
    indata = in_file.read()

print("The input file is %d bytes long" % len(indata))
print("Does the output file exist? %r" % exists(to_file))
print("Ready, hit return to continue, CTRL_C to abort.")
input()  # builtin input(): block until the user presses return

with open(to_file, 'w') as out_file:
    out_file.write(indata)

print("Alright, all done.")
| [
"os.path.exists"
] | [((339, 354), 'os.path.exists', 'exists', (['to_file'], {}), '(to_file)\n', (345, 354), False, 'from os.path import exists\n')] |
from django.db import models
class Category(models.Model):
    """A label used to group Forum entries (see Forum.category M2M)."""
    # Required human-readable category name.
    name = models.CharField(max_length=255)
    # Optional free text. NOTE(review): null=True together with default=''
    # means both NULL and '' can represent "empty" for this column — confirm
    # whether that is intentional before changing it (would need a migration).
    description = models.CharField(max_length=255, null=True, default='')
    class Meta:
        verbose_name = "Category"
        verbose_name_plural = "Categories"
    def __str__(self):
        # Display the category by its name in the admin and shell.
        return self.name
class Forum(models.Model):
    """A forum entry: a short problem title plus its full statement,
    tagged with one or more Category records."""
    # Short title of the problem (required).
    problem = models.CharField(max_length=255)
    # Full text of the problem statement (up to 20000 chars).
    statement = models.CharField(max_length=20000)
    # A forum entry may belong to several categories, and a category
    # may contain several entries.
    category = models.ManyToManyField(Category)
    class Meta:
        verbose_name = "Forum"
        verbose_name_plural = "Forums"
    def __str__(self):
        # Display entries by their problem title.
        return self.problem
"django.db.models.ManyToManyField",
"django.db.models.CharField"
] | [((71, 103), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (87, 103), False, 'from django.db import models\n'), ((122, 177), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'default': '""""""'}), "(max_length=255, null=True, default='')\n", (138, 177), False, 'from django.db import models\n'), ((364, 396), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (380, 396), False, 'from django.db import models\n'), ((413, 447), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20000)'}), '(max_length=20000)\n', (429, 447), False, 'from django.db import models\n'), ((463, 495), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Category'], {}), '(Category)\n', (485, 495), False, 'from django.db import models\n')] |
from __future__ import unicode_literals
from __future__ import print_function
from fnmatch import fnmatch
import sys
from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite
from unittest import util
import unittest
from io import StringIO
from green.config import default_args
from green.output import GreenStream
from green.result import ProtoTest
class GreenTestSuite(TestSuite):
    """
    This version of a test suite has two important functions:
    1) It brings Python 3.x-like features to Python 2.7
    2) It adds Green-specific features (see customize())
    """
    # Class-level default configuration. Either set GreenTestSuite.args
    # before instantiating suites, or pass args to __init__ (which routes
    # through customize()).
    args = None
    def __init__(self, tests=(), args=None):
        # You should either set GreenTestSuite.args before instantiation, or
        # pass args into __init__
        self._removed_tests = 0
        self.allow_stdout = default_args.allow_stdout
        self.full_test_pattern = "test" + default_args.test_pattern
        self.customize(args)
        super(GreenTestSuite, self).__init__(tests)
    def addTest(self, test):
        """
        Override default behavior with some green-specific behavior.

        Tests whose method name does not match the configured test
        pattern are silently dropped instead of being added.
        """
        if (
            self.full_test_pattern
            # test can actually be suites and things. Only tests have
            # _testMethodName
            and getattr(test, "_testMethodName", False)
            # Fake test cases (generated for module import failures, for example)
            # do not start with 'test'. We still want to see those fake cases.
            and test._testMethodName.startswith("test")
        ):
            if not fnmatch(test._testMethodName, self.full_test_pattern):
                return
        super(GreenTestSuite, self).addTest(test)
    def customize(self, args):
        """
        Green-specific behavior customization via an args dictionary from
        the green.config module. If you don't pass in an args dictionary,
        then this class acts like TestSuite from Python 3.x.
        """
        # NOTE: assigning through self stores args on this instance,
        # shadowing the class-level default above.
        if args:
            self.args = args
        # Pick up any customizations present in the (instance or class) args.
        if self.args and getattr(self.args, "allow_stdout", None):
            self.allow_stdout = self.args.allow_stdout
        if self.args and getattr(self.args, "test_pattern", None):
            self.full_test_pattern = "test" + self.args.test_pattern
    def _removeTestAtIndex(self, index):
        """
        Python 3.x-like version of this function for Python 2.7's sake.

        Replaces the test at *index* with None (freeing it) while
        remembering how many test cases it contained, so that
        countTestCases() stays accurate afterwards.
        """
        test = self._tests[index]
        if hasattr(test, "countTestCases"):
            self._removed_tests += test.countTestCases()
        self._tests[index] = None
    def countTestCases(self):
        """
        Python 3.x-like version of this function for Python 2.7's sake.

        Counts both the tests still held by the suite and the ones
        already removed via _removeTestAtIndex().
        """
        cases = self._removed_tests
        for test in self:
            if test:
                cases += test.countTestCases()
        return cases
    def _handleClassSetUpPre38(self, test, result):  # pragma: nocover
        """Run setUpClass for *test*'s class (Python < 3.8 variant).

        Mirrors unittest.suite.TestSuite._handleClassSetUp, plus a Green
        fix: unittest.case.SkipTest raised inside setUpClass marks the
        whole class as skipped instead of erroring.
        """
        previousClass = getattr(result, "_previousTestClass", None)
        currentClass = test.__class__
        # Only run class setup when we cross into a new test class.
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):  # pragma: no cover
            return
        try:
            currentClass._classSetupFailed = False
        except TypeError:  # pragma: no cover
            # test may actually be a function
            # so its class will be a builtin-type
            pass
        setUpClass = getattr(currentClass, "setUpClass", None)
        if setUpClass is not None:
            _call_if_exists(result, "_setupStdout")
            try:
                setUpClass()
            # Upstream Python forgets to take SkipTest into account
            except unittest.case.SkipTest as e:
                currentClass.__unittest_skip__ = True
                currentClass.__unittest_skip_why__ = str(e)
            # -- END of fix
            except Exception as e:  # pragma: no cover
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = "setUpClass (%s)" % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, "_restoreStdout")
    def _handleClassSetUpPost38(
        self, test, result
    ):  # pragma: no cover -- because it's just like *Pre38
        """Run setUpClass for *test*'s class (Python >= 3.8 variant).

        Same as the Pre38 variant (including the SkipTest fix), but also
        runs doClassCleanups() and reports teardown exceptions, matching
        the newer unittest.suite implementation.
        """
        previousClass = getattr(result, "_previousTestClass", None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return
        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass
        setUpClass = getattr(currentClass, "setUpClass", None)
        if setUpClass is not None:
            _call_if_exists(result, "_setupStdout")
            try:
                setUpClass()
            # Upstream Python forgets to take SkipTest into account
            except unittest.case.SkipTest as e:
                currentClass.__unittest_skip__ = True
                currentClass.__unittest_skip_why__ = str(e)
            # -- END of fix
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                self._createClassOrModuleLevelException(
                    result, e, "setUpClass", className
                )
            finally:
                _call_if_exists(result, "_restoreStdout")
                if currentClass._classSetupFailed is True:
                    currentClass.doClassCleanups()
                    if len(currentClass.tearDown_exceptions) > 0:
                        for exc in currentClass.tearDown_exceptions:
                            self._createClassOrModuleLevelException(
                                result, exc[1], "setUpClass", className, info=exc
                            )
    # Select the class-setup implementation matching the running Python.
    if sys.version_info < (3, 8):  # pragma: no cover
        _handleClassSetUp = _handleClassSetUpPre38
    else:
        _handleClassSetUp = _handleClassSetUpPost38
    def run(self, result):
        """
        Emulate unittest's behavior, with Green-specific changes.

        The Green-specific part: unless allow_stdout is set, stdout and
        stderr are captured around each test and recorded on the result,
        and result.finalize() is called after each test so output can be
        shipped back to the parent process.
        """
        topLevel = False
        if getattr(result, "_testRunEntered", False) is False:
            result._testRunEntered = topLevel = True
        for index, test in enumerate(self):
            if result.shouldStop:
                break
            if _isnotsuite(test):
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__
                if getattr(test.__class__, "_classSetupFailed", False) or getattr(
                    result, "_moduleSetUpFailed", False
                ):
                    continue
                if not self.allow_stdout:
                    # Swap in GreenStream-wrapped buffers so test output can
                    # be recorded per-test; restored right after test(result).
                    captured_stdout = StringIO()
                    captured_stderr = StringIO()
                    saved_stdout = sys.stdout
                    saved_stderr = sys.stderr
                    sys.stdout = GreenStream(captured_stdout)
                    sys.stderr = GreenStream(captured_stderr)
            test(result)
            if _isnotsuite(test):
                if not self.allow_stdout:
                    # saved_stdout/captured_stdout were set in the matching
                    # _isnotsuite branch above for this same test.
                    sys.stdout = saved_stdout
                    sys.stderr = saved_stderr
                    result.recordStdout(test, captured_stdout.getvalue())
                    result.recordStderr(test, captured_stderr.getvalue())
                # Since we're intercepting the stdout/stderr out here at the
                # suite level, we need to poke the test result and let it know
                # when we're ready to transmit results back up to the parent
                # process. I would rather just do it automatically at test
                # stop time, but we don't have the captured stuff at that
                # point. Messy...but the only other alternative I can think of
                # is monkey-patching loaded TestCases -- which could be from
                # unittest or twisted or some other custom subclass.
                result.finalize()
            self._removeTestAtIndex(index)
        # Green's subprocesses have handled all actual tests and sent up the
        # result, but unittest expects to be able to add teardown errors to
        # the result still, so we'll need to watch for that ourself.
        errors_before = len(result.errors)
        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        # Special handling for class/module tear-down errors: addError() was
        # called directly on the result by unittest's teardown machinery, so
        # we pull those entries back off, dress them up as synthetic tests,
        # and replay them through startTest/addError/stopTest/finalize so
        # they are communicated to the runner process like normal results.
        if errors_before != len(result.errors):
            difference = len(result.errors) - errors_before
            result.errors, new_errors = (
                result.errors[:-difference],
                result.errors[-difference:],
            )
            for (test, err) in new_errors:
                # test = ProtoTest()
                test.module = result._previousTestClass.__module__
                test.class_name = result._previousTestClass.__name__
                # test.method_name = 'some method name'
                test.is_class_or_module_teardown_error = True
                test.name = "Error in class or module teardown"
                # test.docstr_part = 'docstr part' # error_holder.description
                result.startTest(test)
                result.addError(test, err)
                result.stopTest(test)
                result.finalize()
        return result
| [
"green.output.GreenStream",
"unittest.util.strclass",
"fnmatch.fnmatch",
"unittest.suite._call_if_exists",
"unittest.suite._isnotsuite",
"io.StringIO"
] | [((3672, 3711), 'unittest.suite._call_if_exists', '_call_if_exists', (['result', '"""_setupStdout"""'], {}), "(result, '_setupStdout')\n", (3687, 3711), False, 'from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite\n'), ((5213, 5252), 'unittest.suite._call_if_exists', '_call_if_exists', (['result', '"""_setupStdout"""'], {}), "(result, '_setupStdout')\n", (5228, 5252), False, 'from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite\n'), ((6960, 6977), 'unittest.suite._isnotsuite', '_isnotsuite', (['test'], {}), '(test)\n', (6971, 6977), False, 'from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite\n'), ((7792, 7809), 'unittest.suite._isnotsuite', '_isnotsuite', (['test'], {}), '(test)\n', (7803, 7809), False, 'from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite\n'), ((1589, 1642), 'fnmatch.fnmatch', 'fnmatch', (['test._testMethodName', 'self.full_test_pattern'], {}), '(test._testMethodName, self.full_test_pattern)\n', (1596, 1642), False, 'from fnmatch import fnmatch\n'), ((4430, 4471), 'unittest.suite._call_if_exists', '_call_if_exists', (['result', '"""_restoreStdout"""'], {}), "(result, '_restoreStdout')\n", (4445, 4471), False, 'from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite\n'), ((5948, 5989), 'unittest.suite._call_if_exists', '_call_if_exists', (['result', '"""_restoreStdout"""'], {}), "(result, '_restoreStdout')\n", (5963, 5989), False, 'from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite\n'), ((4232, 4259), 'unittest.util.strclass', 'util.strclass', (['currentClass'], {}), '(currentClass)\n', (4245, 4259), False, 'from unittest import util\n'), ((5753, 5780), 'unittest.util.strclass', 'util.strclass', (['currentClass'], {}), '(currentClass)\n', (5766, 5780), False, 'from unittest import util\n'), ((7474, 7484), 'io.StringIO', 'StringIO', ([], {}), '()\n', (7482, 7484), False, 'from io 
import StringIO\n'), ((7523, 7533), 'io.StringIO', 'StringIO', ([], {}), '()\n', (7531, 7533), False, 'from io import StringIO\n'), ((7659, 7687), 'green.output.GreenStream', 'GreenStream', (['captured_stdout'], {}), '(captured_stdout)\n', (7670, 7687), False, 'from green.output import GreenStream\n'), ((7721, 7749), 'green.output.GreenStream', 'GreenStream', (['captured_stderr'], {}), '(captured_stderr)\n', (7732, 7749), False, 'from green.output import GreenStream\n')] |
"""Persistence abstraction.
Limits persistence interactions - all persisted data goes through this layer.
"""
from django.contrib.auth.models import User
from spudblog.models import Blog, Post
###### API
def get_blogs(user_id=None):
    """Return blogs ordered by creation date.

    If *user_id* is given, return only that author's blogs; otherwise
    return all blogs.

    BUG FIX: the original used a truthiness test (``if user_id:``), so a
    falsy-but-present id (e.g. 0) silently returned *all* blogs instead
    of filtering. ``is not None`` makes "not supplied" the only way to
    get the unfiltered queryset.
    """
    if user_id is not None:
        return Blog.objects.filter(author_id=user_id).order_by('date_created')
    return Blog.objects.order_by('date_created')
### Common API
def all_as_json():
    """Debug api call, return all users, their blogs and posts."""
    return [
        {
            'id': account.id,
            'username': account.username,
            'blogs': [
                blog.as_json()
                for blog in account.blog_set.order_by('date_created')
            ],
        }
        for account in User.objects.order_by('username')
    ]
def get_full_blog(blog_id):
    """Fetch a single blog (title, id, posts) by its id."""
    blog = Blog.objects.get(id=blog_id)
    return blog
### Blog API
def create_blog(user_id, blog):
    """Persist and return a new Blog owned by *user_id*.

    *blog* is a dict providing the 'title' and 'background' values.
    """
    record = Blog(
        author_id=user_id,
        title=blog['title'],
        background=blog['background'],
    )
    record.save()
    return record
def update_blog(blog):
    """Apply the optional 'title'/'background' keys of *blog* to the
    stored Blog identified by blog['id']; return the saved record."""
    record = Blog.objects.get(id=blog['id'])
    # Copy over only the fields that were actually supplied.
    for field in ('title', 'background'):
        if field in blog:
            setattr(record, field, blog[field])
    record.save()
    return record
def del_blog(blog_id):
    """Delete the blog with *blog_id* (and its posts); return {'id': blog_id}."""
    blog = Blog.objects.get(id=blog_id)
    # Posts are removed explicitly before deleting the blog itself.
    for post in blog.post_set.all():
        post.delete()
    blog.delete()
    return {'id': blog_id}
### Post API
def create_post(blog_id, post):
    """Persist and return a new Post attached to the blog *blog_id*.

    *post* is a dict providing the 'title' and 'content' values.
    """
    record = Post(
        blog_id=blog_id,
        title=post['title'],
        content=post['content'],
    )
    record.save()
    return record
def update_post(post):
    """Apply the optional 'title'/'content' keys of *post* to the stored
    Post identified by post['id']; return the saved record."""
    record = Post.objects.get(id=post['id'])
    # Copy over only the fields that were actually supplied.
    for field in ('title', 'content'):
        if field in post:
            setattr(record, field, post[field])
    record.save()
    return record
def del_post(post_id):
    """Delete the post with *post_id*; return {'id': post_id}."""
    Post.objects.get(id=post_id).delete()
    return {'id': post_id}
| [
"spudblog.models.Blog.objects.filter",
"spudblog.models.Post",
"spudblog.models.Post.objects.get",
"spudblog.models.Blog.objects.get",
"django.contrib.auth.models.User.objects.order_by",
"spudblog.models.Blog",
"spudblog.models.Blog.objects.order_by"
] | [((526, 559), 'django.contrib.auth.models.User.objects.order_by', 'User.objects.order_by', (['"""username"""'], {}), "('username')\n", (547, 559), False, 'from django.contrib.auth.models import User\n'), ((818, 846), 'spudblog.models.Blog.objects.get', 'Blog.objects.get', ([], {'id': 'blog_id'}), '(id=blog_id)\n', (834, 846), False, 'from spudblog.models import Blog, Post\n'), ((909, 984), 'spudblog.models.Blog', 'Blog', ([], {'author_id': 'user_id', 'title': "blog['title']", 'background': "blog['background']"}), "(author_id=user_id, title=blog['title'], background=blog['background'])\n", (913, 984), False, 'from spudblog.models import Blog, Post\n'), ((1109, 1140), 'spudblog.models.Blog.objects.get', 'Blog.objects.get', ([], {'id': "blog['id']"}), "(id=blog['id'])\n", (1125, 1140), False, 'from spudblog.models import Blog, Post\n'), ((1382, 1410), 'spudblog.models.Blog.objects.get', 'Blog.objects.get', ([], {'id': 'blog_id'}), '(id=blog_id)\n', (1398, 1410), False, 'from spudblog.models import Blog, Post\n'), ((1623, 1690), 'spudblog.models.Post', 'Post', ([], {'blog_id': 'blog_id', 'title': "post['title']", 'content': "post['content']"}), "(blog_id=blog_id, title=post['title'], content=post['content'])\n", (1627, 1690), False, 'from spudblog.models import Blog, Post\n'), ((1815, 1846), 'spudblog.models.Post.objects.get', 'Post.objects.get', ([], {'id': "post['id']"}), "(id=post['id'])\n", (1831, 1846), False, 'from spudblog.models import Blog, Post\n'), ((2079, 2107), 'spudblog.models.Post.objects.get', 'Post.objects.get', ([], {'id': 'post_id'}), '(id=post_id)\n', (2095, 2107), False, 'from spudblog.models import Blog, Post\n'), ((357, 394), 'spudblog.models.Blog.objects.order_by', 'Blog.objects.order_by', (['"""date_created"""'], {}), "('date_created')\n", (378, 394), False, 'from spudblog.models import Blog, Post\n'), ((268, 306), 'spudblog.models.Blog.objects.filter', 'Blog.objects.filter', ([], {'author_id': 'user_id'}), '(author_id=user_id)\n', (287, 
306), False, 'from spudblog.models import Blog, Post\n')] |