| code | apis | extract_api |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs import token as tk
from core_backend.libs.exception import Error
from server.domain.models import WechatshopGood, WechatshopProduct, WechatshopGoodsSpecification, WechatshopGoodsGallery
from server.utils.tools import delete_file
import os
import re
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
""" 删除商品 """
def dispatch(self, session):
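# Soft-delete flow: remove the goods' image files from disk, then flag the
# goods row and its related product/specification rows as deleted (is_delete=1).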
req_body = self.context.request.body
resp_body = self.context.response.body
id = req_body.id
if not id:
raise Error(-1, 'Missing parameter')
# Delete the goods' images from disk
goods = session.query(WechatshopGood).filter(WechatshopGood.id == id).filter(WechatshopGood.is_delete == 0).first().to_dict()
img_url = goods['list_pic_url']
_, path = img_url.split('/static_source/')
pic_path = os.path.join(settings.STATIC_SOURCE_DIR, path)
delete_file(pic_path)
# Images embedded in the rich-text description have to be matched with a regex
for eu in re.findall('src="(.*?)"', goods['goods_desc']):
_, path = eu.split('/static_source/')
pic_path = os.path.join(settings.STATIC_SOURCE_DIR, path)
delete_file(pic_path)
goods_gallery = session.query(WechatshopGoodsGallery).filter(WechatshopGoodsGallery.goods_id == id).filter(WechatshopGoodsGallery.is_delete == 0).all()
for g in goods_gallery:
img_url = g.to_dict()['img_url']
_, path = img_url.split('/static_source/')
pic_path = os.path.join(settings.STATIC_SOURCE_DIR, path)
delete_file(pic_path)
session.query(WechatshopGood).filter(WechatshopGood.id == id).update({WechatshopGood.is_delete: 1})
session.query(WechatshopProduct).filter(WechatshopProduct.goods_id == id).update({WechatshopProduct.is_delete: 1})
session.query(WechatshopGoodsSpecification).filter(WechatshopGoodsSpecification.goods_id == id).update({WechatshopGoodsSpecification.is_delete: 1})
session.flush()
|
[
"core_backend.libs.exception.Error",
"server.utils.tools.delete_file",
"re.findall",
"os.path.join",
"logging.getLogger"
] |
[((515, 542), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (532, 542), False, 'import logging\n'), ((1060, 1106), 'os.path.join', 'os.path.join', (['settings.STATIC_SOURCE_DIR', 'path'], {}), '(settings.STATIC_SOURCE_DIR, path)\n', (1072, 1106), False, 'import os\n'), ((1115, 1136), 'server.utils.tools.delete_file', 'delete_file', (['pic_path'], {}), '(pic_path)\n', (1126, 1136), False, 'from server.utils.tools import delete_file\n'), ((1182, 1228), 're.findall', 're.findall', (['"""src="(.*?)\\""""', "goods['goods_desc']"], {}), '(\'src="(.*?)"\', goods[\'goods_desc\'])\n', (1192, 1228), False, 'import re\n'), ((783, 800), 'core_backend.libs.exception.Error', 'Error', (['(-1)', '"""参数缺失"""'], {}), "(-1, '参数缺失')\n", (788, 800), False, 'from core_backend.libs.exception import Error\n'), ((1303, 1349), 'os.path.join', 'os.path.join', (['settings.STATIC_SOURCE_DIR', 'path'], {}), '(settings.STATIC_SOURCE_DIR, path)\n', (1315, 1349), False, 'import os\n'), ((1362, 1383), 'server.utils.tools.delete_file', 'delete_file', (['pic_path'], {}), '(pic_path)\n', (1373, 1383), False, 'from server.utils.tools import delete_file\n'), ((1700, 1746), 'os.path.join', 'os.path.join', (['settings.STATIC_SOURCE_DIR', 'path'], {}), '(settings.STATIC_SOURCE_DIR, path)\n', (1712, 1746), False, 'import os\n'), ((1759, 1780), 'server.utils.tools.delete_file', 'delete_file', (['pic_path'], {}), '(pic_path)\n', (1770, 1780), False, 'from server.utils.tools import delete_file\n')]
|
# Import Packages
from .Util import encodeURIComponent
from .Exception import *
import requests
# Track Class
class Track():
def __init__(self, token: str):
self.token = token
def search(self, query: str, limit: int = 1):
link = 'https://api.spotify.com/v1/search'
header = {'Authorization': 'Bearer ' + self.token}
if not 0 < limit <= 50:
raise LimitOutOfRangeError('limit must be between 1 and 50')
return requests.request(
'GET',
link,
headers=header,
params={
'q': encodeURIComponent(query),
'type': 'track',
'limit': limit,
'market': 'US'
}
).json()
def get(self, trackID: str, advanced: bool = False):
data = requests.request(
'GET',
'https://api.spotify.com/v1/tracks/' + trackID,
headers={'Authorization': 'Bearer ' + self.token}
).json()
if advanced:
data['code_img'] = 'https://scannables.scdn.co/uri/plain/jpeg/1db954/white/1080/spotify:track:' + trackID
return data
def audio_features(self, trackID: str):
link = 'https://api.spotify.com/v1/audio-features/' + trackID
return requests.request(
'GET',
link,
headers={'Authorization': 'Bearer ' + self.token}
).json()
def audio_analysis(self, trackID: str):
link = 'https://api.spotify.com/v1/audio-analysis/' + trackID
return requests.request(
'GET',
link,
headers={'Authorization': 'Bearer ' + self.token}
).json()
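# Usage sketch (hypothetical token; the Spotify Web API expects a valid
# OAuth bearer token, e.g. one obtained via the client-credentials flow):
# track = Track(token)
# results = track.search('Bohemian Rhapsody', limit=5)
# first_id = results['tracks']['items'][0]['id']
# features = track.audio_features(first_id)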
|
[
"requests.request"
] |
[((816, 942), 'requests.request', 'requests.request', (['"""GET"""', "('https://api.spotify.com/v1/tracks/' + trackID)"], {'headers': "{'Authorization': 'Bearer ' + self.token}"}), "('GET', 'https://api.spotify.com/v1/tracks/' + trackID,\n headers={'Authorization': 'Bearer ' + self.token})\n", (832, 942), False, 'import requests\n'), ((1284, 1369), 'requests.request', 'requests.request', (['"""GET"""', 'link'], {'headers': "{'Authorization': 'Bearer ' + self.token}"}), "('GET', link, headers={'Authorization': 'Bearer ' + self.token}\n )\n", (1300, 1369), False, 'import requests\n'), ((1553, 1638), 'requests.request', 'requests.request', (['"""GET"""', 'link'], {'headers': "{'Authorization': 'Bearer ' + self.token}"}), "('GET', link, headers={'Authorization': 'Bearer ' + self.token}\n )\n", (1569, 1638), False, 'import requests\n')]
|
import torch
from logger.Logger import log
# uses global logger if available
class Metric():
def __init__(self, name):
self.name = name
log().add_plot(name, columns=("metric_value",))
def add(self, value):
self.add__(value)
def add_barrier(self, value):
self.add__(value)
def add__(self, value):
log().add_plot_point(self.name, value)
def get_plot(self):
return log().get_plot(self.name)
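# Minimal usage sketch (assumes the global logger from logger.Logger has
# been initialised elsewhere):
# loss = Metric('train_loss')
# loss.add(0.42)  # appends one point to the 'train_loss' plot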
|
[
"logger.Logger.log"
] |
[((157, 162), 'logger.Logger.log', 'log', ([], {}), '()\n', (160, 162), False, 'from logger.Logger import log\n'), ((356, 361), 'logger.Logger.log', 'log', ([], {}), '()\n', (359, 361), False, 'from logger.Logger import log\n'), ((435, 440), 'logger.Logger.log', 'log', ([], {}), '()\n', (438, 440), False, 'from logger.Logger import log\n')]
|
import numpy as np
from yaml import safe_load
import pandas as pd
import glob
from cricscraper.cricinfo import CricInfo
from cricscraper.matchinfo import MatchInfo
class CricSheet:
innings_name = ["1st innings", "2nd innings", "3rd innings", "4th innings"]
def __init__(self, files=None, folder=None):
if folder:
self.files = glob.glob("{}/*.yaml".format(folder))
else:
self.files = files
self.dataFrame = pd.DataFrame()
self.__parser()
@staticmethod
def __get_fielders(wicket):
if wicket != 0:
try:
return ", ".join(wicket.get('fielders'))
except TypeError:
return None
return None
def __parser(self):
ordered_columns = ['match id', 'inning', 'delivery', 'over', 'batsman', 'non striker', 'bowler', 'runs off bat', 'extras', 'total', 'extra kind', 'wicket kind', 'player out', 'fielders', 'team1', 'team2', 'outcome', 'winner', 'by', 'win amount', 'player of match','toss winner', 'toss decision', 'match type', 'venue', 'city', 'gender', 'umpire1','umpire2']
for filename in self.files:
with open(filename) as f_input:
data = safe_load(f_input)
innings = data['innings']
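# Each innings entry holds a list of single-key {ball_number: ball_data}
# mappings; the loop below pulls the per-ball fields out column by column.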
for i in range(len(innings)):
dict_innings = {}
try:
inning = innings[i][CricSheet.innings_name[i]]['deliveries']
except (KeyError, IndexError):
continue
dict_innings["inning"] = np.ones(len(inning), dtype=int) * (i+1)
dict_innings['delivery'] = [delivery for ball in inning for delivery in ball]
dict_innings['batsman'] = [list(ball.values())[0].get("batsman") for ball in inning]
dict_innings['non striker'] = [list(ball.values())[0].get("non_striker") for ball in inning]
dict_innings['bowler'] = [list(ball.values())[0].get("bowler") for ball in inning]
dict_innings["runs"] = [list(ball.values())[0].get('runs') for ball in inning]
dict_innings["wicket"] = [list(ball.values())[0].get('wicket', 0) for ball in inning]
dict_innings['extra kind1'] = [list(ball.values())[0].get('extras', 0) for ball in inning]
frame = pd.DataFrame(dict_innings)
dict_innings['runs off bat'] = frame['runs'].apply(lambda x: x.get('batsman'))
dict_innings['extras'] = frame['runs'].apply(lambda x: x.get('extras'))
dict_innings['total'] = frame['runs'].apply(lambda x: x.get('total')).cumsum()
dict_innings['extra kind'] = frame['extra kind1'].apply(lambda x: next(iter(x.keys())) if x != 0 else None)
dict_innings['over'] = frame['delivery'].apply(lambda x: np.ceil(x))
def fn(x):
try:
return x.get('kind') if x != 0 else None
except Exception:
return None
def fn1(x):
try:
return x.get('player_out') if x != 0 else None
except Exception:
return None
dict_innings['wicket kind'] = frame.wicket.apply(fn)
dict_innings['player out'] = frame.wicket.apply(fn1)
dict_innings['fielders'] = frame.wicket.apply(CricSheet.__get_fielders)
# get match info from Info class
match_info = MatchInfo(data["info"])
assign_info = match_info.dict_info()
assign_info['match id'] = int(filename.split('.')[0].split('/')[-1])
frame = pd.DataFrame(dict_innings).assign(**assign_info)
frame.drop(["runs", "wicket", "extra kind1"], axis=1, inplace=True)
self.dataFrame = pd.concat([self.dataFrame, frame])
self.dataFrame.reset_index(inplace=True, drop=True)
self.dataFrame = self.dataFrame[ordered_columns]
def view(self):
'''
Returns DataFrame.
DataFrame can be used directly for required purposes.
'''
return self.dataFrame
def save(self, filename="output"):
'''
Saves the converted csv file.
Parameter:
filename (string): name of the output csv file
optional: True
default: "output.csv"
'''
if filename.endswith(".csv"):
filename = filename.replace('.csv', '')
filename += ".csv"
print("File saved - {}".format(filename))
return self.dataFrame.to_csv(filename)
def get_more_info(self):
'''Returns dictionary of CricInfo object'''
data = {}
for file in self.files:
match_id = int(file.split('.')[0].split('/')[-1])
data[str(match_id)] = CricInfo(match_id)
return data
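# Usage sketch (hypothetical folder of Cricsheet YAML files):
# sheet = CricSheet(folder='matches')
# df = sheet.view()
# sheet.save('all_matches')  # writes all_matches.csv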
|
[
"pandas.DataFrame",
"cricscraper.matchinfo.MatchInfo",
"numpy.ceil",
"cricscraper.cricinfo.CricInfo",
"yaml.safe_load",
"pandas.concat"
] |
[((429, 443), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (441, 443), True, 'import pandas as pd\n'), ((4120, 4138), 'cricscraper.cricinfo.CricInfo', 'CricInfo', (['match_id'], {}), '(match_id)\n', (4128, 4138), False, 'from cricscraper.cricinfo import CricInfo\n'), ((1086, 1104), 'yaml.safe_load', 'safe_load', (['f_input'], {}), '(f_input)\n', (1095, 1104), False, 'from yaml import safe_load\n'), ((2014, 2040), 'pandas.DataFrame', 'pd.DataFrame', (['dict_innings'], {}), '(dict_innings)\n', (2026, 2040), True, 'import pandas as pd\n'), ((2962, 2985), 'cricscraper.matchinfo.MatchInfo', 'MatchInfo', (["data['info']"], {}), "(data['info'])\n", (2971, 2985), False, 'from cricscraper.matchinfo import MatchInfo\n'), ((3267, 3301), 'pandas.concat', 'pd.concat', (['[self.dataFrame, frame]'], {}), '([self.dataFrame, frame])\n', (3276, 3301), True, 'import pandas as pd\n'), ((2461, 2471), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (2468, 2471), True, 'import numpy as np\n'), ((3121, 3147), 'pandas.DataFrame', 'pd.DataFrame', (['dict_innings'], {}), '(dict_innings)\n', (3133, 3147), True, 'import pandas as pd\n')]
|
from pyinstaller_setuptools import setup
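# setup() below references README, which this snippet never defines; a common
# pattern (assumed here) is to load it from the package's README file:
with open("README.md") as f:
    README = f.read()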
setup(
name="mtool-1.04.1 measurement app",
version="1.04.1",
description="Read me",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/antkp/mtool.git",
author="<NAME>.",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
packages=["reader"],
include_package_data=True,
install_requires=[
"feedparser", "html2text", "importlib_resources", "typing"
],
entry_points={"console_scripts": ["realpython=reader.__main__:main"]},
)
|
[
"pyinstaller_setuptools.setup"
] |
[((44, 705), 'pyinstaller_setuptools.setup', 'setup', ([], {'name': '"""mtool-1.04.1 measurement app"""', 'version': '"""1.04.1"""', 'description': '"""Read me"""', 'long_description': 'README', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/antkp/mtool.git"""', 'author': '"""<NAME>."""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['License :: OSI Approved :: MIT License', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3']", 'packages': "['reader']", 'include_package_data': '(True)', 'install_requires': "['feedparser', 'html2text', 'importlib_resources', 'typing']", 'entry_points': "{'console_scripts': ['realpython=reader.__main__:main']}"}), "(name='mtool-1.04.1 measurement app', version='1.04.1', description=\n 'Read me', long_description=README, long_description_content_type=\n 'text/markdown', url='https://github.com/antkp/mtool.git', author=\n '<NAME>.', author_email='<EMAIL>', license='MIT', classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3'], packages=['reader'],\n include_package_data=True, install_requires=['feedparser', 'html2text',\n 'importlib_resources', 'typing'], entry_points={'console_scripts': [\n 'realpython=reader.__main__:main']})\n", (49, 705), False, 'from pyinstaller_setuptools import setup\n')]
|
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
import user.models as um
class OrganisationSignUpTest(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_organisation_can_sign_up(self):
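# Payload variants: one well-formed body plus one variant per missing or
# invalid field, each expected to be rejected with HTTP 400 and a specific
# detail message.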
good_data = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_without_full_name = {
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_with_invalid_full_name = {
"full_name": "Company",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_without_company_name = {
"full_name": "Company One",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_without_valid_email = {
"full_name": "Company One",
"company_name": "Company group",
"email": "company1user.com",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_without_email = {
"full_name": "Company One",
"company_name": "Company group",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_without_role = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
data_without_password1 = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password2": "<PASSWORD>"
}
data_without_password2 = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>"
}
data_without_matching_password = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
existing_company_name = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
existing_email = {
"full_name": "Company One",
"company_name": "Company groups",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
# Organisation tried registering without full name
response = self.client.post(reverse('rest_register'), data=data_without_full_name, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field full_name is required')
# Organisation tried registering with invalid full name
response = self.client.post(reverse('rest_register'), data=data_with_invalid_full_name, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Please provide your first name and last name')
# Organisation tried registering without role
response = self.client.post(reverse('rest_register'), data=data_without_role, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field role is required')
# Organisation tried registering without company name
response = self.client.post(reverse('rest_register'), data=data_without_company_name, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field company_name is required')
# Organisation tried registering without email
response = self.client.post(reverse('rest_register'), data=data_without_email, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field email is required')
# Organisation tried registering without password1
response = self.client.post(reverse('rest_register'), data=data_without_password1, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field password1 is required')
# Organisation tried registering without password2
response = self.client.post(reverse('rest_register'), data=data_without_password2, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field password2 is required')
# Organisation tried registering without valid email
response = self.client.post(reverse('rest_register'), data=data_without_valid_email, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Enter a valid email address.')
# Organisation tried registering without matching passwords
response = self.client.post(reverse('rest_register'), data=data_without_matching_password, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'The two password fields didn\'t match.')
# Organisation tried registering with good data
response = self.client.post(reverse('rest_register'), data=good_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('detail'), 'Employer successfully onboarded with initial amount of nine '
'hundred and ninety nine trillion (999999999999999). '
'Please login to continue')
# check wallet was created for organisation
self.assertIsNotNone(um.Organisation.objects.get(name="Company group").wallet)
# check admin object is created for email just onboarded
self.assertIsNotNone(um.Admin.objects.get(user__email='<EMAIL>'))
# check super admin object is created for this email
self.assertIsNotNone(um.OrganisationAdmin.objects.get(organisation__name="Company group",
admin__user__email='<EMAIL>',
admin_type='super_admin'))
# Another Organisation tried registering with existing company name
response = self.client.post(reverse('rest_register'), data=existing_company_name, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Company name already exists')
# Another Organisation tried registering with existing email
response = self.client.post(reverse('rest_register'), data=existing_email, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'A user is already registered with this e-mail address.')
class LoginTest(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_organisation_can_log_in(self):
good_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
data_without_email = {
"password": "<PASSWORD>",
}
data_without_password = {
"email": "<EMAIL>",
}
non_admin_email = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
data_with_wrong_auth_details = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
sign_up_data = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
# Organisation onboards
response = self.client.post(reverse('rest_register'), data=sign_up_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('detail'), 'Employer successfully onboarded with initial amount of nine '
'hundred and ninety nine trillion (999999999999999). '
'Please login to continue')
# Organisation tried authenticating without email
response = self.client.post(reverse('rest_login'), data=data_without_email, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field email is required')
# Organisation tried authenticating without password
response = self.client.post(reverse('rest_login'), data=data_without_password, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field password is required')
# Non admin email tries signing up
response = self.client.post(reverse('rest_login'), data=non_admin_email, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'You are not an admin')
# Organisation tried authenticating with wrong auth details
response = self.client.post(reverse('rest_login'), data=data_with_wrong_auth_details, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Unable to log in with provided credentials.')
# Organisation tried authenticating with good data
login_response = self.client.post(reverse('rest_login'), data=good_data, format='json')
self.assertEqual(login_response.status_code, status.HTTP_200_OK)
# Check access token is returned
self.assertIsNotNone(login_response.data.get('access_token'))
# Check refresh token is returned
self.assertIsNotNone(login_response.data.get('refresh_token'))
employee_data = {
"full_name": "<NAME>",
"email": "<EMAIL>"
}
admin_data = {
"full_name": "<NAME>",
"email": "<EMAIL>"
}
login_admin_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
login_employee_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
# organisation add employee
self.client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(login_response.data.get('access_token')))
response = self.client.post(reverse('organisation-add-employee'), data=employee_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# organisation add admin
self.client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(login_response.data.get('access_token')))
response = self.client.post(reverse('organisation-add-admin'), data=admin_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.client.credentials()
# Admin logs in
admin_login_response = self.client.post(reverse('rest_login'), data=login_admin_data, format='json')
self.assertEqual(admin_login_response.status_code, status.HTTP_200_OK)
# Check access token is returned
self.assertIsNotNone(admin_login_response.data.get('access_token'))
# Check refresh token is returned
self.assertIsNotNone(admin_login_response.data.get('refresh_token'))
# employee logs in
employee_login_response = self.client.post(reverse('rest_login2'), data=login_employee_data, format='json')
self.assertEqual(employee_login_response.status_code, status.HTTP_200_OK)
# Check access token is returned
self.assertIsNotNone(employee_login_response.data.get('access_token'))
# Check refresh token is returned
self.assertIsNotNone(employee_login_response.data.get('refresh_token'))
class ChangePasswordTest(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_change_password(self):
sign_up_data = {
"full_name": "Company One",
"company_name": "Company group",
"email": "<EMAIL>",
"role": "CEO",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>"
}
good_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
good_password_data = {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>"
}
password_data_without_old_password = {
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>"
}
password_data_with_incorrect_old_password = {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>"
}
password_data_without_new_password1 = {
"old_password": "<PASSWORD>",
"new_password2": "<PASSWORD>"
}
password_data_without_new_password2 = {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
}
password_data_not_match = {
"old_password": "<PASSWORD>",
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>"
}
# Organisation onboards
response = self.client.post(reverse('rest_register'), data=sign_up_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Organisation tries changing password without authenticating
response = self.client.post(reverse('rest_password_change'), data=good_password_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data.get('detail'), 'Authentication credentials were not provided.')
# Organisation logs in
login_response = self.client.post(reverse('rest_login'), data=good_data, format='json')
self.assertEqual(login_response.status_code, status.HTTP_200_OK)
# organisation tries changing password without old password
self.client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(login_response.data.get('access_token')))
response = self.client.post(reverse('rest_password_change'), data=password_data_without_old_password,
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field old_password is required')
# organisation tries changing password without new password1
response = self.client.post(reverse('rest_password_change'), data=password_data_without_new_password1,
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field new_password1 is required')
# organisation tries changing password with incorrect old password
response = self.client.post(reverse('rest_password_change'), data=password_data_with_incorrect_old_password,
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Your old password was entered incorrectly. Please enter it '
'again.')
# organisation tries changing password without new password2
response = self.client.post(reverse('rest_password_change'), data=password_data_without_new_password2,
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'Field new_password2 is required')
# organisation tries changing password without password that don't match
response = self.client.post(reverse('rest_password_change'), data=password_data_not_match,
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail'), 'The two password fields didn’t match.')
# organisation tries changing password with good password data
response = self.client.post(reverse('rest_password_change'), data=good_password_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('detail'), 'New password has been saved.')
employee_data = {
"full_name": "<NAME>",
"email": "<EMAIL>"
}
admin_data = {
"full_name": "<NAME>",
"email": "<EMAIL>"
}
login_admin_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
login_employee_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
# organisation add employee
response = self.client.post(reverse('organisation-add-employee'), data=employee_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# organisation add admin
response = self.client.post(reverse('organisation-add-admin'), data=admin_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.client.credentials()
# Admin logs in
admin_login_response = self.client.post(reverse('rest_login'), data=login_admin_data, format='json')
self.assertEqual(admin_login_response.status_code, status.HTTP_200_OK)
# admin tries changing password with good password data
self.client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(admin_login_response.data.get('access_token')))
response = self.client.post(reverse('rest_password_change'), data=good_password_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('detail'), 'New password has been saved.')
self.client.credentials()
# employee logs in
employee_login_response = self.client.post(reverse('rest_login2'), data=login_employee_data, format='json')
self.assertEqual(employee_login_response.status_code, status.HTTP_200_OK)
# employee tries changing password with good password data
self.client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(employee_login_response.data.get('access_token')))
response = self.client.post(reverse('rest_password_change'), data=good_password_data,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('detail'), 'New password has been saved.')
self.client.credentials()
|
[
"user.models.Admin.objects.get",
"user.models.OrganisationAdmin.objects.get",
"django.urls.reverse",
"user.models.Organisation.objects.get",
"rest_framework.test.APIClient"
] |
[((259, 270), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (268, 270), False, 'from rest_framework.test import APIClient\n'), ((8201, 8212), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (8210, 8212), False, 'from rest_framework.test import APIClient\n'), ((13433, 13444), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (13442, 13444), False, 'from rest_framework.test import APIClient\n'), ((3387, 3411), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (3394, 3411), False, 'from django.urls import reverse\n'), ((3719, 3743), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (3726, 3743), False, 'from django.urls import reverse\n'), ((4063, 4087), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (4070, 4087), False, 'from django.urls import reverse\n'), ((4383, 4407), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (4390, 4407), False, 'from django.urls import reverse\n'), ((4712, 4736), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (4719, 4736), False, 'from django.urls import reverse\n'), ((5031, 5055), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (5038, 5055), False, 'from django.urls import reverse\n'), ((5358, 5382), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (5365, 5382), False, 'from django.urls import reverse\n'), ((5687, 5711), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (5694, 5711), False, 'from django.urls import reverse\n'), ((6026, 6050), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (6033, 6050), False, 'from django.urls import reverse\n'), ((6369, 6393), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (6376, 6393), False, 'from django.urls import reverse\n'), ((7036, 7079), 'user.models.Admin.objects.get', 'um.Admin.objects.get', ([], {'user__email': '"""<EMAIL>"""'}), "(user__email='<EMAIL>')\n", (7056, 7079), True, 'import user.models as um\n'), ((7172, 7300), 'user.models.OrganisationAdmin.objects.get', 'um.OrganisationAdmin.objects.get', ([], {'organisation__name': '"""Company group"""', 'admin__user__email': '"""<EMAIL>"""', 'admin_type': '"""super_admin"""'}), "(organisation__name='Company group',\n admin__user__email='<EMAIL>', admin_type='super_admin')\n", (7204, 7300), True, 'import user.models as um\n'), ((7535, 7559), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (7542, 7559), False, 'from django.urls import reverse\n'), ((7871, 7895), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (7878, 7895), False, 'from django.urls import reverse\n'), ((9074, 9098), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (9081, 9098), False, 'from django.urls import reverse\n'), ((9604, 9625), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (9611, 9625), False, 'from django.urls import reverse\n'), ((9922, 9943), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (9929, 9943), False, 'from django.urls import reverse\n'), ((10228, 10249), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (10235, 10249), False, 
'from django.urls import reverse\n'), ((10547, 10568), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (10554, 10568), False, 'from django.urls import reverse\n'), ((10899, 10920), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (10906, 10920), False, 'from django.urls import reverse\n'), ((11861, 11897), 'django.urls.reverse', 'reverse', (['"""organisation-add-employee"""'], {}), "('organisation-add-employee')\n", (11868, 11897), False, 'from django.urls import reverse\n'), ((12219, 12252), 'django.urls.reverse', 'reverse', (['"""organisation-add-admin"""'], {}), "('organisation-add-admin')\n", (12226, 12252), False, 'from django.urls import reverse\n'), ((12496, 12517), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (12503, 12517), False, 'from django.urls import reverse\n'), ((12953, 12975), 'django.urls.reverse', 'reverse', (['"""rest_login2"""'], {}), "('rest_login2')\n", (12960, 12975), False, 'from django.urls import reverse\n'), ((14875, 14899), 'django.urls.reverse', 'reverse', (['"""rest_register"""'], {}), "('rest_register')\n", (14882, 14899), False, 'from django.urls import reverse\n'), ((15109, 15140), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (15116, 15140), False, 'from django.urls import reverse\n'), ((15436, 15457), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (15443, 15457), False, 'from django.urls import reverse\n'), ((15780, 15811), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (15787, 15811), False, 'from django.urls import reverse\n'), ((16175, 16206), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (16182, 16206), False, 'from django.urls import reverse\n'), ((16578, 16609), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (16585, 16609), False, 'from django.urls import reverse\n'), ((17072, 17103), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (17079, 17103), False, 'from django.urls import reverse\n'), ((17481, 17512), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (17488, 17512), False, 'from django.urls import reverse\n'), ((17874, 17905), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (17881, 17905), False, 'from django.urls import reverse\n'), ((18633, 18669), 'django.urls.reverse', 'reverse', (['"""organisation-add-employee"""'], {}), "('organisation-add-employee')\n", (18640, 18669), False, 'from django.urls import reverse\n'), ((18879, 18912), 'django.urls.reverse', 'reverse', (['"""organisation-add-admin"""'], {}), "('organisation-add-admin')\n", (18886, 18912), False, 'from django.urls import reverse\n'), ((19156, 19177), 'django.urls.reverse', 'reverse', (['"""rest_login"""'], {}), "('rest_login')\n", (19163, 19177), False, 'from django.urls import reverse\n'), ((19515, 19546), 'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (19522, 19546), False, 'from django.urls import reverse\n'), ((19890, 19912), 'django.urls.reverse', 'reverse', (['"""rest_login2"""'], {}), "('rest_login2')\n", (19897, 19912), False, 'from django.urls import reverse\n'), ((20262, 20293), 
'django.urls.reverse', 'reverse', (['"""rest_password_change"""'], {}), "('rest_password_change')\n", (20269, 20293), False, 'from django.urls import reverse\n'), ((6883, 6932), 'user.models.Organisation.objects.get', 'um.Organisation.objects.get', ([], {'name': '"""Company group"""'}), "(name='Company group')\n", (6910, 6932), True, 'import user.models as um\n')]
|
from typing import Optional
from flask import Flask, current_app, json, redirect
class App:
def __init__(self, test_config: Optional[dict] = None):
self._app = Flask(__name__, static_folder="staticfiles")
self._configure_app(test_config)
self._setup_database()
self._setup_cors()
self._setup_jwt()
self._setup_error_handling()
self._setup_routes()
@property
def test_client(self):
return self._app.test_client
def _configure_app(self, test_config: Optional[dict] = None):
# General config
self._app.config.from_mapping(
PROPAGATE_EXCEPTIONS=True,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
if not test_config:
from .config import config
self._app.secret_key = config.get_secret()
self._app.config["API_RESPONSE_MESSAGE_KEY"] = config.get_api_response_message_key()
self._app.config["SQLALCHEMY_DATABASE_URI"] = config.get_db_address()
self._app.config["JWT_SECRET_KEY"] = self._app.secret_key
self._app.config["JWT_ACCESS_TOKEN_EXPIRES"] = config.get_jwt_token_expiration_time()
self._app.config["JWT_ERROR_MESSAGE_KEY"] = self._app.config["API_RESPONSE_MESSAGE_KEY"]
self._config = config
else:
self._app.config.update(test_config)
def _setup_database(self):
from .db import db
db.init_app(self._app)
@self._app.before_first_request
def create_tables():
db.create_all()
def _setup_cors(self):
from flask_cors import CORS
CORS(self._app)
def _setup_jwt(self):
from flask_jwt_extended import JWTManager
from .security import user_identity_lookup, user_lookup_callback
jwt = JWTManager(self._app)
jwt.user_identity_loader(user_identity_lookup)
jwt.user_lookup_loader(user_lookup_callback)
def _setup_error_handling(self):
from werkzeug.exceptions import HTTPException
@self._app.errorhandler(HTTPException)
def handle_exception(e):
"""Return JSON instead of HTML for HTTP errors."""
response = e.get_response()
response.data = json.dumps(
{current_app.config["API_RESPONSE_MESSAGE_KEY"]: e.description}
)
response.content_type = "application/json"
return response
def _setup_routes(self):
from .routers import swagger, user
self._app.register_blueprint(user.router, url_prefix="/users")
self._app.register_blueprint(swagger.router)
@self._app.route("/")
def hello_world():
return redirect("/docs")
def run(self, host: Optional[str] = None, port: Optional[int] = None):
if host and port:
self._app.run(host, port)
else:
self._app.run(
host=self._config.get_host(),
port=self._config.get_port(),
)
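# Usage sketch (assumes the package's relative imports (.config, .db,
# .security, .routers) resolve, i.e. App is instantiated from within the package):
# app = App()
# app.run()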
|
[
"flask_jwt_extended.JWTManager",
"flask.redirect",
"flask_cors.CORS",
"flask.Flask",
"flask.json.dumps"
] |
[((175, 219), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""staticfiles"""'}), "(__name__, static_folder='staticfiles')\n", (180, 219), False, 'from flask import Flask, current_app, json, redirect\n'), ((1652, 1667), 'flask_cors.CORS', 'CORS', (['self._app'], {}), '(self._app)\n', (1656, 1667), False, 'from flask_cors import CORS\n'), ((1834, 1855), 'flask_jwt_extended.JWTManager', 'JWTManager', (['self._app'], {}), '(self._app)\n', (1844, 1855), False, 'from flask_jwt_extended import JWTManager\n'), ((2268, 2343), 'flask.json.dumps', 'json.dumps', (["{current_app.config['API_RESPONSE_MESSAGE_KEY']: e.description}"], {}), "({current_app.config['API_RESPONSE_MESSAGE_KEY']: e.description})\n", (2278, 2343), False, 'from flask import Flask, current_app, json, redirect\n'), ((2732, 2749), 'flask.redirect', 'redirect', (['"""/docs"""'], {}), "('/docs')\n", (2740, 2749), False, 'from flask import Flask, current_app, json, redirect\n')]
|
# -*- coding: utf-8 -*-
"""
security utils services module.
"""
from pyrin.application.services import get_component
from pyrin.security.utils import SecurityUtilsPackage
def generate_rsa_key(length=None, **options):
"""
generates a pair of public/private rsa keys.
:param int length: key length in bits.
if not provided, `rsa_default_key_length`
config will be used.
:returns: tuple[str public_key, str private_key]
:rtype: tuple[str, str]
"""
return get_component(SecurityUtilsPackage.COMPONENT_NAME).generate_rsa_key(length,
**options)
def load_rsa_key(public_pem, private_pem, **options):
"""
loads public/private rsa key objects from provided value.
:param str public_pem: public key content to load from.
:param str private_pem: private key content to load from.
:returns: tuple[object public_key, object private_key]
:rtype: tuple[object, object]
"""
return get_component(SecurityUtilsPackage.COMPONENT_NAME).load_rsa_key(public_pem,
private_pem,
**options)
def get_bytes(length=None, **options):
"""
gets secure random bytes with the given length.
the result value should not be decoded to a string, because it is
not a safe string and decoding may raise an error.
if you want a string representation, use the `get_hex` or `get_url_safe` methods.
:param int length: length of the random bytes to get.
if not provided, `default_secure_random_size`
config will be used.
:rtype: bytes
"""
return get_component(SecurityUtilsPackage.COMPONENT_NAME).get_bytes(length, **options)
def get_hex(length=None, **options):
"""
gets a secure random hex string with given length.
:param int length: length of the random string to get, in bytes.
if not provided, `default_secure_random_size`
config will be used.
:rtype: str
"""
return get_component(SecurityUtilsPackage.COMPONENT_NAME).get_hex(length, **options)
def get_url_safe(length=None, **options):
"""
gets a secure random url-safe string with given length.
:param int length: length of the random string to get, in bytes.
if not provided, `default_secure_random_size`
config will be used.
:rtype: str
"""
return get_component(SecurityUtilsPackage.COMPONENT_NAME).get_url_safe(length, **options)
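# Usage sketch (assumes a running pyrin application, so the security utils
# component is registered with the application services):
# public_pem, private_pem = generate_rsa_key(length=2048)
# nonce = get_url_safe(length=32)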
|
[
"pyrin.application.services.get_component"
] |
[((532, 582), 'pyrin.application.services.get_component', 'get_component', (['SecurityUtilsPackage.COMPONENT_NAME'], {}), '(SecurityUtilsPackage.COMPONENT_NAME)\n', (545, 582), False, 'from pyrin.application.services import get_component\n'), ((1061, 1111), 'pyrin.application.services.get_component', 'get_component', (['SecurityUtilsPackage.COMPONENT_NAME'], {}), '(SecurityUtilsPackage.COMPONENT_NAME)\n', (1074, 1111), False, 'from pyrin.application.services import get_component\n'), ((1814, 1864), 'pyrin.application.services.get_component', 'get_component', (['SecurityUtilsPackage.COMPONENT_NAME'], {}), '(SecurityUtilsPackage.COMPONENT_NAME)\n', (1827, 1864), False, 'from pyrin.application.services import get_component\n'), ((2214, 2264), 'pyrin.application.services.get_component', 'get_component', (['SecurityUtilsPackage.COMPONENT_NAME'], {}), '(SecurityUtilsPackage.COMPONENT_NAME)\n', (2227, 2264), False, 'from pyrin.application.services import get_component\n'), ((2622, 2672), 'pyrin.application.services.get_component', 'get_component', (['SecurityUtilsPackage.COMPONENT_NAME'], {}), '(SecurityUtilsPackage.COMPONENT_NAME)\n', (2635, 2672), False, 'from pyrin.application.services import get_component\n')]
|
################################################################################
# <NAME>
# https://github.com/aaronpenne
################################################################################
import datetime
import string
import sys
from random import shuffle, seed
import helper
################################################################################
# Global variables
################################################################################
random_seed = int(random(0, 10000))
# random_seed = 7596
random_seed = helper.get_seed(random_seed)
helper.set_seed(random_seed)
# Get time
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
# Parameters for draw speed
frame_rate = 2
################################################################################
# Knobs to turn
################################################################################
c_background = [13, 78, 76]
p_head_width = None
p_head_height = None
p_eye_distance = None
################################################################################
# setup()
# function gets run once at start of program
################################################################################
def setup():
# Sets size of canvas in pixels (must be first line)
size(1000, 1000)
# Sets resolution dynamically (affects resolution of saved image)
pixelDensity(displayDensity()) # 1 for low, 2 for high
# Sets color space to Hue Saturation Brightness with max values of HSB respectively
colorMode(HSB, 360, 100, 100, 100)
# Set the number of frames per second to display
frameRate(frame_rate)
background(*c_background)
rectMode(CORNER)
imageMode(CENTER)
# Stops draw() from running in an infinite loop (should be last line)
noLoop() # Comment to run draw() infinitely (or until 'count' hits limit)
################################################################################
# draw()
# function gets run repeatedly (unless noLoop() called in setup())
################################################################################
def draw():
step = 20
# if frameCount == step*2:
# exit()
counter = frameCount
translate(width/2, height/2)
#draw_background(0, 0)
draw_body(0, 100, width*1.2, height*0.8)
r = 200
draw_head(0, 200, 300, 230)
draw_eyes(0, 200)
draw_fur()
draw_hairs()
draw_horns(-300, 100, -350, -100, 10)
draw_horns(300, 100, 350, -100, 10)
draw_horns(-100, 50, -130, -200, 10)
draw_horns(100, 50, 130, -200, 10)
helper.save_frame_timestamp('creatures', timestamp, random_seed)
# Save memory by closing image, just look at it in the file system
# if (w > 1000) or (h > 1000):
# exit()
################################################################################
# Functions
################################################################################
def cvp(x, y):
curveVertex(x, y)
#ellipse(x, y, 5, 5)
def mousePressed():
helper.save_frame_timestamp('creatures', timestamp, random_seed)
def draw_head(x, y, w, h):
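# Blob-like head: a closed curveVertex loop whose corner points are pushed
# outward by random offsets, so every run yields a slightly different silhouette.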
fill(0, 0, 30)
#rect(x, y, w, h)
top_offset = random(0, 100)
bottom_offset = random(0, 100)
side_offset = random(0, 100)
beginShape()
cvp(x-w, y-h)
cvp(x, y-h-top_offset)
cvp(x+w, y-h)
cvp(x+w+side_offset, y)
cvp(x+w, y+h)
cvp(x, y+h+bottom_offset)
cvp(x-w, y+h)
cvp(x-w-side_offset, y)
cvp(x-w, y-h)
cvp(x, y-h-top_offset)
cvp(x+w, y-h)
endShape()
def draw_eyes(x, y):
noStroke()
fill(0, 0, 80)
x_offset = random(0, 100)+20
y_offset = random(0, 100)+20
w = random(50, 100)
h = random(50, 100)
ellipse(x-x_offset, y-y_offset, w, h)
ellipse(x+x_offset, y-y_offset, w, h)
fill(0, 0, 0)
x_offset = x_offset + random(-10, 10)
y_offset = y_offset + random(-10, 10)
w = w*0.2
h = h*0.2
ellipse(x-x_offset, y-y_offset, w, h)
ellipse(x+x_offset, y-y_offset, w, h)
def draw_fur():
pass
def draw_hairs():
pass
def draw_body(x, y, w, h):
body = createGraphics(width, height)
body.colorMode(HSB, 360, 100, 100, 100)
body.beginDraw()
body.noStroke()
body.translate(width/2, height/2)
body.fill(color(0, 0, 20))
body.ellipse(x, y, w, h)
body.endDraw()
body.filter(BLUR, 5)
image(body, x, y)
def draw_horns(x1, y1, x2, y2, n):
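# Each horn is suggested by n jittered curveVertex strands running from
# (x1, y1) to (x2, y2), with small random offsets along each strand.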
stroke(0, 0, 0)
noFill()
mid = (x2-x1)/2
for i in range(n):
x1_ = x1 + random(-3, 3)
x2_ = x2 + random(-3, 3)
y1_ = y1 + random(-3, 3)
y2_ = y2 + random(-3, 3)
beginShape()
offset = random(-5, 5)
cvp(x1_, y1_+offset)
cvp(x1_, y1_+offset)
offset = random(-5, 5)
cvp(x1_+mid-mid/2, y1_+offset)
offset = random(-5, 5)
cvp(x1_+mid, y1_+offset)
offset = random(-5, 5)
cvp(x1_+mid+mid/2, y1_+offset)
offset = random(-5, 5)
cvp(x2_, y2_+offset)
cvp(x2_, y2_+offset)
endShape()
def draw_background(x=0, y=0):
noStroke()
radius = width
h = c_background[0]
for r in reversed(range(0, 2*width, 5)):
fill(h, 78, 76)
ellipse(x, y, r, r)
h = h + 1*sin(PI+0.1*r)
|
[
"helper.get_seed",
"helper.set_seed",
"datetime.datetime.now",
"helper.save_frame_timestamp"
] |
[((547, 575), 'helper.get_seed', 'helper.get_seed', (['random_seed'], {}), '(random_seed)\n', (562, 575), False, 'import helper\n'), ((576, 604), 'helper.set_seed', 'helper.set_seed', (['random_seed'], {}), '(random_seed)\n', (591, 604), False, 'import helper\n'), ((2651, 2715), 'helper.save_frame_timestamp', 'helper.save_frame_timestamp', (['"""creatures"""', 'timestamp', 'random_seed'], {}), "('creatures', timestamp, random_seed)\n", (2678, 2715), False, 'import helper\n'), ((3104, 3168), 'helper.save_frame_timestamp', 'helper.save_frame_timestamp', (['"""creatures"""', 'timestamp', 'random_seed'], {}), "('creatures', timestamp, random_seed)\n", (3131, 3168), False, 'import helper\n'), ((629, 652), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (650, 652), False, 'import datetime\n')]
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import math
import numpy as np
from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10
from openvino.tools.mo.ops.upsample import UpsampleOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version
from openvino.tools.mo.utils.error import Error
class UpsampleFrontExtractor(FrontExtractorOp):
op = 'Upsample'
enabled = True
@classmethod
def extract(cls, node):
onnx_opset_version = get_onnx_opset_version(node)
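# From ONNX opset 9 onward, Upsample takes its scales as a second input
# instead of attributes, so the node maps to ONNXResize10; older opsets
# keep the attribute form and map to UpsampleOp.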
if onnx_opset_version is not None and onnx_opset_version >= 9:
mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
ONNXResize10.update_node_stat(node, {'mode': mode})
else:
mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32))
width_scale = onnx_attr(node, 'width_scale', 'f')
height_scale = onnx_attr(node, 'height_scale', 'f')
supported_modes = ['nearest', 'linear']
if mode not in supported_modes:
raise Error(
'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.',
node.name,
mode,
supported_modes
)
if scales is not None:
if scales.shape != (4,):
raise Error(
'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.',
node.name
)
if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5:
raise Error(
'Upsampling of batch and feature dimensions is not supported for node {}.',
node.name
)
height_scale = scales[2]
width_scale = scales[3]
if (width_scale is None or height_scale is None) and len(node.in_nodes()) != 2:
raise Error(
'One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}.',
width_scale,
height_scale,
node.name
)
UpsampleOp.update_node_stat(node, {'mode': mode, 'height_scale': height_scale,
'width_scale': width_scale})
return cls.enabled
|
[
"openvino.tools.mo.front.onnx.extractors.utils.onnx_attr",
"openvino.tools.mo.front.onnx.extractors.utils.get_onnx_opset_version",
"math.fabs",
"openvino.tools.mo.ops.upsample.UpsampleOp.update_node_stat",
"openvino.tools.mo.utils.error.Error",
"numpy.array",
"openvino.tools.mo.ops.ONNXResize10.ONNXResize10.update_node_stat"
] |
[((597, 625), 'openvino.tools.mo.front.onnx.extractors.utils.get_onnx_opset_version', 'get_onnx_opset_version', (['node'], {}), '(node)\n', (619, 625), False, 'from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version\n'), ((807, 858), 'openvino.tools.mo.ops.ONNXResize10.ONNXResize10.update_node_stat', 'ONNXResize10.update_node_stat', (['node', "{'mode': mode}"], {}), "(node, {'mode': mode})\n", (836, 858), False, 'from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10\n'), ((1104, 1139), 'openvino.tools.mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""width_scale"""', '"""f"""'], {}), "(node, 'width_scale', 'f')\n", (1113, 1139), False, 'from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version\n'), ((1167, 1203), 'openvino.tools.mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""height_scale"""', '"""f"""'], {}), "(node, 'height_scale', 'f')\n", (1176, 1203), False, 'from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version\n'), ((2539, 2650), 'openvino.tools.mo.ops.upsample.UpsampleOp.update_node_stat', 'UpsampleOp.update_node_stat', (['node', "{'mode': mode, 'height_scale': height_scale, 'width_scale': width_scale}"], {}), "(node, {'mode': mode, 'height_scale':\n height_scale, 'width_scale': width_scale})\n", (2566, 2650), False, 'from openvino.tools.mo.ops.upsample import UpsampleOp\n'), ((1323, 1460), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}."""', 'node.name', 'mode', 'supported_modes'], {}), "(\n 'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.'\n , node.name, mode, supported_modes)\n", (1328, 1460), False, 'from openvino.tools.mo.utils.error import Error\n'), ((2292, 2437), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}."""', 'width_scale', 'height_scale', 'node.name'], {}), "(\n 'One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}.'\n , width_scale, height_scale, node.name)\n", (2297, 2437), False, 'from openvino.tools.mo.utils.error import Error\n'), ((1652, 1759), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""Upsample scales attribute is wrong for node {}. Only 4D scales are supported."""', 'node.name'], {}), "(\n 'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.'\n , node.name)\n", (1657, 1759), False, 'from openvino.tools.mo.utils.error import Error\n'), ((1933, 2034), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""Upsampling of batch and feature dimensions is not supported for node {}."""', 'node.name'], {}), "(\n 'Upsampling of batch and feature dimensions is not supported for node {}.',\n node.name)\n", (1938, 2034), False, 'from openvino.tools.mo.utils.error import Error\n'), ((1047, 1076), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1055, 1076), True, 'import numpy as np\n'), ((1839, 1863), 'math.fabs', 'math.fabs', (['(scales[0] - 1)'], {}), '(scales[0] - 1)\n', (1848, 1863), False, 'import math\n'), ((1874, 1898), 'math.fabs', 'math.fabs', (['(scales[1] - 1)'], {}), '(scales[1] - 1)\n', (1883, 1898), False, 'import math\n')]
|
# Debit card data compilation
import pandas as pd
cols_list = ['UNI_PT_KEY', 'CIF', 'CARD_CLASS_CODE', 'CARD_NUM', 'PRODUCT',
'PRIMARY_ACCOUNT', 'CARD_SEGMENT', 'CARD_BIN', 'CARD_RANGE', 'EMBLEM_ID',
'ACCOUNT_OPEN_DATE', 'CARD_ISSUE_DATE', 'CARD_EXPIRY_DATE', 'CARD_ACTIVATION_DATE',
'FIRST_TRN_DATE', 'CARD_ACT_FLAG','IS_CARD_WITH_TOKEN']
debit = pd.read_csv("debitcards.csv", usecols=cols_list, dtype=str, sep=";", error_bad_lines=False, low_memory=False)
a = debit["CARD_NUM"].nunique()
b = debit["UNI_PT_KEY"].nunique()
c = debit["CIF"].nunique()
print("# of UNI_PT_KEY = " +str(b))
print("# of CARD_NUM = " + str(a))
print("# of CIF = " + str(c))
#other products
other_products = pd.read_csv("other_metrics.csv", sep=";", dtype=str)
other_products["OTHER_PRODUCTS"] = 1
dc_other_products = debit.merge(other_products, how="left", on="UNI_PT_KEY")
dc_other_products["OTHER_PRODUCTS"] = dc_other_products["OTHER_PRODUCTS"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_other_products["OTHER_PRODUCTS"].astype(int).sum()))
#mobile banking
mobile_banking = pd.read_csv("mobile_banking.csv", sep=";", dtype=str)
mobile_banking["MOBILE_BANKING"] = 1
mobile_banking = pd.DataFrame(mobile_banking)
dc_mobile_banking = dc_other_products.merge(mobile_banking, how="left", on="UNI_PT_KEY")
dc_mobile_banking["MOBILE_BANKING"] = dc_mobile_banking["MOBILE_BANKING"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_mobile_banking["MOBILE_BANKING"].astype(int).sum()))
#internet banking
internet_banking = pd.read_csv("internet_banking.csv", sep=";", dtype=str)
internet_banking["INTERNET_BANKING"] = 1
dc_internet_banking = dc_mobile_banking.merge(internet_banking, how="left", on="UNI_PT_KEY")
dc_internet_banking["INTERNET_BANKING"] = dc_internet_banking["INTERNET_BANKING"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_internet_banking["INTERNET_BANKING"].astype(int).sum()))
#branch delivery
branch_delivery = pd.read_csv("branch_delivery.csv", sep=";", dtype=str)
branch_delivery["BRANCH_DELIVERY"] = 1
dc_branch_delivery = dc_internet_banking.merge(branch_delivery, how="left", on="CARD_NUM")
dc_branch_delivery["BRANCH_DELIVERY"] = dc_branch_delivery["BRANCH_DELIVERY"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_branch_delivery["BRANCH_DELIVERY"].astype(int).sum()))
#staff
staff = pd.read_csv("staff_flag.csv", sep=";", dtype=str)
staff["STAFF_FLAG"] = 1
dc_staff_flag = dc_branch_delivery.merge(staff, how="left", on="UNI_PT_KEY")
dc_staff_flag["STAFF_FLAG"] = dc_staff_flag["STAFF_FLAG"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_staff_flag["STAFF_FLAG"].astype(int).sum()))
#email phone
email_phone = pd.read_csv("contact_email_phone.csv", sep=";", dtype=str, error_bad_lines=False, low_memory=False)
dc_email_phone = dc_staff_flag.merge(email_phone, how="left", on ="UNI_PT_KEY")
#contact address
contact_address = pd.read_csv("customer_address.csv", sep=";", dtype=str)
dc_contact_address = dc_email_phone.merge(contact_address, how="left", on="CARD_NUM")
# owner vs holder
owner_vs_holder = pd.read_csv("card_ownervsholder_dc.csv", sep=";").applymap(str)
dc_owner_flag = dc_contact_address.merge(owner_vs_holder, how="left", on="CARD_NUM")
dc_owner_flag["OWNER_FLAG"] = dc_owner_flag["OWNER_FLAG"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_owner_flag["OWNER_FLAG"].astype(int).sum()))
# current balance (run the SQL script again and compare)
current_balance = pd.read_csv("debit_current_balance.csv", sep=";", low_memory=False, error_bad_lines=False)
current_balance["SRC_ID"] = current_balance["SRC_ID"].astype(int).astype(str)
current_balance["CA_BAL"] = current_balance["CA_BAL"].apply(lambda x: x.replace(",", ".") if isinstance(x, str) else x).astype(str)
current_balance.drop_duplicates(subset="SRC_ID", keep="first", inplace=True)
dc_current_balance = dc_owner_flag.merge(current_balance, how="left", left_on="PRIMARY_ACCOUNT", right_on="SRC_ID")
dc_current_balance.drop("SRC_ID", axis=1, inplace=True)
del(current_balance, dc_owner_flag, owner_vs_holder, contact_address, email_phone, staff, branch_delivery, internet_banking,
mobile_banking, other_products, dc_contact_address, dc_email_phone, dc_staff_flag, dc_branch_delivery, dc_internet_banking, dc_mobile_banking, dc_other_products,debit)
# insurance
cols_list = ["CARD_NUM", "INSURANCE_FLAG"]
insurance_flag = pd.read_csv("16_dc_insurance.csv", sep=";", usecols=cols_list).applymap(str)
insurance_flag["INSURANCE_FLAG"] = 1
dc_insurance_flag = dc_current_balance.merge(insurance_flag, how="left", on="CARD_NUM")
dc_insurance_flag["INSURANCE_FLAG"] = dc_insurance_flag["INSURANCE_FLAG"].fillna(0).astype(int).astype(str)
print("matched records = " + str(dc_insurance_flag["INSURANCE_FLAG"].astype(int).sum()))
#transactions
transactions = pd.read_csv("transactions_grouped_all_TRNTP.csv", sep=";", dtype=str, error_bad_lines=False)
def clean_trn_amt(x):
    # Normalise decimal commas to dots before the cast to float below
    if isinstance(x, str):
        return x.replace(",", ".")
    return x
transactions["TRN_AMT"] = transactions["TRN_AMT"].apply(clean_trn_amt).astype(float).round(2)
transactions["TRN_COUNT"] = transactions["TRN_COUNT"].astype(int)
total_transactions = transactions.groupby(["CARD_NUM"])[["TRN_COUNT", "TRN_AMT"]].sum().reset_index()
total_transactions.rename(columns = {"TRN_COUNT": 'TOTAL_TRN_COUNT', "TRN_AMT":'TOTAL_TRN_AMT'}, inplace=True)
pos_trn = transactions[transactions["TRN_TP"] == "01_pos_trn"].copy()
pos_trn.rename(columns={"TRN_COUNT": 'POS_TRN_COUNT', "TRN_AMT": 'POS_TRN_AMT'}, inplace=True)
pos_trn.drop("TRN_TP", axis=1, inplace=True)
atm_trn = transactions[transactions["TRN_TP"] == "02_atm_trn"].copy()
atm_trn.rename(columns={"TRN_COUNT": 'ATM_TRN_COUNT', "TRN_AMT": 'ATM_TRN_AMT'}, inplace=True)
atm_trn.drop("TRN_TP", axis=1, inplace=True)
net_trn = transactions[transactions["TRN_TP"] == "03_net_trn"].copy()
net_trn.rename(columns={"TRN_COUNT": 'NET_TRN_COUNT', "TRN_AMT": 'NET_TRN_AMT'}, inplace=True)
net_trn.drop("TRN_TP", axis=1, inplace=True)
order_trn = transactions[transactions["TRN_TP"] == "04_order_trn"].copy()
order_trn.rename(columns={"TRN_COUNT": 'ORDER_TRN_COUNT', "TRN_AMT": 'ORDER_TRN_AMT'}, inplace=True)
order_trn.drop("TRN_TP", axis=1, inplace=True)
dc_total_trn = dc_insurance_flag.merge(total_transactions, how="left", on="CARD_NUM")
dc_total_trn["TOTAL_TRN_COUNT"] = dc_total_trn["TOTAL_TRN_COUNT"].fillna(0).astype(int)
dc_total_trn["TOTAL_TRN_AMT"] = dc_total_trn["TOTAL_TRN_AMT"].fillna(0).astype(int).astype(str)
dc_pos_trn = dc_total_trn.merge(pos_trn, how="left", on="CARD_NUM")
dc_pos_trn["POS_TRN_COUNT"] = dc_pos_trn["POS_TRN_COUNT"].fillna(0).astype(int)
dc_pos_trn["POS_TRN_AMT"] = dc_pos_trn["POS_TRN_AMT"].fillna(0).astype(int).astype(str)
dc_atm_trn = dc_pos_trn.merge(atm_trn, how="left", on="CARD_NUM")
dc_atm_trn["ATM_TRN_COUNT"] = dc_atm_trn["ATM_TRN_COUNT"].fillna(0).astype(int)
dc_atm_trn["ATM_TRN_AMT"] = dc_atm_trn["ATM_TRN_AMT"].fillna(0).astype(int).astype(str)
dc_net_trn = dc_atm_trn.merge(net_trn, how="left", on="CARD_NUM")
dc_net_trn["NET_TRN_COUNT"] = dc_net_trn["NET_TRN_COUNT"].fillna(0).astype(int)
dc_net_trn["NET_TRN_AMT"] = dc_net_trn["NET_TRN_AMT"].fillna(0).astype(int).astype(str)
dc_order_trn = dc_net_trn.merge(order_trn, how="left", on="CARD_NUM")
dc_order_trn["ORDER_TRN_COUNT"] = dc_order_trn["ORDER_TRN_COUNT"].fillna(0).astype(int)
dc_order_trn["ORDER_TRN_AMT"] = dc_order_trn["ORDER_TRN_AMT"].fillna(0).astype(int).astype(str)
del( pos_trn, net_trn, order_trn, atm_trn, dc_insurance_flag, insurance_flag, dc_current_balance)
# continue from dc_order_trn
#avg balance dc
cols_list = ["SRC_ID", "AVG_DR_BAL"]
avg_balance = pd.read_csv("avgmonthlybalance_dc.csv", sep=";", usecols=cols_list, dtype=str, low_memory=False, error_bad_lines=False)
avg_balance["AVG_DR_BAL"] = avg_balance["AVG_DR_BAL"].apply(lambda x: x.replace(",", ".") if isinstance(x, str) else x).astype(float).astype(int).astype(str)
avg_balance.drop_duplicates(subset="SRC_ID", keep="first", inplace=True)
dc_avg_bal = dc_order_trn.merge(avg_balance, how="left", left_on="PRIMARY_ACCOUNT", right_on="SRC_ID")
print("unmatched avg-balance rows = " + str(sum(pd.isnull(dc_avg_bal["SRC_ID"]))))
DC_DATA = dc_avg_bal
#reorganized
DC_DATA = DC_DATA[
['UNI_PT_KEY', 'CIF', 'CARD_CLASS_CODE', 'CARD_NUM', 'PRIMARY_ACCOUNT', 'PRODUCT', 'CARD_SEGMENT', 'CARD_BIN', 'CARD_RANGE', 'EMBLEM_ID', 'SRC_ID', 'CURR_ACC_ID',
'ACCOUNT_OPEN_DATE', 'CARD_ISSUE_DATE', 'CARD_EXPIRY_DATE', 'CARD_ACTIVATION_DATE', 'FIRST_TRN_DATE',
'CARD_ACT_FLAG','IS_CARD_WITH_TOKEN', 'OTHER_PRODUCTS', 'MOBILE_BANKING','INTERNET_BANKING', 'BRANCH_DELIVERY', 'STAFF_FLAG','OWNER_FLAG', 'INSURANCE_FLAG',
'MOBILE_PHONE_NUM','CONT_EMAIL_1', 'CONT_EMAIL_2', 'CONT_EMAIL_3', 'CONT_EMAIL_ADDR', 'STREET_NAME', 'STREET_NUM', 'CITY_NAME', 'ZIP',
'TOTAL_TRN_COUNT', 'TOTAL_TRN_AMT', 'POS_TRN_COUNT', 'POS_TRN_AMT', 'ATM_TRN_COUNT', 'ATM_TRN_AMT', 'NET_TRN_COUNT', 'NET_TRN_AMT','ORDER_TRN_COUNT', 'ORDER_TRN_AMT',
'CA_BAL', 'AVG_DR_BAL']
]
DC_DATA.to_csv("DC_DATA.csv", index=False, sep=";")
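
# Illustrative sketch (toy data, not part of the pipeline above) of the
# left-merge-and-flag pattern used throughout this script: merge a keyed
# "presence" table onto the base frame, then turn the unmatched NaNs into a
# 0/1 string flag. The frame and column names here are made up.
_base = pd.DataFrame({"UNI_PT_KEY": ["A", "B", "C"]})
_flags = pd.DataFrame({"UNI_PT_KEY": ["B"], "SOME_FLAG": [1]})
_merged = _base.merge(_flags, how="left", on="UNI_PT_KEY")
_merged["SOME_FLAG"] = _merged["SOME_FLAG"].fillna(0).astype(int).astype(str)
print(_merged)  # only key "B" ends up with SOME_FLAG == "1"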
|
[
"pandas.read_csv",
"pandas.isnull",
"pandas.DataFrame"
] |
[((385, 498), 'pandas.read_csv', 'pd.read_csv', (['"""debitcards.csv"""'], {'usecols': 'cols_list', 'dtype': 'str', 'sep': '""";"""', 'error_bad_lines': '(False)', 'low_memory': '(False)'}), "('debitcards.csv', usecols=cols_list, dtype=str, sep=';',\n error_bad_lines=False, low_memory=False)\n", (396, 498), True, 'import pandas as pd\n'), ((724, 776), 'pandas.read_csv', 'pd.read_csv', (['"""other_metrics.csv"""'], {'sep': '""";"""', 'dtype': 'str'}), "('other_metrics.csv', sep=';', dtype=str)\n", (735, 776), True, 'import pandas as pd\n'), ((1123, 1176), 'pandas.read_csv', 'pd.read_csv', (['"""mobile_banking.csv"""'], {'sep': '""";"""', 'dtype': 'str'}), "('mobile_banking.csv', sep=';', dtype=str)\n", (1134, 1176), True, 'import pandas as pd\n'), ((1231, 1259), 'pandas.DataFrame', 'pd.DataFrame', (['mobile_banking'], {}), '(mobile_banking)\n', (1243, 1259), True, 'import pandas as pd\n'), ((1589, 1644), 'pandas.read_csv', 'pd.read_csv', (['"""internet_banking.csv"""'], {'sep': '""";"""', 'dtype': 'str'}), "('internet_banking.csv', sep=';', dtype=str)\n", (1600, 1644), True, 'import pandas as pd\n'), ((2025, 2079), 'pandas.read_csv', 'pd.read_csv', (['"""branch_delivery.csv"""'], {'sep': '""";"""', 'dtype': 'str'}), "('branch_delivery.csv', sep=';', dtype=str)\n", (2036, 2079), True, 'import pandas as pd\n'), ((2430, 2479), 'pandas.read_csv', 'pd.read_csv', (['"""staff_flag.csv"""'], {'sep': '""";"""', 'dtype': 'str'}), "('staff_flag.csv', sep=';', dtype=str)\n", (2441, 2479), True, 'import pandas as pd\n'), ((2784, 2888), 'pandas.read_csv', 'pd.read_csv', (['"""contact_email_phone.csv"""'], {'sep': '""";"""', 'dtype': 'str', 'error_bad_lines': '(False)', 'low_memory': '(False)'}), "('contact_email_phone.csv', sep=';', dtype=str, error_bad_lines=\n False, low_memory=False)\n", (2795, 2888), True, 'import pandas as pd\n'), ((3001, 3056), 'pandas.read_csv', 'pd.read_csv', (['"""customer_address.csv"""'], {'sep': '""";"""', 'dtype': 'str'}), "('customer_address.csv', sep=';', dtype=str)\n", (3012, 3056), True, 'import pandas as pd\n'), ((3583, 3677), 'pandas.read_csv', 'pd.read_csv', (['"""debit_current_balance.csv"""'], {'sep': '""";"""', 'low_memory': '(False)', 'error_bad_lines': '(False)'}), "('debit_current_balance.csv', sep=';', low_memory=False,\n error_bad_lines=False)\n", (3594, 3677), True, 'import pandas as pd\n'), ((4938, 5034), 'pandas.read_csv', 'pd.read_csv', (['"""transactions_grouped_all_TRNTP.csv"""'], {'sep': '""";"""', 'dtype': 'str', 'error_bad_lines': '(False)'}), "('transactions_grouped_all_TRNTP.csv', sep=';', dtype=str,\n error_bad_lines=False)\n", (4949, 5034), True, 'import pandas as pd\n'), ((7759, 7883), 'pandas.read_csv', 'pd.read_csv', (['"""avgmonthlybalance_dc.csv"""'], {'sep': '""";"""', 'usecols': 'cols_list', 'dtype': 'str', 'low_memory': '(False)', 'error_bad_lines': '(False)'}), "('avgmonthlybalance_dc.csv', sep=';', usecols=cols_list, dtype=\n str, low_memory=False, error_bad_lines=False)\n", (7770, 7883), True, 'import pandas as pd\n'), ((8218, 8249), 'pandas.isnull', 'pd.isnull', (["dc_avg_bal['SRC_ID']"], {}), "(dc_avg_bal['SRC_ID'])\n", (8227, 8249), True, 'import pandas as pd\n'), ((3183, 3232), 'pandas.read_csv', 'pd.read_csv', (['"""card_ownervsholder_dc.csv"""'], {'sep': '""";"""'}), "('card_ownervsholder_dc.csv', sep=';')\n", (3194, 3232), True, 'import pandas as pd\n'), ((4508, 4570), 'pandas.read_csv', 'pd.read_csv', (['"""16_dc_insurance.csv"""'], {'sep': '""";"""', 'usecols': 'cols_list'}), "('16_dc_insurance.csv', sep=';', usecols=cols_list)\n", (4519, 4570), True, 'import pandas as pd\n')]
|
from django import forms
from django.utils.translation import ugettext as _
from .models import Item, Group, Profile, Area
class SearchForm(forms.Form):
area = forms.ModelChoiceField(label=_('Area'), queryset=Area.objects.all(), required=False)
group = forms.ModelChoiceField(label=_('Group'), queryset=Group.objects.all(), required=False)
q = forms.CharField(required=False, label=_('Query'),)
def filter_by(self):
# TODO search using more than one field
        # TODO split query string and make separate search by words
filters = {}
if self.cleaned_data['group']:
filters['group'] = self.cleaned_data['group']
if self.cleaned_data['area']:
filters['area'] = self.cleaned_data['area']
filters['description__icontains'] = self.cleaned_data['q']
return filters
class ItemForm(forms.ModelForm):
class Meta:
model = Item
fields = (
'area',
'group',
'title',
'description',
'price',
'is_active'
)
class PhoneWidget(forms.TextInput):
input_type = 'phone'
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = (
'phone',
)
widgets = {
'phone': PhoneWidget
}
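
# Illustrative helper (hypothetical, not part of the original module): how
# SearchForm.filter_by() is meant to feed a queryset filter in a view.
def search_items(request):
    form = SearchForm(request.GET)
    if form.is_valid():
        return Item.objects.filter(**form.filter_by())
    return Item.objects.none()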
|
[
"django.utils.translation.ugettext"
] |
[((196, 205), 'django.utils.translation.ugettext', '_', (['"""Area"""'], {}), "('Area')\n", (197, 205), True, 'from django.utils.translation import ugettext as _\n'), ((293, 303), 'django.utils.translation.ugettext', '_', (['"""Group"""'], {}), "('Group')\n", (294, 303), True, 'from django.utils.translation import ugettext as _\n'), ((397, 407), 'django.utils.translation.ugettext', '_', (['"""Query"""'], {}), "('Query')\n", (398, 407), True, 'from django.utils.translation import ugettext as _\n')]
|
# encoding: utf-8
from nose.tools import *
import numpy as np
from cmpy.inference import standardize_data
from cmpy import machines
from ..canonical import tmatrix
from ..counts import path_counts, out_arrays
def test_path_counts1():
    # Test without node_path
m = machines.Even()
delta, nodes, symbols = tmatrix(m)
prng = np.random.RandomState()
prng.seed(0)
d = m.symbols(20, prng=prng)
d = standardize_data(d)
counts, final, states = path_counts(delta, d)
counts_ = [[[4, 8], [0, 8]], [[0, 4], [1, 4]]]
assert_equal(counts.tolist(), counts_)
final_ = [0, -1]
assert_equal(final.tolist(), final_)
assert_equal(states, None)
def test_path_counts2():
# Test with node_path
m = machines.Even()
delta, nodes, symbols = tmatrix(m)
prng = np.random.RandomState()
prng.seed(0)
d = m.symbols(20, prng=prng)
d = standardize_data(d)
counts, final, states = path_counts(delta, d, node_path=True)
counts_ = [[[4, 8], [0, 8]], [[0, 4], [1, 4]]]
assert_equal(counts.tolist(), counts_)
final_ = [0, -1]
assert_equal(final.tolist(), final_)
states_ = [[0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_equal(states.tolist(), states_)
def test_path_counts3():
# Test with node_path and preallocated arrays
m = machines.Even()
delta, nodes, symbols = tmatrix(m)
prng = np.random.RandomState()
prng.seed(0)
d = m.symbols(20, prng=prng)
d = standardize_data(d)
counts, final, states = out_arrays(2, 2, 20, node_path=True)
path_counts(delta, d, node_path=True, out_arrays=(counts, final, states))
counts_ = [[[4, 8], [0, 8]], [[0, 4], [1, 4]]]
assert_equal(counts.tolist(), counts_)
final_ = [0, -1]
assert_equal(final.tolist(), final_)
states_ = [[0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_equal(states.tolist(), states_)
|
[
"cmpy.inference.standardize_data",
"numpy.random.RandomState",
"cmpy.machines.Even"
] |
[((277, 292), 'cmpy.machines.Even', 'machines.Even', ([], {}), '()\n', (290, 292), False, 'from cmpy import machines\n'), ((344, 367), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (365, 367), True, 'import numpy as np\n'), ((426, 445), 'cmpy.inference.standardize_data', 'standardize_data', (['d'], {}), '(d)\n', (442, 445), False, 'from cmpy.inference import standardize_data\n'), ((747, 762), 'cmpy.machines.Even', 'machines.Even', ([], {}), '()\n', (760, 762), False, 'from cmpy import machines\n'), ((814, 837), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (835, 837), True, 'import numpy as np\n'), ((896, 915), 'cmpy.inference.standardize_data', 'standardize_data', (['d'], {}), '(d)\n', (912, 915), False, 'from cmpy.inference import standardize_data\n'), ((1430, 1445), 'cmpy.machines.Even', 'machines.Even', ([], {}), '()\n', (1443, 1445), False, 'from cmpy import machines\n'), ((1497, 1520), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (1518, 1520), True, 'import numpy as np\n'), ((1579, 1598), 'cmpy.inference.standardize_data', 'standardize_data', (['d'], {}), '(d)\n', (1595, 1598), False, 'from cmpy.inference import standardize_data\n')]
|
import sys
import gurobipy
import math
import numpy as np
import time
# Read the solution file and return a list of cap centers
# solutionFilePath = path to the solution file (string)
# n = dimension of the sphere (int, >= 1)
def readSolution(solutionFilePath, n=3):
    solution = []
    try:
        # Open the solution file and read the solution
        with open(solutionFilePath) as solutionFile:
            # Read the solution file line by line,
            # convert each line to a coordinate tuple
            # and collect the tuples in the list solution
            for line in solutionFile:
                entries = line.split(";")
                try:
                    # Cast to float so the coordinates can be used in the
                    # Gurobi constraints below
                    solution.append(tuple(float(entries[i]) for i in range(n)))
                except:
                    print(f"Invalid line: {line}")
    except:
        print(f"Could not open the file {solutionFilePath}.")
sys.exit(-1)
return solution
# Check whether the caps centered at the given list of center points
# cover the sphere completely
# solution = list of cap centers (List[tuple(n)])
# alpha = opening angle of the caps (float, >= 0, <= 360)
# n = dimension of the sphere (int, >= 1)
def checkSolution(solution, alpha, n=3, printing=True):
    # Create a Gurobi model to check
    # whether the covering is complete
    # (assumption: the caps are "open")
    model = gurobipy.Model()
    # Disable the Gurobi output
    model.setParam("OutputFlag", 0)
    # Enable the non-convex solver
    model.setParam("NonConvex", 2)
    # Create the variables and constraints
    # The y variables encode the uncovered point we are looking for
y = {}
for i in range(n):
y[i] = model.addVar(
lb=-gurobipy.GRB.INFINITY,
ub=gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.CONTINUOUS,
name=f"y{i}",
)
    # The point must lie on the sphere, i.e. have a 2-norm of 1.
model.addQConstr(gurobipy.quicksum(y[i] * y[i] for i in range(n)) == 1, "Norm")
    # The point must not be covered by any cap of the given solution
for j in range(len(solution)):
x = solution[j]
model.addConstr(
gurobipy.quicksum(x[i] * y[i] for i in range(n))
<= math.cos((0.5 * alpha) / 180 * math.pi),
f"Angle{j}",
)
    # Write out an LP file for debugging
    # model.write("Lösung.lp")
    # Solve the model and decide from the feasibility status
    # whether the covering is complete
model.optimize()
if model.status == 2:
if printing:
            print(
                "The covering is not complete.\nThe following point is not covered:"
            )
        arr = np.zeros(n)
for i in range(n):
if printing:
print(f"y{i} = ", y[i].X)
arr[i] = y[i].X
return arr
else:
        print("The covering is complete.")
if __name__ == "__main__":
try:
        # Read the path to the solution file
        solutionFilePath = sys.argv[1]
        # Read the opening angle of the caps
        alpha = float(sys.argv[2])
        # If no valid parameters were passed,
        # print the usage hint and exit the program
    except:
        print("Usage: ./checker.py {solution file} {opening angle}")
sys.exit(-1)
    # Read the solution
    solution = readSolution(solutionFilePath)
    # Check the solution
checkSolution(solution, alpha)
# Check whether the caps centered at the given list of center points
# cover the sphere completely.
# Then keep collecting points until a complete covering has been reached
# solution = list of cap centers (List[tuple(n)])
# alpha = opening angle of the caps (float, >= 0, <= 360)
# n = dimension of the sphere (int, >= 1)
def collect_missing(solution, alpha, n=3, printer=None):
    # Create a Gurobi model to check
    # whether the covering is complete
    # (assumption: the caps are "open")
    model = gurobipy.Model()
    # Disable the Gurobi output
    model.setParam("OutputFlag", 0)
    # Enable the non-convex solver
    model.setParam("NonConvex", 2)
    # Create the variables and constraints
    # The y variables encode the uncovered point we are looking for
y = {}
for i in range(n):
y[i] = model.addVar(
lb=-gurobipy.GRB.INFINITY,
ub=gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.CONTINUOUS,
name=f"y{i}",
)
    # The point must lie on the sphere, i.e. have a 2-norm of 1.
model.addQConstr(gurobipy.quicksum(y[i] * y[i] for i in range(n)) == 1, "Norm")
    # The point must not be covered by any cap of the given solution
for j in range(len(solution)):
x = solution[j]
model.addConstr(
gurobipy.quicksum(x[i] * y[i] for i in range(n))
<= math.cos((0.5 * alpha) / 180 * math.pi),
f"Angle{j}",
)
added = []
    # beginning of the do-while loop
starttime = time.time()
model.optimize()
while model.status == 2:
        x = np.zeros(n)
for i in range(n):
x[i] = y[i].X
added.append(x)
model.addConstr(
gurobipy.quicksum(x[i] * y[i] for i in range(n))
<= math.cos((0.5 * alpha) / 180 * math.pi),
f"Angle{j}",
)
if printer is not None:
printed = printer(len(added), len(solution) + len(added), time.time() - starttime)
if printed:
starttime = time.time()
        # do-while step: re-solve the model with the new constraint added
        model.optimize()
return added
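
# Pure-Python sketch (illustration only, independent of Gurobi) of the
# coverage condition encoded in the constraints above: a unit vector y lies
# in the open cap centered at x with opening angle alpha iff
# <x, y> > cos(alpha / 2).
def is_covered(y, centers, alpha, n=3):
    threshold = math.cos((0.5 * alpha) / 180 * math.pi)
    return any(sum(x[i] * y[i] for i in range(n)) > threshold for x in centers)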
|
[
"gurobipy.Model",
"time.time",
"numpy.array",
"math.cos",
"sys.exit"
] |
[((1451, 1467), 'gurobipy.Model', 'gurobipy.Model', ([], {}), '()\n', (1465, 1467), False, 'import gurobipy\n'), ((4200, 4216), 'gurobipy.Model', 'gurobipy.Model', ([], {}), '()\n', (4214, 4216), False, 'import gurobipy\n'), ((5245, 5256), 'time.time', 'time.time', ([], {}), '()\n', (5254, 5256), False, 'import time\n'), ((2838, 2863), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2846, 2863), True, 'import numpy as np\n'), ((5321, 5346), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5329, 5346), True, 'import numpy as np\n'), ((936, 948), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (944, 948), False, 'import sys\n'), ((3485, 3497), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3493, 3497), False, 'import sys\n'), ((2360, 2397), 'math.cos', 'math.cos', (['(0.5 * alpha / 180 * math.pi)'], {}), '(0.5 * alpha / 180 * math.pi)\n', (2368, 2397), False, 'import math\n'), ((5109, 5146), 'math.cos', 'math.cos', (['(0.5 * alpha / 180 * math.pi)'], {}), '(0.5 * alpha / 180 * math.pi)\n', (5117, 5146), False, 'import math\n'), ((5525, 5562), 'math.cos', 'math.cos', (['(0.5 * alpha / 180 * math.pi)'], {}), '(0.5 * alpha / 180 * math.pi)\n', (5533, 5562), False, 'import math\n'), ((5781, 5792), 'time.time', 'time.time', ([], {}), '()\n', (5790, 5792), False, 'import time\n'), ((5704, 5715), 'time.time', 'time.time', ([], {}), '()\n', (5713, 5715), False, 'import time\n')]
|
# This file is part of the markdown-svgbob project
# https://github.com/mbarkhau/markdown-svgbob
#
# Copyright (c) 2019-2021 <NAME> (<EMAIL>) - MIT License
# SPDX-License-Identifier: MIT
import re
import copy
import json
import base64
import typing as typ
import hashlib
import logging
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from markdown.postprocessors import Postprocessor
from markdown_svgbob import wrapper
try:
from urllib.parse import quote
except ImportError:
from urllib import quote # type: ignore
logger = logging.getLogger(__name__)
BLOCK_START_RE = re.compile(r"^(`{3,}|~{3,})bob")
BLOCK_CLEAN_RE = re.compile(r"^(`{3,}|~{3,})bob(.*)(\1)$", flags=re.DOTALL)
def _clean_block_text(block_text: str) -> str:
block_match = BLOCK_CLEAN_RE.match(block_text)
if block_match:
return block_match.group(2)
else:
return block_text
def make_marker_id(text: str) -> str:
data = text.encode("utf-8")
return hashlib.md5(data).hexdigest()
# TagType enumeration: inline_svg|img_utf8_svg|img_base64_svg
TagType = str
def svg2html(svg_data: bytes, tag_type: TagType = 'inline_svg') -> str:
svg_data = svg_data.replace(b"\n", b"")
if tag_type == 'img_base64_svg':
img_b64_data: bytes = base64.standard_b64encode(svg_data)
img_text = img_b64_data.decode('ascii')
return f'<img class="bob" src="data:image/svg+xml;base64,{img_text}"/>'
elif tag_type == 'img_utf8_svg':
img_text = svg_data.decode("utf-8")
img_text = quote(img_text)
return f'<img class="bob" src="data:image/svg+xml;utf-8,{img_text}"/>'
elif tag_type == 'inline_svg':
return svg_data.decode("utf-8")
else:
err_msg = f"Invalid tag_type='{tag_type}'"
raise NotImplementedError(err_msg)
def _parse_min_char_width(options: wrapper.Options) -> int:
min_char_width = options.pop("min_char_width", "")
try:
return int(round(float(min_char_width)))
except ValueError:
logger.warning(
f"Invalid argument for min_char_width. expected integer, got: {min_char_width}"
)
return 0
def _add_char_padding(block_text: str, min_width: int) -> str:
lines = block_text.splitlines()
block_width = max(len(line) for line in lines)
if block_width >= min_width:
return block_text
lpad = " " * ((min_width - block_width) // 2)
new_lines = [(lpad + line).ljust(min_width) for line in lines]
return "\n".join(new_lines)
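
# Quick sanity check (illustration only, not used by the extension): a
# 3-column diagram padded to min_width=7 gains two columns of left padding
# and is right-padded back to 7 columns.
assert _add_char_padding("o->", 7) == "  o->  "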
# https://regex101.com/r/BQkg5t/2/
BG_STYLE_PATTERN = r"""
(
rect\.backdrop\s*\{\s*fill:\s*white;
| \.bg_fill\s*\{\s*fill:\s*white;
| </style><rect fill="white"
)
"""
BG_STYLE_RE = re.compile(BG_STYLE_PATTERN.encode("ascii"), flags=re.VERBOSE)
FG_STYLE_PATTERN = r"""
(
\.fg_stroke\s*\{\s*stroke:\s*black;
| \.fg_fill\s*\{\s*fill:\s*black;
| text\s*{\s*fill:\s*black;
)
"""
FG_STYLE_RE = re.compile(FG_STYLE_PATTERN.encode("ascii"), flags=re.VERBOSE)
def _postprocess_svg(svg_data: bytes, bg_color: typ.Optional[str] = None, fg_color: typ.Optional[str] = None) -> bytes:
if bg_color:
pos = 0
while True:
match = BG_STYLE_RE.search(svg_data, pos)
if match is None:
break
repl = match.group(0).replace(b"white", bg_color.encode("ascii"))
begin, end = match.span()
pos = end
svg_data = svg_data[:begin] + repl + svg_data[end:]
if fg_color:
pos = 0
while True:
match = FG_STYLE_RE.search(svg_data, pos)
if match is None:
break
repl = match.group(0).replace(b"black", fg_color.encode("ascii"))
begin, end = match.span()
pos = end
svg_data = svg_data[:begin] + repl + svg_data[end:]
return svg_data
def draw_bob(block_text: str, default_options: wrapper.Options = None) -> str:
options: wrapper.Options = {}
if default_options:
options.update(default_options)
block_text = _clean_block_text(block_text)
header, rest = block_text.split("\n", 1)
if "{" in header and "}" in header:
options.update(json.loads(header))
block_text = rest
min_char_width = _parse_min_char_width(options)
if min_char_width:
block_text = _add_char_padding(block_text, min_char_width)
tag_type = typ.cast(str, options.pop('tag_type', 'inline_svg'))
bg_color = options.pop("bg_color", "")
fg_color = options.pop("fg_color", "")
if not isinstance(bg_color, str):
bg_color = ""
if not isinstance(fg_color, str):
fg_color = ""
svg_data = wrapper.text2svg(block_text, options)
    svg_data = _postprocess_svg(svg_data, bg_color, fg_color)
return svg2html(svg_data, tag_type=tag_type)
DEFAULT_CONFIG = {
'tag_type' : ["inline_svg", "Format to use (inline_svg|img_utf8_svg|img_base64_svg)"],
'bg_color' : ["white" , "Set the background color"],
'fg_color' : ["black" , "Set the foreground color"],
'min_char_width': ["" , "Minimum width of diagram in characters"],
}
class SvgbobExtension(Extension):
def __init__(self, **kwargs) -> None:
self.config: typ.Dict[str, typ.List[str]] = copy.deepcopy(DEFAULT_CONFIG)
for name, options_text in wrapper.parse_options().items():
self.config[name] = ["", options_text]
self.images: typ.Dict[str, str] = {}
super().__init__(**kwargs)
def reset(self) -> None:
self.images.clear()
def extendMarkdown(self, md) -> None:
preproc = SvgbobPreprocessor(md, self)
md.preprocessors.register(preproc, name='svgbob_fenced_code_block', priority=50)
postproc = SvgbobPostprocessor(md, self)
md.postprocessors.register(postproc, name='svgbob_fenced_code_block', priority=0)
md.registerExtension(self)
BLOCK_RE = re.compile(r"^(```|~~~)bob")
class SvgbobPreprocessor(Preprocessor):
def __init__(self, md, ext: SvgbobExtension) -> None:
super().__init__(md)
self.ext: SvgbobExtension = ext
@property
def default_options(self) -> wrapper.Options:
options: wrapper.Options = {
'tag_type' : self.ext.getConfig('tag_type' , 'inline_svg'),
'min_char_width': self.ext.getConfig('min_char_width', ""),
}
for name in self.ext.config.keys():
val = self.ext.getConfig(name, "")
if val != "":
options[name] = val
return options
def _make_tag_for_block(self, block_lines: typ.List[str]) -> str:
block_text = "\n".join(block_lines).rstrip()
img_tag = draw_bob(block_text, self.default_options)
img_id = make_marker_id(img_tag)
marker_tag = f"<p id=\"tmp_md_svgbob{img_id}\">svgbob{img_id}</p>"
tag_text = f"<p>{img_tag}</p>"
self.ext.images[marker_tag] = tag_text
return marker_tag
def _iter_out_lines(self, lines: typ.List[str]) -> typ.Iterable[str]:
is_in_fence = False
expected_close_fence = "```"
block_lines: typ.List[str] = []
for line in lines:
if is_in_fence:
block_lines.append(line)
is_ending_fence = line.strip() == expected_close_fence
if not is_ending_fence:
continue
is_in_fence = False
marker_tag = self._make_tag_for_block(block_lines)
del block_lines[:]
yield marker_tag
else:
fence_match = BLOCK_START_RE.match(line)
if fence_match:
is_in_fence = True
expected_close_fence = fence_match.group(1)
block_lines.append(line)
else:
yield line
def run(self, lines: typ.List[str]) -> typ.List[str]:
return list(self._iter_out_lines(lines))
# NOTE (mb):
# Q: Why this business with the Postprocessor? Why
# not just do `yield tag_text` and save the hassle
#   of `self.ext.images[marker_tag] = tag_text` ?
# A: Maybe there are other processors that can't be
# trusted to leave the inserted markup alone. Maybe
# the inserted markup could be incorrectly parsed as
# valid markdown.
class SvgbobPostprocessor(Postprocessor):
def __init__(self, md, ext: SvgbobExtension) -> None:
super().__init__(md)
self.ext: SvgbobExtension = ext
def run(self, text: str) -> str:
for marker_tag, img in self.ext.images.items():
if marker_tag in text:
wrapped_marker = "<p>" + marker_tag + "</p>"
while marker_tag in text:
if wrapped_marker in text:
text = text.replace(wrapped_marker, img)
else:
text = text.replace(marker_tag, img)
elif 'class="toc"' not in text:
logger.warning(f"SvgbobPostprocessor couldn't find: {marker_tag}")
return text
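
# Illustrative usage sketch (assumes the `markdown` package and the svgbob
# binary are installed; the sample diagram is arbitrary): register the
# extension and render a fenced ```bob block to HTML.
if __name__ == "__main__":
    import markdown
    text = "```bob\no->\n```"
    print(markdown.markdown(text, extensions=[SvgbobExtension()]))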
|
[
"markdown_svgbob.wrapper.text2svg",
"copy.deepcopy",
"hashlib.md5",
"json.loads",
"urllib.quote",
"base64.standard_b64encode",
"markdown_svgbob.wrapper.parse_options",
"logging.getLogger",
"re.compile"
] |
[((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((628, 659), 're.compile', 're.compile', (['"""^(`{3,}|~{3,})bob"""'], {}), "('^(`{3,}|~{3,})bob')\n", (638, 659), False, 'import re\n'), ((678, 736), 're.compile', 're.compile', (['"""^(`{3,}|~{3,})bob(.*)(\\\\1)$"""'], {'flags': 're.DOTALL'}), "('^(`{3,}|~{3,})bob(.*)(\\\\1)$', flags=re.DOTALL)\n", (688, 736), False, 'import re\n'), ((5948, 5975), 're.compile', 're.compile', (['"""^(```|~~~)bob"""'], {}), "('^(```|~~~)bob')\n", (5958, 5975), False, 'import re\n'), ((4681, 4718), 'markdown_svgbob.wrapper.text2svg', 'wrapper.text2svg', (['block_text', 'options'], {}), '(block_text, options)\n', (4697, 4718), False, 'from markdown_svgbob import wrapper\n'), ((1305, 1340), 'base64.standard_b64encode', 'base64.standard_b64encode', (['svg_data'], {}), '(svg_data)\n', (1330, 1340), False, 'import base64\n'), ((5294, 5323), 'copy.deepcopy', 'copy.deepcopy', (['DEFAULT_CONFIG'], {}), '(DEFAULT_CONFIG)\n', (5307, 5323), False, 'import copy\n'), ((1012, 1029), 'hashlib.md5', 'hashlib.md5', (['data'], {}), '(data)\n', (1023, 1029), False, 'import hashlib\n'), ((1569, 1584), 'urllib.quote', 'quote', (['img_text'], {}), '(img_text)\n', (1574, 1584), False, 'from urllib import quote\n'), ((4200, 4218), 'json.loads', 'json.loads', (['header'], {}), '(header)\n', (4210, 4218), False, 'import json\n'), ((5358, 5381), 'markdown_svgbob.wrapper.parse_options', 'wrapper.parse_options', ([], {}), '()\n', (5379, 5381), False, 'from markdown_svgbob import wrapper\n')]
|
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam, RMSprop
import torch.nn as nn
from model import W2V_model, W2V_SGNS_model
from w2v_dataloader import CBOW_dataset, SkipGramDataset, SkipGramNegativeSamplingDataset
from test_embeddings import test_embedding_question_words
import os
import argparse
from tqdm import tqdm
import numpy as np
import json
class W2VTrainer:
def __init__(self, opt):
"""
        Given the parameters, initializes the training class.
input:
opt: Argparse Options
"""
# get the data
assert os.path.isfile(opt.dataset_path), "Bad file path!!"
# Training device
self.device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
print("Training on {}".format(self.device))
# Choose the dataset
print("Loading dataset")
self.dataset = CBOW_dataset(opt.dataset_path) if opt.dataset_type == 'cbow' else SkipGramDataset(opt.dataset_path)
print("Dataset loaded")
self.batch_size = opt.batch_size
# initialize the network
self.network = W2V_model(len(self.dataset.vocab_set), hidden_dim=opt.hidden_dim)
self.network.to(self.device)
        # initialize optimizer and pass training parameters
self.optimizer = Adam(self.network.parameters(), lr=opt.lr)
# This combines Softmax and NLLLoss
self.loss = nn.CrossEntropyLoss()
#testing parameters
self.test_interval = opt.checkpoint_interval
def train(self, opt):
"""
Trains the model for the number of epochs provided
"""
# Initialize the Dataloader
train_dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
shuffle=True, num_workers=4)
# Training starts
for epoch in range(opt.epochs):
print("Epoch {}/{}".format(epoch + 1, opt.epochs))
print('-' * 10)
running_loss = 0.0
pbar = tqdm(train_dataloader)
for batch_i, (input_samples, output_samples) in enumerate(pbar):
input_samples = input_samples.to(self.device).type(torch.float)
output_samples = output_samples.to(self.device).type(torch.long)
y_pred = self.network(input_samples)
loss = self.loss(y_pred, output_samples)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
running_loss += loss.item()
pbar.set_description("Loss = %f" % loss.item())
epoch_loss = running_loss / len(train_dataloader)
print("Loss after epoch {} : {}".format(epoch + 1, epoch_loss))
if (epoch+1) % opt.checkpoint_interval == 0:
print("Saving the model after {} epoch".format(epoch + 1))
torch.save(self.network.state_dict(), "w2v_ckpt_epoch_{}.pth".format(epoch+1))
# testing for the cosine similarity
# get the current embedding
if opt.test_file:
embedding_current = self.network.fc1.weight.clone().cpu().transpose(0, 1).detach().numpy()
emb_dict = {'vocab_dict':self.dataset.vocab_word_to_idx, 'embedding': embedding_current}
cosine_sim = test_embedding_question_words(emb_dict, opt.test_file)
print("Avg cosine similarity after {} epoch - {}".format(epoch+1, cosine_sim))
print("="*20)
return 0
def save_embeddings(self):
'''
        Saves the word embeddings (the weights of the first linear layer)
        as a JSON file containing a dictionary with the vocabulary and the
        embedding matrix.
'''
# Get the first layer weights
embedding = self.network.fc1.weight.cpu().detach().numpy().transpose()
# Convert it to list
embedding = embedding.tolist()
# Create embedding dictionary
emb_dict = {'vocab_dict':self.dataset.vocab_word_to_idx, 'embedding':embedding}
with open('embeddings.json', 'w') as fp:
json.dump(emb_dict, fp)
print("Embedding saved successfully")
class W2V_SGNS_Trainer:
def __init__(self, opt):
assert os.path.isfile(opt.dataset_path), "Bad file path!!"
self.device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
print("Training on {}".format(self.device))
print("Loading the dataset")
self.dataset = SkipGramNegativeSamplingDataset(opt.dataset_path,
k=opt.neg_sample_size, freq_power=0.75)
print("Dataset Loaded")
self.batch_size = opt.batch_size
# initialize the network
self.network = W2V_SGNS_model(len(self.dataset.vocab_set), hidden_dim=opt.hidden_dim)
self.network.to(self.device)
# initialize optimizer and pass training parameters
self.optimizer = Adam(self.network.parameters(), lr=opt.lr)
# Binary cross entropy loss to handle two classes
self.loss = nn.BCELoss()
#testing parameters
self.test_interval = opt.checkpoint_interval
def train(self, opt):
"""
Trains the model for the number of epochs provided
"""
train_dataloader = DataLoader(self.dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)
for epoch in range(opt.epochs):
print("Epoch {}/{}".format(epoch + 1, opt.epochs))
print('-' * 10)
running_loss = 0.0
pbar = tqdm(train_dataloader)
for batch_i, (input_idxs, context_idxs, targets) in enumerate(pbar):
input_idxs = input_idxs.view(-1,).to(self.device)
context_idxs = context_idxs.view(-1,).to(self.device)
targets = targets.view(-1, 1).to(self.device)
y_pred = self.network(input_idxs, context_idxs)
loss = self.loss(y_pred, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
running_loss += loss.item()
pbar.set_description("Loss = %f" % loss.item())
epoch_loss = running_loss / len(train_dataloader)
print("Loss after epoch {} : {}".format(epoch + 1, epoch_loss))
if (epoch+1) % opt.checkpoint_interval == 0:
if not os.path.exists('checkpoint'):
os.mkdir('checkpoint')
print("Saving the model after {} epoch".format(epoch + 1))
torch.save(self.network.state_dict(), "checkpoint/w2v_sgns_ckpt_epoch_{}.pth".format(epoch+1))
#testing for the cosine similarity
#get the current embedding
if opt.test_file:
embedding_current = self.network.embedding.weight.cpu().detach().numpy()
emb_dict = {'vocab_dict':self.dataset.vocab_word_to_idx, 'embedding': embedding_current}
cosine_sim = test_embedding_question_words(emb_dict, opt.test_file)
print("Avg cosine similarity after {} epoch - {}".format(epoch+1, cosine_sim))
print("="*20)
return 0
def save_embeddings(self):
'''
        Saves the word embeddings (the weights of the embedding layer)
        as a JSON file containing a dictionary with the vocabulary and the
        embedding matrix.
'''
# Get the embedding matrix
embedding = self.network.embedding.weight.cpu().detach().numpy()
embedding = embedding.tolist()
emb_dict = {'vocab_dict':self.dataset.vocab_word_to_idx, 'embedding':embedding}
with open('embeddings_sgns.json', 'w') as fp:
json.dump(emb_dict, fp)
print("Embedding saved successfully")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", type=str, default="sgns", help="model type vanilla/sgns")
parser.add_argument("--epochs", type=int, default=5, help="number of epochs")
parser.add_argument("--hidden_dim", type=int, default=300, help="size of the hidden layer dimension")
parser.add_argument("--dataset_type", type=str, default="sgram", help="cbow or skipgram model")
parser.add_argument("--batch_size", type=int, default=128, help="size of each word batch")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate of the model")
parser.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model weights")
parser.add_argument("--dataset_path", type=str, required=True, help="path to the JSON dataset")
parser.add_argument("--neg_sample_size", type=int, default=5, help="Number of negative samples")
parser.add_argument("--test_file", type=str, help="path to the file used in the testing part")
parser.add_argument("--checkpoint_path", type=str, default=None, help="path to the training checkpoint")
opt = parser.parse_args()
print("=" * 10, "HYPERPARAMETERS", "=" * 10)
print(opt)
if opt.model_type == 'sgns':
trainer = W2V_SGNS_Trainer(opt)
else:
trainer = W2VTrainer(opt)
if opt.checkpoint_path:
trainer.network.load_state_dict(torch.load(opt.checkpoint_path))
trainer.train(opt)
trainer.save_embeddings()
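
# Illustrative follow-up sketch (assumes an embeddings.json produced by
# save_embeddings() above; the two words are placeholders that must exist
# in the saved vocabulary): compare two words by cosine similarity.
def cosine_similarity_from_file(path, word_a, word_b):
    with open(path) as fp:
        emb = json.load(fp)
    vocab, matrix = emb['vocab_dict'], np.array(emb['embedding'])
    u, v = matrix[vocab[word_a]], matrix[vocab[word_b]]
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))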
|
[
"json.dump",
"tqdm.tqdm",
"os.mkdir",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"w2v_dataloader.SkipGramDataset",
"torch.load",
"torch.nn.CrossEntropyLoss",
"os.path.exists",
"test_embeddings.test_embedding_question_words",
"os.path.isfile",
"torch.cuda.is_available",
"w2v_dataloader.CBOW_dataset",
"w2v_dataloader.SkipGramNegativeSamplingDataset"
] |
[((8020, 8045), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8043, 8045), False, 'import argparse\n'), ((604, 636), 'os.path.isfile', 'os.path.isfile', (['opt.dataset_path'], {}), '(opt.dataset_path)\n', (618, 636), False, 'import os\n'), ((1432, 1453), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1451, 1453), True, 'import torch.nn as nn\n'), ((1711, 1796), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(self.dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=4)\n', (1721, 1796), False, 'from torch.utils.data import DataLoader\n'), ((4378, 4410), 'os.path.isfile', 'os.path.isfile', (['opt.dataset_path'], {}), '(opt.dataset_path)\n', (4392, 4410), False, 'import os\n'), ((4629, 4722), 'w2v_dataloader.SkipGramNegativeSamplingDataset', 'SkipGramNegativeSamplingDataset', (['opt.dataset_path'], {'k': 'opt.neg_sample_size', 'freq_power': '(0.75)'}), '(opt.dataset_path, k=opt.neg_sample_size,\n freq_power=0.75)\n', (4660, 4722), False, 'from w2v_dataloader import CBOW_dataset, SkipGramDataset, SkipGramNegativeSamplingDataset\n'), ((5188, 5200), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5198, 5200), True, 'import torch.nn as nn\n'), ((5421, 5506), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(self.dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=2)\n', (5431, 5506), False, 'from torch.utils.data import DataLoader\n'), ((906, 936), 'w2v_dataloader.CBOW_dataset', 'CBOW_dataset', (['opt.dataset_path'], {}), '(opt.dataset_path)\n', (918, 936), False, 'from w2v_dataloader import CBOW_dataset, SkipGramDataset, SkipGramNegativeSamplingDataset\n'), ((972, 1005), 'w2v_dataloader.SkipGramDataset', 'SkipGramDataset', (['opt.dataset_path'], {}), '(opt.dataset_path)\n', (987, 1005), False, 'from w2v_dataloader import CBOW_dataset, SkipGramDataset, SkipGramNegativeSamplingDataset\n'), ((2079, 2101), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), '(train_dataloader)\n', (2083, 2101), False, 'from tqdm import tqdm\n'), ((4237, 4260), 'json.dump', 'json.dump', (['emb_dict', 'fp'], {}), '(emb_dict, fp)\n', (4246, 4260), False, 'import json\n'), ((5687, 5709), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), '(train_dataloader)\n', (5691, 5709), False, 'from tqdm import tqdm\n'), ((7909, 7932), 'json.dump', 'json.dump', (['emb_dict', 'fp'], {}), '(emb_dict, fp)\n', (7918, 7932), False, 'import json\n'), ((9427, 9458), 'torch.load', 'torch.load', (['opt.checkpoint_path'], {}), '(opt.checkpoint_path)\n', (9437, 9458), False, 'import torch\n'), ((730, 755), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (753, 755), False, 'import torch\n'), ((4478, 4503), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4501, 4503), False, 'import torch\n'), ((3430, 3484), 'test_embeddings.test_embedding_question_words', 'test_embedding_question_words', (['emb_dict', 'opt.test_file'], {}), '(emb_dict, opt.test_file)\n', (3459, 3484), False, 'from test_embeddings import test_embedding_question_words\n'), ((6550, 6578), 'os.path.exists', 'os.path.exists', (['"""checkpoint"""'], {}), "('checkpoint')\n", (6564, 6578), False, 'import os\n'), ((6600, 6622), 'os.mkdir', 'os.mkdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (6608, 6622), False, 'import os\n'), ((7174, 7228), 'test_embeddings.test_embedding_question_words', 'test_embedding_question_words', (['emb_dict', 'opt.test_file'], {}), '(emb_dict, opt.test_file)\n', (7203, 7228), False, 'from test_embeddings import test_embedding_question_words\n')]
|
#!/usr/bin/env python3
# encoding: utf-8
# end_pymotw_header
import sys
import sys_shelve_importer
def show_module_details(module):
print(" message :", module.message)
print(" __name__ :", module.__name__)
print(" __package__:", module.__package__)
print(" __file__ :", module.__file__)
print(" __path__ :", module.__path__)
print(" __loader__ :", module.__loader__)
filename = "/tmp/pymotw_import_example.shelve"
sys.path_hooks.append(sys_shelve_importer.ShelveFinder)
sys.path.insert(0, filename)
print('Import of "package.module1":')
import package.module1
print()
print("Examine package.module1 details:")
show_module_details(package.module1)
print()
print('Import of "package.subpackage.module2":')
import package.subpackage.module2
print()
print("Examine package.subpackage.module2 details:")
show_module_details(package.subpackage.module2)
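
# Minimal illustration (demo only) of the path-hook protocol used above:
# each callable in sys.path_hooks is tried with a path entry and must
# raise ImportError to decline handling it.
def declining_hook(path_entry):
    raise ImportError(f"declining_hook does not handle {path_entry!r}")

sys.path_hooks.append(declining_hook)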
|
[
"sys.path.insert",
"sys.path_hooks.append"
] |
[((457, 512), 'sys.path_hooks.append', 'sys.path_hooks.append', (['sys_shelve_importer.ShelveFinder'], {}), '(sys_shelve_importer.ShelveFinder)\n', (478, 512), False, 'import sys\n'), ((513, 541), 'sys.path.insert', 'sys.path.insert', (['(0)', 'filename'], {}), '(0, filename)\n', (528, 541), False, 'import sys\n')]
|
#!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for updating the project settings for a chromium branch.
To initialize a new chromium branch, run the following from the root of
the repo (where MM is the milestone number and BBBB is the branch
number):
```
infra/config/scripts/branch.py initialize --milestone MM --branch BBBB
infra/config/main.star
infra/config/dev.star
```
Usage:
branch.py initialize --milestone XX --branch YYYY
"""
import argparse
import json
import os
import sys
INFRA_CONFIG_DIR = os.path.abspath(os.path.join(__file__, '..', '..'))
def parse_args(args=None, *, parser_type=None):
parser_type = parser_type or argparse.ArgumentParser
parser = parser_type(
description='Update the project settings for a chromium branch')
parser.set_defaults(func=None)
parser.add_argument('--settings-json',
help='Path to the settings.json file',
default=os.path.join(INFRA_CONFIG_DIR, 'settings.json'))
subparsers = parser.add_subparsers()
init_parser = subparsers.add_parser(
'initialize', help='Initialize the settings for a branch')
init_parser.set_defaults(func=initialize_cmd)
init_parser.add_argument(
'--milestone',
required=True,
help=('The milestone identifier '
'(e.g. the milestone number for standard release channel)'))
init_parser.add_argument(
'--branch',
required=True,
help='The branch name, must correspond to a ref in refs/branch-heads')
args = parser.parse_args(args)
if args.func is None:
parser.error('no sub-command specified')
return args
def initial_settings(milestone, branch):
settings = dict(
project=f'chromium-m{milestone}',
project_title=f'Chromium M{milestone}',
is_master=False,
is_lts_branch=False,
ref=f'refs/branch-heads/{branch}',
)
return json.dumps(settings, indent=4) + '\n'
def initialize_cmd(args):
settings = initial_settings(args.milestone, args.branch)
with open(args.settings_json, 'w') as f:
f.write(settings)
def main():
args = parse_args()
args.func(args)
if __name__ == '__main__':
main()
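
# Example of the JSON written by `initialize` (hypothetical milestone and
# branch values):
#   initial_settings('99', '4000') ->
#   {
#       "project": "chromium-m99",
#       "project_title": "Chromium M99",
#       "is_master": false,
#       "is_lts_branch": false,
#       "ref": "refs/branch-heads/4000"
#   }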
|
[
"os.path.join",
"json.dumps"
] |
[((675, 709), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '""".."""'], {}), "(__file__, '..', '..')\n", (687, 709), False, 'import os\n'), ((2013, 2043), 'json.dumps', 'json.dumps', (['settings'], {'indent': '(4)'}), '(settings, indent=4)\n', (2023, 2043), False, 'import json\n'), ((1075, 1122), 'os.path.join', 'os.path.join', (['INFRA_CONFIG_DIR', '"""settings.json"""'], {}), "(INFRA_CONFIG_DIR, 'settings.json')\n", (1087, 1122), False, 'import os\n')]
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from .managers import CustomUserManager
# Create your models here.
class CustomUser(AbstractBaseUser, PermissionsMixin):
    '''First name and last name are required in POST, optional in PUT,
    and also required in forms.'''
    first_name = models.CharField(max_length=100, null=True, blank=True)
    last_name = models.CharField(max_length=100, null=True, blank=True)
email = models.EmailField(_('email address'), unique=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
is_deleted = models.BooleanField(default=False)
date_joined = models.DateTimeField(default=timezone.now)
    updated_date = models.DateTimeField(auto_now=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
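
# Illustrative usage sketch (assumes CustomUserManager follows Django's
# usual create_user/create_superuser conventions; run inside a configured
# project, e.g. via `manage.py shell`):
#
#     user = CustomUser.objects.create_user(email="<EMAIL>", password="<PASSWORD>")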
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.utils.translation.gettext_lazy"
] |
[((469, 524), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (485, 524), False, 'from django.db import models\n'), ((543, 598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (559, 598), False, 'from django.db import models\n'), ((676, 710), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (695, 710), False, 'from django.db import models\n'), ((724, 757), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (743, 757), False, 'from django.db import models\n'), ((772, 806), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (791, 806), False, 'from django.db import models\n'), ((822, 864), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (842, 864), False, 'from django.db import models\n'), ((879, 914), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (899, 914), False, 'from django.db import models\n'), ((631, 649), 'django.utils.translation.gettext_lazy', '_', (['"""email address"""'], {}), "('email address')\n", (632, 649), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
import pickle
import copy
import pathlib
import dash
import math
import datetime as dt
import pandas as pd
# Multi-dropdown options
from controls import COUNTIES, WELL_STATUSES, WELL_TYPES, WELL_COLORS
# Create controls
county_options = [
{"label": str(COUNTIES[county]), "value": str(county)} for county in COUNTIES
]
well_status_options = [
{"label": str(WELL_STATUSES[well_status]), "value": str(well_status)}
for well_status in WELL_STATUSES
]
well_type_options = [
{"label": str(WELL_TYPES[well_type]), "value": str(well_type)}
for well_type in WELL_TYPES
]
# Create global chart template
mapbox_access_token = "<KEY>"
layout = dict(
autosize=True,
automargin=True,
margin=dict(l=30, r=30, b=20, t=40),
hovermode="closest",
plot_bgcolor="#F9F9F9",
paper_bgcolor="#F9F9F9",
legend=dict(font=dict(size=10), orientation="h"),
title="Satellite Overview",
mapbox=dict(
accesstoken=mapbox_access_token,
style="light",
center=dict(lon=-78.05, lat=42.54),
zoom=7,
),
)
# Helper functions
def human_format(num):
if num == 0:
return "0"
magnitude = int(math.log(num, 1000))
mantissa = str(int(num / (1000 ** magnitude)))
return mantissa + ["", "K", "M", "G", "T", "P"][magnitude]
def filter_dataframe(df, points, well_statuses, well_types, year_slider):
dff = df[
df["Well_Status"].isin(well_statuses)
& df["Well_Type"].isin(well_types)
& (df["Date_Well_Completed"] > dt.datetime(year_slider[0], 1, 1))
& (df["Date_Well_Completed"] < dt.datetime(year_slider[1], 1, 1))
]
return dff
def produce_individual(api_well_num, points):
try:
points[api_well_num]
except:
return None, None, None, None
index = list(
range(min(points[api_well_num].keys()), max(points[api_well_num].keys()) + 1)
)
gas = []
oil = []
water = []
for year in index:
try:
gas.append(points[api_well_num][year]["Gas Produced, MCF"])
except:
gas.append(0)
try:
oil.append(points[api_well_num][year]["Oil Produced, bbl"])
except:
oil.append(0)
try:
water.append(points[api_well_num][year]["Water Produced, bbl"])
except:
water.append(0)
return index, gas, oil, water
def produce_aggregate(selected, year_slider):
index = list(range(max(year_slider[0], 1985), 2016))
gas = []
oil = []
water = []
for year in index:
count_gas = 0
count_oil = 0
count_water = 0
for api_well_num in selected:
try:
count_gas += points[api_well_num][year]["Gas Produced, MCF"]
except:
pass
try:
count_oil += points[api_well_num][year]["Oil Produced, bbl"]
except:
pass
try:
count_water += points[api_well_num][year]["Water Produced, bbl"]
except:
pass
gas.append(count_gas)
oil.append(count_oil)
water.append(count_water)
return index, gas, oil, water
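
# Quick illustration (demo only) of the human_format helper defined above:
# 1,250,000 has magnitude 2 (thousands^2), so it renders as "1M".
assert human_format(1250000) == "1M"
assert human_format(950) == "950"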
|
[
"math.log",
"datetime.datetime"
] |
[((1200, 1219), 'math.log', 'math.log', (['num', '(1000)'], {}), '(num, 1000)\n', (1208, 1219), False, 'import math\n'), ((1627, 1660), 'datetime.datetime', 'dt.datetime', (['year_slider[1]', '(1)', '(1)'], {}), '(year_slider[1], 1, 1)\n', (1638, 1660), True, 'import datetime as dt\n'), ((1553, 1586), 'datetime.datetime', 'dt.datetime', (['year_slider[0]', '(1)', '(1)'], {}), '(year_slider[0], 1, 1)\n', (1564, 1586), True, 'import datetime as dt\n')]
|
from beneath.client import Client
from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier
from beneath.cli.utils import (
async_cmd,
mb_to_bytes,
pretty_print_graphql_result,
str2bool,
project_path_help,
service_path_help,
table_path_help,
)
def add_subparser(root):
service = root.add_parser(
"service",
help="Create and manage services",
description="A service represents a non-user account with its own permissions, secrets, "
"quotas and monitoring. They're used to access Beneath from production code.",
).add_subparsers()
_list = service.add_parser("list", help="List services in a project")
_list.set_defaults(func=async_cmd(show_list))
_list.add_argument("project_path", type=str, help=project_path_help)
_create = service.add_parser("create", help="Create a new service")
_create.set_defaults(func=async_cmd(create))
_create.add_argument("service_path", type=str, help=service_path_help)
_create.add_argument("--description", type=str)
_create.add_argument("--source-url", type=str)
_create.add_argument("--read-quota-mb", type=int)
_create.add_argument("--write-quota-mb", type=int)
_create.add_argument("--scan-quota-mb", type=int)
_update = service.add_parser("update", help="Update a service")
_update.set_defaults(func=async_cmd(update))
_update.add_argument("service_path", type=str, help=service_path_help)
_update.add_argument("--description", type=str)
_update.add_argument("--source-url", type=str)
_update.add_argument("--read-quota-mb", type=int)
_update.add_argument("--write-quota-mb", type=int)
_update.add_argument("--scan-quota-mb", type=int)
_update_perms = service.add_parser(
"update-permissions",
help="Add/remove service permissions for tables",
)
_update_perms.set_defaults(func=async_cmd(update_permissions))
_update_perms.add_argument("service_path", type=str, help=service_path_help)
_update_perms.add_argument("table_path", type=str, help=table_path_help)
_update_perms.add_argument("--read", type=str2bool, nargs="?", const=True, default=None)
_update_perms.add_argument("--write", type=str2bool, nargs="?", const=True, default=None)
_delete = service.add_parser("delete", help="Delete a service")
_delete.set_defaults(func=async_cmd(delete))
_delete.add_argument("service_path", type=str, help=service_path_help)
_issue_secret = service.add_parser("issue-secret", help="Issue a new service secret")
_issue_secret.set_defaults(func=async_cmd(issue_secret))
_issue_secret.add_argument("service_path", type=str, help=service_path_help)
_issue_secret.add_argument("--description", type=str)
_list_secrets = service.add_parser("list-secrets", help="List active secrets")
_list_secrets.set_defaults(func=async_cmd(list_secrets))
_list_secrets.add_argument("service_path", type=str, help=service_path_help)
_revoke_secret = service.add_parser("revoke-secret", help="Revoke a service secret")
_revoke_secret.set_defaults(func=async_cmd(revoke_secret))
_revoke_secret.add_argument("secret_id", type=str)
async def show_list(args):
client = Client()
pq = ProjectIdentifier.from_path(args.project_path)
proj = await client.admin.projects.find_by_organization_and_name(pq.organization, pq.project)
services = proj["services"]
if (services is None) or len(services) == 0:
print("No services found in project")
return
for service in services:
pretty_print_graphql_result(service)
async def create(args):
client = Client()
seq = ServiceIdentifier.from_path(args.service_path)
result = await client.admin.services.create(
organization_name=seq.organization,
project_name=seq.project,
service_name=seq.service,
description=args.description,
source_url=args.source_url,
read_quota_bytes=mb_to_bytes(args.read_quota_mb),
write_quota_bytes=mb_to_bytes(args.write_quota_mb),
scan_quota_bytes=mb_to_bytes(args.scan_quota_mb),
)
pretty_print_graphql_result(result)
async def update(args):
client = Client()
seq = ServiceIdentifier.from_path(args.service_path)
result = await client.admin.services.update(
organization_name=seq.organization,
project_name=seq.project,
service_name=seq.service,
description=args.description,
source_url=args.source_url,
read_quota_bytes=mb_to_bytes(args.read_quota_mb),
write_quota_bytes=mb_to_bytes(args.write_quota_mb),
scan_quota_bytes=mb_to_bytes(args.scan_quota_mb),
)
pretty_print_graphql_result(result)
async def update_permissions(args):
client = Client()
seq = ServiceIdentifier.from_path(args.service_path)
service = await client.admin.services.find_by_organization_project_and_name(
organization_name=seq.organization,
project_name=seq.project,
service_name=seq.service,
)
stq = TableIdentifier.from_path(args.table_path)
table = await client.admin.tables.find_by_organization_project_and_name(
organization_name=stq.organization,
project_name=stq.project,
table_name=stq.table,
)
result = await client.admin.services.update_permissions_for_table(
service_id=service["serviceID"],
table_id=table["tableID"],
read=args.read,
write=args.write,
)
pretty_print_graphql_result(result)
async def delete(args):
client = Client()
seq = ServiceIdentifier.from_path(args.service_path)
service = await client.admin.services.find_by_organization_project_and_name(
organization_name=seq.organization,
project_name=seq.project,
service_name=seq.service,
)
result = await client.admin.services.delete(service_id=service["serviceID"])
pretty_print_graphql_result(result)
async def issue_secret(args):
client = Client()
seq = ServiceIdentifier.from_path(args.service_path)
service = await client.admin.services.find_by_organization_project_and_name(
organization_name=seq.organization,
project_name=seq.project,
service_name=seq.service,
)
result = await client.admin.services.issue_secret(
service_id=service["serviceID"],
description=args.description
if args.description is not None
else "Command-line issued secret",
)
print(
f"Keep your secret string safe. "
f"You won't be able to see it again.\nSecret: {result['token']}"
)
async def list_secrets(args):
client = Client()
seq = ServiceIdentifier.from_path(args.service_path)
service = await client.admin.services.find_by_organization_project_and_name(
organization_name=seq.organization,
project_name=seq.project,
service_name=seq.service,
)
result = await client.admin.services.list_secrets(service_id=service["serviceID"])
pretty_print_graphql_result(result)
async def revoke_secret(args):
client = Client()
result = await client.admin.secrets.revoke_service_secret(secret_id=args.secret_id)
pretty_print_graphql_result(result)
|
[
"beneath.utils.ProjectIdentifier.from_path",
"beneath.cli.utils.async_cmd",
"beneath.client.Client",
"beneath.utils.TableIdentifier.from_path",
"beneath.cli.utils.pretty_print_graphql_result",
"beneath.cli.utils.mb_to_bytes",
"beneath.utils.ServiceIdentifier.from_path"
] |
[((3249, 3257), 'beneath.client.Client', 'Client', ([], {}), '()\n', (3255, 3257), False, 'from beneath.client import Client\n'), ((3267, 3313), 'beneath.utils.ProjectIdentifier.from_path', 'ProjectIdentifier.from_path', (['args.project_path'], {}), '(args.project_path)\n', (3294, 3313), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((3667, 3675), 'beneath.client.Client', 'Client', ([], {}), '()\n', (3673, 3675), False, 'from beneath.client import Client\n'), ((3686, 3732), 'beneath.utils.ServiceIdentifier.from_path', 'ServiceIdentifier.from_path', (['args.service_path'], {}), '(args.service_path)\n', (3713, 3732), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((4154, 4189), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['result'], {}), '(result)\n', (4181, 4189), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4229, 4237), 'beneath.client.Client', 'Client', ([], {}), '()\n', (4235, 4237), False, 'from beneath.client import Client\n'), ((4248, 4294), 'beneath.utils.ServiceIdentifier.from_path', 'ServiceIdentifier.from_path', (['args.service_path'], {}), '(args.service_path)\n', (4275, 4294), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((4716, 4751), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['result'], {}), '(result)\n', (4743, 4751), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4803, 4811), 'beneath.client.Client', 'Client', ([], {}), '()\n', (4809, 4811), False, 'from beneath.client import Client\n'), ((4822, 4868), 'beneath.utils.ServiceIdentifier.from_path', 'ServiceIdentifier.from_path', (['args.service_path'], {}), '(args.service_path)\n', (4849, 4868), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((5078, 5120), 'beneath.utils.TableIdentifier.from_path', 'TableIdentifier.from_path', (['args.table_path'], {}), '(args.table_path)\n', (5103, 5120), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((5519, 5554), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['result'], {}), '(result)\n', (5546, 5554), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((5594, 5602), 'beneath.client.Client', 'Client', ([], {}), '()\n', (5600, 5602), False, 'from beneath.client import Client\n'), ((5613, 5659), 'beneath.utils.ServiceIdentifier.from_path', 'ServiceIdentifier.from_path', (['args.service_path'], {}), '(args.service_path)\n', (5640, 5659), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((5944, 5979), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['result'], {}), '(result)\n', (5971, 5979), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((6025, 6033), 'beneath.client.Client', 'Client', ([], {}), '()\n', (6031, 6033), False, 'from beneath.client import Client\n'), ((6044, 6090), 'beneath.utils.ServiceIdentifier.from_path', 'ServiceIdentifier.from_path', (['args.service_path'], {}), '(args.service_path)\n', (6071, 6090), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((6689, 6697), 'beneath.client.Client', 'Client', ([], {}), '()\n', (6695, 6697), False, 'from beneath.client import Client\n'), ((6708, 6754), 'beneath.utils.ServiceIdentifier.from_path', 'ServiceIdentifier.from_path', (['args.service_path'], {}), '(args.service_path)\n', (6735, 6754), False, 'from beneath.utils import ProjectIdentifier, ServiceIdentifier, TableIdentifier\n'), ((7045, 7080), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['result'], {}), '(result)\n', (7072, 7080), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((7127, 7135), 'beneath.client.Client', 'Client', ([], {}), '()\n', (7133, 7135), False, 'from beneath.client import Client\n'), ((7228, 7263), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['result'], {}), '(result)\n', (7255, 7263), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((3591, 3627), 'beneath.cli.utils.pretty_print_graphql_result', 'pretty_print_graphql_result', (['service'], {}), '(service)\n', (3618, 3627), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((725, 745), 'beneath.cli.utils.async_cmd', 'async_cmd', (['show_list'], {}), '(show_list)\n', (734, 745), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((923, 940), 'beneath.cli.utils.async_cmd', 'async_cmd', (['create'], {}), '(create)\n', (932, 940), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((1382, 1399), 'beneath.cli.utils.async_cmd', 'async_cmd', (['update'], {}), '(update)\n', (1391, 1399), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((1913, 1942), 'beneath.cli.utils.async_cmd', 'async_cmd', (['update_permissions'], {}), '(update_permissions)\n', (1922, 1942), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((2388, 2405), 'beneath.cli.utils.async_cmd', 'async_cmd', (['delete'], {}), '(delete)\n', (2397, 2405), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((2609, 2632), 'beneath.cli.utils.async_cmd', 'async_cmd', (['issue_secret'], {}), '(issue_secret)\n', (2618, 2632), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((2893, 2916), 'beneath.cli.utils.async_cmd', 'async_cmd', (['list_secrets'], {}), '(list_secrets)\n', (2902, 2916), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((3126, 3150), 'beneath.cli.utils.async_cmd', 'async_cmd', (['revoke_secret'], {}), '(revoke_secret)\n', (3135, 3150), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((3993, 4024), 'beneath.cli.utils.mb_to_bytes', 'mb_to_bytes', (['args.read_quota_mb'], {}), '(args.read_quota_mb)\n', (4004, 4024), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4052, 4084), 'beneath.cli.utils.mb_to_bytes', 'mb_to_bytes', (['args.write_quota_mb'], {}), '(args.write_quota_mb)\n', (4063, 4084), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4111, 4142), 'beneath.cli.utils.mb_to_bytes', 'mb_to_bytes', (['args.scan_quota_mb'], {}), '(args.scan_quota_mb)\n', (4122, 4142), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4555, 4586), 'beneath.cli.utils.mb_to_bytes', 'mb_to_bytes', (['args.read_quota_mb'], {}), '(args.read_quota_mb)\n', (4566, 4586), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4614, 4646), 'beneath.cli.utils.mb_to_bytes', 'mb_to_bytes', (['args.write_quota_mb'], {}), '(args.write_quota_mb)\n', (4625, 4646), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n'), ((4673, 4704), 'beneath.cli.utils.mb_to_bytes', 'mb_to_bytes', (['args.scan_quota_mb'], {}), '(args.scan_quota_mb)\n', (4684, 4704), False, 'from beneath.cli.utils import async_cmd, mb_to_bytes, pretty_print_graphql_result, str2bool, project_path_help, service_path_help, table_path_help\n')]
|
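For orientation, a minimal sketch of mounting the add_subparser hook from the row above onto an argparse root. The program name and the args.func(args) dispatch are read off the set_defaults(func=async_cmd(...)) calls; neither is stated in the dump itself:

import argparse

parser = argparse.ArgumentParser(prog='beneath')  # prog name is an assumption
add_subparser(parser.add_subparsers())
# e.g. `beneath service list my-org/my-project`
args = parser.parse_args(['service', 'list', 'my-org/my-project'])
args.func(args)  # async_cmd presumably wraps each coroutine into a plain callable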
import json
import unittest
from alerta.app import create_app, db, key_helper
from alerta.models.enums import Scope
from alerta.models.key import ApiKey
from alerta.models.permission import Permission
class ScopesTestCase(unittest.TestCase):
def setUp(self):
test_config = {
'TESTING': True,
'AUTH_REQUIRED': True,
'ADMIN_USERS': ['<EMAIL>', '<EMAIL>'],
'DEFAULT_ADMIN_ROLE': 'ops',
'ADMIN_ROLES': ['ops', 'devops'],
'DEFAULT_USER_ROLE': 'dev',
'USER_ROLES': ['dev']
}
self.app = create_app(test_config, environment='development')
self.client = self.app.test_client()
def make_key(user, scopes=None, type=None, text=''):
api_key = ApiKey(
user=user,
scopes=scopes,
type=type,
text=text
)
return api_key.create().key
with self.app.test_request_context('/'):
self.app.preprocess_request()
self.api_keys_scopes = dict()
self.api_keys_scopes['read-only'] = make_key('<EMAIL>', scopes=['read'], type=None, text='read-only')
self.api_keys_scopes['read-write'] = make_key('<EMAIL>',
scopes=['read', 'write'], type=None, text='read-write')
self.api_keys_scopes['admin'] = make_key(
'<EMAIL>', scopes=['read', 'write', 'admin'], type=None, text='admin')
# self.api_keys_types = dict()
# self.api_keys_types['read-only'] = make_key('<EMAIL>', scopes=None, type='read-only', text='read-only')
# self.api_keys_types['read-write'] = make_key('<EMAIL>', scopes=None, type='read-write', text='read-write')
# self.api_keys_types['admin'] = make_key('<EMAIL>', scopes=None, type='read-write', text='admin')
def tearDown(self):
db.destroy()
def test_scopes(self):
response = self.client.get('/keys', headers={'Authorization': 'Key %s' % self.api_keys_scopes['read-only']})
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
for key in data['keys']:
self.assertEqual(self.api_keys_scopes[key['text']], key['key'])
if key['text'] == 'admin':
self.assertEqual('read-write', key['type'])
else:
self.assertEqual(key['text'], key['type'])
self.assertEqual(sorted(key_helper.type_to_scopes(key['user'], key['text'])), sorted(key['scopes']))
# def test_types(self):
# #FIXME
# response = self.client.get('/keys')
# self.assertEqual(response.status_code, 200)
# data = json.loads(response.data.decode('utf-8'))
# for key in data['keys']:
# self.assertEqual(self.api_keys_types[key['text']], key['key'])
# if key['text'] == 'admin':
# self.assertEqual('read-write', key['type'])
# else:
# self.assertEqual(key['text'], key['type'])
# self.assertEqual(sorted(key_helper.type_to_scopes(key['user'], key['text'])), sorted(key['scopes']))
def test_is_in_scope(self):
self.assertEqual(Permission.is_in_scope(Scope.read_customers, [Scope.read]), True)
self.assertEqual(Permission.is_in_scope(Scope.read_customers, [Scope.write]), True)
self.assertEqual(Permission.is_in_scope(Scope.read_customers, [Scope.admin]), True)
self.assertEqual(Permission.is_in_scope(Scope.read_heartbeats, [Scope.read_alerts]), False)
self.assertEqual(Permission.is_in_scope(Scope.read_heartbeats, [Scope.write_alerts]), False)
self.assertEqual(Permission.is_in_scope(Scope.read_heartbeats, [Scope.admin_alerts]), False)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.read]), False)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.read_blackouts, Scope.read]), False)
self.assertEqual(
Permission.is_in_scope(Scope.write_blackouts, [Scope.read_blackouts, Scope.write_blackouts]), True)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.write_blackouts]), True)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.read_blackouts, Scope.write]), True)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.read_blackouts, Scope.admin]), True)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.read, Scope.write_keys]), False)
self.assertEqual(Permission.is_in_scope(Scope.write_blackouts, [Scope.read, Scope.admin_keys]), False)
self.assertEqual(Permission.is_in_scope(Scope.admin, [Scope.write]), False)
self.assertEqual(Permission.is_in_scope(Scope.admin, [Scope.read, Scope.write, Scope.admin]), True)
self.assertEqual(Permission.is_in_scope(Scope.read_heartbeats, [Scope.write]), True)
def test_edit_perms(self):
headers = {
'Authorization': 'Key %s' % self.api_keys_scopes['admin'],
'Content-type': 'application/json'
}
# add permsission
payload = {
'scopes': [Scope.read],
'match': 'read-only'
}
response = self.client.post('/perm', data=json.dumps(payload),
content_type='application/json', headers=headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
perm_id = data['id']
# change scopes
update = {
'scopes': [Scope.write, Scope.read]
}
response = self.client.put('/perm/' + perm_id, data=json.dumps(update), headers=headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['status'], 'ok')
# try invalid scopes
update = {
'scopes': ['foo:bar']
}
response = self.client.put('/perm/' + perm_id, data=json.dumps(update), headers=headers)
self.assertEqual(response.status_code, 400)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['message'], "'foo:bar' is not a valid Scope")
# change perm
update = {
'match': 'read-write'
}
response = self.client.put('/perm/' + perm_id, data=json.dumps(update), headers=headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['status'], 'ok')
# check updates worked
response = self.client.get('/perm/' + perm_id, headers=headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['permission']['scopes'], [Scope.write, Scope.read])
self.assertEqual(data['permission']['match'], 'read-write')
def test_system_roles(self):
login = '<EMAIL>'
roles = ['no-ops', 'team']
with self.app.test_request_context():
scopes = Permission.lookup(login, roles)
self.assertEqual(scopes, [Scope.admin, Scope.read, Scope.write])
login = 'dev_who_wants_to_<EMAIL>'
roles = ['web', 'ops']
with self.app.test_request_context():
scopes = Permission.lookup(login, roles)
self.assertEqual(scopes, [Scope.admin, Scope.read, Scope.write])
login = '<EMAIL>'
roles = ['dev', 'engineer']
with self.app.test_request_context():
scopes = Permission.lookup(login, roles)
self.assertEqual(scopes, [Scope.read, Scope.write])
login = '<EMAIL>'
roles = ['guest']
with self.app.test_request_context():
scopes = Permission.lookup(login, roles)
self.assertEqual(scopes, [Scope.read_alerts])
|
[
"alerta.models.permission.Permission.lookup",
"alerta.models.key.ApiKey",
"json.dumps",
"alerta.models.permission.Permission.is_in_scope",
"alerta.app.key_helper.type_to_scopes",
"alerta.app.db.destroy",
"alerta.app.create_app"
] |
[((597, 647), 'alerta.app.create_app', 'create_app', (['test_config'], {'environment': '"""development"""'}), "(test_config, environment='development')\n", (607, 647), False, 'from alerta.app import create_app, db, key_helper\n'), ((1949, 1961), 'alerta.app.db.destroy', 'db.destroy', ([], {}), '()\n', (1959, 1961), False, 'from alerta.app import create_app, db, key_helper\n'), ((777, 831), 'alerta.models.key.ApiKey', 'ApiKey', ([], {'user': 'user', 'scopes': 'scopes', 'type': 'type', 'text': 'text'}), '(user=user, scopes=scopes, type=type, text=text)\n', (783, 831), False, 'from alerta.models.key import ApiKey\n'), ((3290, 3348), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_customers', '[Scope.read]'], {}), '(Scope.read_customers, [Scope.read])\n', (3312, 3348), False, 'from alerta.models.permission import Permission\n'), ((3381, 3440), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_customers', '[Scope.write]'], {}), '(Scope.read_customers, [Scope.write])\n', (3403, 3440), False, 'from alerta.models.permission import Permission\n'), ((3473, 3532), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_customers', '[Scope.admin]'], {}), '(Scope.read_customers, [Scope.admin])\n', (3495, 3532), False, 'from alerta.models.permission import Permission\n'), ((3566, 3632), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_heartbeats', '[Scope.read_alerts]'], {}), '(Scope.read_heartbeats, [Scope.read_alerts])\n', (3588, 3632), False, 'from alerta.models.permission import Permission\n'), ((3666, 3733), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_heartbeats', '[Scope.write_alerts]'], {}), '(Scope.read_heartbeats, [Scope.write_alerts])\n', (3688, 3733), False, 'from alerta.models.permission import Permission\n'), ((3767, 3834), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_heartbeats', '[Scope.admin_alerts]'], {}), '(Scope.read_heartbeats, [Scope.admin_alerts])\n', (3789, 3834), False, 'from alerta.models.permission import Permission\n'), ((3869, 3928), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read]'], {}), '(Scope.write_blackouts, [Scope.read])\n', (3891, 3928), False, 'from alerta.models.permission import Permission\n'), ((3962, 4048), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read_blackouts, Scope.read]'], {}), '(Scope.write_blackouts, [Scope.read_blackouts, Scope.\n read])\n', (3984, 4048), False, 'from alerta.models.permission import Permission\n'), ((4090, 4187), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read_blackouts, Scope.write_blackouts]'], {}), '(Scope.write_blackouts, [Scope.read_blackouts, Scope.\n write_blackouts])\n', (4112, 4187), False, 'from alerta.models.permission import Permission\n'), ((4215, 4285), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.write_blackouts]'], {}), '(Scope.write_blackouts, [Scope.write_blackouts])\n', (4237, 4285), False, 'from alerta.models.permission import Permission\n'), ((4318, 4405), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read_blackouts, Scope.write]'], {}), '(Scope.write_blackouts, [Scope.read_blackouts, Scope.\n write])\n', (4340, 4405), False, 'from alerta.models.permission import Permission\n'), ((4433, 4520), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read_blackouts, Scope.admin]'], {}), '(Scope.write_blackouts, [Scope.read_blackouts, Scope.\n admin])\n', (4455, 4520), False, 'from alerta.models.permission import Permission\n'), ((4548, 4625), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read, Scope.write_keys]'], {}), '(Scope.write_blackouts, [Scope.read, Scope.write_keys])\n', (4570, 4625), False, 'from alerta.models.permission import Permission\n'), ((4659, 4736), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.write_blackouts', '[Scope.read, Scope.admin_keys]'], {}), '(Scope.write_blackouts, [Scope.read, Scope.admin_keys])\n', (4681, 4736), False, 'from alerta.models.permission import Permission\n'), ((4771, 4821), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.admin', '[Scope.write]'], {}), '(Scope.admin, [Scope.write])\n', (4793, 4821), False, 'from alerta.models.permission import Permission\n'), ((4855, 4930), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.admin', '[Scope.read, Scope.write, Scope.admin]'], {}), '(Scope.admin, [Scope.read, Scope.write, Scope.admin])\n', (4877, 4930), False, 'from alerta.models.permission import Permission\n'), ((4963, 5023), 'alerta.models.permission.Permission.is_in_scope', 'Permission.is_in_scope', (['Scope.read_heartbeats', '[Scope.write]'], {}), '(Scope.read_heartbeats, [Scope.write])\n', (4985, 5023), False, 'from alerta.models.permission import Permission\n'), ((7230, 7261), 'alerta.models.permission.Permission.lookup', 'Permission.lookup', (['login', 'roles'], {}), '(login, roles)\n', (7247, 7261), False, 'from alerta.models.permission import Permission\n'), ((7478, 7509), 'alerta.models.permission.Permission.lookup', 'Permission.lookup', (['login', 'roles'], {}), '(login, roles)\n', (7495, 7509), False, 'from alerta.models.permission import Permission\n'), ((7714, 7745), 'alerta.models.permission.Permission.lookup', 'Permission.lookup', (['login', 'roles'], {}), '(login, roles)\n', (7731, 7745), False, 'from alerta.models.permission import Permission\n'), ((7927, 7958), 'alerta.models.permission.Permission.lookup', 'Permission.lookup', (['login', 'roles'], {}), '(login, roles)\n', (7944, 7958), False, 'from alerta.models.permission import Permission\n'), ((5388, 5407), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (5398, 5407), False, 'import json\n'), ((5796, 5814), 'json.dumps', 'json.dumps', (['update'], {}), '(update)\n', (5806, 5814), False, 'import json\n'), ((6142, 6160), 'json.dumps', 'json.dumps', (['update'], {}), '(update)\n', (6152, 6160), False, 'import json\n'), ((6510, 6528), 'json.dumps', 'json.dumps', (['update'], {}), '(update)\n', (6520, 6528), False, 'import json\n'), ((2538, 2589), 'alerta.app.key_helper.type_to_scopes', 'key_helper.type_to_scopes', (["key['user']", "key['text']"], {}), "(key['user'], key['text'])\n", (2563, 2589), False, 'from alerta.app import create_app, db, key_helper\n')]
|
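Read outside the unittest harness, the assertions above encode a scope hierarchy: admin satisfies write, write satisfies read, and a broad scope satisfies its resource-qualified variants, while scopes qualified for different resources never satisfy each other. The cases below are lifted directly from the test:

from alerta.models.enums import Scope
from alerta.models.permission import Permission

# a broad scope satisfies a resource-qualified one
assert Permission.is_in_scope(Scope.read_customers, [Scope.admin])
assert Permission.is_in_scope(Scope.read_heartbeats, [Scope.write])
# but a scope qualified for one resource says nothing about another
assert not Permission.is_in_scope(Scope.read_heartbeats, [Scope.read_alerts])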
'''
Implementation of SQLAlchemy backend.
'''
import sys
import threading
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils
from sqlalchemy.orm import joinedload_all
from playnetmano_rm.common import exceptions as exception
from playnetmano_rm.common.i18n import _
from playnetmano_rm.db.sqlalchemy import migration
from playnetmano_rm.db.sqlalchemy import models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_facade = None
_main_context_manager = None
_CONTEXT = threading.local()
def _get_main_context_manager():
global _main_context_manager
if not _main_context_manager:
_main_context_manager = enginefacade.transaction_context()
return _main_context_manager
def get_engine():
return _get_main_context_manager().get_legacy_facade().get_engine()
def get_session():
return _get_main_context_manager().get_legacy_facade().get_session()
def read_session():
return _get_main_context_manager().reader.using(_CONTEXT)
def write_session():
return _get_main_context_manager().writer.using(_CONTEXT)
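# Note: both helpers above hand enginefacade the same thread-local _CONTEXT,
# so nested read_session()/write_session() calls on one thread should join
# the ongoing transaction rather than open independent sessions.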
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def model_query(context, *args):
with read_session() as session:
query = session.query(*args).options(joinedload_all('*'))
return query
def _session(context):
return get_session()
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
LOG.warning(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user or not context.project:
return False
return True
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
###################
@require_context
def _quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota). \
filter_by(project_id=project_id). \
filter_by(resource=resource). \
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get(context, project_id, resource):
return _quota_get(context, project_id, resource)
@require_context
def quota_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota). \
filter_by(project_id=project_id). \
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_create(context, project_id, resource, limit):
with write_session() as session:
quota_ref = models.Quota()
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
session.add(quota_ref)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit):
with write_session() as session:
quota_ref = _quota_get(context, project_id, resource, session=session)
if not quota_ref:
raise exception.ProjectQuotaNotFound(project_id=project_id)
quota_ref.hard_limit = limit
quota_ref.save(session)
return quota_ref
@require_admin_context
def quota_destroy(context, project_id, resource):
with write_session() as session:
quota_ref = _quota_get(context, project_id, resource, session=session)
if not quota_ref:
raise exception.ProjectQuotaNotFound(project_id=project_id)
session.delete(quota_ref)
@require_admin_context
def quota_destroy_all(context, project_id):
with write_session() as session:
quotas = model_query(context, models.Quota). \
filter_by(project_id=project_id). \
all()
if not quotas:
raise exception.ProjectQuotaNotFound(project_id=project_id)
for quota_ref in quotas:
session.delete(quota_ref)
##########################
@require_context
def _quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass). \
filter_by(deleted=False). \
filter_by(class_name=class_name). \
filter_by(resource=resource). \
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@require_context
def quota_class_get(context, class_name, resource):
return _quota_class_get(context, class_name, resource)
@require_context
def quota_class_get_default(context):
return quota_class_get_all_by_name(context, _DEFAULT_QUOTA_NAME)
@require_context
def quota_class_get_all_by_name(context, class_name):
rows = model_query(context, models.QuotaClass). \
filter_by(deleted=False). \
filter_by(class_name=class_name). \
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
with write_session() as session:
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
session.add(quota_class_ref)
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
with write_session() as session:
quota_class_ref = session.query(models.QuotaClass). \
filter_by(deleted=False). \
filter_by(class_name=class_name). \
filter_by(resource=resource).first()
if not quota_class_ref:
raise exception.QuotaClassNotFound(class_name=class_name)
quota_class_ref.hard_limit = limit
quota_class_ref.save(session)
return quota_class_ref
@require_admin_context
def quota_class_destroy(context, class_name, resource):
with write_session() as session:
quota_class_ref = _quota_class_get(context, class_name, resource)
session.delete(quota_class_ref)
@require_admin_context
def quota_class_destroy_all(context, class_name):
with write_session() as session:
quota_classes = session.query(models.QuotaClass). \
filter_by(deleted=False). \
filter_by(class_name=class_name). \
all()
for quota_class_ref in quota_classes:
session.delete(quota_class_ref)
def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version."""
return migration.db_sync(engine, version=version)
def db_version(engine):
"""Display the current database version."""
return migration.db_version(engine)
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
retry_interval=0.5, inc_retry_interval=True)
def sync_lock_acquire(context, engine_id, task_type):
with write_session() as session:
lock = session.query(models.SyncLock). \
filter_by(task_type=task_type).all()
if not lock:
lock_ref = models.SyncLock()
lock_ref.engine_id = engine_id
lock_ref.timer_lock = "Lock Acquired for EngineId: " + engine_id
lock_ref.task_type = task_type
session.add(lock_ref)
return True
return False
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
retry_interval=0.5, inc_retry_interval=True)
def sync_lock_release(context, task_type):
with write_session() as session:
locks = session.query(models.SyncLock). \
filter_by(task_type=task_type).all()
for lock in locks:
session.delete(lock)
def sync_lock_steal(context, engine_id, task_type):
sync_lock_release(context, task_type)
return sync_lock_acquire(context, engine_id, task_type)
def service_create(context, service_id, host=None, binary=None,
topic=None):
with write_session() as session:
time_now = timeutils.utcnow()
svc = models.Service(id=service_id,
host=host,
binary=binary,
topic=topic,
created_at=time_now,
updated_at=time_now)
session.add(svc)
return svc
def service_update(context, service_id, values=None):
with write_session() as session:
service = session.query(models.Service).get(service_id)
if not service:
return
if values is None:
values = {}
values.update({'updated_at': timeutils.utcnow()})
service.update(values)
service.save(session)
return service
def service_delete(context, service_id):
with write_session() as session:
session.query(models.Service).filter_by(
id=service_id).delete(synchronize_session='fetch')
# Remove all engine locks
locks = session.query(models.SyncLock). \
filter_by(engine_id=service_id).all()
for lock in locks:
session.delete(lock)
def service_get(context, service_id):
return model_query(context, models.Service).get(service_id)
def service_get_all(context):
return model_query(context, models.Service).all()
|
[
"playnetmano_rm.db.sqlalchemy.migration.db_sync",
"playnetmano_rm.common.exceptions.QuotaClassNotFound",
"oslo_db.api.wrap_db_retry",
"oslo_log.log.getLogger",
"oslo_utils.timeutils.utcnow",
"sqlalchemy.orm.joinedload_all",
"oslo_db.sqlalchemy.enginefacade.transaction_context",
"playnetmano_rm.common.exceptions.AdminRequired",
"threading.local",
"playnetmano_rm.db.sqlalchemy.models.Quota",
"playnetmano_rm.db.sqlalchemy.models.Service",
"playnetmano_rm.common.i18n._",
"playnetmano_rm.common.exceptions.ProjectQuotaNotFound",
"playnetmano_rm.db.sqlalchemy.migration.db_version",
"playnetmano_rm.db.sqlalchemy.models.QuotaClass",
"playnetmano_rm.common.exceptions.NotAuthorized",
"playnetmano_rm.db.sqlalchemy.models.SyncLock"
] |
[((506, 533), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (523, 533), True, 'from oslo_log import log as logging\n'), ((608, 625), 'threading.local', 'threading.local', ([], {}), '()\n', (623, 625), False, 'import threading\n'), ((7938, 8051), 'oslo_db.api.wrap_db_retry', 'oslo_db_api.wrap_db_retry', ([], {'max_retries': '(3)', 'retry_on_deadlock': '(True)', 'retry_interval': '(0.5)', 'inc_retry_interval': '(True)'}), '(max_retries=3, retry_on_deadlock=True,\n retry_interval=0.5, inc_retry_interval=True)\n', (7963, 8051), True, 'from oslo_db import api as oslo_db_api\n'), ((8567, 8680), 'oslo_db.api.wrap_db_retry', 'oslo_db_api.wrap_db_retry', ([], {'max_retries': '(3)', 'retry_on_deadlock': '(True)', 'retry_interval': '(0.5)', 'inc_retry_interval': '(True)'}), '(max_retries=3, retry_on_deadlock=True,\n retry_interval=0.5, inc_retry_interval=True)\n', (8592, 8680), True, 'from oslo_db import api as oslo_db_api\n'), ((7778, 7820), 'playnetmano_rm.db.sqlalchemy.migration.db_sync', 'migration.db_sync', (['engine'], {'version': 'version'}), '(engine, version=version)\n', (7795, 7820), False, 'from playnetmano_rm.db.sqlalchemy import migration\n'), ((7906, 7934), 'playnetmano_rm.db.sqlalchemy.migration.db_version', 'migration.db_version', (['engine'], {}), '(engine)\n', (7926, 7934), False, 'from playnetmano_rm.db.sqlalchemy import migration\n'), ((760, 794), 'oslo_db.sqlalchemy.enginefacade.transaction_context', 'enginefacade.transaction_context', ([], {}), '()\n', (792, 794), False, 'from oslo_db.sqlalchemy import enginefacade\n'), ((3220, 3273), 'playnetmano_rm.common.exceptions.ProjectQuotaNotFound', 'exception.ProjectQuotaNotFound', ([], {'project_id': 'project_id'}), '(project_id=project_id)\n', (3250, 3273), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((3851, 3865), 'playnetmano_rm.db.sqlalchemy.models.Quota', 'models.Quota', ([], {}), '()\n', (3863, 3865), False, 'from playnetmano_rm.db.sqlalchemy import models\n'), ((5473, 5524), 'playnetmano_rm.common.exceptions.QuotaClassNotFound', 'exception.QuotaClassNotFound', ([], {'class_name': 'class_name'}), '(class_name=class_name)\n', (5501, 5524), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((6298, 6317), 'playnetmano_rm.db.sqlalchemy.models.QuotaClass', 'models.QuotaClass', ([], {}), '()\n', (6315, 6317), False, 'from playnetmano_rm.db.sqlalchemy import models\n'), ((9253, 9271), 'oslo_utils.timeutils.utcnow', 'timeutils.utcnow', ([], {}), '()\n', (9269, 9271), False, 'from oslo_utils import timeutils\n'), ((9286, 9400), 'playnetmano_rm.db.sqlalchemy.models.Service', 'models.Service', ([], {'id': 'service_id', 'host': 'host', 'binary': 'binary', 'topic': 'topic', 'created_at': 'time_now', 'updated_at': 'time_now'}), '(id=service_id, host=host, binary=binary, topic=topic,\n created_at=time_now, updated_at=time_now)\n', (9300, 9400), False, 'from playnetmano_rm.db.sqlalchemy import models\n'), ((1432, 1451), 'sqlalchemy.orm.joinedload_all', 'joinedload_all', (['"""*"""'], {}), "('*')\n", (1446, 1451), False, 'from sqlalchemy.orm import joinedload_all\n'), ((1661, 1708), 'playnetmano_rm.common.i18n._', '_', (['"""Use of empty request context is deprecated"""'], {}), "('Use of empty request context is deprecated')\n", (1662, 1708), False, 'from playnetmano_rm.common.i18n import _\n'), ((2329, 2354), 'playnetmano_rm.common.exceptions.AdminRequired', 'exception.AdminRequired', ([], {}), '()\n', (2352, 2354), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((2853, 2878), 'playnetmano_rm.common.exceptions.NotAuthorized', 'exception.NotAuthorized', ([], {}), '()\n', (2876, 2878), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((4280, 4333), 'playnetmano_rm.common.exceptions.ProjectQuotaNotFound', 'exception.ProjectQuotaNotFound', ([], {'project_id': 'project_id'}), '(project_id=project_id)\n', (4310, 4333), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((4663, 4716), 'playnetmano_rm.common.exceptions.ProjectQuotaNotFound', 'exception.ProjectQuotaNotFound', ([], {'project_id': 'project_id'}), '(project_id=project_id)\n', (4693, 4716), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((5021, 5074), 'playnetmano_rm.common.exceptions.ProjectQuotaNotFound', 'exception.ProjectQuotaNotFound', ([], {'project_id': 'project_id'}), '(project_id=project_id)\n', (5051, 5074), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((6894, 6945), 'playnetmano_rm.common.exceptions.QuotaClassNotFound', 'exception.QuotaClassNotFound', ([], {'class_name': 'class_name'}), '(class_name=class_name)\n', (6922, 6945), True, 'from playnetmano_rm.common import exceptions as exception\n'), ((8308, 8325), 'playnetmano_rm.db.sqlalchemy.models.SyncLock', 'models.SyncLock', ([], {}), '()\n', (8323, 8325), False, 'from playnetmano_rm.db.sqlalchemy import models\n'), ((9876, 9894), 'oslo_utils.timeutils.utcnow', 'timeutils.utcnow', ([], {}), '()\n', (9892, 9894), False, 'from oslo_utils import timeutils\n')]
|
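A minimal usage sketch for the quota helpers in the row above. The context object and resource name are invented for illustration; require_admin_context only checks that ctx.is_admin is truthy:

# hypothetical admin request context `ctx` and made-up resource name
quota_create(ctx, project_id='demo-project', resource='subclouds', limit=10)
print(quota_get_all_by_project(ctx, 'demo-project'))
# -> {'project_id': 'demo-project', 'subclouds': 10}
quota_destroy_all(ctx, 'demo-project')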
import logging
from typing import Dict, Text, Any, List, Union, Optional
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction, REQUESTED_SLOT
from rasa_sdk.events import AllSlotsReset, SlotSet, EventType
from actions.snow import SnowAPI
import random
logger = logging.getLogger(__name__)
vers = "vers: 0.1.0, date: Apr 2, 2020"
logger.debug(vers)
snow = SnowAPI()
localmode = snow.localmode
logger.debug(f"Local mode: {snow.localmode}")
def custom_request_next_slot(
form,
dispatcher: "CollectingDispatcher",
tracker: "Tracker",
domain: Dict[Text, Any],
) -> Optional[List[EventType]]:
"""Request the next slot and utter template if needed,
else return None"""
for slot in form.required_slots(tracker):
if form._should_request_slot(tracker, slot):
logger.debug(f"Request next slot '{slot}'")
if slot == "email" and tracker.get_slot("previous_email"):
dispatcher.utter_message(
template=f"utter_ask_use_previous_email_{slot}",
**tracker.slots,
)
else:
dispatcher.utter_message(
template=f"utter_ask_{slot}", **tracker.slots
)
return [SlotSet(REQUESTED_SLOT, slot)]
return None
def _validate_email(
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate email is in ticket system."""
if not value:
return {"email": None, "previous_email": None}
elif isinstance(value, bool):
value = tracker.get_slot("previous_email")
if localmode:
return {"email": value}
results = snow.email_to_sysid(value)
caller_id = results.get("caller_id")
if caller_id:
return {"email": value, "caller_id": caller_id}
elif isinstance(caller_id, list):
dispatcher.utter_message(template="utter_no_email")
return {"email": None}
else:
dispatcher.utter_message(results.get("error"))
return {"email": None}
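# Illustrative outcomes of the branches above (not exercised in this snippet):
# an empty value clears both email slots; a boolean True re-uses the
# 'previous_email' slot; in local mode any remaining value is accepted without
# a ServiceNow lookup; otherwise the address must resolve to a caller_id.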
class OpenIncidentForm(FormAction):
def name(self) -> Text:
return "open_incident_form"
def request_next_slot(
self,
dispatcher: "CollectingDispatcher",
tracker: "Tracker",
domain: Dict[Text, Any],
) -> Optional[List[EventType]]:
return custom_request_next_slot(self, dispatcher, tracker, domain)
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
"""A list of required slots that the form has to fill"""
return [
"email",
"priority",
"problem_description",
"incident_title",
"confirm",
]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
return {
"email": [
self.from_entity(entity="email"),
self.from_intent(value=True, intent="affirm"),
self.from_intent(value=False, intent="deny"),
],
"priority": self.from_entity(entity="priority"),
"problem_description": [
self.from_text(
not_intent=[
"incident_status",
"bot_challenge",
"help",
"affirm",
"deny",
]
)
],
"incident_title": [
self.from_trigger_intent(
intent="password_reset",
value="Problem resetting password",
),
self.from_trigger_intent(
intent="problem_email", value="Problem with email"
),
self.from_text(
not_intent=[
"incident_status",
"bot_challenge",
"help",
"affirm",
"deny",
]
),
],
"confirm": [
self.from_intent(value=True, intent="affirm"),
self.from_intent(value=False, intent="deny"),
],
}
def validate_email(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate email is in ticket system."""
return _validate_email(value, dispatcher, tracker, domain)
def validate_priority(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate priority is a valid value."""
if value.lower() in snow.priority_db():
return {"priority": value}
else:
dispatcher.utter_message(template="utter_no_priority")
return {"priority": None}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
"""Create an incident and return details or
if localmode return incident details as if incident
was created
"""
priority = tracker.get_slot("priority")
email = tracker.get_slot("email")
problem_description = tracker.get_slot("problem_description")
incident_title = tracker.get_slot("incident_title")
confirm = tracker.get_slot("confirm")
if not confirm:
dispatcher.utter_message(
template="utter_incident_creation_canceled"
)
return [AllSlotsReset(), SlotSet("previous_email", email)]
if localmode:
message = (
f"An incident with the following details would be opened "
f"if ServiceNow was connected:\n"
f"email: {email}\n"
f"problem description: {problem_description}\n"
f"title: {incident_title}\npriority: {priority}"
)
else:
snow_priority = snow.priority_db().get(priority)
response = snow.create_incident(
description=problem_description,
short_description=incident_title,
priority=snow_priority,
email=email,
)
incident_number = (
response.get("content", {}).get("result", {}).get("number")
)
if incident_number:
message = (
f"Successfully opened up incident {incident_number} "
f"for you. Someone will reach out soon."
)
else:
message = (
f"Something went wrong while opening an incident for you. "
f"{response.get('error')}"
)
dispatcher.utter_message(message)
return [AllSlotsReset(), SlotSet("previous_email", email)]
class IncidentStatusForm(FormAction):
def name(self) -> Text:
return "incident_status_form"
def request_next_slot(
self,
dispatcher: "CollectingDispatcher",
tracker: "Tracker",
domain: Dict[Text, Any],
) -> Optional[List[EventType]]:
return custom_request_next_slot(self, dispatcher, tracker, domain)
@staticmethod
def required_slots(tracker: Tracker) -> List[Text]:
"""A list of required slots that the form has to fill"""
return ["email"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
return {
"email": [
self.from_entity(entity="email"),
self.from_intent(value=True, intent="affirm"),
self.from_intent(value=False, intent="deny"),
]
}
def validate_email(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Validate email is in ticket system."""
return _validate_email(value, dispatcher, tracker, domain)
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
"""Look up all incidents associated with email address
and return status of each"""
email = tracker.get_slot("email")
incident_states = {
"New": "is currently awaiting triage",
"In Progress": "is currently in progress",
"On Hold": "has been put on hold",
"Closed": "has been closed",
}
if localmode:
status = random.choice(list(incident_states.values()))
message = (
f"Since ServiceNow isn't connected, I'm making this up!\n"
f"The most recent incident for {email} {status}"
)
else:
incidents_result = snow.retrieve_incidents(email)
incidents = incidents_result.get("incidents")
if incidents:
message = "\n".join(
[
f'Incident {i.get("number")}: '
f'"{i.get("short_description")}", '
f'opened on {i.get("opened_at")} '
f'{incident_states.get(i.get("incident_state"))}'
for i in incidents
]
)
else:
message = f"{incidents_result.get('error')}"
dispatcher.utter_message(message)
return [AllSlotsReset(), SlotSet("previous_email", email)]
|
[
"actions.snow.SnowAPI",
"rasa_sdk.events.SlotSet",
"rasa_sdk.events.AllSlotsReset",
"logging.getLogger"
] |
[((327, 354), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (344, 354), False, 'import logging\n'), ((422, 431), 'actions.snow.SnowAPI', 'SnowAPI', ([], {}), '()\n', (429, 431), False, 'from actions.snow import SnowAPI\n'), ((7294, 7309), 'rasa_sdk.events.AllSlotsReset', 'AllSlotsReset', ([], {}), '()\n', (7307, 7309), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n'), ((7311, 7343), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""previous_email"""', 'email'], {}), "('previous_email', email)\n", (7318, 7343), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n'), ((10194, 10209), 'rasa_sdk.events.AllSlotsReset', 'AllSlotsReset', ([], {}), '()\n', (10207, 10209), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n'), ((10211, 10243), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""previous_email"""', 'email'], {}), "('previous_email', email)\n", (10218, 10243), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n'), ((1316, 1345), 'rasa_sdk.events.SlotSet', 'SlotSet', (['REQUESTED_SLOT', 'slot'], {}), '(REQUESTED_SLOT, slot)\n', (1323, 1345), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n'), ((6006, 6021), 'rasa_sdk.events.AllSlotsReset', 'AllSlotsReset', ([], {}), '()\n', (6019, 6021), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n'), ((6023, 6055), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""previous_email"""', 'email'], {}), "('previous_email', email)\n", (6030, 6055), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet, EventType\n')]
|
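A small sketch of exercising the form classes above directly; the MagicMock tracker is a stand-in, since a real run needs a Rasa SDK action server:

from unittest.mock import MagicMock

form = IncidentStatusForm()
assert form.name() == "incident_status_form"
assert IncidentStatusForm.required_slots(MagicMock()) == ["email"]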
from dataclasses import dataclass, field
from typing import List, Text
from typefit import typefit
@dataclass
class Comment:
text: Text
children: List["Comment"] = field(default_factory=list)
data = {"text": "Hello", "children": [{"text": "Howdy"}, {"text": "Hello to you too"}]}
def test_forward_ref():
comment = typefit(Comment, data)
assert comment.children[0].text == "Howdy"
|
[
"dataclasses.field",
"typefit.typefit"
] |
[((175, 202), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (180, 202), False, 'from dataclasses import dataclass, field\n'), ((333, 355), 'typefit.typefit', 'typefit', (['Comment', 'data'], {}), '(Comment, data)\n', (340, 355), False, 'from typefit import typefit\n')]
|
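The forward reference resolves recursively, so the same fit works at any depth; a variant of the row's test with one more level of nesting:

def test_forward_ref_nested():
    nested = {"text": "root",
              "children": [{"text": "child", "children": [{"text": "grandchild"}]}]}
    comment = typefit(Comment, nested)
    assert comment.children[0].children[0].text == "grandchild"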
import base64
import collections
import functools
import six
from six.moves import urllib
from dcos import cosmos, util
from dcos.errors import (DCOSAuthenticationException,
DCOSAuthorizationException, DCOSBadRequest,
DCOSConnectionError, DCOSException, DCOSHTTPException)
logger = util.get_logger(__name__)
def cosmos_error(fn):
"""Decorator for errors returned from cosmos
:param fn: function to check for errors from cosmos
:type fn: function
:rtype: requests.Response
:returns: requests.Response
"""
@functools.wraps(fn)
def check_for_cosmos_error(*args, **kwargs):
"""Returns response from cosmos or raises exception
:returns: Response or raises Exception
:rtype: requests.Response
"""
error_media_type = 'application/vnd.dcos.package.error+json;' \
'charset=utf-8;version=v1'
response = fn(*args, **kwargs)
content_type = response.headers.get('Content-Type')
if content_type is None:
raise DCOSHTTPException(response)
elif error_media_type in content_type:
logger.debug("Error: {}".format(response.json()))
error_msg = _format_error_message(response.json())
raise DCOSException(error_msg)
elif response.status_code not in range(200, 300):
raise DCOSHTTPException(response)
return response
return check_for_cosmos_error
class PackageManager(object):
"""Implementation of Package Manager using Cosmos"""
def __init__(self, cosmos_url):
self.cosmos_url = cosmos_url
self.cosmos = cosmos.Cosmos(self.cosmos_url)
def has_capability(self, capability):
"""Check if cluster has a capability.
:param capability: capability name
:type capability: string
        :return: whether the cluster has the capability
:rtype: bool
"""
if not self.enabled():
return False
try:
response = self.cosmos.call_endpoint(
'capabilities').json()
except DCOSAuthenticationException:
raise
except DCOSAuthorizationException:
raise
except DCOSConnectionError:
raise
except Exception as e:
logger.exception(e)
return False
if 'capabilities' not in response:
logger.error(
'Request to get cluster capabilities: {} '
'returned unexpected response: {}. '
'Missing "capabilities" field'.format(
urllib.parse.urljoin(self.cosmos_url, 'capabilities'),
response))
return False
return {'name': capability} in response['capabilities']
def enabled(self):
"""Returns whether or not cosmos is enabled on specified dcos cluster
:rtype: bool
"""
return self.cosmos.enabled()
def install_app(self, pkg, options):
"""Installs a package's application
:param pkg: the package to install
:type pkg: CosmosPackageVersion
:param options: user supplied package parameters
:type options: dict
:rtype: None
"""
params = {"packageName": pkg.name(), "packageVersion": pkg.version()}
if options is not None:
params["options"] = options
self.cosmos_post("install", params)
def uninstall_app(self, package_name, remove_all, app_id):
"""Uninstalls an app.
:param package_name: The package to uninstall
:type package_name: str
:param remove_all: Whether to remove all instances of the named app
:type remove_all: boolean
:param app_id: App ID of the app instance to uninstall
:type app_id: str
:returns: whether uninstall was successful or not
:rtype: bool
"""
params = {"packageName": package_name}
if remove_all is True:
params["all"] = True
if app_id is not None:
params["appId"] = app_id
response = self.cosmos_post("uninstall", params)
results = response.json().get("results")
uninstalled_versions = []
for res in results:
version = res.get("packageVersion")
if version not in uninstalled_versions:
uninstalled_versions += [res.get("packageVersion")]
if res.get("postUninstallNotes") is not None:
print(res.get("postUninstallNotes"))
return True
def search_sources(self, query):
"""package search
:param query: query to search
:type query: str
        :returns: list of package indices of matching packages
:rtype: [packages]
"""
response = self.cosmos_post("search", {"query": query})
return response.json()
def get_package_version(self, package_name, package_version):
"""Returns PackageVersion of specified package
:param package_name: package name
:type package_name: str
:param package_version: version of package
:type package_version: str | None
:rtype: PackageVersion
"""
return CosmosPackageVersion(package_name, package_version,
self.cosmos_url)
def installed_apps(self, package_name, app_id):
"""List installed packages
{
'appId': <appId>,
..<package.json properties>..
}
:param package_name: the optional package to list
:type package_name: str
:param app_id: the optional application id to list
:type app_id: str
:rtype: [dict]
"""
params = {}
if package_name is not None:
params["packageName"] = package_name
if app_id is not None:
params["appId"] = app_id
list_response = self.cosmos_post("list", params).json()
packages = []
for pkg in list_response['packages']:
result = pkg['packageInformation']['packageDefinition']
result['appId'] = pkg['appId']
packages.append(result)
return packages
def get_repos(self):
"""List locations of repos
        :returns: the list of repos, in resolution order
:rtype: dict
"""
return self.cosmos_post("repository/list", params={}).json()
def add_repo(self, name, package_repo, index):
"""Add package repo and update repo with new repo
:param name: name to call repo
:type name: str
:param package_repo: location of repo to add
:type package_repo: str
:param index: index to add this repo
:type index: int
:returns: current repo list
:rtype: dict
"""
params = {"name": name, "uri": package_repo}
if index is not None:
params["index"] = index
response = self.cosmos_post("repository/add", params=params)
return response.json()
def remove_repo(self, name):
"""Remove package repo and update repo
:param name: name of repo to remove
:type name: str
:returns: current repo list
:rtype: dict
"""
params = {"name": name}
response = self.cosmos_post("repository/delete", params=params)
return response.json()
def package_add_local(self, dcos_package):
"""
Adds a locally stored DC/OS package to DC/OS
:param dcos_package: path to the DC/OS package
:type dcos_package: None | str
:return: Response to the package add request
:rtype: requests.Response
"""
try:
with util.open_file(dcos_package, 'rb') as pkg:
extra_headers = {
'Content-Type':
'application/vnd.dcos.'
'universe.package+zip;version=v1',
'X-Dcos-Content-MD5': util.md5_hash_file(pkg)
}
return self._post('add', headers=extra_headers, data=pkg)
except DCOSHTTPException as e:
if e.status() == 404:
message = 'Your version of DC/OS ' \
'does not support this operation'
raise DCOSException(message)
else:
raise e
def package_add_remote(self, package_name, package_version):
"""
Adds a remote DC/OS package to DC/OS
:param package_name: name of the remote package to add
:type package_name: None | str
:param package_version: version of the remote package to add
:type package_version: None | str
:return: Response to the package add request
:rtype: requests.Response
"""
try:
json = {'packageName': package_name}
if package_version is not None:
json['packageVersion'] = package_version
return self._post('add', params=json)
except DCOSHTTPException as e:
if e.status() == 404:
message = 'Your version of DC/OS ' \
'does not support this operation'
raise DCOSException(message)
else:
raise e
@cosmos_error
def _post(self, request, params=None, headers=None, data=None):
"""Request to cosmos server
:param request: type of request
:type request: str
:param params: body of request
:type params: dict
:param headers: list of headers for request in order of preference
:type headers: [str]
:param data: a file object
:type: file
:returns: Response
:rtype: requests.Response
"""
endpoint = 'package/{}'.format(request)
try:
return self.cosmos.call_endpoint(
endpoint, headers, data=data, json=params)
except DCOSAuthenticationException:
raise
except DCOSAuthorizationException:
raise
except DCOSBadRequest as e:
return e.response
except DCOSHTTPException as e:
# let non authentication responses be handled by `cosmos_error` so
# we can expose errors reported by cosmos
return e.response
def cosmos_post(self, request, params):
"""Request to cosmos server
:param request: type of request
:type request: str
:param params: body of request
:type params: dict
:returns: Response
:rtype: requests.Response
"""
return self._post(request, params)
class CosmosPackageVersion():
"""Interface to a specific package version from cosmos"""
def __init__(self, name, package_version, url):
self._cosmos_url = url
params = {"packageName": name}
if package_version is not None:
params["packageVersion"] = package_version
response = PackageManager(url).cosmos_post("describe", params)
self._package_json = response.json()
self._content_type = response.headers['Content-Type']
def __repr__(self):
return "<CosmosPackageVersion name='{}' version='{}'>".format(
self.name(), self.version())
def version(self):
"""Returns the package version.
:returns: The version of this package
:rtype: str
"""
return self.package_json()["version"]
def name(self):
"""Returns the package name.
:returns: The name of this package
:rtype: str
"""
return self.package_json()["name"]
def package_json(self):
"""Returns the JSON content of the package definition.
:returns: Package data
:rtype: dict
"""
if 'version=v2' in self._content_type:
return self._package_json
else:
return self._package_json["package"]
def package_response(self):
"""Returns the JSON content of the describe response.
:returns: Package data
:rtype: dict
"""
return self._package_json
def config_json(self):
"""Returns the JSON content of the config.json file.
:returns: Package config schema
:rtype: dict | None
"""
return self.package_json().get("config")
def resource_json(self):
"""Returns the JSON content of the resource.json file.
:returns: Package resources
:rtype: dict | None
"""
return self.package_json().get("resource")
def marathon_template(self):
"""Returns raw data from marathon.json
:returns: raw data from marathon.json
:rtype: str | None
"""
template = self.package_json().get("marathon", {}).get(
"v2AppMustacheTemplate"
)
return base64.b64decode(template) if template else None
def marathon_json(self, options):
"""Returns the JSON content of the marathon.json template, after
rendering it with options.
:param options: the template options to use in rendering
:type options: dict
:rtype: dict
"""
params = {
"packageName": self.name(),
"packageVersion": self.version()
}
if options:
params["options"] = options
response = PackageManager(
self._cosmos_url
).cosmos_post("render", params)
return response.json().get("marathonJson")
def options(self, user_options):
"""Makes sure user supplied options are valid
:param user_options: the template options to use in rendering
:type user_options: dict
:rtype: None
"""
self.marathon_json(user_options)
return None
def cli_definition(self):
"""Returns the JSON content that defines a cli subcommand. Looks for
"cli" property in resource.json first and if that is None, checks for
command.json
:returns: Package data
:rtype: dict | None
"""
return (self.resource_json() and self.resource_json().get("cli")) or (
self.command_json()
)
def command_json(self):
"""Returns the JSON content of the command.json file.
:returns: Package data
:rtype: dict | None
"""
return self.package_json().get("command")
def package_versions(self):
"""Returns a list of available versions for this package
:returns: package version
:rtype: []
"""
params = {"packageName": self.name(), "includePackageVersions": True}
response = PackageManager(self._cosmos_url).cosmos_post(
"list-versions", params)
return list(
version for (version, releaseVersion) in
sorted(
response.json().get("results").items(),
key=lambda item: int(item[1]), # release version
reverse=True
)
)
def _format_error_message(error):
"""Returns formatted error message based on error type
:param error: cosmos error
:type error: dict
:returns: formatted error
:rtype: str
"""
if error.get("type") == "AmbiguousAppId":
helper = (".\nPlease use --app-id to specify the ID of the app "
"to uninstall, or use --all to uninstall all apps.")
error_message = error.get("message") + helper
elif error.get("type") == "MultipleFrameworkIds":
helper = ". Manually shut them down using 'dcos service shutdown'"
error_message = error.get("message") + helper
elif error.get("type") == "JsonSchemaMismatch":
error_message = _format_json_schema_mismatch_message(error)
elif error.get("type") == "MarathonBadResponse":
error_message = _format_marathon_bad_response_message(error)
elif error.get('type') == 'NotImplemented':
error_message = 'DC/OS has not been ' \
'configured to support this operation'
else:
error_message = error.get("message")
return error_message
def _format_json_schema_mismatch_message(error):
"""Returns the formatted error message for JsonSchemaMismatch
    :param error: cosmos JsonSchemaMismatch error
:type error: dict
:returns: formatted error
:rtype: str
"""
error_messages = ["Error: {}".format(error.get("message"))]
for err in error.get("data").get("errors"):
if err.get("unwanted"):
reason = "Unexpected properties: {}".format(err["unwanted"])
error_messages += [reason]
if err.get("found"):
found = "Found: {}".format(err["found"])
error_messages += [found]
if err.get("minimum"):
found = "Required minimum: {}".format(err["minimum"])
error_messages += [found]
if err.get("expected"):
expected = "Expected: {}".format(",".join(err["expected"]))
error_messages += [expected]
if err.get("missing"):
missing = "Required parameter missing: {}".format(
",".join(err["missing"]),
)
error_messages += [missing]
if err.get("instance"):
pointer = err["instance"].get("pointer")
formatted_path = pointer.lstrip("/").replace("/", ".")
path = "Path: {}".format(formatted_path)
error_messages += [path]
error_messages += [
"\nPlease create a JSON file with the appropriate options, and"
" pass the /path/to/file as an --options argument."
]
return "\n".join(error_messages)
def _format_marathon_bad_response_message(error):
data = error.get("data")
error_messages = [error.get("message")]
if data is not None:
for err in data.get("errors"):
if err.get("error") and isinstance(err["error"], six.string_types):
error_messages += [err["error"]]
elif err.get("errors") and \
isinstance(err["errors"], collections.Sequence):
error_messages += err["errors"]
return "\n".join(error_messages)
|
[
"six.moves.urllib.parse.urljoin",
"dcos.util.get_logger",
"dcos.cosmos.Cosmos",
"dcos.util.open_file",
"base64.b64decode",
"functools.wraps",
"dcos.errors.DCOSHTTPException",
"dcos.errors.DCOSException",
"dcos.util.md5_hash_file"
] |
[((335, 360), 'dcos.util.get_logger', 'util.get_logger', (['__name__'], {}), '(__name__)\n', (350, 360), False, 'from dcos import cosmos, util\n'), ((590, 609), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (605, 609), False, 'import functools\n'), ((1680, 1710), 'dcos.cosmos.Cosmos', 'cosmos.Cosmos', (['self.cosmos_url'], {}), '(self.cosmos_url)\n', (1693, 1710), False, 'from dcos import cosmos, util\n'), ((1089, 1116), 'dcos.errors.DCOSHTTPException', 'DCOSHTTPException', (['response'], {}), '(response)\n', (1106, 1116), False, 'from dcos.errors import DCOSAuthenticationException, DCOSAuthorizationException, DCOSBadRequest, DCOSConnectionError, DCOSException, DCOSHTTPException\n'), ((12990, 13016), 'base64.b64decode', 'base64.b64decode', (['template'], {}), '(template)\n', (13006, 13016), False, 'import base64\n'), ((1307, 1331), 'dcos.errors.DCOSException', 'DCOSException', (['error_msg'], {}), '(error_msg)\n', (1320, 1331), False, 'from dcos.errors import DCOSAuthenticationException, DCOSAuthorizationException, DCOSBadRequest, DCOSConnectionError, DCOSException, DCOSHTTPException\n'), ((7799, 7833), 'dcos.util.open_file', 'util.open_file', (['dcos_package', '"""rb"""'], {}), "(dcos_package, 'rb')\n", (7813, 7833), False, 'from dcos import cosmos, util\n'), ((1408, 1435), 'dcos.errors.DCOSHTTPException', 'DCOSHTTPException', (['response'], {}), '(response)\n', (1425, 1435), False, 'from dcos.errors import DCOSAuthenticationException, DCOSAuthorizationException, DCOSBadRequest, DCOSConnectionError, DCOSException, DCOSHTTPException\n'), ((2641, 2694), 'six.moves.urllib.parse.urljoin', 'urllib.parse.urljoin', (['self.cosmos_url', '"""capabilities"""'], {}), "(self.cosmos_url, 'capabilities')\n", (2661, 2694), False, 'from six.moves import urllib\n'), ((8061, 8084), 'dcos.util.md5_hash_file', 'util.md5_hash_file', (['pkg'], {}), '(pkg)\n', (8079, 8084), False, 'from dcos import cosmos, util\n'), ((8385, 8407), 'dcos.errors.DCOSException', 'DCOSException', (['message'], {}), '(message)\n', (8398, 8407), False, 'from dcos.errors import DCOSAuthenticationException, DCOSAuthorizationException, DCOSBadRequest, DCOSConnectionError, DCOSException, DCOSHTTPException\n'), ((9308, 9330), 'dcos.errors.DCOSException', 'DCOSException', (['message'], {}), '(message)\n', (9321, 9330), False, 'from dcos.errors import DCOSAuthenticationException, DCOSAuthorizationException, DCOSBadRequest, DCOSConnectionError, DCOSException, DCOSHTTPException\n')]
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
#
#------------------------------------------------------------------------------
""" Enthought pyface package component
"""
# Standard library imports.
import os
# Major package imports.
import wx
# Enthought library imports.
from traits.api import Any, HasTraits, List, Property, provides
from traits.api import Unicode
# Local imports.
from pyface.i_image_resource import IImageResource, MImageResource
@provides(IImageResource)
class ImageResource(MImageResource, HasTraits):
""" The toolkit specific implementation of an ImageResource. See the
IImageResource interface for the API documentation.
"""
#### Private interface ####################################################
# The resource manager reference for the image.
_ref = Any
#### 'ImageResource' interface ############################################
absolute_path = Property(Unicode)
name = Unicode
search_path = List
###########################################################################
# 'ImageResource' interface.
###########################################################################
def create_bitmap(self, size=None):
return self.create_image(size).ConvertToBitmap()
def create_icon(self, size=None):
ref = self._get_ref(size)
if ref is not None:
icon = wx.Icon(self.absolute_path, wx.BITMAP_TYPE_ICO)
else:
image = self._get_image_not_found_image()
# We have to convert the image to a bitmap first and then create an
# icon from that.
bmp = image.ConvertToBitmap()
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bmp)
return icon
    @classmethod
    def image_size(cls, image):
""" Get the size of a toolkit image
Parameters
----------
image : toolkit image
A toolkit image to compute the size of.
Returns
-------
size : tuple
The (width, height) tuple giving the size of the image.
"""
size = image.GetSize()
return size.Get()
###########################################################################
# Private interface.
###########################################################################
def _get_absolute_path(self):
        # FIXME: This doesn't quite work with the new notion of image size. We
# should find out who is actually using this trait, and for what!
# (AboutDialog uses it to include the path name in some HTML.)
ref = self._get_ref()
if ref is not None:
absolute_path = os.path.abspath(self._ref.filename)
else:
absolute_path = self._get_image_not_found().absolute_path
return absolute_path
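# --- Usage sketch (illustrative, not part of the original module). It assumes
# a wx.App has been created and that an image named 'splash' (a hypothetical
# name) is discoverable on the resource search path. ---
if __name__ == "__main__":
    app = wx.App()
    splash = ImageResource(name="splash")
    print(splash.absolute_path)
    bitmap = splash.create_bitmap()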
#### EOF ######################################################################
|
[
"os.path.abspath",
"traits.api.Property",
"traits.api.provides",
"wx.Icon",
"wx.EmptyIcon"
] |
[((934, 958), 'traits.api.provides', 'provides', (['IImageResource'], {}), '(IImageResource)\n', (942, 958), False, 'from traits.api import Any, HasTraits, List, Property, provides\n'), ((1397, 1414), 'traits.api.Property', 'Property', (['Unicode'], {}), '(Unicode)\n', (1405, 1414), False, 'from traits.api import Any, HasTraits, List, Property, provides\n'), ((1872, 1919), 'wx.Icon', 'wx.Icon', (['self.absolute_path', 'wx.BITMAP_TYPE_ICO'], {}), '(self.absolute_path, wx.BITMAP_TYPE_ICO)\n', (1879, 1919), False, 'import wx\n'), ((2160, 2174), 'wx.EmptyIcon', 'wx.EmptyIcon', ([], {}), '()\n', (2172, 2174), False, 'import wx\n'), ((3148, 3183), 'os.path.abspath', 'os.path.abspath', (['self._ref.filename'], {}), '(self._ref.filename)\n', (3163, 3183), False, 'import os\n')]
|
import logging
from datetime import timedelta
from core import Feed
from core.errors import ObservableValidationError
from core.observables import Ip
from core.config.config import yeti_config
class AbuseIPDB(Feed):
default_values = {
"frequency": timedelta(hours=5),
"name": "AbuseIPDB",
"source": "https://api.abuseipdb.com/api/v2/blacklist",
"description":
"Black List IP generated by AbuseIPDB",
}
def update(self):
api_key = yeti_config.get('abuseIPDB', 'key')
if api_key:
self.source = "https://api.abuseipdb.com/api/v2/blacklist?&key=%s&plaintext&limit=10000" % (api_key)
# change the limit rate if you subscribe to a paid plan
for line in self.update_lines():
self.analyze(line)
else:
logging.error("Your abuseIPDB API key is not set in the yeti.conf file")
def analyze(self, line):
line = line.strip()
ip = line
context = {'source': self.name}
try:
ip = Ip.get_or_create(value=ip)
ip.add_context(context)
ip.add_source(self.name)
ip.tag('abuseIPDB')
except ObservableValidationError as e:
            logging.error(e)
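# --- Usage sketch (illustrative, not part of the original feed). It assumes a
# running Yeti instance, since Ip.get_or_create persists to the Mongo backend;
# the IP below is a documentation-range placeholder. ---
if __name__ == "__main__":
    feed = AbuseIPDB(name="AbuseIPDB")
    feed.analyze("198.51.100.7")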
|
[
"core.observables.Ip.get_or_create",
"core.config.config.yeti_config.get",
"datetime.timedelta",
"logging.error"
] |
[((263, 281), 'datetime.timedelta', 'timedelta', ([], {'hours': '(5)'}), '(hours=5)\n', (272, 281), False, 'from datetime import timedelta\n'), ((498, 533), 'core.config.config.yeti_config.get', 'yeti_config.get', (['"""abuseIPDB"""', '"""key"""'], {}), "('abuseIPDB', 'key')\n", (513, 533), False, 'from core.config.config import yeti_config\n'), ((842, 914), 'logging.error', 'logging.error', (['"""Your abuseIPDB API key is not set in the yeti.conf file"""'], {}), "('Your abuseIPDB API key is not set in the yeti.conf file')\n", (855, 914), False, 'import logging\n'), ((1064, 1090), 'core.observables.Ip.get_or_create', 'Ip.get_or_create', ([], {'value': 'ip'}), '(value=ip)\n', (1080, 1090), False, 'from core.observables import Ip\n'), ((1261, 1277), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (1274, 1277), False, 'import logging\n')]
|
from urllib import parse
from django.contrib import auth
from django.contrib.auth import logout
from django.core.cache import cache
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, RedirectView
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from ZhiQue import permissions, mixins
from ZhiQue.utils import get_redirect_uri
from .clients import get_client_by_type
from .forms import LoginForm
from .serializers import UserRegisterSerializer
from .utils import generate_token
class LoginView(FormView):
"""登录视图"""
form_class = LoginForm
template_name = 'oauth/login.html'
@method_decorator(sensitive_post_parameters('password'))
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(LoginView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['redirect_to'] = get_redirect_uri(self.request)
return super(LoginView, self).get_context_data(**kwargs)
def post(self, request, *args, **kwargs):
form = LoginForm(data=self.request.POST, request=self.request)
if form.is_valid():
auth.login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
return self.render_to_response({
'form': form
})
def get_success_url(self):
authorize_uri = reverse('authorize', request=self.request, kwargs={
'authorize_type': 'account'
})
data = parse.urlencode({
'response_type': 'token',
'redirect_uri': get_redirect_uri(self.request)
})
return f'{authorize_uri}?{data}'
class OAuthLoginView(RedirectView):
"""oauth客户端登录"""
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(OAuthLoginView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, authorize_type, *args, **kwargs):
client = get_client_by_type(authorize_type)
return client.get_authorize_url(self.request)
class LogoutView(RedirectView):
"""退出登录"""
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(LogoutView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
user = request.user
user_token_cache_key = f'oauth:user:id:{user.id}:token'
if cache.ttl(user_token_cache_key) != 0:
token = cache.get(user_token_cache_key)
cache.delete(user_token_cache_key)
token_user_cache_key = f'oauth:token:{token}:user:id'
if cache.ttl(token_user_cache_key) != 0:
cache.delete(token_user_cache_key)
logout(request)
return super(LogoutView, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
return get_redirect_uri(self.request)
class AuthorizeView(RedirectView):
"""用户授权"""
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(AuthorizeView, self).dispatch(request, *args, **kwargs)
def get_redirect_url(self, authorize_type, *args, **kwargs):
request = self.request
user = request.user
token = None
if authorize_type == 'account':
if user.is_authenticated:
token = generate_token()
token_user_cache_key = f'oauth:token:{token}:user:id'
user_token_cache_key = f'oauth:user:id:{user.id}:token'
cache.set(token_user_cache_key, user.id, timeout=60 * 60 * 24)
cache.set(user_token_cache_key, token, timeout=None)
else:
code = request.GET.get('code')
client = get_client_by_type(authorize_type)
if client.get_access_token_by_code(code):
oauth_user = client.get_oauth_user_info()
oauth_user.user = user
oauth_user.save()
if token:
data = parse.urlencode({
'access_token': token,
'token_type': 'bearer'
})
return f'{get_redirect_uri(request)}#{data}'
return reverse('login', request=request)
class UserRegisterAPIView(mixins.CreateModelMixin, GenericAPIView):
"""用户注册"""
serializer_class = UserRegisterSerializer
permission_classes = (permissions.AllowAny,)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
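# --- URLconf sketch (illustrative, not part of the original module). The URL
# names 'login' and 'authorize' are taken from the reverse() calls above; the
# paths themselves are hypothetical. ---
# from django.urls import path, re_path
# urlpatterns = [
#     path('login/', LoginView.as_view(), name='login'),
#     path('logout/', LogoutView.as_view(), name='logout'),
#     re_path(r'^authorize/(?P<authorize_type>\w+)/$', AuthorizeView.as_view(), name='authorize'),
#     path('register/', UserRegisterAPIView.as_view(), name='register'),
# ]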
|
[
"django.core.cache.cache.ttl",
"django.utils.decorators.method_decorator",
"ZhiQue.utils.get_redirect_uri",
"urllib.parse.urlencode",
"django.core.cache.cache.set",
"django.core.cache.cache.get",
"rest_framework.reverse.reverse",
"django.contrib.auth.logout",
"rest_framework.response.Response",
"django.core.cache.cache.delete",
"django.views.decorators.debug.sensitive_post_parameters"
] |
[((1065, 1095), 'django.utils.decorators.method_decorator', 'method_decorator', (['csrf_protect'], {}), '(csrf_protect)\n', (1081, 1095), False, 'from django.utils.decorators import method_decorator\n'), ((1101, 1130), 'django.utils.decorators.method_decorator', 'method_decorator', (['never_cache'], {}), '(never_cache)\n', (1117, 1130), False, 'from django.utils.decorators import method_decorator\n'), ((2168, 2197), 'django.utils.decorators.method_decorator', 'method_decorator', (['never_cache'], {}), '(never_cache)\n', (2184, 2197), False, 'from django.utils.decorators import method_decorator\n'), ((2553, 2582), 'django.utils.decorators.method_decorator', 'method_decorator', (['never_cache'], {}), '(never_cache)\n', (2569, 2582), False, 'from django.utils.decorators import method_decorator\n'), ((3410, 3439), 'django.utils.decorators.method_decorator', 'method_decorator', (['never_cache'], {}), '(never_cache)\n', (3426, 3439), False, 'from django.utils.decorators import method_decorator\n'), ((1021, 1058), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', (['"""password"""'], {}), "('password')\n", (1046, 1058), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((1329, 1359), 'ZhiQue.utils.get_redirect_uri', 'get_redirect_uri', (['self.request'], {}), '(self.request)\n', (1345, 1359), False, 'from ZhiQue.utils import get_redirect_uri\n'), ((1817, 1902), 'rest_framework.reverse.reverse', 'reverse', (['"""authorize"""'], {'request': 'self.request', 'kwargs': "{'authorize_type': 'account'}"}), "('authorize', request=self.request, kwargs={'authorize_type': 'account'}\n )\n", (1824, 1902), False, 'from rest_framework.reverse import reverse\n'), ((3171, 3186), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (3177, 3186), False, 'from django.contrib.auth import logout\n'), ((3321, 3351), 'ZhiQue.utils.get_redirect_uri', 'get_redirect_uri', (['self.request'], {}), '(self.request)\n', (3337, 3351), False, 'from ZhiQue.utils import get_redirect_uri\n'), ((4640, 4673), 'rest_framework.reverse.reverse', 'reverse', (['"""login"""'], {'request': 'request'}), "('login', request=request)\n", (4647, 4673), False, 'from rest_framework.reverse import reverse\n'), ((5128, 5202), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED', 'headers': 'headers'}), '(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n', (5136, 5202), False, 'from rest_framework.response import Response\n'), ((2856, 2887), 'django.core.cache.cache.ttl', 'cache.ttl', (['user_token_cache_key'], {}), '(user_token_cache_key)\n', (2865, 2887), False, 'from django.core.cache import cache\n'), ((2914, 2945), 'django.core.cache.cache.get', 'cache.get', (['user_token_cache_key'], {}), '(user_token_cache_key)\n', (2923, 2945), False, 'from django.core.cache import cache\n'), ((2958, 2992), 'django.core.cache.cache.delete', 'cache.delete', (['user_token_cache_key'], {}), '(user_token_cache_key)\n', (2970, 2992), False, 'from django.core.cache import cache\n'), ((4457, 4521), 'urllib.parse.urlencode', 'parse.urlencode', (["{'access_token': token, 'token_type': 'bearer'}"], {}), "({'access_token': token, 'token_type': 'bearer'})\n", (4472, 4521), False, 'from urllib import parse\n'), ((2020, 2050), 'ZhiQue.utils.get_redirect_uri', 'get_redirect_uri', (['self.request'], {}), '(self.request)\n', (2036, 2050), False, 'from ZhiQue.utils import get_redirect_uri\n'), ((3074, 3105), 'django.core.cache.cache.ttl', 'cache.ttl', (['token_user_cache_key'], {}), '(token_user_cache_key)\n', (3083, 3105), False, 'from django.core.cache import cache\n'), ((3128, 3162), 'django.core.cache.cache.delete', 'cache.delete', (['token_user_cache_key'], {}), '(token_user_cache_key)\n', (3140, 3162), False, 'from django.core.cache import cache\n'), ((3990, 4052), 'django.core.cache.cache.set', 'cache.set', (['token_user_cache_key', 'user.id'], {'timeout': '(60 * 60 * 24)'}), '(token_user_cache_key, user.id, timeout=60 * 60 * 24)\n', (3999, 4052), False, 'from django.core.cache import cache\n'), ((4069, 4121), 'django.core.cache.cache.set', 'cache.set', (['user_token_cache_key', 'token'], {'timeout': 'None'}), '(user_token_cache_key, token, timeout=None)\n', (4078, 4121), False, 'from django.core.cache import cache\n'), ((4590, 4615), 'ZhiQue.utils.get_redirect_uri', 'get_redirect_uri', (['request'], {}), '(request)\n', (4606, 4615), False, 'from ZhiQue.utils import get_redirect_uri\n')]
|
from kafka import KafkaProducer
import requests
from json import dumps
import time
def on_message1(message):
producer1.send('ntpc', message)
producer1.flush()
producer1 = KafkaProducer(value_serializer=lambda m: dumps(m).encode("utf-8"), bootstrap_servers=['localhost:9092'])
# url for collecting NTPC company share data
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=NTPC.BSE&apikey=<KEY>'
r = requests.get(url)
data=r.json()
#preprocessing
del(data['Meta Data'])
df=data['Time Series (Daily)']
jsonFile={}
j=0
for i in df.keys():
jsonFile[j]=df[i]
jsonFile[j]['0. date']=i
j+=1
for i in jsonFile.keys():
jsonFile[i]["open"]=jsonFile[i]["1. open"]
jsonFile[i]["high"]=jsonFile[i]["2. high"]
jsonFile[i]["low"]=jsonFile[i]["3. low"]
jsonFile[i]["close"]=jsonFile[i]["4. close"]
jsonFile[i]["volume"]=jsonFile[i]["5. volume"]
jsonFile[i]["date"]=jsonFile[i]["0. date"]
del jsonFile[i]["1. open"]
del jsonFile[i]["2. high"]
del jsonFile[i]["3. low"]
del jsonFile[i]["4. close"]
del jsonFile[i]["5. volume"]
del jsonFile[i]["0. date"]
for i in jsonFile.keys():
on_message1(jsonFile[i])
time.sleep(1)
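# --- Companion consumer sketch (illustrative, not part of the original
# script): reads back the records published to the 'ntpc' topic, assuming the
# same local broker. ---
# from kafka import KafkaConsumer
# from json import loads
# consumer = KafkaConsumer('ntpc',
#                          bootstrap_servers=['localhost:9092'],
#                          auto_offset_reset='earliest',
#                          value_deserializer=lambda m: loads(m.decode('utf-8')))
# for message in consumer:
#     print(message.value['date'], message.value['close'])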
|
[
"json.dumps",
"requests.get",
"time.sleep"
] |
[((436, 453), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (448, 453), False, 'import requests\n'), ((1156, 1169), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1166, 1169), False, 'import time\n'), ((223, 231), 'json.dumps', 'dumps', (['m'], {}), '(m)\n', (228, 231), False, 'from json import dumps\n')]
|
# -*- coding: utf-8 -*-
from brewtils.errors import ModelValidationError
from brewtils.models import Operation
from brewtils.schema_parser import SchemaParser
from beer_garden.api.http.base_handler import BaseHandler
class AdminAPI(BaseHandler):
async def patch(self):
"""
---
summary: Initiate administrative actions
description: |
The body of the request needs to contain a set of instructions
detailing the operations to perform.
Currently the supported operations are `rescan`:
```JSON
[
{ "operation": "rescan" }
]
```
* Will remove from the registry and database any currently stopped
          plugins whose directory has been removed.
* Will add and start any new plugin directories.
And reloading the plugin logging configuration:
```JSON
[
{
"operation": "reload",
"path": "/config/logging/plugin"
}
]
```
parameters:
- name: patch
in: body
required: true
description: Instructions for operations
schema:
$ref: '#/definitions/Patch'
responses:
204:
description: Operation successfully initiated
50x:
$ref: '#/definitions/50xError'
tags:
- Admin
"""
operations = SchemaParser.parse_patch(
self.request.decoded_body, many=True, from_string=True
)
for op in operations:
if op.operation == "rescan":
await self.client(Operation(operation_type="RUNNER_RESCAN"))
elif op.operation == "reload":
if op.path == "/config/logging/plugin":
await self.client(Operation(operation_type="PLUGIN_LOG_RELOAD"))
else:
raise ModelValidationError(f"Unsupported path '{op.path}'")
else:
raise ModelValidationError(f"Unsupported operation '{op.operation}'")
self.set_status(204)
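# --- Client-side sketch (illustrative, not part of the original handler): a
# PATCH request matching the docstring above. The endpoint URL is a
# hypothetical placeholder; only the operation payloads come from the
# docstring. ---
# import requests
# requests.patch(
#     "http://beer-garden.example.com/api/v1/admin",
#     json=[{"operation": "rescan"},
#           {"operation": "reload", "path": "/config/logging/plugin"}],
# )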
|
[
"brewtils.errors.ModelValidationError",
"brewtils.models.Operation",
"brewtils.schema_parser.SchemaParser.parse_patch"
] |
[((1485, 1570), 'brewtils.schema_parser.SchemaParser.parse_patch', 'SchemaParser.parse_patch', (['self.request.decoded_body'], {'many': '(True)', 'from_string': '(True)'}), '(self.request.decoded_body, many=True, from_string=True\n )\n', (1509, 1570), False, 'from brewtils.schema_parser import SchemaParser\n'), ((2063, 2126), 'brewtils.errors.ModelValidationError', 'ModelValidationError', (['f"""Unsupported operation \'{op.operation}\'"""'], {}), '(f"Unsupported operation \'{op.operation}\'")\n', (2083, 2126), False, 'from brewtils.errors import ModelValidationError\n'), ((1694, 1735), 'brewtils.models.Operation', 'Operation', ([], {'operation_type': '"""RUNNER_RESCAN"""'}), "(operation_type='RUNNER_RESCAN')\n", (1703, 1735), False, 'from brewtils.models import Operation\n'), ((1969, 2022), 'brewtils.errors.ModelValidationError', 'ModelValidationError', (['f"""Unsupported path \'{op.path}\'"""'], {}), '(f"Unsupported path \'{op.path}\'")\n', (1989, 2022), False, 'from brewtils.errors import ModelValidationError\n'), ((1874, 1919), 'brewtils.models.Operation', 'Operation', ([], {'operation_type': '"""PLUGIN_LOG_RELOAD"""'}), "(operation_type='PLUGIN_LOG_RELOAD')\n", (1883, 1919), False, 'from brewtils.models import Operation\n')]
|
# -*- coding: utf-8 -*-
import cv2, glob
import numpy as np
import pandas as pd
from os import path
from math import isnan
from sklearn.metrics.pairwise import euclidean_distances
from JPP_precision import load_JPP_ply
from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files
from Modules.features_labels import make_labels
from Modules.coordinate_conversion import project_point_cloud
def make_ground_truth(test_filename):
n_joints = 19
ground_truth = np.ones((n_joints, 2))
label_img = cv2.imread("%s.png" % test_filename)[:, :, :3][:, :, ::-1]
label_array = make_labels(label_img).reshape(label_img.shape[:2])
parts2joints_map = np.array((0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, 11, 18, 18, 18, 18, 12, 13, 14, 15, 16, 17, 18))
for j in range(n_joints):
ground_truth[j, :] = np.mean(np.array(np.where(parts2joints_map[label_array] == j)), axis=1)
return ground_truth[:-1, :]
def JPP_precision():
args = get_args()
discr_setting_type = args.discr_setting_type
num_train_images = args.n_train_images
data_path = args.data_path
jpp_path = data_path + "Main/JointPositionPrediction/"
jpp_gt_path = jpp_path + "GroundTruth/"
jpp_out_path = jpp_path + "Output/"
eval_path = jpp_path + "Evaluation/"
test_path = args.test_path
n_test_images = args.n_test_images
device = "Kinect" if "SyntheticImages" in test_path else "Xtion"
target_joint_names = ["Head", "neck", "Chest", "Waist",
"rShoulder", "lShoulder", "rElbow", "lElbow", "rWrist", "lWrist", "rHand", "lHand",
"rKnee", "lKnee", "rAnkle", "lAnkle", "rFoot", "lFoot"]
n_joints = len(target_joint_names)
test_filenames = enum_test_files(data_path, args.test_path, n_test_images)
setting_str = "_" + str(num_train_images) + ("_%s" % discr_setting_type if discr_setting_type else "")
average_error_path = eval_path + "JPP_average_error_px" + setting_str + ".csv"
sum_prediction_error = np.zeros((n_joints+1,))
for test_filename in test_filenames:
test_filename_id = "/".join(test_filename.split("/")[-2:])
print(test_filename_id)
test_JPP_path = jpp_out_path + test_filename_id + setting_str + "_JPP.ply"
test_gt_path = jpp_gt_path + test_filename_id + "_px_gt.csv"
error_path = eval_path + test_filename_id + setting_str + "_JPP_error_px.csv"
if path.exists(test_gt_path):
gt_joint_positions = np.array(pd.read_csv(test_gt_path, header=None))
else:
gt_joint_positions = make_ground_truth(test_filename)
joint_positions_3d = load_JPP_ply(test_JPP_path)
visible_joints = []
for j, joint_position in enumerate(joint_positions_3d):
if joint_position != (0, 0):
visible_joints.append(j)
visible_joints = np.array(visible_joints)
depth_img = cv2.imread(test_filename + " Z.png", flags=0)
params = get_parameter(test_filename + "_param")
_, joint_positions_2d = project_point_cloud(joint_positions_3d, depth_img, visible_joints, device)
joint_positions_2d = np.array(joint_positions_2d).transpose()
error_per_joint = np.zeros((18,))
for j, (gt, p) in enumerate(zip(gt_joint_positions, joint_positions_2d)):
if ((not isnan(gt[0])) and (not isnan(gt[1]))) and (p[0] != 0 or p[1] != 0):
error_per_joint[j] = euclidean_distances(gt.reshape((1, -1)), p.reshape((1, -1))) * joint_positions_3d[j, 2] / 200.
elif (isnan(gt[0]) and isnan(gt[1])) and (p[0] == 0 and p[1] == 0):
error_per_joint[j] = np.nan
else:
error_per_joint[j] = 20 * joint_positions_3d[j, 2] / 200.
mean_error = np.nanmean(error_per_joint)
prediction_error = np.r_[error_per_joint, mean_error]
sum_prediction_error += prediction_error
pd.DataFrame(prediction_error, index=target_joint_names+["Mean"]).to_csv(error_path, header=False)
print("\tMean Error is %f" % mean_error)
mean_errors = sum_prediction_error / n_test_images
pd.DataFrame(mean_errors, index=target_joint_names+["Mean"]).to_csv(average_error_path, header=False)
print("Mean error is %f" % mean_errors[-1])
if __name__ == "__main__":
JPP_precision()
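# --- Worked toy example (illustrative, not part of the original script) of
# the centroid logic in make_ground_truth, using the same parts2joints_map
# (part label 31 maps to joint 18, so only the zeros belong to joint 0):
# >>> toy_labels = np.array([[0, 0, 31], [0, 31, 31]])
# >>> coords = np.where(parts2joints_map[toy_labels] == 0)
# >>> np.mean(np.array(coords), axis=1)
# array([0.33333333, 0.33333333])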
|
[
"pandas.DataFrame",
"math.isnan",
"Modules.coordinate_conversion.project_point_cloud",
"pandas.read_csv",
"JPP_precision.load_JPP_ply",
"numpy.zeros",
"numpy.ones",
"os.path.exists",
"Modules.features_labels.make_labels",
"Modules.utils.get_args",
"cv2.imread",
"numpy.where",
"numpy.array",
"Modules.utils.get_parameter",
"Modules.utils.enum_test_files",
"numpy.nanmean"
] |
[((494, 516), 'numpy.ones', 'np.ones', (['(n_joints, 2)'], {}), '((n_joints, 2))\n', (501, 516), True, 'import numpy as np\n'), ((687, 815), 'numpy.array', 'np.array', (['(0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, 11, 18, \n 18, 18, 18, 12, 13, 14, 15, 16, 17, 18)'], {}), '((0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, \n 11, 18, 18, 18, 18, 12, 13, 14, 15, 16, 17, 18))\n', (695, 815), True, 'import numpy as np\n'), ((1011, 1021), 'Modules.utils.get_args', 'get_args', ([], {}), '()\n', (1019, 1021), False, 'from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files\n'), ((1785, 1842), 'Modules.utils.enum_test_files', 'enum_test_files', (['data_path', 'args.test_path', 'n_test_images'], {}), '(data_path, args.test_path, n_test_images)\n', (1800, 1842), False, 'from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files\n'), ((2062, 2087), 'numpy.zeros', 'np.zeros', (['(n_joints + 1,)'], {}), '((n_joints + 1,))\n', (2070, 2087), True, 'import numpy as np\n'), ((2478, 2503), 'os.path.exists', 'path.exists', (['test_gt_path'], {}), '(test_gt_path)\n', (2489, 2503), False, 'from os import path\n'), ((2697, 2724), 'JPP_precision.load_JPP_ply', 'load_JPP_ply', (['test_JPP_path'], {}), '(test_JPP_path)\n', (2709, 2724), False, 'from JPP_precision import load_JPP_ply\n'), ((2924, 2948), 'numpy.array', 'np.array', (['visible_joints'], {}), '(visible_joints)\n', (2932, 2948), True, 'import numpy as np\n'), ((2969, 3014), 'cv2.imread', 'cv2.imread', (["(test_filename + ' Z.png')"], {'flags': '(0)'}), "(test_filename + ' Z.png', flags=0)\n", (2979, 3014), False, 'import cv2, glob\n'), ((3032, 3071), 'Modules.utils.get_parameter', 'get_parameter', (["(test_filename + '_param')"], {}), "(test_filename + '_param')\n", (3045, 3071), False, 'from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files\n'), ((3104, 3178), 'Modules.coordinate_conversion.project_point_cloud', 'project_point_cloud', (['joint_positions_3d', 'depth_img', 'visible_joints', 'device'], {}), '(joint_positions_3d, depth_img, visible_joints, device)\n', (3123, 3178), False, 'from Modules.coordinate_conversion import project_point_cloud\n'), ((3276, 3291), 'numpy.zeros', 'np.zeros', (['(18,)'], {}), '((18,))\n', (3284, 3291), True, 'import numpy as np\n'), ((3833, 3860), 'numpy.nanmean', 'np.nanmean', (['error_per_joint'], {}), '(error_per_joint)\n', (3843, 3860), True, 'import numpy as np\n'), ((534, 570), 'cv2.imread', 'cv2.imread', (["('%s.png' % test_filename)"], {}), "('%s.png' % test_filename)\n", (544, 570), False, 'import cv2, glob\n'), ((612, 634), 'Modules.features_labels.make_labels', 'make_labels', (['label_img'], {}), '(label_img)\n', (623, 634), False, 'from Modules.features_labels import make_labels\n'), ((4189, 4251), 'pandas.DataFrame', 'pd.DataFrame', (['mean_errors'], {'index': "(target_joint_names + ['Mean'])"}), "(mean_errors, index=target_joint_names + ['Mean'])\n", (4201, 4251), True, 'import pandas as pd\n'), ((888, 932), 'numpy.where', 'np.where', (['(parts2joints_map[label_array] == j)'], {}), '(parts2joints_map[label_array] == j)\n', (896, 932), True, 'import numpy as np\n'), ((2547, 2585), 'pandas.read_csv', 'pd.read_csv', (['test_gt_path'], {'header': 'None'}), '(test_gt_path, header=None)\n', (2558, 2585), True, 'import pandas as pd\n'), ((3208, 3236), 'numpy.array', 'np.array', (['joint_positions_2d'], {}), '(joint_positions_2d)\n', (3216, 3236), True, 'import numpy as np\n'), ((3981, 4048), 'pandas.DataFrame', 'pd.DataFrame', (['prediction_error'], {'index': "(target_joint_names + ['Mean'])"}), "(prediction_error, index=target_joint_names + ['Mean'])\n", (3993, 4048), True, 'import pandas as pd\n'), ((3395, 3407), 'math.isnan', 'isnan', (['gt[0]'], {}), '(gt[0])\n', (3400, 3407), False, 'from math import isnan\n'), ((3418, 3430), 'math.isnan', 'isnan', (['gt[1]'], {}), '(gt[1])\n', (3423, 3430), False, 'from math import isnan\n'), ((3613, 3625), 'math.isnan', 'isnan', (['gt[0]'], {}), '(gt[0])\n', (3618, 3625), False, 'from math import isnan\n'), ((3630, 3642), 'math.isnan', 'isnan', (['gt[1]'], {}), '(gt[1])\n', (3635, 3642), False, 'from math import isnan\n')]
|
from dimagi.utils.parsing import string_to_boolean
from corehq.apps.custom_data_fields.models import PROFILE_SLUG
from corehq.apps.user_importer.exceptions import UserUploadError
from corehq.apps.users.audit.change_messages import UserChangeMessage
from corehq.apps.users.model_log import UserModelAction
from corehq.apps.users.util import log_user_change
def spec_value_to_boolean_or_none(user_spec_dict, key):
value = user_spec_dict.get(key, None)
if value and isinstance(value, str):
return string_to_boolean(value)
elif isinstance(value, bool):
return value
else:
return None
class UserChangeLogger(object):
"""
User change logger to record
- changes to user properties
- text messages for changes
- useful info for changes to associated data models like role/locations
"""
def __init__(self, upload_domain, user_domain, user, is_new_user, changed_by_user, changed_via,
upload_record_id, user_domain_required_for_log=True):
self.upload_domain = upload_domain
self.user_domain = user_domain
self.user = user
self.is_new_user = is_new_user
self.changed_by_user = changed_by_user
self.changed_via = changed_via
self.upload_record_id = upload_record_id
self.user_domain_required_for_log = user_domain_required_for_log
if not is_new_user:
self.original_user_doc = self.user.to_json()
else:
self.original_user_doc = None
self.fields_changed = {}
self.change_messages = {}
self._save = False # flag to check if log needs to be saved for updates
def add_changes(self, changes):
"""
Add changes to user properties.
Ignored for new user since the whole user doc is logged for a new user
        :param changes: dict of property mapped to its new value
"""
if self.is_new_user:
return
for name, new_value in changes.items():
if self.original_user_doc[name] != new_value:
self.fields_changed[name] = new_value
self._save = True
def add_change_message(self, message):
"""
        Add change message for a change in a user property, in the form of a UserChangeMessage
Ignored for new user since the whole user doc is logged for a new user
:param message: text message for the change like 'Password reset' / 'Added as web user to domain foo'
"""
if self.is_new_user:
return
self._update_change_messages(message)
self._save = True
def _update_change_messages(self, change_messages):
for slug in change_messages:
if slug in self.change_messages:
raise UserUploadError(f"Double Entry for {slug}")
self.change_messages.update(change_messages)
def add_info(self, change_message):
"""
        Add change message for a change to the user, in the form of a UserChangeMessage
"""
self._update_change_messages(change_message)
self._save = True
def save(self):
if self.is_new_user or self._save:
action = UserModelAction.CREATE if self.is_new_user else UserModelAction.UPDATE
fields_changed = None if self.is_new_user else self.fields_changed
log_user_change(
by_domain=self.upload_domain,
for_domain=self.user_domain,
couch_user=self.user,
changed_by_user=self.changed_by_user,
changed_via=self.changed_via,
change_messages=self.change_messages,
action=action,
fields_changed=fields_changed,
bulk_upload_record_id=self.upload_record_id,
for_domain_required_for_log=self.user_domain_required_for_log,
)
class BaseUserImporter(object):
"""
Imports a Web/CommCareUser via bulk importer and also handles the logging
save_log should be called explicitly to save logs, after user is saved
"""
def __init__(self, upload_domain, user_domain, user, upload_user, is_new_user, via, upload_record_id):
"""
:param upload_domain: domain on which the bulk upload is being done
:param user_domain: domain user is being updated for
:param user: user to update
:param upload_user: user doing the upload
:param is_new_user: if user is a new user
:param via: USER_CHANGE_VIA_BULK_IMPORTER
:param upload_record_id: ID of the bulk upload record
"""
self.user_domain = user_domain
self.user = user
self.upload_user = upload_user
self.logger = UserChangeLogger(upload_domain=upload_domain, user_domain=user_domain, user=user,
is_new_user=is_new_user,
changed_by_user=upload_user, changed_via=via,
upload_record_id=upload_record_id)
self.role_updated = False
def update_role(self, role_qualified_id):
user_current_role = self.user.get_role(domain=self.user_domain)
self.role_updated = not (user_current_role
and user_current_role.get_qualified_id() == role_qualified_id)
if self.role_updated:
self.user.set_role(self.user_domain, role_qualified_id)
def save_log(self):
# Tracking for role is done post save to have role setup correctly on save
if self.role_updated:
new_role = self.user.get_role(domain=self.user_domain)
self.logger.add_info(UserChangeMessage.role_change(new_role))
self._include_user_data_changes()
self.logger.save()
def _include_user_data_changes(self):
# ToDo: consider putting just the diff
if self.logger.original_user_doc and self.logger.original_user_doc['user_data'] != self.user.user_data:
self.logger.add_changes({'user_data': self.user.user_data})
class CommCareUserImporter(BaseUserImporter):
def update_password(self, password):
self.user.set_password(password)
self.logger.add_change_message(UserChangeMessage.password_reset())
def update_phone_numbers(self, phone_numbers):
"""
The first item in 'phone_numbers' will be the default
"""
old_user_phone_numbers = self.user.phone_numbers
fmt_phone_numbers = [_fmt_phone(n) for n in phone_numbers]
if any(fmt_phone_numbers):
self.user.set_phone_numbers(fmt_phone_numbers, default_number=fmt_phone_numbers[0])
else:
self.user.set_phone_numbers([])
self._log_phone_number_changes(old_user_phone_numbers, fmt_phone_numbers)
def update_name(self, name):
self.user.set_full_name(str(name))
self.logger.add_changes({'first_name': self.user.first_name, 'last_name': self.user.last_name})
def update_user_data(self, data, uncategorized_data, profile, domain_info):
# Add in existing data. Don't use metadata - we don't want to add profile-controlled fields.
current_profile_id = self.user.user_data.get(PROFILE_SLUG)
for key, value in self.user.user_data.items():
if key not in data:
data[key] = value
if profile:
profile_obj = domain_info.profiles_by_name[profile]
data[PROFILE_SLUG] = profile_obj.id
for key in profile_obj.fields.keys():
self.user.pop_metadata(key)
try:
self.user.update_metadata(data)
except ValueError as e:
raise UserUploadError(str(e))
if uncategorized_data:
self.user.update_metadata(uncategorized_data)
# Clear blank user data so that it can be purged by remove_unused_custom_fields_from_users_task
for key in dict(data, **uncategorized_data):
value = self.user.metadata[key]
if value is None or value == '':
self.user.pop_metadata(key)
new_profile_id = self.user.user_data.get(PROFILE_SLUG)
if new_profile_id and new_profile_id != current_profile_id:
profile_name = domain_info.profile_name_by_id[new_profile_id]
self.logger.add_info(UserChangeMessage.profile_info(new_profile_id, profile_name))
def update_language(self, language):
self.user.language = language
self.logger.add_changes({'language': language})
def update_email(self, email):
self.user.email = email.lower()
self.logger.add_changes({'email': self.user.email})
def update_status(self, is_active):
self.user.is_active = is_active
self.logger.add_changes({'is_active': is_active})
def update_locations(self, location_codes, domain_info):
from corehq.apps.user_importer.importer import (
check_modified_user_loc,
find_location_id,
get_location_from_site_code
)
location_ids = find_location_id(location_codes, domain_info.location_cache)
user_current_primary_location_id = self.user.location_id
locations_updated, primary_loc_removed = check_modified_user_loc(location_ids,
self.user.location_id,
self.user.assigned_location_ids)
if primary_loc_removed:
self.user.unset_location(commit=False)
if locations_updated:
self.user.reset_locations(location_ids, commit=False)
self.logger.add_changes({'assigned_location_ids': location_ids})
if location_ids:
locations = [get_location_from_site_code(code, domain_info.location_cache)
for code in location_codes]
self.logger.add_info(
UserChangeMessage.assigned_locations_info(locations))
else:
self.logger.add_info(UserChangeMessage.assigned_locations_info([]))
# log this after assigned locations are updated, which can re-set primary location
if self.user.location_id != user_current_primary_location_id:
self.logger.add_changes({'location_id': self.user.location_id})
if self.user.location_id:
self.logger.add_info(
UserChangeMessage.primary_location_info(
self.user.get_sql_location(self.user_domain)
)
)
else:
self.logger.add_info(UserChangeMessage.primary_location_removed())
def _log_phone_number_changes(self, old_phone_numbers, new_phone_numbers):
(items_added, items_removed) = find_differences_in_list(
target=new_phone_numbers,
source=old_phone_numbers
)
change_messages = {}
if items_added:
change_messages.update(UserChangeMessage.phone_numbers_added(list(items_added))["phone_numbers"])
if items_removed:
change_messages.update(UserChangeMessage.phone_numbers_removed(list(items_removed))["phone_numbers"])
if change_messages:
self.logger.add_change_message({'phone_numbers': change_messages})
def _fmt_phone(phone_number):
if phone_number and not isinstance(phone_number, str):
phone_number = str(int(phone_number))
return phone_number.lstrip("+")
class WebUserImporter(BaseUserImporter):
def add_to_domain(self, role_qualified_id, location_id):
self.user.add_as_web_user(self.user_domain, role=role_qualified_id, location_id=location_id)
self.role_updated = bool(role_qualified_id)
self.logger.add_info(UserChangeMessage.added_as_web_user(self.user_domain))
if location_id:
self._log_primary_location_info()
def _log_primary_location_info(self):
primary_location = self.user.get_sql_location(self.user_domain)
self.logger.add_info(UserChangeMessage.primary_location_info(primary_location))
def update_primary_location(self, location_id):
current_primary_location_id = get_user_primary_location_id(self.user, self.user_domain)
if location_id:
self.user.set_location(self.user_domain, location_id)
if current_primary_location_id != location_id:
self._log_primary_location_info()
else:
self.user.unset_location(self.user_domain)
# if there was a location before, log that it was cleared
if current_primary_location_id:
self.logger.add_info(UserChangeMessage.primary_location_removed())
def update_locations(self, location_codes, membership, domain_info):
from corehq.apps.user_importer.importer import (
check_modified_user_loc,
find_location_id,
get_location_from_site_code
)
location_ids = find_location_id(location_codes, domain_info.location_cache)
user_current_primary_location_id = membership.location_id
locations_updated, primary_loc_removed = check_modified_user_loc(location_ids,
membership.location_id,
membership.assigned_location_ids)
if primary_loc_removed:
self.user.unset_location(self.user_domain, commit=False)
if locations_updated:
self.user.reset_locations(self.user_domain, location_ids, commit=False)
if location_ids:
locations = [get_location_from_site_code(code, domain_info.location_cache)
for code in location_codes]
else:
locations = []
self.logger.add_info(UserChangeMessage.assigned_locations_info(locations))
# log this after assigned locations are updated, which can re-set primary location
user_updated_primary_location_id = get_user_primary_location_id(self.user, self.user_domain)
if user_updated_primary_location_id != user_current_primary_location_id:
if user_updated_primary_location_id:
self._log_primary_location_info()
else:
self.logger.add_info(UserChangeMessage.primary_location_removed())
def get_user_primary_location_id(user, domain):
primary_location = user.get_sql_location(domain)
if primary_location:
return primary_location.location_id
def get_user_primary_location_name(user, domain):
primary_location = user.get_sql_location(domain)
if primary_location:
return primary_location.name
def find_differences_in_list(target: list, source: list):
"""
Find the differences between 'source' and 'target' and
return (added_items, removed_items)
'added_items': items that are in 'target' but not in 'source'
'removed_items': items that are in 'source' but not 'target'
>>> find_differences_in_list(list_to_compare=[3,4,5,6], reference_list=[1,2,3,5])
({4, 6}, {1, 2})
"""
shared_items = set(target).intersection(source)
added_items = set(target).difference(shared_items)
removed_items = set(source).difference(shared_items)
return added_items, removed_items
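# --- Usage sketch (illustrative, not part of the original module): how a
# hypothetical bulk-upload row is normalised before the importer runs. ---
# row = {"is_active": "yes", "is_account_confirmed": False}
# spec_value_to_boolean_or_none(row, "is_active")             # -> True
# spec_value_to_boolean_or_none(row, "is_account_confirmed")  # -> False
# spec_value_to_boolean_or_none(row, "missing_column")        # -> None
# find_differences_in_list(target=[3, 4, 5, 6], source=[1, 2, 3, 5])  # -> ({4, 6}, {1, 2})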
|
[
"corehq.apps.user_importer.importer.find_location_id",
"corehq.apps.user_importer.importer.get_location_from_site_code",
"corehq.apps.users.audit.change_messages.UserChangeMessage.profile_info",
"corehq.apps.users.audit.change_messages.UserChangeMessage.password_reset",
"corehq.apps.users.audit.change_messages.UserChangeMessage.primary_location_info",
"corehq.apps.users.audit.change_messages.UserChangeMessage.primary_location_removed",
"corehq.apps.users.audit.change_messages.UserChangeMessage.assigned_locations_info",
"dimagi.utils.parsing.string_to_boolean",
"corehq.apps.user_importer.exceptions.UserUploadError",
"corehq.apps.users.audit.change_messages.UserChangeMessage.added_as_web_user",
"corehq.apps.users.audit.change_messages.UserChangeMessage.role_change",
"corehq.apps.users.util.log_user_change",
"corehq.apps.user_importer.importer.check_modified_user_loc"
] |
[((514, 538), 'dimagi.utils.parsing.string_to_boolean', 'string_to_boolean', (['value'], {}), '(value)\n', (531, 538), False, 'from dimagi.utils.parsing import string_to_boolean\n'), ((9084, 9144), 'corehq.apps.user_importer.importer.find_location_id', 'find_location_id', (['location_codes', 'domain_info.location_cache'], {}), '(location_codes, domain_info.location_cache)\n', (9100, 9144), False, 'from corehq.apps.user_importer.importer import check_modified_user_loc, find_location_id, get_location_from_site_code\n'), ((9259, 9357), 'corehq.apps.user_importer.importer.check_modified_user_loc', 'check_modified_user_loc', (['location_ids', 'self.user.location_id', 'self.user.assigned_location_ids'], {}), '(location_ids, self.user.location_id, self.user.\n assigned_location_ids)\n', (9282, 9357), False, 'from corehq.apps.user_importer.importer import check_modified_user_loc, find_location_id, get_location_from_site_code\n'), ((13048, 13108), 'corehq.apps.user_importer.importer.find_location_id', 'find_location_id', (['location_codes', 'domain_info.location_cache'], {}), '(location_codes, domain_info.location_cache)\n', (13064, 13108), False, 'from corehq.apps.user_importer.importer import check_modified_user_loc, find_location_id, get_location_from_site_code\n'), ((13224, 13324), 'corehq.apps.user_importer.importer.check_modified_user_loc', 'check_modified_user_loc', (['location_ids', 'membership.location_id', 'membership.assigned_location_ids'], {}), '(location_ids, membership.location_id, membership.\n assigned_location_ids)\n', (13247, 13324), False, 'from corehq.apps.user_importer.importer import check_modified_user_loc, find_location_id, get_location_from_site_code\n'), ((3379, 3758), 'corehq.apps.users.util.log_user_change', 'log_user_change', ([], {'by_domain': 'self.upload_domain', 'for_domain': 'self.user_domain', 'couch_user': 'self.user', 'changed_by_user': 'self.changed_by_user', 'changed_via': 'self.changed_via', 'change_messages': 'self.change_messages', 'action': 'action', 'fields_changed': 'fields_changed', 'bulk_upload_record_id': 'self.upload_record_id', 'for_domain_required_for_log': 'self.user_domain_required_for_log'}), '(by_domain=self.upload_domain, for_domain=self.user_domain,\n couch_user=self.user, changed_by_user=self.changed_by_user, changed_via\n =self.changed_via, change_messages=self.change_messages, action=action,\n fields_changed=fields_changed, bulk_upload_record_id=self.\n upload_record_id, for_domain_required_for_log=self.\n user_domain_required_for_log)\n', (3394, 3758), False, 'from corehq.apps.users.util import log_user_change\n'), ((6252, 6286), 'corehq.apps.users.audit.change_messages.UserChangeMessage.password_reset', 'UserChangeMessage.password_reset', ([], {}), '()\n', (6284, 6286), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((11834, 11887), 'corehq.apps.users.audit.change_messages.UserChangeMessage.added_as_web_user', 'UserChangeMessage.added_as_web_user', (['self.user_domain'], {}), '(self.user_domain)\n', (11869, 11887), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((12103, 12160), 'corehq.apps.users.audit.change_messages.UserChangeMessage.primary_location_info', 'UserChangeMessage.primary_location_info', (['primary_location'], {}), '(primary_location)\n', (12142, 12160), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((2800, 2843), 'corehq.apps.user_importer.exceptions.UserUploadError', 'UserUploadError', (['f"""Double Entry for {slug}"""'], {}), "(f'Double Entry for {slug}')\n", (2815, 2843), False, 'from corehq.apps.user_importer.exceptions import UserUploadError\n'), ((5698, 5737), 'corehq.apps.users.audit.change_messages.UserChangeMessage.role_change', 'UserChangeMessage.role_change', (['new_role'], {}), '(new_role)\n', (5727, 5737), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((8351, 8411), 'corehq.apps.users.audit.change_messages.UserChangeMessage.profile_info', 'UserChangeMessage.profile_info', (['new_profile_id', 'profile_name'], {}), '(new_profile_id, profile_name)\n', (8381, 8411), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((13940, 13992), 'corehq.apps.users.audit.change_messages.UserChangeMessage.assigned_locations_info', 'UserChangeMessage.assigned_locations_info', (['locations'], {}), '(locations)\n', (13981, 13992), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((9813, 9874), 'corehq.apps.user_importer.importer.get_location_from_site_code', 'get_location_from_site_code', (['code', 'domain_info.location_cache'], {}), '(code, domain_info.location_cache)\n', (9840, 9874), False, 'from corehq.apps.user_importer.importer import check_modified_user_loc, find_location_id, get_location_from_site_code\n'), ((9990, 10042), 'corehq.apps.users.audit.change_messages.UserChangeMessage.assigned_locations_info', 'UserChangeMessage.assigned_locations_info', (['locations'], {}), '(locations)\n', (10031, 10042), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((10099, 10144), 'corehq.apps.users.audit.change_messages.UserChangeMessage.assigned_locations_info', 'UserChangeMessage.assigned_locations_info', (['[]'], {}), '([])\n', (10140, 10144), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((10685, 10729), 'corehq.apps.users.audit.change_messages.UserChangeMessage.primary_location_removed', 'UserChangeMessage.primary_location_removed', ([], {}), '()\n', (10727, 10729), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((12730, 12774), 'corehq.apps.users.audit.change_messages.UserChangeMessage.primary_location_removed', 'UserChangeMessage.primary_location_removed', ([], {}), '()\n', (12772, 12774), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n'), ((13739, 13800), 'corehq.apps.user_importer.importer.get_location_from_site_code', 'get_location_from_site_code', (['code', 'domain_info.location_cache'], {}), '(code, domain_info.location_cache)\n', (13766, 13800), False, 'from corehq.apps.user_importer.importer import check_modified_user_loc, find_location_id, get_location_from_site_code\n'), ((14422, 14466), 'corehq.apps.users.audit.change_messages.UserChangeMessage.primary_location_removed', 'UserChangeMessage.primary_location_removed', ([], {}), '()\n', (14464, 14466), False, 'from corehq.apps.users.audit.change_messages import UserChangeMessage\n')]
|
import boto3
import os
import sys
import uuid
from urllib.parse import unquote_plus
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
s3_client = boto3.client('s3')
destBucket= os.environ['DEST_BUCKET']
control_key = "/"
def lambda_handler(event, context):
for record in event['Records']:
bucket = record['s3']['bucket']['name']
key = unquote_plus(record['s3']['object']['key'])
tmpkey = key.replace('/', '')
download_path = '/tmp/{}{}'.format(uuid.uuid4(), tmpkey)
upload_path = '/tmp/resized-{}'.format(tmpkey)
logger.info(str(key) + " download from " + str(bucket) + " beginging--------------")
s3_client.download_file(bucket, key, download_path)
logger.info(str(key) +" download from " + str(bucket) + " completed --------------")
#Reupload video with correct metadata
logger.info( str(key) + " re-upload to " + str(destBucket) + " beginging--------------")
if "preview" in key.lower():
s3_client.upload_file(download_path, destBucket, key, ExtraArgs= {'ContentType': 'video/mp4'})
else:
s3_client.upload_file(download_path, destBucket, key, ExtraArgs= {'ContentDisposition': 'attachment'})
logger.info(str(key) + " re-upload to "+ str(destBucket) + " completed --------------")
return
|
[
"urllib.parse.unquote_plus",
"uuid.uuid4",
"logging.getLogger",
"boto3.client"
] |
[((113, 132), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (130, 132), False, 'import logging\n'), ((176, 194), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (188, 194), False, 'import boto3\n'), ((398, 441), 'urllib.parse.unquote_plus', 'unquote_plus', (["record['s3']['object']['key']"], {}), "(record['s3']['object']['key'])\n", (410, 441), False, 'from urllib.parse import unquote_plus\n'), ((523, 535), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (533, 535), False, 'import uuid\n')]
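A minimal sketch of exercising the handler above locally, assuming a hand-built S3 put event; the bucket and key names are hypothetical, DEST_BUCKET must be set before import, and uncommenting the call would perform real S3 transfers:

sample_event = {
    'Records': [{
        's3': {
            'bucket': {'name': 'source-bucket'},              # hypothetical source bucket
            'object': {'key': 'videos/Preview%20Clip.mp4'},   # URL-encoded, hence unquote_plus
        }
    }]
}
# lambda_handler(sample_event, context=None)  # downloads the object, then re-uploads it to DEST_BUCKET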
|
""" Implementation of all available options """
from __future__ import print_function
"""Model architecture/optimization options for Seq2seq architecture."""
import argparse
import logging
logger = logging.getLogger(__name__)
# Index of arguments concerning the core model architecture
MODEL_ARCHITECTURE = {
'model_type',
'emsize',
'rnn_type',
'nhid',
'nlayers',
'use_all_enc_layers',
'bidirection',
'src_pos_emb',
'tgt_pos_emb',
'max_relative_pos',
'use_neg_dist',
'd_ff',
'd_k',
'd_v',
'num_head',
'trans_drop',
'n_characters',
'char_emsize',
'filter_size',
'nfilters'
}
SEQ2SEQ_ARCHITECTURE = {
'attn_type',
'coverage_attn',
'copy_attn',
'review_attn',
'force_copy',
'layer_wise_attn',
'split_decoder',
'reuse_copy_attn',
'reload_decoder_state',
'share_decoder_embeddings',
'conditional_decoding'
}
DATA_OPTIONS = {
'use_src_char',
'use_tgt_char',
'use_src_word',
'use_tgt_word',
'max_src_len',
'max_tgt_len',
'src_vocab_size',
'tgt_vocab_size',
'num_train_examples',
'batch_size',
'use_code_type',
'code_tag_type',
'uncase',
'max_characters_per_token',
'dataset_weights'
}
# Index of arguments concerning the model optimizer/training
MODEL_OPTIMIZER = {
'optimizer',
'fix_embeddings',
'learning_rate',
'momentum',
'weight_decay',
'rnn_padding',
'dropout_rnn',
'dropout',
'dropout_emb',
'cuda',
'grad_clipping',
'lr_decay',
'warmup_steps',
'num_epochs',
'parallel'
}
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_model_args(parser):
parser.register('type', 'bool', str2bool)
# Data options
data = parser.add_argument_group('Data parameters')
data.add_argument('--max_src_len', type=int, default=100,
help='Maximum allowed length for the source sequence')
data.add_argument('--max_tgt_len', type=int, default=50,
help='Maximum allowed length for the target sequence')
data.add_argument('--use_code_type', type='bool', default=False,
help='Use code type as additional feature for feature representations')
data.add_argument('--code_tag_type', type=str, default='subtoken',
help='Use code type as additional feature for feature representations')
# Model architecture
model = parser.add_argument_group('Summary Generator')
model.add_argument('--model_type', type=str, default='rnn',
choices=['rnn', 'transformer'],
help='Model architecture type')
model.add_argument('--emsize', type=int, default=300,
help='Embedding size if embedding_file is not given')
model.add_argument('--rnn_type', type=str, default='LSTM',
help='RNN type: LSTM, GRU')
model.add_argument('--nhid', type=int, default=200,
help='Hidden size of RNN units')
model.add_argument('--bidirection', type='bool', default=True,
help='use bidirectional recurrent unit')
model.add_argument('--nlayers', type=int, default=2,
help='Number of encoding layers')
model.add_argument('--use_all_enc_layers', type='bool', default=False,
help='Use a weighted average of all encoder layers\' '
'representation as the contextual representation')
# Transformer specific params
model.add_argument('--src_pos_emb', type='bool', default=True,
help='Use positional embeddings in encoder')
model.add_argument('--tgt_pos_emb', type='bool', default=True,
help='Use positional embeddings in decoder')
model.add_argument('--max_relative_pos', nargs='+', type=int,
default=0, help='Max value for relative position representations')
model.add_argument('--use_neg_dist', type='bool', default=True,
help='Use negative Max value for relative position representations')
model.add_argument('--d_ff', type=int, default=2048,
help='Number of units in position-wise FFNN')
model.add_argument('--d_k', type=int, default=64,
help='Hidden size of heads in multi-head attention')
model.add_argument('--d_v', type=int, default=64,
help='Hidden size of heads in multi-head attention')
model.add_argument('--num_head', type=int, default=8,
help='Number of heads in Multi-Head Attention')
model.add_argument('--trans_drop', type=float, default=0.2,
help='Dropout for transformer')
model.add_argument('--layer_wise_attn', type='bool', default=False,
help='Use layer-wise attention in Transformer')
# Input representation specific details
model.add_argument('--use_src_char', type='bool', default=False,
help='Use character embedding in the source')
model.add_argument('--use_tgt_char', type='bool', default=False,
help='Use character embedding in the target')
model.add_argument('--use_src_word', type='bool', default=True,
help='Use word embedding in the input')
model.add_argument('--use_tgt_word', type='bool', default=True,
help='Use word embedding in the input')
model.add_argument('--n_characters', type=int, default=260,
help='Character vocabulary size')
model.add_argument('--char_emsize', type=int, default=16,
help='Character embedding size')
model.add_argument('--filter_size', nargs='+', type=int,
default=5, help='Char convolution filter sizes')
model.add_argument('--nfilters', nargs='+', type=int,
default=100, help='Number of char convolution filters')
seq2seq = parser.add_argument_group('Seq2seq Model Specific Params')
seq2seq.add_argument('--attn_type', type=str, default='general',
help='Attention type for the seq2seq [dot, general, mlp]')
seq2seq.add_argument('--coverage_attn', type='bool', default=False,
help='Use coverage attention')
seq2seq.add_argument('--copy_attn', type='bool', default=False,
help='Use copy attention')
seq2seq.add_argument('--review_attn', type='bool', default=False,
help='Use review attention')
seq2seq.add_argument('--force_copy', type='bool', default=False,
help='Apply force copying')
seq2seq.add_argument('--reuse_copy_attn', type='bool', default=False,
help='Reuse encoder attention')
seq2seq.add_argument('--share_decoder_embeddings', type='bool', default=False,
help='Share decoder embeddings weight with softmax layer')
seq2seq.add_argument('--split_decoder', type='bool', default=False,
help='Split the decoder into two for copying and generation')
seq2seq.add_argument('--reload_decoder_state', type=str, default=None,
help='Reload decoder states for the seq2seq')
seq2seq.add_argument('--conditional_decoding', type='bool', default=False,
help='Conditional decoding applied to Seq2seq')
# Optimization details
optim = parser.add_argument_group('Neural QA Reader Optimization')
optim.add_argument('--optimizer', type=str, default='adam',
choices=['sgd', 'adam', 'adamW'],
help='Name of the optimizer')
optim.add_argument('--dropout_emb', type=float, default=0.2,
help='Dropout rate for word embeddings')
optim.add_argument('--dropout_rnn', type=float, default=0.2,
help='Dropout rate for RNN states')
optim.add_argument('--dropout', type=float, default=0.2,
help='Dropout for NN layers')
optim.add_argument('--learning_rate', type=float, default=0.001,
help='Learning rate for the optimizer')
parser.add_argument('--lr_decay', type=float, default=0.99,
help='Decay ratio for learning rate')
optim.add_argument('--grad_clipping', type=float, default=5.0,
help='Gradient clipping')
parser.add_argument('--early_stop', type=int, default=5,
help='Stop training if performance doesn\'t improve')
optim.add_argument('--weight_decay', type=float, default=0,
help='Weight decay factor')
optim.add_argument('--momentum', type=float, default=0,
help='Momentum factor')
optim.add_argument('--fix_embeddings', type='bool', default=True,
help='Keep word embeddings fixed (use pretrained)')
    optim.add_argument('--warmup_steps', type=int, default=10000,
                       help='Number of warmup steps')
    optim.add_argument('--warmup_epochs', type=int, default=0,
                       help='Number of warmup epochs')
def get_model_args(args):
"""Filter args for model ones.
From a args Namespace, return a new Namespace with *only* the args specific
to the model architecture or optimization. (i.e. the ones defined here.)
"""
global MODEL_ARCHITECTURE, MODEL_OPTIMIZER, SEQ2SEQ_ARCHITECTURE, DATA_OPTIONS
required_args = MODEL_ARCHITECTURE | MODEL_OPTIMIZER | SEQ2SEQ_ARCHITECTURE | DATA_OPTIONS
arg_values = {k: v for k, v in vars(args).items() if k in required_args}
return argparse.Namespace(**arg_values)
def override_model_args(old_args, new_args):
"""Set args to new parameters.
Decide which model args to keep and which to override when resolving a set
of saved args and new args.
We keep the new optimization or RL setting, and leave the model architecture alone.
"""
global MODEL_OPTIMIZER
old_args, new_args = vars(old_args), vars(new_args)
for k in old_args.keys():
if k in new_args and old_args[k] != new_args[k]:
if k in MODEL_OPTIMIZER:
logger.info('Overriding saved %s: %s --> %s' %
(k, old_args[k], new_args[k]))
old_args[k] = new_args[k]
else:
logger.info('Keeping saved %s: %s' % (k, old_args[k]))
return argparse.Namespace(**old_args)
def add_new_model_args(old_args, new_args):
"""Set args to new parameters.
Decide which model args to keep and which to override when resolving a set
of saved args and new args.
We keep the new optimization or RL setting, and leave the model architecture alone.
"""
global ADVANCED_OPTIONS
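    # NOTE: ADVANCED_OPTIONS is referenced here but never defined in this file; it must be a module-level set of arg names, or this function raises NameError.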
old_args, new_args = vars(old_args), vars(new_args)
for k in new_args.keys():
if k not in old_args:
if k in ADVANCED_OPTIONS:
logger.info('Adding arg %s: %s' % (k, new_args[k]))
old_args[k] = new_args[k]
return argparse.Namespace(**old_args)
|
[
"argparse.Namespace",
"logging.getLogger"
] |
[((209, 236), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (226, 236), False, 'import logging\n'), ((9999, 10031), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**arg_values)\n', (10017, 10031), False, 'import argparse\n'), ((10813, 10843), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**old_args)\n', (10831, 10843), False, 'import argparse\n'), ((11453, 11483), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**old_args)\n', (11471, 11483), False, 'import argparse\n')]
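A short sketch of how the helpers above compose, assuming this file is importable as a module named config (the module name and the argument values are hypothetical):

import argparse
from config import add_model_args, get_model_args, override_model_args

parser = argparse.ArgumentParser()
add_model_args(parser)
saved_args = parser.parse_args(['--nhid', '256', '--learning_rate', '0.01'])
saved_args = get_model_args(saved_args)   # keep only architecture/optimizer/data keys

# When resuming, adopt the new optimizer setting but keep the saved architecture:
new_args = parser.parse_args(['--learning_rate', '0.001'])
resumed = override_model_args(saved_args, new_args)
print(resumed.nhid, resumed.learning_rate)   # 256 0.001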
|
from TikTokApi import TikTokApi
import json
api = TikTokApi.get_instance()
count = 1
tiktoks = api.byUsername("iamtabithabrown", count=count)
jsonString = json.dumps(tiktoks)
jsonFile = open("tiktok_example_data.json", "w")
jsonFile.write(jsonString)
jsonFile.close()
for tiktok in tiktoks:
# print(tiktok)
print(tiktok['video']['originCover'])
|
[
"TikTokApi.TikTokApi.get_instance",
"json.dumps"
] |
[((51, 75), 'TikTokApi.TikTokApi.get_instance', 'TikTokApi.get_instance', ([], {}), '()\n', (73, 75), False, 'from TikTokApi import TikTokApi\n'), ((159, 178), 'json.dumps', 'json.dumps', (['tiktoks'], {}), '(tiktoks)\n', (169, 178), False, 'import json\n')]
|
# Generated by Django 3.1.7 on 2021-03-16 15:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0003_profile_dtu_email'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='roll_no',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((334, 388), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (350, 388), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import operator
import os
import re
import subprocess
#
# Find the root of the git tree
#
git_root = (subprocess
.check_output(['git', 'rev-parse', '--show-toplevel'])
.decode('utf-8')
.strip())
#
# Parse command line arguments
#
default_out = os.path.join(git_root, '.github', 'CODEOWNERS')
argp = argparse.ArgumentParser('Generate .github/CODEOWNERS file')
argp.add_argument('--out', '-o',
type=str,
default=default_out,
help='Output file (default %s)' % default_out)
args = argp.parse_args()
#
# Walk git tree to locate all OWNERS files
#
owners_files = [os.path.join(root, 'OWNERS')
for root, dirs, files in os.walk(git_root)
if 'OWNERS' in files]
#
# Parse owners files
#
Owners = collections.namedtuple('Owners', 'parent directives dir')
Directive = collections.namedtuple('Directive', 'who globs')
def parse_owners(filename):
with open(filename) as f:
src = f.read().splitlines()
parent = True
directives = []
for line in src:
line = line.strip()
# line := directive | comment
if not line: continue
if line[0] == '#': continue
# it's a directive
directive = None
if line == 'set noparent':
parent = False
elif line == '*':
directive = Directive(who='*', globs=[])
elif ' ' in line:
(who, globs) = line.split(' ', 1)
globs_list = [glob
for glob in globs.split(' ')
if glob]
directive = Directive(who=who, globs=globs_list)
else:
directive = Directive(who=line, globs=[])
if directive:
directives.append(directive)
return Owners(parent=parent,
directives=directives,
dir=os.path.relpath(os.path.dirname(filename), git_root))
owners_data = sorted([parse_owners(filename)
for filename in owners_files],
key=operator.attrgetter('dir'))
#
# Modify owners so that parented OWNERS files point to the actual
# Owners tuple with their parent field
#
new_owners_data = []
for owners in owners_data:
if owners.parent == True:
best_parent = None
best_parent_score = None
for possible_parent in owners_data:
if possible_parent is owners: continue
rel = os.path.relpath(owners.dir, possible_parent.dir)
# '..' ==> we had to walk up from possible_parent to get to owners
# ==> not a parent
if '..' in rel: continue
depth = len(rel.split(os.sep))
if not best_parent or depth < best_parent_score:
best_parent = possible_parent
best_parent_score = depth
if best_parent:
owners = owners._replace(parent = best_parent.dir)
else:
owners = owners._replace(parent = None)
new_owners_data.append(owners)
owners_data = new_owners_data
#
# In bottom to top order, process owners data structures to build up
# a CODEOWNERS file for GitHub
#
def full_dir(rules_dir, sub_path):
return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
# glob using git
gg_cache = {}
def git_glob(glob):
global gg_cache
if glob in gg_cache: return gg_cache[glob]
r = set(subprocess
.check_output(['git', 'ls-files', os.path.join(git_root, glob)])
.decode('utf-8')
.strip()
.splitlines())
gg_cache[glob] = r
return r
def expand_directives(root, directives):
globs = collections.OrderedDict()
# build a table of glob --> owners
for directive in directives:
for glob in directive.globs or ['**']:
if glob not in globs:
globs[glob] = []
if directive.who not in globs[glob]:
globs[glob].append(directive.who)
# expand owners for intersecting globs
sorted_globs = sorted(globs.keys(),
key=lambda g: len(git_glob(full_dir(root, g))),
reverse=True)
out_globs = collections.OrderedDict()
for glob_add in sorted_globs:
who_add = globs[glob_add]
pre_items = [i for i in out_globs.items()]
out_globs[glob_add] = who_add.copy()
for glob_have, who_have in pre_items:
files_add = git_glob(full_dir(root, glob_add))
files_have = git_glob(full_dir(root, glob_have))
intersect = files_have.intersection(files_add)
if intersect:
for f in sorted(files_add): # sorted to ensure merge stability
if f not in intersect:
out_globs[os.path.relpath(f, start=root)] = who_add
for who in who_have:
if who not in out_globs[glob_add]:
out_globs[glob_add].append(who)
return out_globs
def add_parent_to_globs(parent, globs, globs_dir):
if not parent: return
for owners in owners_data:
if owners.dir == parent:
owners_globs = expand_directives(owners.dir, owners.directives)
for oglob, oglob_who in owners_globs.items():
for gglob, gglob_who in globs.items():
files_parent = git_glob(full_dir(owners.dir, oglob))
files_child = git_glob(full_dir(globs_dir, gglob))
intersect = files_parent.intersection(files_child)
gglob_who_orig = gglob_who.copy()
if intersect:
for f in sorted(files_child): # sorted to ensure merge stability
if f not in intersect:
who = gglob_who_orig.copy()
globs[os.path.relpath(f, start=globs_dir)] = who
for who in oglob_who:
if who not in gglob_who:
gglob_who.append(who)
add_parent_to_globs(owners.parent, globs, globs_dir)
return
assert(False)
todo = owners_data.copy()
done = set()
with open(args.out, 'w') as out:
out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
out.write('# Uses OWNERS files in different modules throughout the\n')
out.write('# repository as the source of truth for module ownership.\n')
written_globs = []
while todo:
head, *todo = todo
if head.parent and not head.parent in done:
todo.append(head)
continue
globs = expand_directives(head.dir, head.directives)
add_parent_to_globs(head.parent, globs, head.dir)
for glob, owners in globs.items():
skip = False
for glob1, owners1, dir1 in reversed(written_globs):
files = git_glob(full_dir(head.dir, glob))
files1 = git_glob(full_dir(dir1, glob1))
intersect = files.intersection(files1)
if files == intersect:
if sorted(owners) == sorted(owners1):
skip = True # nothing new in this rule
break
elif intersect:
# continuing would cause a semantic change since some files are
# affected differently by this rule and CODEOWNERS is order dependent
break
if not skip:
out.write('/%s %s\n' % (
full_dir(head.dir, glob), ' '.join(owners)))
written_globs.append((glob, owners, head.dir))
done.add(head.dir)
|
[
"argparse.ArgumentParser",
"subprocess.check_output",
"os.walk",
"os.path.dirname",
"operator.attrgetter",
"collections.namedtuple",
"os.path.relpath",
"collections.OrderedDict",
"os.path.join"
] |
[((920, 967), 'os.path.join', 'os.path.join', (['git_root', '""".github"""', '"""CODEOWNERS"""'], {}), "(git_root, '.github', 'CODEOWNERS')\n", (932, 967), False, 'import os\n'), ((976, 1035), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Generate .github/CODEOWNERS file"""'], {}), "('Generate .github/CODEOWNERS file')\n", (999, 1035), False, 'import argparse\n'), ((1453, 1510), 'collections.namedtuple', 'collections.namedtuple', (['"""Owners"""', '"""parent directives dir"""'], {}), "('Owners', 'parent directives dir')\n", (1475, 1510), False, 'import collections\n'), ((1523, 1571), 'collections.namedtuple', 'collections.namedtuple', (['"""Directive"""', '"""who globs"""'], {}), "('Directive', 'who globs')\n", (1545, 1571), False, 'import collections\n'), ((1291, 1319), 'os.path.join', 'os.path.join', (['root', '"""OWNERS"""'], {}), "(root, 'OWNERS')\n", (1303, 1319), False, 'import os\n'), ((4069, 4094), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4092, 4094), False, 'import collections\n'), ((4547, 4572), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4570, 4572), False, 'import collections\n'), ((1361, 1378), 'os.walk', 'os.walk', (['git_root'], {}), '(git_root)\n', (1368, 1378), False, 'import os\n'), ((2594, 2620), 'operator.attrgetter', 'operator.attrgetter', (['"""dir"""'], {}), "('dir')\n", (2613, 2620), False, 'import operator\n'), ((3651, 3684), 'os.path.join', 'os.path.join', (['rules_dir', 'sub_path'], {}), '(rules_dir, sub_path)\n', (3663, 3684), False, 'import os\n'), ((2958, 3006), 'os.path.relpath', 'os.path.relpath', (['owners.dir', 'possible_parent.dir'], {}), '(owners.dir, possible_parent.dir)\n', (2973, 3006), False, 'import os\n'), ((740, 804), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--show-toplevel']"], {}), "(['git', 'rev-parse', '--show-toplevel'])\n", (763, 804), False, 'import subprocess\n'), ((2432, 2457), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (2447, 2457), False, 'import os\n'), ((5072, 5102), 'os.path.relpath', 'os.path.relpath', (['f'], {'start': 'root'}), '(f, start=root)\n', (5087, 5102), False, 'import os\n'), ((5987, 6022), 'os.path.relpath', 'os.path.relpath', (['f'], {'start': 'globs_dir'}), '(f, start=globs_dir)\n', (6002, 6022), False, 'import os\n'), ((3895, 3923), 'os.path.join', 'os.path.join', (['git_root', 'glob'], {}), '(git_root, glob)\n', (3907, 3923), False, 'import os\n')]
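A self-contained re-sketch of the directive grammar that parse_owners implements above, for illustration only (the real tool additionally resolves parent directories and expands globs via git):

import collections

Directive = collections.namedtuple('Directive', 'who globs')

def parse_lines(lines):
    parent, directives = True, []
    for line in (l.strip() for l in lines):
        if not line or line.startswith('#'):
            continue                    # blank lines and comments are skipped
        if line == 'set noparent':
            parent = False              # stop inheriting owners from parent dirs
        elif ' ' in line:
            who, globs = line.split(' ', 1)
            directives.append(Directive(who, [g for g in globs.split(' ') if g]))
        else:
            directives.append(Directive(line, []))   # bare name (or '*') owns everything
    return parent, directives

print(parse_lines(['set noparent', '# maintainers', 'alice', 'bob *.py tools/**']))
# -> (False, [Directive(who='alice', globs=[]), Directive(who='bob', globs=['*.py', 'tools/**'])])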
|
import os
from equities import Universe
import random
u = Universe()
k,f,s = 'bar',(10,7),True
ciks = u.ciks
random.shuffle(ciks)
for cik in ciks:  # iterate in the shuffled order
c = u.company(cik)
income = c['income']
if not income.empty:
income.to_csv(os.path.join('data','income'+c['name']+'.csv'))
balance = c['balance']
if not balance.empty:
balance.to_csv(os.path.join('data','balance'+c['name']+'.csv'))
cash = c['cash']
if not cash.empty:
cash.to_csv(os.path.join('data','cash'+c['name']+'.csv'))
prices = c['prices']
if not prices.empty:
prices.to_csv(os.path.join('data','prices'+c['name']+'.csv'))
|
[
"random.shuffle",
"os.path.join",
"equities.Universe"
] |
[((60, 70), 'equities.Universe', 'Universe', ([], {}), '()\n', (68, 70), False, 'from equities import Universe\n'), ((112, 132), 'random.shuffle', 'random.shuffle', (['ciks'], {}), '(ciks)\n', (126, 132), False, 'import random\n'), ((250, 301), 'os.path.join', 'os.path.join', (['"""data"""', "('income' + c['name'] + '.csv')"], {}), "('data', 'income' + c['name'] + '.csv')\n", (262, 301), False, 'import os\n'), ((375, 427), 'os.path.join', 'os.path.join', (['"""data"""', "('balance' + c['name'] + '.csv')"], {}), "('data', 'balance' + c['name'] + '.csv')\n", (387, 427), False, 'import os\n'), ((489, 538), 'os.path.join', 'os.path.join', (['"""data"""', "('cash' + c['name'] + '.csv')"], {}), "('data', 'cash' + c['name'] + '.csv')\n", (501, 538), False, 'import os\n'), ((608, 659), 'os.path.join', 'os.path.join', (['"""data"""', "('prices' + c['name'] + '.csv')"], {}), "('data', 'prices' + c['name'] + '.csv')\n", (620, 659), False, 'import os\n')]
|
# Frontend
from tkinter import *
import tkinter.messagebox
import stdDatabase_Backend
class Student():
def __init__(self, root):
self.root = root
self.root.title("Akwins - Your Student Data Manager")
self.root.geometry("1350x7500+0+0")
self.root.config(bg = "#3399FF")
StdID = StringVar()
Firstname = StringVar()
Lastname = StringVar()
DOB = StringVar()
Age = StringVar()
Gender = StringVar()
Address = StringVar()
Mobile = StringVar()
#==========================================================================================================================================================
def iExit():
iExit = tkinter.messagebox.askyesno("Akwins", "Are you sure you want to exit?")
if iExit > 0:
root.destroy()
def ClearData():
self.txtStdID.delete(0, END)
self.txtFirstname.delete(0, END)
self.txtLastname.delete(0, END)
self.txtDOB.delete(0, END)
self.txtAge.delete(0, END)
self.txtGender.delete(0, END)
self.txtAddress.delete(0, END)
self.txtMobile.delete(0, END)
def AddData():
if(len(StdID.get()) != 0):
stdDatabase_Backend.AddStdRec(StdID.get(), Firstname.get(), Lastname.get(), DOB.get(), Age.get(), Gender.get(), Address.get(), Mobile.get())
MyStudentList.delete(0, END)
MyStudentList.insert(END, (StdID.get(), Firstname.get(), Lastname.get(), DOB.get(), Age.get(), Gender.get(), Address.get(), Mobile.get()))
def DisplayData():
MyStudentList.delete(0, END)
for row in stdDatabase_Backend.viewData():
MyStudentList.insert(END, row, str(""))
def StudentRec(event):
global sd
            searchStd = MyStudentList.curselection()[0]
sd = MyStudentList.get(searchStd)
self.txtStdID.delete(0, END)
self.txtStdID.insert(END, sd[1])
self.txtFirstname.delete(0, END)
self.txtFirstname.insert(END, sd[2])
self.txtLastname.delete(0, END)
self.txtLastname.insert(END, sd[3])
self.txtDOB.delete(0, END)
self.txtDOB.insert(END, sd[4])
self.txtAge.delete(0, END)
self.txtAge.insert(END, sd[5])
self.txtGender.delete(0, END)
self.txtGender.insert(END, sd[6])
self.txtAddress.delete(0, END)
self.txtAddress.insert(END, sd[7])
self.txtMobile.delete(0, END)
self.txtMobile.insert(END, sd[8])
def searchDatabase():
MyStudentList.delete(0, END)
for row in stdDatabase_Backend.searchData(StdID.get(), Firstname.get(), Lastname.get(), DOB.get(), Age.get(), Gender.get(), Address.get(), Mobile.get()):
MyStudentList.insert(END, row, str(""))
def update():
if(len(StdID.get()) != 0):
stdDatabase_Backend.deleteRec(sd[0])
if(len(StdID.get()) != 0):
stdDatabase_Backend.AddStdRec(StdID.get(), Firstname.get(), Lastname.get(), DOB.get(), Age.get(), Gender.get(), Address.get(), Mobile.get())
MyStudentList.delete(0, END)
                MyStudentList.insert(END, (StdID.get(), Firstname.get(), Lastname.get(), DOB.get(), Age.get(), Gender.get(), Address.get(), Mobile.get()))  # one row per record, as in AddData
def DeleteData():
if(len(StdID.get()) != 0):
stdDatabase_Backend.deleteRec(sd[0])
ClearData()
DisplayData()
# =========================================================================================================================================================
MainFrame = Frame(self.root, bg = "#3399FF")
MainFrame.grid()
TitleFrame = Frame(MainFrame, bd = 2, padx = 54, pady = 8, bg = "#F8F8FF", relief = RIDGE)
TitleFrame.pack(side = TOP)
self.lblTitle = Label(TitleFrame, font = ("Arial", 47 ,"bold"), text = "Akwins - System Data Manager", bg = "#F8F8FF")
self.lblTitle.grid()
ButtonFrame = Frame(MainFrame, bd = 2, width = 1350, height = 70, padx = 18, pady = 10, bg = "#F8F8FF", relief = RIDGE)
ButtonFrame.pack(side = BOTTOM)
DataFrame = Frame(MainFrame, bd = 1, width = 1350, height = 400, padx = 20, pady = 20, bg = "#3399FF")
DataFrame.pack(side = BOTTOM)
DataFrameLEFT = LabelFrame(DataFrame, bd = 1, width = 1000, height = 600, padx = 20, relief = RIDGE, bg = "#F8F8FF", font = ("Arial", 20 ,"bold"), text = "Student Info\n")
DataFrameLEFT.pack(side = LEFT)
DataFrameRIGHT = LabelFrame(DataFrame, bd = 1, width = 450, height = 300, padx = 31, pady = 3, relief = RIDGE, bg = "#F8F8FF", font = ("Arial", 20 ,"bold"), text = "Student Data\n")
DataFrameRIGHT.pack(side = RIGHT)
# ===================================================================================================================================================================================================================================
self.lblStdID = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Student ID:", padx = 2, pady = 2, bg = "#F8F8FF")
self.lblStdID.grid(row = 0, column = 0, sticky = W)
self.txtStdID = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = StdID, width = 39)
self.txtStdID.grid(row = 0, column = 1)
#===================================================================================================================================================================================
self.lblFirstname = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Firstname:", padx = 2, pady = 2, bg = "#F8F8FF")
self.lblFirstname.grid(row = 1, column = 0, sticky = W)
self.txtFirstname = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = Firstname, width = 39)
self.txtFirstname.grid(row = 1, column = 1)
#======================================================================================================================================================================================================
self.lblLastname = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Lastname:", padx = 2, pady = 2, bg = "#F8F8FF")
self.lblLastname.grid(row = 2, column = 0, sticky = W)
self.txtLastname = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = Lastname, width = 39)
self.txtLastname.grid(row = 2, column = 1)
# ===================================================================================================================================================================================================================================
self.lblDOB = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Date Of Birth:", padx = 2, pady = 3, bg = "#F8F8FF")
self.lblDOB.grid(row = 3, column = 0, sticky = W)
self.txtDOB = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = DOB, width = 39)
self.txtDOB.grid(row = 3, column = 1)
# ===================================================================================================================================================================================================================================
self.lblAge = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Age:", padx = 2, pady = 3, bg = "#F8F8FF")
self.lblAge.grid(row = 4, column = 0, sticky = W)
self.txtAge = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = Age, width = 39)
self.txtAge.grid(row = 4, column = 1)
# ===================================================================================================================================================================================================================================
self.lblGender = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Gender:", padx = 2, pady = 3, bg = "#F8F8FF")
self.lblGender.grid(row = 5, column = 0, sticky = W)
self.txtGender = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = Gender, width = 39)
self.txtGender.grid(row = 5, column = 1)
# ===================================================================================================================================================================================================================================
self.lblAddress = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Address:", padx = 2, pady = 3, bg = "#F8F8FF")
self.lblAddress.grid(row = 6, column = 0, sticky = W)
self.txtAddress = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = Address, width = 39)
self.txtAddress.grid(row = 6, column = 1)
# ===================================================================================================================================================================================================================================
self.lblMobile = Label(DataFrameLEFT, font = ("Arial", 20 ,"bold"), text = "Mobile No:", padx = 2, pady = 3, bg = "#F8F8FF")
self.lblMobile.grid(row = 7, column = 0, sticky = W)
self.txtMobile = Entry(DataFrameLEFT, font = ("Arial", 20 ,"bold"), textvariable = Mobile, width = 39)
self.txtMobile.grid(row = 7, column = 1)
#=====================================================================================================================================================================================================================================
MyScrollbar = Scrollbar(DataFrameRIGHT)
MyScrollbar.grid(row = 0, column = 1, sticky = "ns")
MyStudentList = Listbox(DataFrameRIGHT, width = 41, height = 16, font = ("Arial", 12, "bold"), yscrollcommand = MyScrollbar.set)
MyStudentList.bind('<<ListboxSelect>>', StudentRec)
MyStudentList.grid(row = 0, column = 0, padx = 8)
MyScrollbar.config(command = MyStudentList.yview)
#===================================================================================================================================================================
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Add New", height = 1, width = 10, bd = 4, command = AddData)
btnAddNew.grid(row = 0, column = 0)
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Display", height = 1, width = 10, bd = 4, command = DisplayData)
btnAddNew.grid(row = 0, column = 1)
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Clear", height = 1, width = 10, bd = 4, command = ClearData)
btnAddNew.grid(row = 0, column = 2)
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Delete", height = 1, width = 10, bd = 4, command = DeleteData)
btnAddNew.grid(row = 0, column = 3)
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Search", height = 1, width = 10, bd = 4, command = searchDatabase)
btnAddNew.grid(row = 0, column = 4)
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Update", height = 1, width = 10, bd = 4, command = update)
btnAddNew.grid(row = 0, column = 5)
btnAddNew = Button(ButtonFrame, font = ("Arial", 20 ,"bold"), text = "Exit", height = 1, width = 10, bd = 4, command = iExit)
btnAddNew.grid(row = 0, column = 6)
if __name__ == "__main__":
root = Tk()
    application = Student(root)
root.mainloop()
|
[
"stdDatabase_Backend.deleteRec",
"stdDatabase_Backend.viewData"
] |
[((1832, 1862), 'stdDatabase_Backend.viewData', 'stdDatabase_Backend.viewData', ([], {}), '()\n', (1860, 1862), False, 'import stdDatabase_Backend\n'), ((3295, 3331), 'stdDatabase_Backend.deleteRec', 'stdDatabase_Backend.deleteRec', (['sd[0]'], {}), '(sd[0])\n', (3324, 3331), False, 'import stdDatabase_Backend\n'), ((3859, 3895), 'stdDatabase_Backend.deleteRec', 'stdDatabase_Backend.deleteRec', (['sd[0]'], {}), '(sd[0])\n', (3888, 3895), False, 'import stdDatabase_Backend\n')]
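A minimal sketch of the stdDatabase_Backend interface the frontend above expects; the sqlite3 schema is hypothetical (an autoincrement id followed by the eight form fields, matching the sd[0]..sd[8] indexing in StudentRec and deleteRec), and the real module may differ:

import sqlite3

def _conn():
    con = sqlite3.connect("students.db")
    con.execute("""CREATE TABLE IF NOT EXISTS student
                   (id INTEGER PRIMARY KEY, StdID TEXT, Firstname TEXT, Lastname TEXT,
                    DOB TEXT, Age TEXT, Gender TEXT, Address TEXT, Mobile TEXT)""")
    return con

def AddStdRec(StdID, Firstname, Lastname, DOB, Age, Gender, Address, Mobile):
    with _conn() as con:
        con.execute("INSERT INTO student VALUES (NULL,?,?,?,?,?,?,?,?)",
                    (StdID, Firstname, Lastname, DOB, Age, Gender, Address, Mobile))

def viewData():
    with _conn() as con:
        return con.execute("SELECT * FROM student").fetchall()

def searchData(StdID, Firstname, Lastname, DOB, Age, Gender, Address, Mobile):
    with _conn() as con:
        return con.execute("SELECT * FROM student WHERE StdID=? OR Firstname=?",
                           (StdID, Firstname)).fetchall()   # simplified match

def deleteRec(id):
    with _conn() as con:
        con.execute("DELETE FROM student WHERE id=?", (id,))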
|
import inspect
"""
An immutable class representing a command, which is anything that has a side
effect or is asynchronous.
"""
class Cmd:
def __init__(self, performer_getter, map_functions=None, dependent=None):
# A non-async function that, when given the result from `dependent`,
# returns a function (can be async or not) that performs the side
        # effect. The function-returning function should ideally be pure.
self.performer_getter = performer_getter
# Transform functions to perform on the resulting value
self.map_functions = map_functions or []
# A Cmd that should be performed first before performing this command.
# The result from this Cmd will be passed to `performer_getter` for this
# Cmd's command.
self.dependent = dependent
def map(self, function):
return type(self)(self.performer_getter, map_functions=[*self.map_functions, function], dependent=self.dependent)
def then(self, then_command_getter):
return type(self)(then_command_getter, dependent=self)
async def eval(self):
if self.dependent:
result = await self.dependent.eval()
else:
result = ()
for function in self.map_functions:
result = function(result)
performer = self.performer_getter(result)
maybe_awaitable = performer()
if inspect.isawaitable(maybe_awaitable):
return await maybe_awaitable
else:
return maybe_awaitable
def __repr__(self):
return 'Cmd(%s, map_functions=%s, dependent=%s)' % (repr(self.performer_getter), repr(self.map_functions), repr(self.dependent))
|
[
"inspect.isawaitable"
] |
[((1265, 1301), 'inspect.isawaitable', 'inspect.isawaitable', (['maybe_awaitable'], {}), '(maybe_awaitable)\n', (1284, 1301), False, 'import inspect\n')]
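A brief usage sketch of the class above; asyncio and both performer getters are illustrative assumptions, not part of the original:

import asyncio

def get_fetcher(_ignored):
    async def fetch():
        await asyncio.sleep(0)          # stand-in for real async I/O
        return "payload"
    return fetch

def get_printer(text):
    return lambda: print("got:", text)

# Run the fetch command, uppercase its result via map, then hand it to the printer.
cmd = Cmd(get_fetcher).then(get_printer).map(str.upper)
asyncio.run(cmd.eval())                   # prints: got: PAYLOAD

Note that, as written, map transforms the value flowing out of the dependent command before the performer getter sees it.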
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-09 16:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ejudge', '0012_auto_20170405_1555'),
]
operations = [
migrations.AddField(
model_name='solutioncheckingresult',
name='report',
field=models.TextField(blank=True),
),
]
|
[
"django.db.models.TextField"
] |
[((414, 442), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (430, 442), False, 'from django.db import migrations, models\n')]
|
import igraph as ig
from load_data import load_data
def create_graph(data):
nodes_count = len(data['nodes'])
edges_count = len(data['links'])
Edges=[(data['links'][i]['source'], data['links'][i]['target']) for i in range(edges_count)]
return ig.Graph(Edges, directed=False)
if __name__ == "__main__":
print(create_graph(load_data()))
|
[
"load_data.load_data",
"igraph.Graph"
] |
[((249, 280), 'igraph.Graph', 'ig.Graph', (['Edges'], {'directed': '(False)'}), '(Edges, directed=False)\n', (257, 280), True, 'import igraph as ig\n'), ((329, 340), 'load_data.load_data', 'load_data', ([], {}), '()\n', (338, 340), False, 'from load_data import load_data\n')]
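A tiny hand-built payload matching the shape create_graph expects (hypothetical data; load_data normally supplies this from disk):

sample = {
    'nodes': [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}],
    'links': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}],
}
print(create_graph(sample).summary())   # an undirected graph with 3 vertices and 2 edges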
|
# OpenNero will execute ModMain when this mod is loaded
from Maze.client import ClientMain
def ModMain():
ClientMain()
def StartMe():
from Maze.module import getMod
getMod().set_speedup(1.0) # full speed ahead
getMod().start_sarsa() # start an algorithm for headless mode
|
[
"Maze.module.getMod",
"Maze.client.ClientMain"
] |
[((111, 123), 'Maze.client.ClientMain', 'ClientMain', ([], {}), '()\n', (121, 123), False, 'from Maze.client import ClientMain\n'), ((179, 187), 'Maze.module.getMod', 'getMod', ([], {}), '()\n', (185, 187), False, 'from Maze.module import getMod\n'), ((228, 236), 'Maze.module.getMod', 'getMod', ([], {}), '()\n', (234, 236), False, 'from Maze.module import getMod\n')]
|
import os
from vitaes_parser import env
if env == 'production' or env == 'staging':
print('Building for %s environment...' % env)
print()
os.system('docker build --tag latexos latexos/')
os.system('docker build --tag webapp webapp/')
os.system('docker build --tag renderer renderer/')
os.system('docker build --tag api api/')
os.system('docker build --tag logger logger/')
os.system('docker build --tag storage storage/')
print()
print('Built in %s environment' % env)
else:
print('Building for development environment...')
print()
os.system('docker-compose build')
print()
print('Built in development environment')
|
[
"os.system"
] |
[((151, 199), 'os.system', 'os.system', (['"""docker build --tag latexos latexos/"""'], {}), "('docker build --tag latexos latexos/')\n", (160, 199), False, 'import os\n'), ((204, 250), 'os.system', 'os.system', (['"""docker build --tag webapp webapp/"""'], {}), "('docker build --tag webapp webapp/')\n", (213, 250), False, 'import os\n'), ((255, 305), 'os.system', 'os.system', (['"""docker build --tag renderer renderer/"""'], {}), "('docker build --tag renderer renderer/')\n", (264, 305), False, 'import os\n'), ((310, 350), 'os.system', 'os.system', (['"""docker build --tag api api/"""'], {}), "('docker build --tag api api/')\n", (319, 350), False, 'import os\n'), ((355, 401), 'os.system', 'os.system', (['"""docker build --tag logger logger/"""'], {}), "('docker build --tag logger logger/')\n", (364, 401), False, 'import os\n'), ((406, 454), 'os.system', 'os.system', (['"""docker build --tag storage storage/"""'], {}), "('docker build --tag storage storage/')\n", (415, 454), False, 'import os\n'), ((585, 618), 'os.system', 'os.system', (['"""docker-compose build"""'], {}), "('docker-compose build')\n", (594, 618), False, 'import os\n')]
|
import unittest
from taric_challange.core.models.book import Book
data = {"author_data" : [
{
"name": "<NAME>",
"id": "richards_rowland"
}],
"awards_text": "",
"marc_enc_level": "4",
"subject_ids": [
"mechanics_applied",
"physics"
],
"summary": "",
"isbn13": "9780849303159",
"dewey_normal": "620.105",
"title_latin": "Principles of solid mechanics",
"publisher_id": "crc_press",
"dewey_decimal": "620/.1/05",
"publisher_text": "Boca Raton, FL : CRC Press, 2001.",
"language": "eng",
"physical_description_text": "446 p. : ill. ; 24 cm.",
"isbn10": "084930315X",
"edition_info": "(alk. paper)",
"urls_text": "",
"lcc_number": "TA350",
"publisher_name": "CRC Press",
"book_id": "principles_of_solid_mechanics",
"notes": "Includes bibliographical references and index.",
"title": "Principles of solid mechanics",
"title_long": ""}
class BookTest(unittest.TestCase):
    def test_book_model(self):  # must start with "test_" for unittest discovery
book = Book(data)
self.assertEqual(book.title, 'Principles of solid mechanics')
self.assertEqual(book.publisher, 'CRC Press')
self.assertEqual(book.subjects, 'Mechanics applied\nPhysics')
self.assertEqual(book.author, "<NAME>")
self.assertEqual(book.isbn10, '084930315X')
self.assertEqual(book.isbn13, '9780849303159')
self.assertEqual(book.edition, '(alk. paper)')
self.assertEqual(book.language, 'eng')
|
[
"taric_challange.core.models.book.Book"
] |
[((1147, 1157), 'taric_challange.core.models.book.Book', 'Book', (['data'], {}), '(data)\n', (1151, 1157), False, 'from taric_challange.core.models.book import Book\n')]
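A minimal reconstruction of the Book model that these assertions imply; the field mapping is inferred from the test and the real class may differ:

class Book:
    def __init__(self, data):
        self.title = data['title']
        self.publisher = data['publisher_name']
        self.author = data['author_data'][0]['name']
        self.isbn10 = data['isbn10']
        self.isbn13 = data['isbn13']
        self.edition = data['edition_info']
        self.language = data['language']
        # 'mechanics_applied' -> 'Mechanics applied', joined with newlines
        self.subjects = '\n'.join(s.replace('_', ' ').capitalize()
                                  for s in data['subject_ids'])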
|
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
# pylint: disable=unnecessary-lambda
# pylint: disable=unused-argument
# pylint: disable=no-self-use
import textwrap
import unittest
from typing import List, Optional # pylint: disable=unused-import
import icontract
import tests.error
class TestOK(unittest.TestCase):
def test_without_argument(self) -> None:
z = [1]
@icontract.snapshot(lambda: z[:], name="z")
@icontract.ensure(lambda OLD, val: OLD.z + [val] == z)
def some_func(val: int) -> None:
z.append(val)
some_func(2)
def test_with_name_same_for_single_argument(self) -> None:
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, val, lst: OLD.lst + [val] == lst)
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
# Expected to pass
some_func([1], 2)
def test_with_custom_name_for_single_argument(self) -> None:
@icontract.snapshot(lambda lst: len(lst), name="len_lst")
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
# Expected to pass
some_func([1], 2)
def test_with_multiple_arguments(self) -> None:
@icontract.snapshot(lambda lst_a, lst_b: set(lst_a).union(lst_b), name="union")
@icontract.ensure(lambda OLD, lst_a, lst_b: set(lst_a).union(lst_b) == OLD.union)
def some_func(lst_a: List[int], lst_b: List[int]) -> None:
pass
# Expected to pass
some_func(lst_a=[1, 2], lst_b=[3, 4])
class TestViolation(unittest.TestCase):
def test_with_name_same_as_argument(self) -> None:
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, val, lst: OLD.lst + [val] == lst)
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
lst.append(1984)
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_func([1], 2)
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual(
textwrap.dedent("""\
OLD.lst + [val] == lst:
OLD was a bunch of OLD values
OLD.lst was [1]
lst was [1, 2, 1984]
result was None
val was 2"""), tests.error.wo_mandatory_location(str(violation_error)))
def test_with_custom_name(self) -> None:
@icontract.snapshot(lambda lst: len(lst), name="len_lst")
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
lst.append(1984)
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_func([1], 2)
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual(
textwrap.dedent("""\
OLD.len_lst + 1 == len(lst):
OLD was a bunch of OLD values
OLD.len_lst was 1
len(lst) was 3
lst was [1, 2, 1984]
result was None
val was 2"""), tests.error.wo_mandatory_location(str(violation_error)))
def test_with_multiple_arguments(self) -> None:
@icontract.snapshot(lambda lst_a, lst_b: set(lst_a).union(lst_b), name="union")
@icontract.ensure(lambda OLD, lst_a, lst_b: set(lst_a).union(lst_b) == OLD.union)
def some_func(lst_a: List[int], lst_b: List[int]) -> None:
lst_a.append(1984) # bug
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_func(lst_a=[1, 2], lst_b=[3, 4])
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual(
textwrap.dedent('''\
set(lst_a).union(lst_b) == OLD.union:
OLD was a bunch of OLD values
OLD.union was {1, 2, 3, 4}
lst_a was [1, 2, 1984]
lst_b was [3, 4]
result was None
set(lst_a) was {1, 2, 1984}
set(lst_a).union(lst_b) was {1, 2, 3, 4, 1984}'''),
tests.error.wo_mandatory_location(str(violation_error)))
class TestInvalid(unittest.TestCase):
def test_missing_snapshot_but_old_in_postcondition(self) -> None:
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
type_error = None # type: Optional[TypeError]
try:
some_func([1], 2)
except TypeError as err:
type_error = err
self.assertIsNotNone(type_error)
self.assertEqual("The argument(s) of the contract condition have not been set: ['OLD']. "
"Does the original function define them? Did you supply them in the call? "
"Did you decorate the function with a snapshot to capture OLD values?",
tests.error.wo_mandatory_location(str(type_error)))
def test_conflicting_snapshots_with_argument_name(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst: lst[:])
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, val, lst: len(OLD.lst) + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("There are conflicting snapshots with the name: 'lst'", str(value_error))
def test_conflicting_snapshots_with_custom_name(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst: len(lst), name='len_lst')
@icontract.snapshot(lambda lst: len(lst), name='len_lst')
@icontract.ensure(lambda OLD, val, lst: OLD.len_lst + 1 == len(lst))
def some_func(lst: List[int], val: int) -> None:
lst.append(val)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("There are conflicting snapshots with the name: 'len_lst'", str(value_error))
def test_with_invalid_argument(self) -> None:
# lst versus a_list
type_error = None # type: Optional[TypeError]
try:
@icontract.snapshot(lambda lst: len(lst), name='len_lst')
@icontract.ensure(lambda OLD, val, a_list: OLD.len_lst + 1 == len(a_list))
def some_func(a_list: List[int], val: int) -> None:
a_list.append(val)
some_func([1], 2)
except TypeError as err:
type_error = err
self.assertIsNotNone(type_error)
self.assertEqual("The argument(s) of the snapshot have not been set: ['lst']. "
"Does the original function define them? Did you supply them in the call?",
tests.error.wo_mandatory_location(str(type_error)))
def test_with_no_arguments_and_no_name(self) -> None:
z = [1]
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda: z[:])
@icontract.ensure(lambda OLD, val: OLD.z + [val] == z)
def some_func(val: int) -> None:
z.append(val)
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("You must name a snapshot if no argument was given in the capture function.", str(value_error))
def test_with_multiple_arguments_and_no_name(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst_a, lst_b: set(lst_a).union(lst_b))
@icontract.ensure(lambda OLD, lst_a, lst_b: set(lst_a).union(lst_b) == OLD.union)
def some_func(lst_a: List[int], lst_b: List[int]) -> None:
pass
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("You must name a snapshot if multiple arguments were given in the capture function.",
str(value_error))
def test_with_no_postcondition(self) -> None:
value_error = None # type: Optional[ValueError]
try:
# pylint: disable=unused-variable
@icontract.snapshot(lambda lst: lst[:])
def some_func(lst: List[int]) -> None:
return
except ValueError as err:
value_error = err
self.assertIsNotNone(value_error)
self.assertEqual("You are decorating a function with a snapshot, "
"but no postcondition was defined on the function before.", str(value_error))
def test_missing_old_attribute(self) -> None:
@icontract.snapshot(lambda lst: lst[:])
@icontract.ensure(lambda OLD, lst: OLD.len_list == lst) # We miss len_lst in OLD here!
def some_func(lst: List[int]) -> None:
return
attribute_error = None # type: Optional[AttributeError]
try:
some_func(lst=[1, 2, 3])
except AttributeError as error:
attribute_error = error
assert attribute_error is not None
self.assertEqual("The snapshot with the name 'len_list' is not available in the OLD of a postcondition. "
"Have you decorated the function with a corresponding snapshot decorator?",
str(attribute_error))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"icontract.snapshot",
"textwrap.dedent",
"icontract.ensure"
] |
[((10319, 10334), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10332, 10334), False, 'import unittest\n'), ((410, 453), 'icontract.snapshot', 'icontract.snapshot', (['(lambda : z[:])'], {'name': '"""z"""'}), "(lambda : z[:], name='z')\n", (428, 453), False, 'import icontract\n'), ((462, 515), 'icontract.ensure', 'icontract.ensure', (['(lambda OLD, val: OLD.z + [val] == z)'], {}), '(lambda OLD, val: OLD.z + [val] == z)\n', (478, 515), False, 'import icontract\n'), ((678, 716), 'icontract.snapshot', 'icontract.snapshot', (['(lambda lst: lst[:])'], {}), '(lambda lst: lst[:])\n', (696, 716), False, 'import icontract\n'), ((726, 788), 'icontract.ensure', 'icontract.ensure', (['(lambda OLD, val, lst: OLD.lst + [val] == lst)'], {}), '(lambda OLD, val, lst: OLD.lst + [val] == lst)\n', (742, 788), False, 'import icontract\n'), ((1771, 1809), 'icontract.snapshot', 'icontract.snapshot', (['(lambda lst: lst[:])'], {}), '(lambda lst: lst[:])\n', (1789, 1809), False, 'import icontract\n'), ((1819, 1881), 'icontract.ensure', 'icontract.ensure', (['(lambda OLD, val, lst: OLD.lst + [val] == lst)'], {}), '(lambda OLD, val, lst: OLD.lst + [val] == lst)\n', (1835, 1881), False, 'import icontract\n'), ((9585, 9623), 'icontract.snapshot', 'icontract.snapshot', (['(lambda lst: lst[:])'], {}), '(lambda lst: lst[:])\n', (9603, 9623), False, 'import icontract\n'), ((9633, 9687), 'icontract.ensure', 'icontract.ensure', (['(lambda OLD, lst: OLD.len_list == lst)'], {}), '(lambda OLD, lst: OLD.len_list == lst)\n', (9649, 9687), False, 'import icontract\n'), ((2282, 2527), 'textwrap.dedent', 'textwrap.dedent', (['""" OLD.lst + [val] == lst:\n OLD was a bunch of OLD values\n OLD.lst was [1]\n lst was [1, 2, 1984]\n result was None\n val was 2"""'], {}), '(\n """ OLD.lst + [val] == lst:\n OLD was a bunch of OLD values\n OLD.lst was [1]\n lst was [1, 2, 1984]\n result was None\n val was 2"""\n )\n', (2297, 2527), False, 'import textwrap\n'), ((3167, 3450), 'textwrap.dedent', 'textwrap.dedent', (['""" OLD.len_lst + 1 == len(lst):\n OLD was a bunch of OLD values\n OLD.len_lst was 1\n len(lst) was 3\n lst was [1, 2, 1984]\n result was None\n val was 2"""'], {}), '(\n """ OLD.len_lst + 1 == len(lst):\n OLD was a bunch of OLD values\n OLD.len_lst was 1\n len(lst) was 3\n lst was [1, 2, 1984]\n result was None\n val was 2"""\n )\n', (3182, 3450), False, 'import textwrap\n'), ((4143, 4529), 'textwrap.dedent', 'textwrap.dedent', (['""" set(lst_a).union(lst_b) == OLD.union:\n OLD was a bunch of OLD values\n OLD.union was {1, 2, 3, 4}\n lst_a was [1, 2, 1984]\n lst_b was [3, 4]\n result was None\n set(lst_a) was {1, 2, 1984}\n set(lst_a).union(lst_b) was {1, 2, 3, 4, 1984}"""'], {}), '(\n """ set(lst_a).union(lst_b) == OLD.union:\n OLD was a bunch of OLD values\n OLD.union was {1, 2, 3, 4}\n lst_a was [1, 2, 1984]\n lst_b was [3, 4]\n result was None\n set(lst_a) was {1, 2, 1984}\n set(lst_a).union(lst_b) was {1, 2, 3, 4, 1984}"""\n )\n', (4158, 4529), False, 'import textwrap\n'), ((5640, 5678), 'icontract.snapshot', 'icontract.snapshot', (['(lambda lst: lst[:])'], {}), '(lambda lst: lst[:])\n', (5658, 5678), False, 'import icontract\n'), ((5692, 5730), 'icontract.snapshot', 'icontract.snapshot', (['(lambda lst: lst[:])'], {}), '(lambda lst: lst[:])\n', (5710, 5730), False, 'import icontract\n'), ((7834, 7867), 'icontract.snapshot', 'icontract.snapshot', (['(lambda : z[:])'], {}), '(lambda : z[:])\n', (7852, 7867), False, 'import icontract\n'), ((7880, 7933), 'icontract.ensure', 'icontract.ensure', (['(lambda OLD, val: OLD.z 
+ [val] == z)'], {}), '(lambda OLD, val: OLD.z + [val] == z)\n', (7896, 7933), False, 'import icontract\n'), ((9127, 9165), 'icontract.snapshot', 'icontract.snapshot', (['(lambda lst: lst[:])'], {}), '(lambda lst: lst[:])\n', (9145, 9165), False, 'import icontract\n')]
|
from pretf.blocks import output, variable
def pretf_blocks(var):
yield variable.one(default=1)
yield output.one(value=var.one)
yield variable.two(default=2)
|
[
"pretf.blocks.variable.one",
"pretf.blocks.output.one",
"pretf.blocks.variable.two"
] |
[((77, 100), 'pretf.blocks.variable.one', 'variable.one', ([], {'default': '(1)'}), '(default=1)\n', (89, 100), False, 'from pretf.blocks import output, variable\n'), ((111, 136), 'pretf.blocks.output.one', 'output.one', ([], {'value': 'var.one'}), '(value=var.one)\n', (121, 136), False, 'from pretf.blocks import output, variable\n'), ((147, 170), 'pretf.blocks.variable.two', 'variable.two', ([], {'default': '(2)'}), '(default=2)\n', (159, 170), False, 'from pretf.blocks import output, variable\n')]
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
x=np.arange(0,2*np.pi,0.1)
y=np.exp(x)
plt.plot(x,y)
plt.show()
|
[
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] |
[((71, 99), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.1)'], {}), '(0, 2 * np.pi, 0.1)\n', (80, 99), True, 'import numpy as np\n'), ((98, 107), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (104, 107), True, 'import numpy as np\n'), ((108, 122), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (116, 122), True, 'import matplotlib.pyplot as plt\n'), ((122, 132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (130, 132), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import sys
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from PIL import Image
import cv2
from skimage import io
import numpy as np
import json
import zipfile
import tools.utils as utils
import tools.dataset as dataset
import tools.imgproc as imgproc
import tools.craft_utils as craft_utils
from models.craft import CRAFT
from models.moran import MORAN
import matplotlib.pyplot as plt
from collections import OrderedDict
def copyStateDict(state_dict):
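    # Strip the "module." prefix that torch.nn.DataParallel prepends to parameter
    # names, so checkpoints load into either a wrapped or a plain model.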
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
def craft_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
t0 = time.time()
# resize
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
if cuda:
x = x.cuda()
# forward pass
with torch.no_grad():
y, feature = net(x)
# make score and link map
score_text = y[0,:,:,0].cpu().data.numpy()
score_link = y[0,:,:,1].cpu().data.numpy()
    tmp1 = score_link.copy()
    tmp2 = score_text.copy()
    t0 = time.time() - t0   # inference time
    t1 = time.time()        # start post-processing timer
    # Post-processing
    boxes, polys, rot_rects = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, False)
# coordinate adjustment
boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
rot_rects = craft_utils.adjustResultCoordinatesNew(rot_rects, ratio_w, ratio_h)
# render results (optional)
render_img = score_text.copy()
render_img = np.hstack((render_img, score_link))
ret_score_text = imgproc.cvt2HeatmapImg(render_img)
    t1 = time.time()
    if args.show_time: print("\ntotal infer/postproc time : {:.3f}".format(t1 - t0))
return boxes, ret_score_text,rot_rects
parser = argparse.ArgumentParser(description='CRAFT Text Detection')
# CRAFT args
parser.add_argument('--craft_trained_model', default='pretrained/craft_mlt_25k.pth', type=str, help='pretrained model')
parser.add_argument('--img_path', default='test/1.jpg', type=str, help='folder path to input images')
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')
parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser.add_argument('--refiner_model', default='pretrained/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')
# moran
parser.add_argument('--moran_path', default='pretrained/moran.pth', type=str, help='pretrained moran model')
args = parser.parse_args()
moran_path = args.moran_path
alphabet = '0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$'
if __name__ == '__main__':
################################################
# cv2 initialize
################################################
cap = cv2.VideoCapture(0)
################################################
# CRAFT loading part
################################################
# load net
net = CRAFT() # initialize
if args.cuda:
net.load_state_dict(copyStateDict(torch.load(args.craft_trained_model)))
else:
net.load_state_dict(copyStateDict(torch.load(args.craft_trained_model, map_location='cpu')))
if args.cuda:
net = net.cuda()
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
net.eval()
################################################
# MORAN loading part
################################################
cuda_flag = False
if torch.cuda.is_available():
cuda_flag = True
MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, CUDA=cuda_flag)
MORAN = MORAN.cuda()
else:
MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, inputDataType='torch.FloatTensor', CUDA=cuda_flag)
print('loading pretrained model from %s' % moran_path)
if cuda_flag:
state_dict = torch.load(moran_path)
else:
state_dict = torch.load(moran_path, map_location='cpu')
MORAN_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k.replace("module.", "") # remove `module.`
MORAN_state_dict_rename[name] = v
MORAN.load_state_dict(MORAN_state_dict_rename)
for p in MORAN.parameters():
p.requires_grad = False
MORAN.eval()
    while cap.isOpened():
all_text = []
all_text_reverse = []
################################################
# CRAFT processing part
################################################
# load data
tik = time.time()
ret, image = cap.read()
# image = cv2.imread('test/1.jpg')
image_raw = image.copy()
bboxes, score_text,rot_rects = craft_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly)
print("time1: ",time.time()-tik)
# save text rectangles
filename, file_ext = os.path.splitext(os.path.basename(args.img_path))
        # This can also save the cropped text region images (save_file toggles writing to disk)
img_cuts = utils.saveSplitTextRects(image,rot_rects,save_file=False,save_prefix="rect_"+filename)
print("time2: ",time.time()-tik)
if not img_cuts:
cv2.imshow('Capture', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
continue
###############################################
# MORAN processing part
################################################
converter = utils.strLabelConverterForAttention(alphabet, ':')
transformer = dataset.resizeNormalize((100, 32))
images = [transformer(Image.fromarray(img.astype('uint8')).convert('L')) for img in img_cuts]
images = [Variable(img.view(1, *img.size())) for img in images]
all_image = torch.cat(images,axis=0)
if cuda_flag:
all_image = all_image.cuda()
text = torch.LongTensor(1 * 5)
length = torch.IntTensor(1)
text = Variable(text)
length = Variable(length)
        # To go from a single image to a batch, only `length` needs to change
        # The author's helper utilities already support processing several images at once
max_iter = 20
t, l = converter.encode('0'*max_iter)
utils.loadData(text, t)
utils.loadData(length, l)
length = torch.ones(len(img_cuts))*20
length = length.int()
output = MORAN(all_image, length, text, text, test=True, debug=False)
preds, preds_reverse = output[0]
_, preds = preds.max(1)
_, preds_reverse = preds_reverse.max(1)
sim_preds = converter.decode(preds.data, length.data)
all_text = [v.strip().split('$')[0] for v in sim_preds]
print(sim_preds)
print("time3: ",time.time()-tik)
result_img = utils.saveResult(args.img_path, image_raw[:,:,::-1], bboxes,save_file=False, texts=all_text)
print("time4: ",time.time()-tik)
print(all_text)
cv2.imshow('Capture', result_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
|
[
"argparse.ArgumentParser",
"torch.cat",
"tools.craft_utils.adjustResultCoordinates",
"models.moran.MORAN.cuda",
"torch.no_grad",
"cv2.imshow",
"models.moran.MORAN.eval",
"torch.load",
"models.craft.CRAFT",
"tools.utils.strLabelConverterForAttention",
"tools.utils.saveResult",
"tools.utils.loadData",
"torch.autograd.Variable",
"os.path.basename",
"cv2.waitKey",
"numpy.hstack",
"tools.imgproc.cvt2HeatmapImg",
"torch.cuda.is_available",
"tools.utils.saveSplitTextRects",
"torch.IntTensor",
"tools.craft_utils.adjustResultCoordinatesNew",
"torch.from_numpy",
"models.moran.MORAN",
"torch.LongTensor",
"tools.craft_utils.getDetBoxes",
"tools.imgproc.normalizeMeanVariance",
"time.time",
"cv2.VideoCapture",
"tools.dataset.resizeNormalize",
"models.moran.MORAN.parameters",
"tools.imgproc.resize_aspect_ratio",
"models.moran.MORAN.load_state_dict",
"collections.OrderedDict",
"torch.nn.DataParallel"
] |
[((2591, 2650), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CRAFT Text Detection"""'}), "(description='CRAFT Text Detection')\n", (2614, 2650), False, 'import argparse\n'), ((812, 825), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (823, 825), False, 'from collections import OrderedDict\n'), ((1164, 1175), 'time.time', 'time.time', ([], {}), '()\n', (1173, 1175), False, 'import time\n'), ((1239, 1354), 'tools.imgproc.resize_aspect_ratio', 'imgproc.resize_aspect_ratio', (['image', 'args.canvas_size'], {'interpolation': 'cv2.INTER_LINEAR', 'mag_ratio': 'args.mag_ratio'}), '(image, args.canvas_size, interpolation=cv2.\n INTER_LINEAR, mag_ratio=args.mag_ratio)\n', (1266, 1354), True, 'import tools.imgproc as imgproc\n'), ((1424, 1466), 'tools.imgproc.normalizeMeanVariance', 'imgproc.normalizeMeanVariance', (['img_resized'], {}), '(img_resized)\n', (1453, 1466), True, 'import tools.imgproc as imgproc\n'), ((1979, 2079), 'tools.craft_utils.getDetBoxes', 'craft_utils.getDetBoxes', (['score_text', 'score_link', 'text_threshold', 'link_threshold', 'low_text', '(False)'], {}), '(score_text, score_link, text_threshold,\n link_threshold, low_text, False)\n', (2002, 2079), True, 'import tools.craft_utils as craft_utils\n'), ((2118, 2178), 'tools.craft_utils.adjustResultCoordinates', 'craft_utils.adjustResultCoordinates', (['boxes', 'ratio_w', 'ratio_h'], {}), '(boxes, ratio_w, ratio_h)\n', (2153, 2178), True, 'import tools.craft_utils as craft_utils\n'), ((2196, 2263), 'tools.craft_utils.adjustResultCoordinatesNew', 'craft_utils.adjustResultCoordinatesNew', (['rot_rects', 'ratio_w', 'ratio_h'], {}), '(rot_rects, ratio_w, ratio_h)\n', (2234, 2263), True, 'import tools.craft_utils as craft_utils\n'), ((2351, 2386), 'numpy.hstack', 'np.hstack', (['(render_img, score_link)'], {}), '((render_img, score_link))\n', (2360, 2386), True, 'import numpy as np\n'), ((2409, 2443), 'tools.imgproc.cvt2HeatmapImg', 'imgproc.cvt2HeatmapImg', (['render_img'], {}), '(render_img)\n', (2431, 2443), True, 'import tools.imgproc as imgproc\n'), ((4322, 4341), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4338, 4341), False, 'import cv2\n'), ((4503, 4510), 'models.craft.CRAFT', 'CRAFT', ([], {}), '()\n', (4508, 4510), False, 'from models.craft import CRAFT\n'), ((5047, 5072), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5070, 5072), False, 'import torch\n'), ((5611, 5624), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5622, 5624), False, 'from collections import OrderedDict\n'), ((5770, 5816), 'models.moran.MORAN.load_state_dict', 'MORAN.load_state_dict', (['MORAN_state_dict_rename'], {}), '(MORAN_state_dict_rename)\n', (5791, 5816), False, 'from models.moran import MORAN\n'), ((5833, 5851), 'models.moran.MORAN.parameters', 'MORAN.parameters', ([], {}), '()\n', (5849, 5851), False, 'from models.moran import MORAN\n'), ((5891, 5903), 'models.moran.MORAN.eval', 'MORAN.eval', ([], {}), '()\n', (5901, 5903), False, 'from models.moran import MORAN\n'), ((1686, 1701), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1699, 1701), False, 'import torch\n'), ((4804, 4830), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (4825, 4830), False, 'import torch\n'), ((5218, 5230), 'models.moran.MORAN.cuda', 'MORAN.cuda', ([], {}), '()\n', (5228, 5230), False, 'from models.moran import MORAN\n'), ((5481, 5503), 'torch.load', 'torch.load', (['moran_path'], {}), '(moran_path)\n', (5491, 5503), False, 
'import torch\n'), ((5537, 5579), 'torch.load', 'torch.load', (['moran_path'], {'map_location': '"""cpu"""'}), "(moran_path, map_location='cpu')\n", (5547, 5579), False, 'import torch\n'), ((6183, 6194), 'time.time', 'time.time', ([], {}), '()\n', (6192, 6194), False, 'import time\n'), ((6644, 6740), 'tools.utils.saveSplitTextRects', 'utils.saveSplitTextRects', (['image', 'rot_rects'], {'save_file': '(False)', 'save_prefix': "('rect_' + filename)"}), "(image, rot_rects, save_file=False, save_prefix=\n 'rect_' + filename)\n", (6668, 6740), True, 'import tools.utils as utils\n'), ((7106, 7156), 'tools.utils.strLabelConverterForAttention', 'utils.strLabelConverterForAttention', (['alphabet', '""":"""'], {}), "(alphabet, ':')\n", (7141, 7156), True, 'import tools.utils as utils\n'), ((7180, 7214), 'tools.dataset.resizeNormalize', 'dataset.resizeNormalize', (['(100, 32)'], {}), '((100, 32))\n', (7203, 7214), True, 'import tools.dataset as dataset\n'), ((7412, 7437), 'torch.cat', 'torch.cat', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (7421, 7437), False, 'import torch\n'), ((7518, 7541), 'torch.LongTensor', 'torch.LongTensor', (['(1 * 5)'], {}), '(1 * 5)\n', (7534, 7541), False, 'import torch\n'), ((7560, 7578), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (7575, 7578), False, 'import torch\n'), ((7595, 7609), 'torch.autograd.Variable', 'Variable', (['text'], {}), '(text)\n', (7603, 7609), False, 'from torch.autograd import Variable\n'), ((7628, 7644), 'torch.autograd.Variable', 'Variable', (['length'], {}), '(length)\n', (7636, 7644), False, 'from torch.autograd import Variable\n'), ((7793, 7816), 'tools.utils.loadData', 'utils.loadData', (['text', 't'], {}), '(text, t)\n', (7807, 7816), True, 'import tools.utils as utils\n'), ((7826, 7851), 'tools.utils.loadData', 'utils.loadData', (['length', 'l'], {}), '(length, l)\n', (7840, 7851), True, 'import tools.utils as utils\n'), ((7948, 8008), 'models.moran.MORAN', 'MORAN', (['all_image', 'length', 'text', 'text'], {'test': '(True)', 'debug': '(False)'}), '(all_image, length, text, text, test=True, debug=False)\n', (7953, 8008), False, 'from models.moran import MORAN\n'), ((8353, 8453), 'tools.utils.saveResult', 'utils.saveResult', (['args.img_path', 'image_raw[:, :, ::-1]', 'bboxes'], {'save_file': '(False)', 'texts': 'all_text'}), '(args.img_path, image_raw[:, :, ::-1], bboxes, save_file=\n False, texts=all_text)\n', (8369, 8453), True, 'import tools.utils as utils\n'), ((8522, 8555), 'cv2.imshow', 'cv2.imshow', (['"""Capture"""', 'result_img'], {}), "('Capture', result_img)\n", (8532, 8555), False, 'import cv2\n'), ((1476, 1495), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1492, 1495), False, 'import torch\n'), ((6568, 6599), 'os.path.basename', 'os.path.basename', (['args.img_path'], {}), '(args.img_path)\n', (6584, 6599), False, 'import os\n'), ((6812, 6840), 'cv2.imshow', 'cv2.imshow', (['"""Capture"""', 'image'], {}), "('Capture', image)\n", (6822, 6840), False, 'import cv2\n'), ((4590, 4626), 'torch.load', 'torch.load', (['args.craft_trained_model'], {}), '(args.craft_trained_model)\n', (4600, 4626), False, 'import torch\n'), ((4683, 4739), 'torch.load', 'torch.load', (['args.craft_trained_model'], {'map_location': '"""cpu"""'}), "(args.craft_trained_model, map_location='cpu')\n", (4693, 4739), False, 'import torch\n'), ((6472, 6483), 'time.time', 'time.time', ([], {}), '()\n', (6481, 6483), False, 'import time\n'), ((6756, 6767), 'time.time', 'time.time', ([], {}), '()\n', (6765, 6767), False, 
'import time\n'), ((8314, 8325), 'time.time', 'time.time', ([], {}), '()\n', (8323, 8325), False, 'import time\n'), ((8471, 8482), 'time.time', 'time.time', ([], {}), '()\n', (8480, 8482), False, 'import time\n'), ((8568, 8582), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8579, 8582), False, 'import cv2\n'), ((6857, 6871), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6868, 6871), False, 'import cv2\n')]
|
import os
import sys
from contextlib import contextmanager
from typing import Iterator
def exists_case_sensitive(path: str) -> bool:
"""Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
    os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows,
Python can only import using the case of the real file.
"""
result = os.path.exists(path)
if (sys.platform.startswith("win") or sys.platform == "darwin") and result:
directory, basename = os.path.split(path)
result = basename in os.listdir(directory)
return result
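# Hedged usage sketch (hypothetical file names, not from this project): on
# Windows or macOS, where the real file is "module.py",
# exists_case_sensitive("module.py") is True while
# exists_case_sensitive("MODULE.py") is False, because "MODULE.py" is not in
# os.listdir() even though os.path.exists("MODULE.py") reports True.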
@contextmanager
def chdir(path: str) -> Iterator[None]:
"""Context manager for changing dir and restoring previous workdir after exit."""
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
|
[
"os.listdir",
"sys.platform.startswith",
"os.getcwd",
"os.path.exists",
"os.path.split",
"os.chdir"
] |
[((491, 511), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (505, 511), False, 'import os\n'), ((868, 879), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (877, 879), False, 'import os\n'), ((884, 898), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (892, 898), False, 'import os\n'), ((622, 641), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (635, 641), False, 'import os\n'), ((943, 959), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (951, 959), False, 'import os\n'), ((520, 550), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (543, 550), False, 'import sys\n'), ((671, 692), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (681, 692), False, 'import os\n')]
|
# Author: <NAME> <<EMAIL>>
# License: Simplified BSD
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.utils.extmath import safe_sparse_dot
from scipy.sparse import issparse
import numpy as np
def safe_power(X, degree=2):
"""Element-wise power supporting both sparse and dense data.
Parameters
----------
X : ndarray or sparse
The array whose entries to raise to the power.
degree : int, default: 2
The power to which to raise the elements.
Returns
-------
X_ret : ndarray or sparse
Same shape as X, but (x_ret)_ij = (x)_ij ^ degree
"""
if issparse(X):
if hasattr(X, 'power'):
return X.power(degree)
else:
# old scipy
X = X.copy()
X.data **= degree
return X
else:
return X ** degree
def _D(X, P, degree=2):
"""The "replacement" part of the homogeneous polynomial kernel.
D[i, j] = sum_k [(X_ik * P_jk) ** degree]
"""
return safe_sparse_dot(safe_power(X, degree), P.T ** degree)
def homogeneous_kernel(X, P, degree=2):
"""Convenience alias for homogeneous polynomial kernel between X and P::
K_P(x, p) = <x, p> ^ degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
    P : ndarray of shape (n_samples_2, n_features)
degree : int, default 2
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
return polynomial_kernel(X, P, degree=degree, gamma=1, coef0=0)
def anova_kernel(X, P, degree=2):
"""ANOVA kernel between X and P::
K_A(x, p) = sum_i1>i2>...>id x_i1 p_i1 x_i2 p_i2 ... x_id p_id
See <NAME> and <NAME>,
Kernel Methods for Pattern Analysis section 9.2.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
    P : ndarray of shape (n_samples_2, n_features)
degree : int, default 2
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
if degree == 2:
K = homogeneous_kernel(X, P, degree=2)
K -= _D(X, P, degree=2)
K /= 2
elif degree == 3:
K = homogeneous_kernel(X, P, degree=3)
K -= 3 * _D(X, P, degree=2) * _D(X, P, degree=1)
K += 2 * _D(X, P, degree=3)
K /= 6
else:
raise NotImplementedError("ANOVA kernel for degree >= 4 not yet "
"implemented efficiently.")
return K
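# Hedged derivation note (this editor's reading of the degree-2 branch above,
# not from the original source): it implements the Newton-Girard style identity
#   K_A(x, p) = (<x, p>**2 - sum_k (x_k * p_k)**2) / 2
# where homogeneous_kernel supplies <x, p>**2 and _D(..., degree=2) supplies
# the "replacement" term sum_k (x_k * p_k)**2.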
def _poly_predict(X, P, lams, kernel, degree=2):
if kernel == "anova":
K = anova_kernel(X, P, degree)
elif kernel == "poly":
K = homogeneous_kernel(X, P, degree)
else:
raise ValueError(("Unsuppported kernel: {}. Use one "
"of {{'anova'|'poly'}}").format(kernel))
return np.dot(K, lams)
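# Minimal usage sketch (all shapes and values below are made up for
# illustration; not part of the original module):
# if __name__ == "__main__":
#     rng = np.random.RandomState(0)
#     X = rng.randn(5, 4)     # 5 samples, 4 features
#     P = rng.randn(3, 4)     # 3 basis vectors
#     lams = rng.randn(3)     # one weight per basis vector
#     print(_poly_predict(X, P, lams, kernel="anova", degree=2).shape)  # (5,)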
|
[
"numpy.dot",
"scipy.sparse.issparse",
"sklearn.metrics.pairwise.polynomial_kernel"
] |
[((630, 641), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (638, 641), False, 'from scipy.sparse import issparse\n'), ((1498, 1554), 'sklearn.metrics.pairwise.polynomial_kernel', 'polynomial_kernel', (['X', 'P'], {'degree': 'degree', 'gamma': '(1)', 'coef0': '(0)'}), '(X, P, degree=degree, gamma=1, coef0=0)\n', (1515, 1554), False, 'from sklearn.metrics.pairwise import polynomial_kernel\n'), ((2827, 2842), 'numpy.dot', 'np.dot', (['K', 'lams'], {}), '(K, lams)\n', (2833, 2842), True, 'import numpy as np\n')]
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, DateField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length
from barbearia.models import Usuario
class RegisterForm(FlaskForm):
username = StringField(label='Nome:', validators=[DataRequired(), Length(min=2, max=30)])
cpf = StringField(label='CPF:', validators=[DataRequired(), Length(11)])
telefone = StringField(label='Telefone:', validators=[DataRequired(), Length(11)])
email = StringField(label='Email:', validators=[DataRequired(), Email()])
senha = PasswordField(label='Senha:', validators=[DataRequired(), Length(min=6)])
confirmaSenha = PasswordField(label='Confirme a Senha:', validators=[EqualTo('senha')])
submit = SubmitField(label='Criar Conta')
data_nascimento = StringField(label='Data de Nascimento:', validators=[DataRequired(), Length(max=14)])
def validate_username(self, username_to_check):
user = Usuario.query.filter_by(username=username_to_check.data).first()
if user:
raise ValidationError('Nome de usuário existente, por favor use outro')
def validate_email(self, email_to_check):
email = Usuario.query.filter_by(email=email_to_check.data).first()
if email:
raise ValidationError('E-mail já em uso, tente novamente')
class LoginForm(FlaskForm):
email = StringField(label="Email", validators=[DataRequired()])
senha = PasswordField(label="<PASSWORD>ha", validators=[DataRequired()])
submit = SubmitField(label='Entre')
|
[
"wtforms.validators.Email",
"wtforms.validators.Length",
"barbearia.models.Usuario.query.filter_by",
"wtforms.SubmitField",
"wtforms.validators.EqualTo",
"wtforms.validators.DataRequired",
"wtforms.validators.ValidationError"
] |
[((785, 817), 'wtforms.SubmitField', 'SubmitField', ([], {'label': '"""Criar Conta"""'}), "(label='Criar Conta')\n", (796, 817), False, 'from wtforms import StringField, PasswordField, SubmitField, DateField\n'), ((1559, 1585), 'wtforms.SubmitField', 'SubmitField', ([], {'label': '"""Entre"""'}), "(label='Entre')\n", (1570, 1585), False, 'from wtforms import StringField, PasswordField, SubmitField, DateField\n'), ((1095, 1160), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Nome de usuário existente, por favor use outro"""'], {}), "('Nome de usuário existente, por favor use outro')\n", (1110, 1160), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((1319, 1371), 'wtforms.validators.ValidationError', 'ValidationError', (['"""E-mail já em uso, tente novamente"""'], {}), "('E-mail já em uso, tente novamente')\n", (1334, 1371), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((312, 326), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (324, 326), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((328, 349), 'wtforms.validators.Length', 'Length', ([], {'min': '(2)', 'max': '(30)'}), '(min=2, max=30)\n', (334, 349), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((400, 414), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (412, 414), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((416, 426), 'wtforms.validators.Length', 'Length', (['(11)'], {}), '(11)\n', (422, 426), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((487, 501), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (499, 501), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((503, 513), 'wtforms.validators.Length', 'Length', (['(11)'], {}), '(11)\n', (509, 513), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((568, 582), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (580, 582), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((584, 591), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (589, 591), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((648, 662), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (660, 662), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((664, 677), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)'}), '(min=6)\n', (670, 677), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((753, 769), 'wtforms.validators.EqualTo', 'EqualTo', (['"""senha"""'], {}), "('senha')\n", (760, 769), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((893, 907), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (905, 907), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((909, 923), 'wtforms.validators.Length', 'Length', ([], {'max': '(14)'}), '(max=14)\n', (915, 923), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((995, 1051), 
'barbearia.models.Usuario.query.filter_by', 'Usuario.query.filter_by', ([], {'username': 'username_to_check.data'}), '(username=username_to_check.data)\n', (1018, 1051), False, 'from barbearia.models import Usuario\n'), ((1224, 1274), 'barbearia.models.Usuario.query.filter_by', 'Usuario.query.filter_by', ([], {'email': 'email_to_check.data'}), '(email=email_to_check.data)\n', (1247, 1274), False, 'from barbearia.models import Usuario\n'), ((1452, 1466), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1464, 1466), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n'), ((1529, 1543), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1541, 1543), False, 'from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length\n')]
|
#!/usr/bin/env python
# This script changes the friendly name on the device
import paho.mqtt.client as mqtt, sys
import time
# main
def on_connect(client, userdata, flags, rc):
print("Connected")
client.is_connected = True
def on_message(client, userdata, message):
    '''note: message is an MQTTMessage whose topic, payload, qos and retain attributes describe the publish'''
print("Got a message with topic [" + message.topic + "] and payload [" + str(message.payload) + "]" )
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.is_connected = False
client.loop_start()
client.connect("arupiot.com")
time.sleep(6)
if not client.is_connected:
print("problem connecting to the MQTT server; please check your settings")
sys.exit(1)
client.subscribe("project/sonoff/building/" + sys.argv[1] + "/power/stat/#")
client.publish("project/sonoff/building/" + sys.argv[1] + "/power/cmnd/FriendlyName", "jovial_goldberg")
time.sleep(2)
client.loop_stop()
client.disconnect()
|
[
"paho.mqtt.client.Client",
"sys.exit",
"time.sleep"
] |
[((493, 506), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (504, 506), True, 'import paho.mqtt.client as mqtt, sys\n'), ((657, 670), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (667, 670), False, 'import time\n'), ((986, 999), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (996, 999), False, 'import time\n'), ((785, 796), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (793, 796), False, 'import paho.mqtt.client as mqtt, sys\n')]
|
from typing import Union, Tuple
from screeninfo import get_monitors
RESOLUTION = [1 << 32, 1 << 32]
for monitor in get_monitors():
RESOLUTION[0] = min(RESOLUTION[0], monitor.width)
RESOLUTION[1] = min(RESOLUTION[1], monitor.height)
def get_target_size(image_size, target: Union[int, float, Tuple[int, int]] = 0.9, return_k=False):
if isinstance(target, float):
target = (RESOLUTION[0] * target, RESOLUTION[1] * target)
elif isinstance(target, int):
target = (target, target)
k = min(target[0] / image_size[0], target[1] / image_size[1])
target_size = int(round(image_size[0] * k)), int(round(image_size[1] * k))
if return_k:
return target_size, k
return target_size
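# Worked example (hypothetical numbers): for image_size=(4000, 3000) and
# target=0.9 on a single 1920x1080 monitor, target becomes (1728.0, 972.0),
# k = min(1728/4000, 972/3000) = 0.324, and target_size = (1296, 972).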
|
[
"screeninfo.get_monitors"
] |
[((117, 131), 'screeninfo.get_monitors', 'get_monitors', ([], {}), '()\n', (129, 131), False, 'from screeninfo import get_monitors\n')]
|
import numpy
from noise import snoise2
from worldengine.model.world import Step
from worldengine.simulations.basic import find_threshold_f
from worldengine.simulations.hydrology import WatermapSimulation
from worldengine.simulations.irrigation import IrrigationSimulation
from worldengine.simulations.humidity import HumiditySimulation
from worldengine.simulations.temperature import TemperatureSimulation
from worldengine.simulations.permeability import PermeabilitySimulation
from worldengine.simulations.erosion import ErosionSimulation
from worldengine.simulations.precipitation import PrecipitationSimulation
from worldengine.simulations.biome import BiomeSimulation
from worldengine.simulations.icecap import IcecapSimulation
from worldengine.common import anti_alias, get_verbose
# ------------------
# Initial generation
# ------------------
def center_land(world):
"""Translate the map horizontally and vertically to put as much ocean as
possible at the borders. It operates on elevation and plates map"""
y_sums = world.layers['elevation'].data.sum(1) # 1 == sum along x-axis
y_with_min_sum = y_sums.argmin()
if get_verbose():
print("geo.center_land: height complete")
x_sums = world.layers['elevation'].data.sum(0) # 0 == sum along y-axis
x_with_min_sum = x_sums.argmin()
if get_verbose():
print("geo.center_land: width complete")
latshift = 0
world.layers['elevation'].data = numpy.roll(numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
world.layers['plates'].data = numpy.roll(numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
if get_verbose():
print("geo.center_land: width complete")
def place_oceans_at_map_borders(world):
"""
Lower the elevation near the border of the map
"""
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
world.layers['elevation'].data[y, x] = \
(world.layers['elevation'].data[y, x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i)
def add_noise_to_elevation(world, seed):
octaves = 8
freq = 16.0 * octaves
for y in range(world.height):
for x in range(world.width):
n = snoise2(x / freq * 2, y / freq * 2, octaves, base=seed)
world.layers['elevation'].data[y, x] += n
def fill_ocean(elevation, sea_level):#TODO: Make more use of numpy?
height, width = elevation.shape
ocean = numpy.zeros(elevation.shape, dtype=bool)
to_expand = []
for x in range(width):#handle top and bottom border of the map
if elevation[0, x] <= sea_level:
to_expand.append((x, 0))
if elevation[height - 1, x] <= sea_level:
to_expand.append((x, height - 1))
for y in range(height):#handle left- and rightmost border of the map
if elevation[y, 0] <= sea_level:
to_expand.append((0, y))
if elevation[y, width - 1] <= sea_level:
to_expand.append((width - 1, y))
for t in to_expand:
tx, ty = t
if not ocean[ty, tx]:
ocean[ty, tx] = True
for px, py in _around(tx, ty, width, height):
if not ocean[py, px] and elevation[py, px] <= sea_level:
to_expand.append((px, py))
return ocean
def initialize_ocean_and_thresholds(world, ocean_level=1.0):
"""
Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed
"""
e = world.layers['elevation'].data
ocean = fill_ocean(e, ocean_level)
hl = find_threshold_f(e, 0.10) # the highest 10% of all (!) land are declared hills
ml = find_threshold_f(e, 0.03) # the highest 3% are declared mountains
e_th = [('sea', ocean_level),
('plain', hl),
('hill', ml),
('mountain', None)]
harmonize_ocean(ocean, e, ocean_level)
world.ocean = ocean
world.elevation = (e, e_th)
world.sea_depth = sea_depth(world, ocean_level)
def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0)
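# Worked numeric sketch of harmonize_ocean (illustrative values, assuming
# ocean_level=1.0): shallow_sea = 0.85 and midpoint = 0.425; a shallow cell at
# elevation 0.3 becomes 0.425 - (0.425 - 0.3) / 5.0 = 0.4 and a deeper cell at
# 0.6 becomes 0.425 + (0.6 - 0.425) / 5.0 = 0.46, compressing the ocean floor
# towards the midpoint by a factor of five.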
# ----
# Misc
# ----
def sea_depth(world, sea_level):
# a dynamic programming approach to gather how far the next land is
# from a given coordinate up to a maximum distance of max_radius
# result is 0 for land coordinates and -1 for coordinates further than
# max_radius away from land
# there might be even faster ways but it does the trick
def next_land_dynamic(ocean, max_radius=5):
next_land = numpy.full(ocean.shape, -1, int)
# non ocean tiles are zero distance away from next land
next_land[numpy.logical_not(ocean)]=0
height, width = ocean.shape
for dist in range(max_radius):
indices = numpy.transpose(numpy.where(next_land==dist))
for y, x in indices:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height:
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
if next_land[ny,nx] == -1:
next_land[ny,nx] = dist + 1
return next_land
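    # Hedged illustration (tiny made-up grid): with ocean = [[T, T, T],
    # [T, T, F]] the single land cell gets 0, and after the passes above
    # next_land = [[2, 1, 1], [2, 1, 0]] -- a Chebyshev (8-neighbour)
    # distance to land, capped at max_radius with -1 beyond it.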
# We want to multiply the raw sea_depth by one of these factors
# depending on the distance from the next land
# possible TODO: make this a parameter
factors = [0.0, 0.3, 0.5, 0.7, 0.9]
next_land = next_land_dynamic(world.layers['ocean'].data)
sea_depth = sea_level - world.layers['elevation'].data
for y in range(world.height):
for x in range(world.width):
dist_to_next_land = next_land[y,x]
if dist_to_next_land > 0:
sea_depth[y,x]*=factors[dist_to_next_land-1]
sea_depth = anti_alias(sea_depth, 10)
min_depth = sea_depth.min()
max_depth = sea_depth.max()
sea_depth = (sea_depth - min_depth) / (max_depth - min_depth)
return sea_depth
def _around(x, y, width, height):
ps = []
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height and (dx != 0 or dy != 0):
ps.append((nx, ny))
return ps
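# Hedged example (values made up): _around(0, 0, width=3, height=3) returns
# [(0, 1), (1, 0), (1, 1)] -- the in-bounds orthogonal and diagonal
# neighbours, with the origin itself excluded.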
def generate_world(w, step):
if isinstance(step, str):
step = Step.get_by_name(step)
if not step.include_precipitations:
return w
# Prepare sufficient seeds for the different steps of the generation
rng = numpy.random.RandomState(w.seed) # create a fresh RNG in case the global RNG is compromised (i.e. has been queried an indefinite amount of times before generate_world() was called)
sub_seeds = rng.randint(0, numpy.iinfo(numpy.int32).max, size=100) # choose lowest common denominator (32 bit Windows numpy cannot handle a larger value)
seed_dict = {
'PrecipitationSimulation': sub_seeds[ 0], # after 0.19.0 do not ever switch out the seeds here to maximize seed-compatibility
'ErosionSimulation': sub_seeds[ 1],
'WatermapSimulation': sub_seeds[ 2],
'IrrigationSimulation': sub_seeds[ 3],
'TemperatureSimulation': sub_seeds[ 4],
'HumiditySimulation': sub_seeds[ 5],
'PermeabilitySimulation': sub_seeds[ 6],
'BiomeSimulation': sub_seeds[ 7],
'IcecapSimulation': sub_seeds[ 8],
'': sub_seeds[99]
}
TemperatureSimulation().execute(w, seed_dict['TemperatureSimulation'])
# Precipitation with thresholds
PrecipitationSimulation().execute(w, seed_dict['PrecipitationSimulation'])
if not step.include_erosion:
return w
ErosionSimulation().execute(w, seed_dict['ErosionSimulation']) # seed not currently used
if get_verbose():
print("...erosion calculated")
WatermapSimulation().execute(w, seed_dict['WatermapSimulation']) # seed not currently used
# FIXME: create setters
IrrigationSimulation().execute(w, seed_dict['IrrigationSimulation']) # seed not currently used
HumiditySimulation().execute(w, seed_dict['HumiditySimulation']) # seed not currently used
PermeabilitySimulation().execute(w, seed_dict['PermeabilitySimulation'])
cm, biome_cm = BiomeSimulation().execute(w, seed_dict['BiomeSimulation']) # seed not currently used
for cl in cm.keys():
count = cm[cl]
if get_verbose():
print("%s = %i" % (str(cl), count))
if get_verbose():
print('') # empty line
print('Biome obtained:')
for cl in biome_cm.keys():
count = biome_cm[cl]
if get_verbose():
print(" %30s = %7i" % (str(cl), count))
IcecapSimulation().execute(w, seed_dict['IcecapSimulation']) # makes use of temperature-map
return w
|
[
"worldengine.simulations.basic.find_threshold_f",
"worldengine.simulations.temperature.TemperatureSimulation",
"numpy.iinfo",
"worldengine.simulations.permeability.PermeabilitySimulation",
"worldengine.simulations.biome.BiomeSimulation",
"worldengine.simulations.hydrology.WatermapSimulation",
"worldengine.simulations.precipitation.PrecipitationSimulation",
"numpy.full",
"numpy.logical_not",
"numpy.random.RandomState",
"noise.snoise2",
"worldengine.simulations.erosion.ErosionSimulation",
"worldengine.common.get_verbose",
"worldengine.common.anti_alias",
"numpy.roll",
"worldengine.simulations.irrigation.IrrigationSimulation",
"worldengine.simulations.icecap.IcecapSimulation",
"worldengine.model.world.Step.get_by_name",
"worldengine.simulations.humidity.HumiditySimulation",
"numpy.logical_and",
"numpy.zeros",
"numpy.where"
] |
[((1152, 1165), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (1163, 1165), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((1338, 1351), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (1349, 1351), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((1729, 1742), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (1740, 1742), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((2840, 2880), 'numpy.zeros', 'numpy.zeros', (['elevation.shape'], {'dtype': 'bool'}), '(elevation.shape, dtype=bool)\n', (2851, 2880), False, 'import numpy\n'), ((4099, 4123), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['e', '(0.1)'], {}), '(e, 0.1)\n', (4115, 4123), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((4188, 4213), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['e', '(0.03)'], {}), '(e, 0.03)\n', (4204, 4213), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((4830, 4879), 'numpy.logical_and', 'numpy.logical_and', (['(elevation < shallow_sea)', 'ocean'], {}), '(elevation < shallow_sea, ocean)\n', (4847, 4879), False, 'import numpy\n'), ((4901, 4954), 'numpy.logical_and', 'numpy.logical_and', (['(elevation < midpoint)', 'ocean_points'], {}), '(elevation < midpoint, ocean_points)\n', (4918, 4954), False, 'import numpy\n'), ((5061, 5114), 'numpy.logical_and', 'numpy.logical_and', (['(elevation > midpoint)', 'ocean_points'], {}), '(elevation > midpoint, ocean_points)\n', (5078, 5114), False, 'import numpy\n'), ((6918, 6943), 'worldengine.common.anti_alias', 'anti_alias', (['sea_depth', '(10)'], {}), '(sea_depth, 10)\n', (6928, 6943), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((7642, 7674), 'numpy.random.RandomState', 'numpy.random.RandomState', (['w.seed'], {}), '(w.seed)\n', (7666, 7674), False, 'import numpy\n'), ((9024, 9037), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9035, 9037), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((9714, 9727), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9725, 9727), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((1468, 1546), 'numpy.roll', 'numpy.roll', (["world.layers['elevation'].data", '(-y_with_min_sum + latshift)'], {'axis': '(0)'}), "(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0)\n", (1478, 1546), False, 'import numpy\n'), ((1619, 1694), 'numpy.roll', 'numpy.roll', (["world.layers['plates'].data", '(-y_with_min_sum + latshift)'], {'axis': '(0)'}), "(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0)\n", (1629, 1694), False, 'import numpy\n'), ((5637, 5669), 'numpy.full', 'numpy.full', (['ocean.shape', '(-1)', 'int'], {}), '(ocean.shape, -1, int)\n', (5647, 5669), False, 'import numpy\n'), ((7477, 7499), 'worldengine.model.world.Step.get_by_name', 'Step.get_by_name', (['step'], {}), '(step)\n', (7493, 7499), False, 'from worldengine.model.world import Step\n'), ((9643, 9656), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9654, 9656), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((9866, 9879), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9877, 9879), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((2611, 2666), 'noise.snoise2', 'snoise2', (['(x / freq * 2)', '(y / freq * 2)', 'octaves'], {'base': 'seed'}), '(x / freq * 
2, y / freq * 2, octaves, base=seed)\n', (2618, 2666), False, 'from noise import snoise2\n'), ((5753, 5777), 'numpy.logical_not', 'numpy.logical_not', (['ocean'], {}), '(ocean)\n', (5770, 5777), False, 'import numpy\n'), ((7855, 7879), 'numpy.iinfo', 'numpy.iinfo', (['numpy.int32'], {}), '(numpy.int32)\n', (7866, 7879), False, 'import numpy\n'), ((8686, 8709), 'worldengine.simulations.temperature.TemperatureSimulation', 'TemperatureSimulation', ([], {}), '()\n', (8707, 8709), False, 'from worldengine.simulations.temperature import TemperatureSimulation\n'), ((8797, 8822), 'worldengine.simulations.precipitation.PrecipitationSimulation', 'PrecipitationSimulation', ([], {}), '()\n', (8820, 8822), False, 'from worldengine.simulations.precipitation import PrecipitationSimulation\n'), ((8927, 8946), 'worldengine.simulations.erosion.ErosionSimulation', 'ErosionSimulation', ([], {}), '()\n', (8944, 8946), False, 'from worldengine.simulations.erosion import ErosionSimulation\n'), ((9083, 9103), 'worldengine.simulations.hydrology.WatermapSimulation', 'WatermapSimulation', ([], {}), '()\n', (9101, 9103), False, 'from worldengine.simulations.hydrology import WatermapSimulation\n'), ((9208, 9230), 'worldengine.simulations.irrigation.IrrigationSimulation', 'IrrigationSimulation', ([], {}), '()\n', (9228, 9230), False, 'from worldengine.simulations.irrigation import IrrigationSimulation\n'), ((9308, 9328), 'worldengine.simulations.humidity.HumiditySimulation', 'HumiditySimulation', ([], {}), '()\n', (9326, 9328), False, 'from worldengine.simulations.humidity import HumiditySimulation\n'), ((9405, 9429), 'worldengine.simulations.permeability.PermeabilitySimulation', 'PermeabilitySimulation', ([], {}), '()\n', (9427, 9429), False, 'from worldengine.simulations.permeability import PermeabilitySimulation\n'), ((9498, 9515), 'worldengine.simulations.biome.BiomeSimulation', 'BiomeSimulation', ([], {}), '()\n', (9513, 9515), False, 'from worldengine.simulations.biome import BiomeSimulation\n'), ((9938, 9956), 'worldengine.simulations.icecap.IcecapSimulation', 'IcecapSimulation', ([], {}), '()\n', (9954, 9956), False, 'from worldengine.simulations.icecap import IcecapSimulation\n'), ((5896, 5926), 'numpy.where', 'numpy.where', (['(next_land == dist)'], {}), '(next_land == dist)\n', (5907, 5926), False, 'import numpy\n')]
|
from django.conf.urls import url
from DPMAPI import views
urlpatterns = [
url('', views.forecast),
url('Forecast/', views.forecast)
]
|
[
"django.conf.urls.url"
] |
[((79, 102), 'django.conf.urls.url', 'url', (['""""""', 'views.forecast'], {}), "('', views.forecast)\n", (82, 102), False, 'from django.conf.urls import url\n'), ((108, 140), 'django.conf.urls.url', 'url', (['"""Forecast/"""', 'views.forecast'], {}), "('Forecast/', views.forecast)\n", (111, 140), False, 'from django.conf.urls import url\n')]
|
from raytracerchallenge_python.shape import Shape
from raytracerchallenge_python.intersection import Intersection, Intersections
from raytracerchallenge_python.tuple import Vector
from raytracerchallenge_python.helpers import EPSILON
class Cube(Shape):
def local_normal_at(self, point):
maxc = max(abs(point.x), abs(point.y), abs(point.z))
if maxc == abs(point.x):
return Vector(point.x, 0, 0)
elif maxc == abs(point.y):
return Vector(0, point.y, 0)
else:
return Vector(0, 0, point.z)
def local_intersect(self, ray):
def check_axis(origin, direction):
tmin_numerator = (-1 - origin)
tmax_numerator = (1 - origin)
if abs(direction) >= EPSILON:
tmin = tmin_numerator / direction
tmax = tmax_numerator / direction
else:
tmin = tmin_numerator * float('inf')
tmax = tmax_numerator * float('inf')
if tmin > tmax:
tmin, tmax = tmax, tmin
return (tmin, tmax)
xtmin, xtmax = check_axis(ray.origin.x, ray.direction.x)
ytmin, ytmax = check_axis(ray.origin.y, ray.direction.y)
ztmin, ztmax = check_axis(ray.origin.z, ray.direction.z)
tmin = max(xtmin, ytmin, ztmin)
tmax = min(xtmax, ytmax, ztmax)
if tmin > tmax:
return Intersections()
else:
return Intersections(Intersection(tmin, self),
Intersection(tmax, self))
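    # Hedged note on the slab test above (a standard technique, phrased by this
    # editor): the ray hits the axis-aligned unit cube only when the largest
    # per-axis entry time tmin does not exceed the smallest per-axis exit time
    # tmax; when tmin > tmax the per-axis intervals fail to overlap and an
    # empty Intersections() is returned.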
|
[
"raytracerchallenge_python.intersection.Intersection",
"raytracerchallenge_python.tuple.Vector",
"raytracerchallenge_python.intersection.Intersections"
] |
[((409, 430), 'raytracerchallenge_python.tuple.Vector', 'Vector', (['point.x', '(0)', '(0)'], {}), '(point.x, 0, 0)\n', (415, 430), False, 'from raytracerchallenge_python.tuple import Vector\n'), ((1417, 1432), 'raytracerchallenge_python.intersection.Intersections', 'Intersections', ([], {}), '()\n', (1430, 1432), False, 'from raytracerchallenge_python.intersection import Intersection, Intersections\n'), ((485, 506), 'raytracerchallenge_python.tuple.Vector', 'Vector', (['(0)', 'point.y', '(0)'], {}), '(0, point.y, 0)\n', (491, 506), False, 'from raytracerchallenge_python.tuple import Vector\n'), ((540, 561), 'raytracerchallenge_python.tuple.Vector', 'Vector', (['(0)', '(0)', 'point.z'], {}), '(0, 0, point.z)\n', (546, 561), False, 'from raytracerchallenge_python.tuple import Vector\n'), ((1480, 1504), 'raytracerchallenge_python.intersection.Intersection', 'Intersection', (['tmin', 'self'], {}), '(tmin, self)\n', (1492, 1504), False, 'from raytracerchallenge_python.intersection import Intersection, Intersections\n'), ((1539, 1563), 'raytracerchallenge_python.intersection.Intersection', 'Intersection', (['tmax', 'self'], {}), '(tmax, self)\n', (1551, 1563), False, 'from raytracerchallenge_python.intersection import Intersection, Intersections\n')]
|
#!/usr/bin/env python
import copy
import numpy as np
from scipy import signal
from edrixs.photon_transition import dipole_polvec_rixs
from edrixs.utils import boltz_dist
from edrixs.rixs_utils import scattering_mat
if __name__ == "__main__":
'''
    Purpose: This example shows how to calculate a RIXS spectrum.
    This example uses pure Python code.
'''
# PARAMETERS
#-----------
# the parameters for the experimental RIXS geometry are taken from [PRL 117, 147401 (2016)]
# the incident angle of X-ray
thin, thout = 15/180.0*np.pi, 75/180.0*np.pi
# azimuthal angle
phi = 0.0
# core-hole life-time broadening
gamma_n = 0.20
# resolution of RIXS excitations
gamma_f = 0.10
# energy offset of the incident X-ray
om_offset = 857.4
# set energy mesh of the incident X-ray (eV)
# L3 edge
om1, om2 = -5.9,-0.9
# L2 dege
#om1, om2 = 10.9, 14.9
nom = 100
om_mesh = np.linspace(om1, om2, nom)
# energy loss mesh
neloss = 1000
eloss_mesh = np.linspace(-0.5, 5.0, neloss)
# ground state list
gs_list=list(range(0, 3))
# temperature
T = 300
# END of PARAMETERS
#------------------
# load data, the eigenvalues of the initial and the intermediate Hamiltonian, and the transition matrix
data = np.loadtxt('eval_i.dat')
eval_i = data[:,1]
data = np.loadtxt('eval_n.dat')
eval_n = data[:,1]
ncfgs_n, ncfgs_i = len(eval_n), len(eval_i)
# the transition operator for the absorption process
data = np.loadtxt('trans_mat.dat')
trans_mat_abs = data[:,3].reshape((3, ncfgs_n, ncfgs_i)) + 1j * data[:,4].reshape((3, ncfgs_n, ncfgs_i))
# the transition operator for the emission process
trans_mat_emi = np.zeros((3, ncfgs_i, ncfgs_n), dtype=np.complex128)
for i in range(3):
trans_mat_emi[i] = np.conj(np.transpose(trans_mat_abs[i]))
# We calculate RIXS for \pi-\pi, \pi-\sigma, \sigma-\pi, \sigma-\sigma polarizations
rixs = np.zeros((4, neloss, nom), dtype=np.float64)
gs_prob = boltz_dist([eval_i[i] for i in gs_list], T)
polvec_list = [(0,0), (0,np.pi/2.0), (np.pi/2.0, 0), (np.pi/2.0, np.pi/2.0)]
print("edrixs >>> calculating RIXS ...")
for i, om_inc in enumerate(om_mesh):
print(" incident X-ray energy: ", i, " ", om_inc)
F_fi = scattering_mat(eval_i, eval_n, trans_mat_abs[:,:,gs_list], trans_mat_emi, om_inc, gamma_n)
for j, (alpha, beta) in enumerate(polvec_list):
ei, ef = dipole_polvec_rixs(thin, thout, phi, alpha, beta)
F_magnitude = np.zeros((ncfgs_i, len(gs_list)), dtype=np.complex128)
for m in range(3):
for n in range(3):
F_magnitude[:,:] += ef[m] * F_fi[m,n] * ei[n]
for m in gs_list:
for n in range(ncfgs_i):
rixs[j, :, i] += np.abs(F_magnitude[n,m])**2 * gamma_f/np.pi / ( (eloss_mesh-(eval_i[n]-eval_i[m]))**2 + gamma_f**2 ) * gs_prob[m]
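    # Hedged reading of the accumulation above (editor's note, not from the
    # original example): each final state n adds a Lorentzian of half-width
    # gamma_f centred at the excitation energy eval_i[n] - eval_i[m], weighted
    # by the squared scattering amplitude |F|**2 and the Boltzmann weight
    # gs_prob[m] of the initial state m.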
# gaussian broadening
inc_res = 0.17
emi_res = 0.12
gauss = np.zeros((neloss, nom))
mid_in = ( min(om_mesh) + max(om_mesh) ) /2.0
mid_out = ( min(eloss_mesh)+max(eloss_mesh)) /2.0
for i in range(nom):
for j in range(neloss):
gauss[j,i] = 1/(2.0*np.pi*inc_res*emi_res)*np.exp(-((mid_in-om_mesh[i])**2/(2*inc_res**2) + (mid_out-eloss_mesh[j])**2/(2*emi_res**2)))
for i in range(4):
rixs[i,:,:] = signal.fftconvolve(rixs[i,:,:], gauss, mode = 'same')
print("edrixs >>> done !")
f=open('rixs.dat', 'w')
for i in range(neloss):
for j in range(nom):
str_form = "{:20.10f}"*6 +"\n"
line=str_form.format(eloss_mesh[i], om_mesh[j]+om_offset, rixs[0,i,j], rixs[1,i,j], rixs[2,i,j], rixs[3,i,j])
f.write(line)
f.close()
|
[
"edrixs.photon_transition.dipole_polvec_rixs",
"numpy.abs",
"scipy.signal.fftconvolve",
"edrixs.utils.boltz_dist",
"numpy.zeros",
"numpy.transpose",
"numpy.loadtxt",
"numpy.linspace",
"numpy.exp",
"edrixs.rixs_utils.scattering_mat"
] |
[((1002, 1028), 'numpy.linspace', 'np.linspace', (['om1', 'om2', 'nom'], {}), '(om1, om2, nom)\n', (1013, 1028), True, 'import numpy as np\n'), ((1087, 1117), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(5.0)', 'neloss'], {}), '(-0.5, 5.0, neloss)\n', (1098, 1117), True, 'import numpy as np\n'), ((1372, 1396), 'numpy.loadtxt', 'np.loadtxt', (['"""eval_i.dat"""'], {}), "('eval_i.dat')\n", (1382, 1396), True, 'import numpy as np\n'), ((1432, 1456), 'numpy.loadtxt', 'np.loadtxt', (['"""eval_n.dat"""'], {}), "('eval_n.dat')\n", (1442, 1456), True, 'import numpy as np\n'), ((1598, 1625), 'numpy.loadtxt', 'np.loadtxt', (['"""trans_mat.dat"""'], {}), "('trans_mat.dat')\n", (1608, 1625), True, 'import numpy as np\n'), ((1810, 1862), 'numpy.zeros', 'np.zeros', (['(3, ncfgs_i, ncfgs_n)'], {'dtype': 'np.complex128'}), '((3, ncfgs_i, ncfgs_n), dtype=np.complex128)\n', (1818, 1862), True, 'import numpy as np\n'), ((2054, 2098), 'numpy.zeros', 'np.zeros', (['(4, neloss, nom)'], {'dtype': 'np.float64'}), '((4, neloss, nom), dtype=np.float64)\n', (2062, 2098), True, 'import numpy as np\n'), ((2113, 2156), 'edrixs.utils.boltz_dist', 'boltz_dist', (['[eval_i[i] for i in gs_list]', 'T'], {}), '([eval_i[i] for i in gs_list], T)\n', (2123, 2156), False, 'from edrixs.utils import boltz_dist\n'), ((3142, 3165), 'numpy.zeros', 'np.zeros', (['(neloss, nom)'], {}), '((neloss, nom))\n', (3150, 3165), True, 'import numpy as np\n'), ((2409, 2505), 'edrixs.rixs_utils.scattering_mat', 'scattering_mat', (['eval_i', 'eval_n', 'trans_mat_abs[:, :, gs_list]', 'trans_mat_emi', 'om_inc', 'gamma_n'], {}), '(eval_i, eval_n, trans_mat_abs[:, :, gs_list], trans_mat_emi,\n om_inc, gamma_n)\n', (2423, 2505), False, 'from edrixs.rixs_utils import scattering_mat\n'), ((3521, 3574), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['rixs[i, :, :]', 'gauss'], {'mode': '"""same"""'}), "(rixs[i, :, :], gauss, mode='same')\n", (3539, 3574), False, 'from scipy import signal\n'), ((1921, 1951), 'numpy.transpose', 'np.transpose', (['trans_mat_abs[i]'], {}), '(trans_mat_abs[i])\n', (1933, 1951), True, 'import numpy as np\n'), ((2579, 2628), 'edrixs.photon_transition.dipole_polvec_rixs', 'dipole_polvec_rixs', (['thin', 'thout', 'phi', 'alpha', 'beta'], {}), '(thin, thout, phi, alpha, beta)\n', (2597, 2628), False, 'from edrixs.photon_transition import dipole_polvec_rixs\n'), ((3383, 3499), 'numpy.exp', 'np.exp', (['(-((mid_in - om_mesh[i]) ** 2 / (2 * inc_res ** 2) + (mid_out - eloss_mesh[\n j]) ** 2 / (2 * emi_res ** 2)))'], {}), '(-((mid_in - om_mesh[i]) ** 2 / (2 * inc_res ** 2) + (mid_out -\n eloss_mesh[j]) ** 2 / (2 * emi_res ** 2)))\n', (3389, 3499), True, 'import numpy as np\n'), ((2950, 2975), 'numpy.abs', 'np.abs', (['F_magnitude[n, m]'], {}), '(F_magnitude[n, m])\n', (2956, 2975), True, 'import numpy as np\n')]
|
from pandac.PandaModules import *
from direct.particles import ParticleEffect
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import AppRunnerGlobal
import os
class CarSmoke(NodePath):
def __init__(self, parent):
NodePath.__init__(self)
notify = DirectNotifyGlobal.directNotify.newCategory('CarSmokeParticles')
self.effectNode = parent.attachNewNode('carSmoke')
self.effectNode.setBin('fixed', 1)
self.effectNode.setDepthWrite(0)
self.effect = ParticleEffect.ParticleEffect()
particleSearchPath = DSearchPath()
if AppRunnerGlobal.appRunner:
particleSearchPath.appendDirectory(Filename.expandFrom('$TT_3_5_ROOT/phase_3.5/etc'))
else:
basePath = os.path.expandvars('$TOONTOWN') or './toontown'
particleSearchPath.appendDirectory(Filename.fromOsSpecific(basePath + '/src/effects'))
particleSearchPath.appendDirectory(Filename('phase_3.5/etc'))
particleSearchPath.appendDirectory(Filename('phase_4/etc'))
particleSearchPath.appendDirectory(Filename('phase_5/etc'))
particleSearchPath.appendDirectory(Filename('phase_6/etc'))
particleSearchPath.appendDirectory(Filename('phase_7/etc'))
particleSearchPath.appendDirectory(Filename('phase_8/etc'))
particleSearchPath.appendDirectory(Filename('phase_9/etc'))
particleSearchPath.appendDirectory(Filename('.'))
pfile = Filename('smokeTest4.ptf')
found = vfs.resolveFilename(pfile, particleSearchPath)
if not found:
notify.warning('loadParticleFile() - no path: %s' % pfile)
return
notify.debug('Loading particle file: %s' % pfile)
self.effect.loadConfig(pfile)
ren = self.effect.getParticlesNamed('particles-1').getRenderer()
ren.setTextureFromNode('phase_4/models/props/tt_m_efx_ext_smoke', '**/*')
def start(self):
self.effect.start(parent=self.effectNode)
def stop(self):
try:
self.effect.disable()
except AttributeError:
pass
def destroy(self):
self.stop()
self.effect.cleanup()
self.effectNode.removeNode()
del self.effect
del self.effectNode
|
[
"os.path.expandvars",
"direct.directnotify.DirectNotifyGlobal.directNotify.newCategory",
"direct.particles.ParticleEffect.ParticleEffect"
] |
[((292, 356), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'DirectNotifyGlobal.directNotify.newCategory', (['"""CarSmokeParticles"""'], {}), "('CarSmokeParticles')\n", (335, 356), False, 'from direct.directnotify import DirectNotifyGlobal\n'), ((522, 553), 'direct.particles.ParticleEffect.ParticleEffect', 'ParticleEffect.ParticleEffect', ([], {}), '()\n', (551, 553), False, 'from direct.particles import ParticleEffect\n'), ((770, 801), 'os.path.expandvars', 'os.path.expandvars', (['"""$TOONTOWN"""'], {}), "('$TOONTOWN')\n", (788, 801), False, 'import os\n')]
|
from flask import (render_template, Blueprint, g, redirect, Response,
request, current_app, abort, url_for, jsonify)
from flask_babel import _
from marshmallow import Schema, fields, validates, ValidationError
from config import Config as cfg
from gcode_contour_circles import GCode_Contour_Circle
from gcode_contour_circles import GCode_Contour_Arc
from gcode_contour_rectangles import GCode_Contour_Rectangle
from gcode_contour_rectangles import GCode_Contour_RoundedRectangle
from app import app
import json
import sys
# By adding a url_prefix to the blueprint, we can tell the route to expect a
# language code in the first part of the URL.
multilingual = Blueprint('multilingual', __name__,
template_folder='templates', url_prefix='/<lang_code>')
buttons = ["btn_gen_gcode","btn_save_gcode","btn_close_gcode"]
def load_defaults():
try:
j = {}
fname = './defaults.json'
with open(fname ) as f:
j = json.load(f)
except Exception as err:
print (f"Error: {fname} file not found")
sys.exit(1)
return j
def load_materials():
try:
j = {}
fname = './materials.json'
with open(fname ) as f:
j = json.load(f)
except Exception as err:
print (f"Error: {fname} file not found")
sys.exit(1)
return j
def request2json(data):
d = {}
for k,v in data.items():
d[k] = v
return d
def returnButtonClicked(data):
"""return a tuple with button and boolean which button was clicked
Args:
data ([request.form]): [description]
"""
for b in buttons:
if b in data:
return (b, True)
return (None, None)
'''
The method add_language_code() is mainly responsible for providing a default
language code for all url_for() calls in our application in case the language
code is not specifically defined. This means that if we add a
url_for('multilingual.contour') call in the template, and our current language
is German, we will get the German version of the contour website and not the
English one.
'''
@multilingual.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', g.lang_code)
'''
The second method pull_lang_code() takes the language code out of our request
and into the g variable. Since our endpoints are not expecting any variables
passed to them we just handle the URL prefix in this manner and now Babel
can access the language and serve content respectively.
'''
@multilingual.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.lang_code = values.pop('lang_code')
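# Hedged example (assuming 'de' is in the configured LANGUAGES): once
# g.lang_code is 'de', a template call like url_for('multilingual.contour')
# resolves to '/de/contour', because add_language_code() fills in the missing
# lang_code default for the blueprint's url_prefix.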
@multilingual.before_request
def before_request():
if g.lang_code not in current_app.config['LANGUAGES']:
adapter = app.url_map.bind('')
try:
endpoint, args = adapter.match(
'/en' + request.full_path.rstrip('/ ?'))
return redirect(url_for(endpoint, **args), 301)
except:
abort(404)
dfl = request.url_rule.defaults
if 'lang_code' in dfl:
if dfl['lang_code'] != request.full_path.split('/')[1]:
abort(404)
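# Sketch of the fallback above: a request whose language code is unknown is
# re-matched as its '/en' equivalent and redirected with a 301; if even that
# fails to match a route, the request is aborted with a 404.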
@multilingual.route('/')
@multilingual.route('/index')
def index():
return render_template('multilingual/index.html', title=_('Home'))
#------- routes for contours
@multilingual.route('/contour', methods=['GET','POST'])
def contour():
return render_template('multilingual/contour/contour.html', title=_('Contour'))
@multilingual.route('gcode_view.html', methods=['GET','POST'])
def gcode_editor():
save_data = None
    if request.method == 'POST':
pass
@multilingual.route('downloadGCode')
def downloadGCode():
gcode = "M2"
return Response(
gcode,
mimetype = "text/text",
headers={"Content-disposition":
"attachment; filename=gcode.nc"}
)
@multilingual.route('/contour/circle360', methods=['GET','POST'])
def circle360():
defaults = load_defaults()
defaults['usexy'] = True
save_data = None
if request.method == 'POST':
data = request2json(request.form)
(btn, clicked) = returnButtonClicked(request.form)
if btn == "btn_gen_gcode":
gcgen = GCode_Contour_Circle(cfg)
data = gcgen.generateGcode(data)
data = gcgen.getGcode('ascii')
            save_data = data
return render_template('multilingual/gcode_view.html', title=_('GCode Editor'), data=data)
if btn == "btn_save_gcode":
            return render_template('multilingual/gcode_view.html', title=_('GCode Editor'), data=save_data)
else:
return render_template('multilingual/contour/circle360.html', title=_('Circle 360'), data=defaults)
@multilingual.route('/contour/circlearc', methods=['GET','POST'])
def circlearc():
defaults = load_defaults()
# add other center points
defaults['cp']['2'] = False
if request.method == 'POST':
data = request2json(request.form)
(btn, clicked) = returnButtonClicked(request.form)
if btn == "btn_gen_gcode":
gcgen = GCode_Contour_Arc(cfg)
data = gcgen.generateGcode(data)
data = gcgen.getGcode('ascii')
return render_template('multilingual/gcode_view.html', title=_('GCode Editor'), data=data)
else:
return render_template('multilingual/contour/circlearc.html', title=_('Circle arc'), data=defaults)
@multilingual.route('/contour/rectangle', methods=['GET','POST'])
def rectangle():
defaults = load_defaults()
# add other center points
defaults['cp']['2'] = False
save_data = None
if request.method == 'POST':
data = request2json(request.form)
(btn, clicked) = returnButtonClicked(request.form)
if btn == "btn_gen_gcode":
gcgen = GCode_Contour_Rectangle(cfg)
data = gcgen.generateGcode(data)
data = gcgen.getGcode('ascii')
save_data = data
return render_template('multilingual/gcode_view.html', title=_('GCode Editor'), data=data)
else:
return render_template('multilingual/contour/rectangle.html', title=_('rectangle'), data=defaults)
@multilingual.route('/contour/rectanglerounded', methods=['GET','POST'])
def rectanglerounded():
defaults = load_defaults()
# add other center points
defaults['cp']['2'] = False
if request.method == 'POST':
data = request2json(request.form)
(btn, clicked) = returnButtonClicked(request.form)
if btn == "btn_gen_gcode":
gcgen = GCode_Contour_RoundedRectangle(cfg)
data = gcgen.generateGcode(data)
data = gcgen.getGcode('ascii')
save_data = data
return render_template('multilingual/gcode_view.html', title=_('GCode Editor'), data=data)
pass
else:
return render_template('multilingual/contour/rectanglerounded.html', title=_('rectanglerounded'), data=defaults)
#------- routes for surfaces
@multilingual.route('/surface', methods=['GET','POST'])
def surface():
return render_template('multilingual/surface/surface.html', title=_('Surface'))
#------- routes for pockets
@multilingual.route('/pocket', methods=['GET','POST'])
def pocket():
return render_template('multilingual/pocket/pocket.html', title=_('Pocket'))
@multilingual.route('/drilling', methods=['GET','POST'])
def drilling():
return render_template('multilingual/drilling/drilling.html', title=_('Drilling'))
#-------- Routes for calculations AJAX -------------
@multilingual.route('/addxy', methods=['GET','POST'])
def addxy():
if request.method == "POST":
pass
return "addxy"
|
[
"app.app.url_map.bind",
"flask.Blueprint",
"json.load",
"flask.abort",
"gcode_contour_rectangles.GCode_Contour_Rectangle",
"flask.request.full_path.split",
"gcode_contour_circles.GCode_Contour_Circle",
"flask.url_for",
"gcode_contour_rectangles.GCode_Contour_RoundedRectangle",
"flask.request.full_path.rstrip",
"gcode_contour_circles.GCode_Contour_Arc",
"flask.Response",
"flask_babel._"
] |
[((674, 770), 'flask.Blueprint', 'Blueprint', (['"""multilingual"""', '__name__'], {'template_folder': '"""templates"""', 'url_prefix': '"""/<lang_code>"""'}), "('multilingual', __name__, template_folder='templates', url_prefix\n ='/<lang_code>')\n", (683, 770), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((3766, 3873), 'flask.Response', 'Response', (['gcode'], {'mimetype': '"""text/text"""', 'headers': "{'Content-disposition': 'attachment; filename=gcode.nc'}"}), "(gcode, mimetype='text/text', headers={'Content-disposition':\n 'attachment; filename=gcode.nc'})\n", (3774, 3873), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((2793, 2813), 'app.app.url_map.bind', 'app.url_map.bind', (['""""""'], {}), "('')\n", (2809, 2813), False, 'from app import app\n'), ((984, 996), 'json.load', 'json.load', (['f'], {}), '(f)\n', (993, 996), False, 'import json\n'), ((1238, 1250), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1247, 1250), False, 'import json\n'), ((3167, 3177), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3172, 3177), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((3308, 3317), 'flask_babel._', '_', (['"""Home"""'], {}), "('Home')\n", (3309, 3317), False, 'from flask_babel import _\n'), ((3493, 3505), 'flask_babel._', '_', (['"""Contour"""'], {}), "('Contour')\n", (3494, 3505), False, 'from flask_babel import _\n'), ((4279, 4304), 'gcode_contour_circles.GCode_Contour_Circle', 'GCode_Contour_Circle', (['cfg'], {}), '(cfg)\n', (4299, 4304), False, 'from gcode_contour_circles import GCode_Contour_Circle\n'), ((5153, 5175), 'gcode_contour_circles.GCode_Contour_Arc', 'GCode_Contour_Arc', (['cfg'], {}), '(cfg)\n', (5170, 5175), False, 'from gcode_contour_circles import GCode_Contour_Arc\n'), ((5872, 5900), 'gcode_contour_rectangles.GCode_Contour_Rectangle', 'GCode_Contour_Rectangle', (['cfg'], {}), '(cfg)\n', (5895, 5900), False, 'from gcode_contour_rectangles import GCode_Contour_Rectangle\n'), ((6618, 6653), 'gcode_contour_rectangles.GCode_Contour_RoundedRectangle', 'GCode_Contour_RoundedRectangle', (['cfg'], {}), '(cfg)\n', (6648, 6653), False, 'from gcode_contour_rectangles import GCode_Contour_RoundedRectangle\n'), ((7192, 7204), 'flask_babel._', '_', (['"""Surface"""'], {}), "('Surface')\n", (7193, 7204), False, 'from flask_babel import _\n'), ((7373, 7384), 'flask_babel._', '_', (['"""Pocket"""'], {}), "('Pocket')\n", (7374, 7384), False, 'from flask_babel import _\n'), ((7532, 7545), 'flask_babel._', '_', (['"""Drilling"""'], {}), "('Drilling')\n", (7533, 7545), False, 'from flask_babel import _\n'), ((2956, 2981), 'flask.url_for', 'url_for', (['endpoint'], {}), '(endpoint, **args)\n', (2963, 2981), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((3016, 3026), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3021, 3026), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((3122, 3150), 'flask.request.full_path.split', 'request.full_path.split', (['"""/"""'], {}), "('/')\n", (3145, 3150), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((4755, 4770), 'flask_babel._', '_', (['"""Circle 360"""'], {}), "('Circle 360')\n", (4756, 4770), False, 'from flask_babel import _\n'), ((5453, 5468), 'flask_babel._', '_', (['"""Circle arc"""'], {}), "('Circle arc')\n", (5454, 5468), False, 'from flask_babel import _\n'), ((6207, 6221), 'flask_babel._', '_', (['"""rectangle"""'], {}), "('rectangle')\n", (6208, 6221), False, 'from flask_babel import _\n'), ((6981, 7002), 'flask_babel._', '_', (['"""rectanglerounded"""'], {}), "('rectanglerounded')\n", (6982, 7002), False, 'from flask_babel import _\n'), ((2895, 2926), 'flask.request.full_path.rstrip', 'request.full_path.rstrip', (['"""/ ?"""'], {}), "('/ ?')\n", (2919, 2926), False, 'from flask import render_template, Blueprint, g, redirect, Response, request, current_app, abort, url_for, jsonify\n'), ((4495, 4512), 'flask_babel._', '_', (['"""GCode Editor"""'], {}), "('GCode Editor')\n", (4496, 4512), False, 'from flask_babel import _\n'), ((4634, 4651), 'flask_babel._', '_', (['"""GCode Editor"""'], {}), "('GCode Editor')\n", (4635, 4651), False, 'from flask_babel import _\n'), ((5337, 5354), 'flask_babel._', '_', (['"""GCode Editor"""'], {}), "('GCode Editor')\n", (5338, 5354), False, 'from flask_babel import _\n'), ((6091, 6108), 'flask_babel._', '_', (['"""GCode Editor"""'], {}), "('GCode Editor')\n", (6092, 6108), False, 'from flask_babel import _\n'), ((6844, 6861), 'flask_babel._', '_', (['"""GCode Editor"""'], {}), "('GCode Editor')\n", (6845, 6861), False, 'from flask_babel import _\n')]
|
print('Testing ntheory...', end='\t')
from ntheory import gcd,modinv,egcd,crt
gcd_tests = [
(1,1,1),
(1,2,1),
(2,2,2),
(2,4,2),
(3*5, 3*7, 3),
(312, 182, 26)
]
for (a,b,d) in gcd_tests :
assert gcd(a,b) == d
x,y,d = egcd(a,b)
assert x*a + y*b == d
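# Worked Bezout instance: for (a, b) = (312, 182) the gcd is 26, and one
# valid certificate is x = 3, y = -5, since 3*312 + (-5)*182 == 26.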
modinv_tests = [
(2,5), # 2*3 == 1 mod 5
(2,6), # 2 is not invertible mod 6
(5,6), # 5*5 == 1 mod 6
(3,11), # 3*4 == 1 mod 11
(321093,109239012)
]
for (a,m) in modinv_tests :
x = modinv(a,m)
    assert (x is None and gcd(a,m) != 1) or a*x % m == 1
crt_tests = [
([2,4,5],[3,5,7]),
([2,4,5],[9,5,7]),
([5,7,8,2],[11,26,17,19]),
([0,7,0,2],[59,26,17,19]),
([1], [2]),
([0], [2]),
([2], [3])
]
for (xs,ps) in crt_tests :
z = crt(xs,ps)
assert all(z % p == x % p for x,p in zip(xs,ps)), "Got {z} for residues {xs} and moduli {ps}".format(z=z,xs=xs,ps=ps)
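# Worked CRT instance: for residues [2, 4, 5] and moduli [3, 5, 7] any correct
# answer is congruent to 89 mod 105 (89 % 3 == 2, 89 % 5 == 4, 89 % 7 == 5).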
print('Done.')
|
[
"ntheory.egcd",
"ntheory.gcd",
"ntheory.crt",
"ntheory.modinv"
] |
[((250, 260), 'ntheory.egcd', 'egcd', (['a', 'b'], {}), '(a, b)\n', (254, 260), False, 'from ntheory import gcd, modinv, egcd, crt\n'), ((485, 497), 'ntheory.modinv', 'modinv', (['a', 'm'], {}), '(a, m)\n', (491, 497), False, 'from ntheory import gcd, modinv, egcd, crt\n'), ((735, 746), 'ntheory.crt', 'crt', (['xs', 'ps'], {}), '(xs, ps)\n', (738, 746), False, 'from ntheory import gcd, modinv, egcd, crt\n'), ((224, 233), 'ntheory.gcd', 'gcd', (['a', 'b'], {}), '(a, b)\n', (227, 233), False, 'from ntheory import gcd, modinv, egcd, crt\n'), ((520, 529), 'ntheory.gcd', 'gcd', (['a', 'm'], {}), '(a, m)\n', (523, 529), False, 'from ntheory import gcd, modinv, egcd, crt\n')]
|
# -*- coding:utf-8 -*-
# author:huawei
from python2sky.context.context_carrier import ContextCarrier
from tests.base_test_case import BaseTestCase
class TestContextCarrier(BaseTestCase):
def test_serialize(self):
self.assertEqual(self.SW6, self.context_carrier.serialize())
def test_deserialize(self):
text = self.SW6
context = ContextCarrier()
context.deserialize(text)
self.assertEqual(context.peer, self.context_carrier.peer)
|
[
"python2sky.context.context_carrier.ContextCarrier"
] |
[((365, 381), 'python2sky.context.context_carrier.ContextCarrier', 'ContextCarrier', ([], {}), '()\n', (379, 381), False, 'from python2sky.context.context_carrier import ContextCarrier\n')]
|
import fire
def main(input_file: str = "input.txt") -> None:
with open(input_file) as f:
data = [int(x) for x in f.read().splitlines()]
count = sum(cur > prev for prev, cur in zip(data[:-1], data[1:]))
print(count)
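    # Worked example (hypothetical input): for [1, 2, 1, 3] the pairs are
    # (1, 2), (2, 1), (1, 3); two of them satisfy cur > prev, so count == 2.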
if __name__ == "__main__":
fire.Fire(main)
|
[
"fire.Fire"
] |
[((272, 287), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (281, 287), False, 'import fire\n')]
|
# Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import logging
from time import sleep
from PySiddhi4.DataTypes.LongType import LongType
from PySiddhi4.core.SiddhiManager import SiddhiManager
from PySiddhi4.core.query.output.callback.QueryCallback import QueryCallback
from PySiddhi4.core.util.EventPrinter import PrintEvent
logging.basicConfig(level=logging.INFO)
class BasicTests(unittest.TestCase):
def setUp(self):
# Creating SiddhiManager
self.siddhiManager = SiddhiManager()
self.siddhiApp = "" + "define stream cseEventStream (symbol string, price float, volume long); " \
+ "" + "@info(name = 'query1') " + "from cseEventStream[volume < 150] " \
+ "select symbol,price " + "insert into outputStream ;"
# Generating runtime
# print(self.siddhiApp)
self.siddhiAppRuntime = self.siddhiManager.createSiddhiAppRuntime(self.siddhiApp)
def test_input_handler(self):
logging.info("Test1: Test Input Handler")
# Retrieving input handler to push events into Siddhi
inputHandler = self.siddhiAppRuntime.getInputHandler("cseEventStream")
# Starting event processing
self.siddhiAppRuntime.start()
# Sending events to Siddhi
inputHandler.send(["IBM", 700.0, LongType(100)])
inputHandler.send(["WSO2", 60.5, LongType(200)])
inputHandler.send(["GOOG", 50, LongType(30)])
inputHandler.send(["IBM", 76.6, LongType(400)])
def test_siddhi_app_runtime_callback(self):
logging.info("Test2: Test Siddhi App Runtime Callback")
# Adding callback to retrieve output events from query
global hitCount
hitCount = 2
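        # Why 2: of the four events sent below, only volumes 100 and 30 pass
        # the `volume < 150` filter, so receive() should fire exactly twice.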
class ConcreteQueryCallback(QueryCallback):
def receive(self, timestamp, inEvents, outEvents):
PrintEvent(timestamp, inEvents, outEvents)
global hitCount
hitCount -= 1
self.siddhiAppRuntime.addCallback("query1", ConcreteQueryCallback())
# Retrieving input handler to push events into Siddhi
inputHandler = self.siddhiAppRuntime.getInputHandler("cseEventStream")
# Starting event processing
self.siddhiAppRuntime.start()
# Sending events to Siddhi
inputHandler.send(["IBM", 700.0, LongType(100)])
inputHandler.send(["WSO2", 60.5, LongType(200)])
inputHandler.send(["GOOG", 50, LongType(30)])
inputHandler.send(["IBM", 76.6, LongType(400)])
sleep(0.5)
self.assertEqual(hitCount, 0)
def tearDown(self):
# shutting down the runtime
self.siddhiAppRuntime.shutdown()
# shutting down Siddhi
self.siddhiManager.shutdown()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"PySiddhi4.DataTypes.LongType.LongType",
"logging.basicConfig",
"time.sleep",
"PySiddhi4.core.SiddhiManager.SiddhiManager",
"logging.info",
"PySiddhi4.core.util.EventPrinter.PrintEvent"
] |
[((947, 986), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (966, 986), False, 'import logging\n'), ((3399, 3414), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3412, 3414), False, 'import unittest\n'), ((1109, 1124), 'PySiddhi4.core.SiddhiManager.SiddhiManager', 'SiddhiManager', ([], {}), '()\n', (1122, 1124), False, 'from PySiddhi4.core.SiddhiManager import SiddhiManager\n'), ((1606, 1647), 'logging.info', 'logging.info', (['"""Test1: Test Input Handler"""'], {}), "('Test1: Test Input Handler')\n", (1618, 1647), False, 'import logging\n'), ((2180, 2235), 'logging.info', 'logging.info', (['"""Test2: Test Siddhi App Runtime Callback"""'], {}), "('Test2: Test Siddhi App Runtime Callback')\n", (2192, 2235), False, 'import logging\n'), ((3145, 3155), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (3150, 3155), False, 'from time import sleep\n'), ((1940, 1953), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(100)'], {}), '(100)\n', (1948, 1953), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((1997, 2010), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(200)'], {}), '(200)\n', (2005, 2010), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((2052, 2064), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(30)'], {}), '(30)\n', (2060, 2064), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((2107, 2120), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(400)'], {}), '(400)\n', (2115, 2120), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((2477, 2519), 'PySiddhi4.core.util.EventPrinter.PrintEvent', 'PrintEvent', (['timestamp', 'inEvents', 'outEvents'], {}), '(timestamp, inEvents, outEvents)\n', (2487, 2519), False, 'from PySiddhi4.core.util.EventPrinter import PrintEvent\n'), ((2953, 2966), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(100)'], {}), '(100)\n', (2961, 2966), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((3010, 3023), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(200)'], {}), '(200)\n', (3018, 3023), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((3065, 3077), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(30)'], {}), '(30)\n', (3073, 3077), False, 'from PySiddhi4.DataTypes.LongType import LongType\n'), ((3120, 3133), 'PySiddhi4.DataTypes.LongType.LongType', 'LongType', (['(400)'], {}), '(400)\n', (3128, 3133), False, 'from PySiddhi4.DataTypes.LongType import LongType\n')]
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from operator import attrgetter
from robot.utils import setter
from .itemlist import ItemList
from .message import Message, Messages
from .modelobject import ModelObject
class Keyword(ModelObject):
"""Base model for single keyword."""
__slots__ = ['parent', 'name', 'doc', 'args', 'type', 'timeout',
'_sort_key', '_next_child_sort_key']
KEYWORD_TYPE = 'kw'
SETUP_TYPE = 'setup'
TEARDOWN_TYPE = 'teardown'
FOR_LOOP_TYPE = 'for'
FOR_ITEM_TYPE = 'foritem'
keyword_class = None
message_class = Message
def __init__(self, name='', doc='', args=(), type='kw', timeout=None):
#: :class:`~.model.testsuite.TestSuite` or
#: :class:`~.model.testcase.TestCase` or
#: :class:`~.model.keyword.Keyword` that contains this keyword.
self.parent = None
#: Keyword name.
self.name = name
#: Keyword documentation.
self.doc = doc
#: Keyword arguments, a list of strings.
self.args = args
#: 'SETUP', 'TEARDOWN' or 'KW'.
self.type = type
#: Keyword timeout.
self.timeout = timeout
#: Keyword messages as :class:`~.model.message.Message` instances.
self.messages = None
#: Child keywords as :class:`~.model.keyword.Keyword` instances.
self.keywords = None
self._sort_key = -1
self._next_child_sort_key = 0
@setter
def parent(self, parent):
if parent and parent is not self.parent:
self._sort_key = getattr(parent, '_child_sort_key', -1)
return parent
@property
def _child_sort_key(self):
self._next_child_sort_key += 1
return self._next_child_sort_key
@setter
def keywords(self, keywords):
return Keywords(self.keyword_class or self.__class__, self, keywords)
@setter
def messages(self, messages):
return Messages(self.message_class, self, messages)
@property
def children(self):
"""Child keywords and messages in creation order."""
# It would be cleaner to store keywords/messages in same `children`
# list and turn `keywords` and `messages` to properties that pick items
# from it. That would require bigger changes to the model, though.
return sorted(chain(self.keywords, self.messages),
key=attrgetter('_sort_key'))
@property
def id(self):
if not self.parent:
return 'k1'
return '%s-k%d' % (self.parent.id, self.parent.keywords.index(self)+1)
def visit(self, visitor):
visitor.visit_keyword(self)
class Keywords(ItemList):
__slots__ = []
def __init__(self, keyword_class=Keyword, parent=None, keywords=None):
ItemList.__init__(self, keyword_class, {'parent': parent}, keywords)
@property
def setup(self):
return self[0] if (self and self[0].type == 'setup') else None
@property
def teardown(self):
return self[-1] if (self and self[-1].type == 'teardown') else None
@property
def all(self):
return self
@property
def normal(self):
for kw in self:
if kw.type not in ('setup', 'teardown'):
yield kw
def __setitem__(self, index, item):
old = self[index]
ItemList.__setitem__(self, index, item)
self[index]._sort_key = old._sort_key
|
[
"operator.attrgetter",
"itertools.chain"
] |
[((2936, 2971), 'itertools.chain', 'chain', (['self.keywords', 'self.messages'], {}), '(self.keywords, self.messages)\n', (2941, 2971), False, 'from itertools import chain\n'), ((2999, 3022), 'operator.attrgetter', 'attrgetter', (['"""_sort_key"""'], {}), "('_sort_key')\n", (3009, 3022), False, 'from operator import attrgetter\n')]
|
from datetime import datetime, timezone, date, time, timedelta
from fastapi.testclient import TestClient
from humtemp.main import app
from humtemp.database import connection, connect
client = TestClient(app)
def setup_function():
connect(host='localhost')
connection.flushdb()
def test_calculation():
yesterday = datetime.combine(date.today() - timedelta(days=1), time.min, tzinfo=timezone.utc)
yesterday_ts = int(yesterday.timestamp())
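    # A worked instance (hypothetical date): if today is 2021-06-02 UTC, then
    # `yesterday` is 2021-06-01T00:00:00+00:00 and `yesterday_ts` is its
    # integer Unix timestamp.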
timestamp = yesterday_ts
for i in range(10):
data = {
"lab_id": "lab01",
"timestamp": timestamp,
"temp": i+1,
"humidity": 10+i+1
}
response = client.post('/observation', json=data)
assert response.status_code == 200
timestamp += 30
response = client.get('/summary')
assert response.status_code == 200
assert response.json() == [{'lab_id': 'lab01', 'avg_temp': 5.5, 'avg_humidity': 15.5}]
response = client.get('/summary', params={'offset': 0})
assert response.status_code == 200
assert response.json() == []
response = client.get('/summary', params={'offset': -2})
assert response.status_code == 200
assert response.json() == []
|
[
"datetime.date.today",
"humtemp.database.connect",
"fastapi.testclient.TestClient",
"datetime.timedelta",
"humtemp.database.connection.flushdb"
] |
[((195, 210), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (205, 210), False, 'from fastapi.testclient import TestClient\n'), ((239, 264), 'humtemp.database.connect', 'connect', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (246, 264), False, 'from humtemp.database import connection, connect\n'), ((269, 289), 'humtemp.database.connection.flushdb', 'connection.flushdb', ([], {}), '()\n', (287, 289), False, 'from humtemp.database import connection, connect\n'), ((349, 361), 'datetime.date.today', 'date.today', ([], {}), '()\n', (359, 361), False, 'from datetime import datetime, timezone, date, time, timedelta\n'), ((364, 381), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (373, 381), False, 'from datetime import datetime, timezone, date, time, timedelta\n')]
|
# This script gets quality metrics for the outputs.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from covid_bronx.quality import fasta_files, sam_files
from covid_bronx.quality.gaps import *
primer_binding_sites = "data/external/amplicon_binding_sites.csv"
for sample_id in tqdm(fasta_files.keys()):
try:
consensus_fasta = fasta_files[sample_id]
consensus_sam = sam_files[sample_id]
out = f"data/processed/quality/{sample_id}_gaps"
df = compute_primer_coverages(consensus_sam, consensus_fasta, primer_binding_sites, out)
df.to_csv(f"data/processed/quality/{sample_id}.csv")
    except Exception:
print(f"Could not read files for {sample_id}.")
|
[
"covid_bronx.quality.fasta_files.keys"
] |
[((331, 349), 'covid_bronx.quality.fasta_files.keys', 'fasta_files.keys', ([], {}), '()\n', (347, 349), False, 'from covid_bronx.quality import fasta_files, sam_files\n')]
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
import numpy as np
import pandas as pd
import argparse
from collections import deque
import pickle as pickle
from fast_jtnn import *
import rdkit
from tqdm import tqdm
vocab_path = './vocab.txt'
vocab = [x.strip("\r\n ") for x in open(vocab_path)]
vocab = Vocab(vocab)
prefix = '03'
model = JTNNVAE(vocab, 500, 128, 20, 3)
model.load_state_dict(torch.load('vae_model/model.iter-8000-04kl'))
model = model.cuda()
with open('./keys'+prefix+'.txt') as f:
data = [line.strip("\r\n ").split()[0] for line in f]
print(len(data))
ans = []
error_num = []
for num,k in tqdm(enumerate(data)):
try:
val = model.encode_from_smiles([k,])
ans.append((k,val))
except Exception as e:
print('Error on:',num,e)
error_num.append(num)
results = {}
# note: SMILES that failed to encode were never appended to `ans`, so the
# indices collected in `error_num` (which index `data`) do not apply here
for num,k in enumerate(ans):
x_tree_vecs = k[1][0][:500]
x_mol_vecs = k[1][0][500:]
z_tree_vecs,tree_kl = model.rsample(x_tree_vecs, model.T_mean, model.T_var)
z_mol_vecs,mol_kl = model.rsample(x_mol_vecs, model.G_mean, model.G_var)
z1 = z_tree_vecs.cpu().detach().numpy()
z2 = z_mol_vecs.cpu().detach().numpy()
results[k[0]] = (z1,z2)
vae_features = pd.DataFrame.from_dict(results,orient='index')
vae_features.to_csv('./vae_features'+prefix+'-04kl.csv')
|
[
"torch.load",
"pandas.DataFrame.from_dict"
] |
[((1380, 1427), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {'orient': '"""index"""'}), "(results, orient='index')\n", (1402, 1427), True, 'import pandas as pd\n'), ((562, 606), 'torch.load', 'torch.load', (['"""vae_model/model.iter-8000-04kl"""'], {}), "('vae_model/model.iter-8000-04kl')\n", (572, 606), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
year = datetime.datetime.now().strftime("%Y")
project = "Piccolo"
author = "<NAME>"
copyright = f"{year}, {author}"
import piccolo # noqa: E402
version = ".".join(piccolo.__VERSION__.split(".")[:2])
release = piccolo.__VERSION__
# -- General configuration ---------------------------------------------------
extensions = [
"sphinx.ext.coverage",
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# -- Intersphinx -------------------------------------------------------------
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
extensions += ["sphinx.ext.intersphinx"]
# -- Autodoc -----------------------------------------------------------------
extensions += ["sphinx.ext.autodoc"]
autodoc_typehints = "signature"
autodoc_typehints_format = "short"
autoclass_content = "both"
# -- Options for HTML output -------------------------------------------------
html_theme = "piccolo_theme"
html_show_sphinx = False
globaltoc_maxdepth = 3
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Piccolodoc"
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "piccolo", "Piccolo Documentation", [author], 1)]
|
[
"os.path.abspath",
"piccolo.__VERSION__.split",
"datetime.datetime.now"
] |
[((621, 645), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (636, 645), False, 'import os\n'), ((735, 758), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (756, 758), False, 'import datetime\n'), ((895, 925), 'piccolo.__VERSION__.split', 'piccolo.__VERSION__.split', (['"""."""'], {}), "('.')\n", (920, 925), False, 'import piccolo\n')]
|
import time
import sys
import json
import primality.miller_rabin as mr
import primality.solovay_strassen as ss
import prng.blum_blum_shub as bbs
import prng.mersenne_twister as mt
# Parameter
seed = 27
keys_sizes = [40, 56, 80, 128, 168, 224, 256, 512, 1024, 2048, 4096]
def test_simple_primality():
print("------>Testing pseudo primality<------")
    composite_numbers = [4,20,36,21,50]
    prime_numbers = [2,23,31,37,43]
    #MR
    for c in composite_numbers:
        assert not mr.is_prob_prime(c), f"ERROR(MR):Detect {c} composite as prime"
    print("(MR) Composite Detection: ok")
    for p in prime_numbers:
        assert mr.is_prob_prime(p), f"ERROR(MR):Detect {p} prime as composite"
    print("(MR) Prime Detection: ok")
    #SS
    for c in composite_numbers:
        assert not ss.is_prob_prime(c), f"ERROR(SS):Detect {c} composite as prime"
    print("(SS) Composite Detection: ok")
    for p in prime_numbers:
        assert ss.is_prob_prime(p), f"ERROR(SS):Detect {p} prime as composite"
    print("(SS) Prime Detection: ok")
def _gen_key(size, checker, generator):
prime_number = 4
aux = 0
while not checker.is_prob_prime(prime_number):
sys.stdout.write("Generating: %s \r" % ('.' * (aux%9)))
sys.stdout.flush()
aux += 1
prime_number = generator.gen_n_bits(size) | 1
return prime_number
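# Sketch of why `| 1` helps: setting the lowest bit makes the candidate odd,
# e.g. 0b1010 | 1 == 0b1011 (10 -> 11), so even composites are never tested.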
# Testing time
def test_key_gen_time():
print("------>Testing time for a key bit length<------")
keys = {}
print("========= MR | BBS =========")
keys['mr|bbs'] = {}
bbs.set_seed(23)
mt.set_as_mt19937_64()
mt.set_seed(23)
for key_size in keys_sizes:
start = time.time()
num = _gen_key(key_size, mr, bbs)
end = time.time()
print(f"\n{key_size} bits: {end - start} s")
keys['mr|bbs'][key_size] = {'key':num, 'time': end-start}
print("=======================")
print("========= MR | MT =========")
keys['mr|mt'] = {}
for key_size in keys_sizes:
start = time.time()
num = _gen_key(key_size, mr, mt)
end = time.time()
print(f"\n{key_size} bits: {end - start} s")
keys['mr|mt'][key_size] = {'key':num, 'time': end-start}
print("=======================")
json.dump(keys, open('test_keys_seed.json', 'w', encoding='utf-8'), ensure_ascii=False, indent=4)
# Unfortunately the test with SS was taking too much time and for some bit
# lengths it never finished :(
if __name__ == '__main__':
test_simple_primality()
test_key_gen_time()
|
[
"sys.stdout.write",
"prng.blum_blum_shub.set_seed",
"primality.miller_rabin.is_prob_prime",
"time.time",
"prng.mersenne_twister.set_seed",
"sys.stdout.flush",
"prng.mersenne_twister.set_as_mt19937_64",
"primality.solovay_strassen.is_prob_prime"
] |
[((1550, 1566), 'prng.blum_blum_shub.set_seed', 'bbs.set_seed', (['(23)'], {}), '(23)\n', (1562, 1566), True, 'import prng.blum_blum_shub as bbs\n'), ((1571, 1593), 'prng.mersenne_twister.set_as_mt19937_64', 'mt.set_as_mt19937_64', ([], {}), '()\n', (1591, 1593), True, 'import prng.mersenne_twister as mt\n'), ((1598, 1613), 'prng.mersenne_twister.set_seed', 'mt.set_seed', (['(23)'], {}), '(23)\n', (1609, 1613), True, 'import prng.mersenne_twister as mt\n'), ((639, 658), 'primality.miller_rabin.is_prob_prime', 'mr.is_prob_prime', (['p'], {}), '(p)\n', (655, 658), True, 'import primality.miller_rabin as mr\n'), ((948, 967), 'primality.solovay_strassen.is_prob_prime', 'ss.is_prob_prime', (['p'], {}), '(p)\n', (964, 967), True, 'import primality.solovay_strassen as ss\n'), ((1183, 1240), 'sys.stdout.write', 'sys.stdout.write', (["('Generating: %s \\r' % ('.' * (aux % 9)))"], {}), "('Generating: %s \\r' % ('.' * (aux % 9)))\n", (1199, 1240), False, 'import sys\n'), ((1247, 1265), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1263, 1265), False, 'import sys\n'), ((1663, 1674), 'time.time', 'time.time', ([], {}), '()\n', (1672, 1674), False, 'import time\n'), ((1731, 1742), 'time.time', 'time.time', ([], {}), '()\n', (1740, 1742), False, 'import time\n'), ((2012, 2023), 'time.time', 'time.time', ([], {}), '()\n', (2021, 2023), False, 'import time\n'), ((2079, 2090), 'time.time', 'time.time', ([], {}), '()\n', (2088, 2090), False, 'import time\n'), ((491, 510), 'primality.miller_rabin.is_prob_prime', 'mr.is_prob_prime', (['c'], {}), '(c)\n', (507, 510), True, 'import primality.miller_rabin as mr\n'), ((800, 819), 'primality.solovay_strassen.is_prob_prime', 'ss.is_prob_prime', (['c'], {}), '(c)\n', (816, 819), True, 'import primality.solovay_strassen as ss\n')]
|
# encoding: utf-8
# Standard Library
import json
from typing import Any
from typing import Dict
from typing import List
from typing import Union
from typing import Optional
from typing import NoReturn
# 3rd Party Library
from requests import Response
# Current Folder
from .exception import ColumnException
from .exception import SpreadsheetException
# Current Application
from bktools.framework.google import session
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
class Column(object):
__slots__ = ('__index', '__value', )
    def __init__(self, index: Union[int, str]):
        self.__index = index
        self.__value = None  # slot declared above; must be initialised explicitly
@property
def index(self) -> Union[int, str]:
return self.__index
@property
def empty(self) -> bool:
return self.__value is None
@property
def value(self) -> Optional[Any]:
return self.__value
@property
def string(self) -> Optional[Any]:
return self.__value
@property
def integer(self) -> Optional[Any]:
return int(self.__value) if self.__value else None
@property
def decimal(self) -> Optional[Any]:
return float(self.__value) if self.__value else None
class Row(object):
__slots__ = ('__index', '__columns', )
def __init__(self, index: int):
self.__index = index
self.__columns: List[Column] = list()
@property
def index(self) -> int:
return self.__index
def add_column(self, column: Column) -> NoReturn:
        if [r for r in self.__columns if r.index == column.index]:  # double-underscore names are mangled per-class, so use the public property
raise ColumnException('', '')
self.__columns.append(column)
def columns(self) -> List[Column]:
return self.__columns
class Worksheet(object):
__slots__ = ('__spreadsheet', '__identity', '__index', '__title', '__rows')
def __init__(self, spreadsheet: str, properties: Dict):
self.__spreadsheet: str = spreadsheet
self.__identity: str = properties['sheetId']
self.__index: int = int(properties['index'])
self.__title: str = properties['title']
self.__rows: List[Row] = list()
@property
def identity(self) -> int:
return int(self.__identity)
@property
def index(self) -> int:
return int(self.__index)
@property
def title(self) -> str:
return self.__title
@property
def rows(self) -> List[Row]:
return self.__rows
def load(self) -> NoReturn:
result: Response = session.execute('sheet', 'get', 'general', self.__spreadsheet, params={
'ranges': self.title,
'includeGridData': True
})
print(json.loads(result.text))
def __repr__(self):
return f"<Worksheet [{self.index}][{self.title}]>"
class Spreadsheet(object):
__slots__ = ('__sheet', '__identity', '__title', '__url', '__worksheets')
def __init__(self, identity: str, title: str):
self.__identity = identity
self.__title = title
self.__worksheets: List[Worksheet] = list()
result: Response = session.execute('sheet', 'get', 'general', identity)
metadata: Dict = json.loads(result.text)
self.__url = metadata['spreadsheetUrl']
for sheet in metadata['sheets']:
self.__worksheets.append(Worksheet(self.__identity, sheet['properties']))
@property
def identity(self) -> str:
return self.__identity
@property
def title(self) -> str:
return self.__title
def create(self, title: str, rows: int, cols: int, index: int = None) -> Worksheet:
body = {
'requests': [
{
'addSheet': {
'properties': {
'title': title,
'sheetType': 'GRID',
'gridProperties': {
'rowCount': rows,
'columnCount': cols
}
}
}
}
]
}
if index is not None:
body["requests"][0]["addSheet"]["properties"]["index"] = index
result: Response = session.execute('sheet', 'post', 'batch_update', self.__identity, body)
if result.status_code == 400:
payload = json.loads(result.text)['error']
raise SpreadsheetException(payload['code'], payload['message'])
worksheet = Worksheet(self.identity, json.loads(result.text)['replies'][0]['addSheet']['properties'])
self.__worksheets.append(worksheet)
return worksheet
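    # Hedged usage sketch (spreadsheet id and titles are hypothetical):
    #   book = Spreadsheet('1AbC...', 'Report')
    #   sheet = book.create('2024-01', rows=100, cols=26, index=0)
    #   book.delete(sheet)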
def delete(self, worksheet: Worksheet) -> NoReturn:
body = {
'requests': [
{
'deleteSheet': {
'sheetId': worksheet.identity
}
}
]
}
session.execute('sheet', 'post', 'batch_update', self.__identity, body)
self.__worksheets.remove(worksheet)
def worksheets(self) -> List[Worksheet]:
return self.__worksheets
def __repr__(self):
return f"<Spreadsheet [{self.__title}][{self.__identity}]>"
|
[
"bktools.framework.google.session.execute",
"json.loads"
] |
[((2540, 2663), 'bktools.framework.google.session.execute', 'session.execute', (['"""sheet"""', '"""get"""', '"""general"""', 'self.__spreadsheet'], {'params': "{'ranges': self.title, 'includeGridData': True}"}), "('sheet', 'get', 'general', self.__spreadsheet, params={\n 'ranges': self.title, 'includeGridData': True})\n", (2555, 2663), False, 'from bktools.framework.google import session\n'), ((3120, 3172), 'bktools.framework.google.session.execute', 'session.execute', (['"""sheet"""', '"""get"""', '"""general"""', 'identity'], {}), "('sheet', 'get', 'general', identity)\n", (3135, 3172), False, 'from bktools.framework.google import session\n'), ((3198, 3221), 'json.loads', 'json.loads', (['result.text'], {}), '(result.text)\n', (3208, 3221), False, 'import json\n'), ((4267, 4338), 'bktools.framework.google.session.execute', 'session.execute', (['"""sheet"""', '"""post"""', '"""batch_update"""', 'self.__identity', 'body'], {}), "('sheet', 'post', 'batch_update', self.__identity, body)\n", (4282, 4338), False, 'from bktools.framework.google import session\n'), ((4971, 5042), 'bktools.framework.google.session.execute', 'session.execute', (['"""sheet"""', '"""post"""', '"""batch_update"""', 'self.__identity', 'body'], {}), "('sheet', 'post', 'batch_update', self.__identity, body)\n", (4986, 5042), False, 'from bktools.framework.google import session\n'), ((2708, 2731), 'json.loads', 'json.loads', (['result.text'], {}), '(result.text)\n', (2718, 2731), False, 'import json\n'), ((4399, 4422), 'json.loads', 'json.loads', (['result.text'], {}), '(result.text)\n', (4409, 4422), False, 'import json\n'), ((4554, 4577), 'json.loads', 'json.loads', (['result.text'], {}), '(result.text)\n', (4564, 4577), False, 'import json\n')]
|
from unittest import TestCase
from pbx_gs_python_utils.utils.Dev import Dev
from pbx_gs_python_utils.utils.Misc import Misc
from osbot_jupyter.api.Jupyter_Kernel import Jupyter_Kernel
from osbot_jupyter.helpers.Test_Server import Test_Server
class test_Jupyter_Session(TestCase):
def setUp(self):
self.notebook = 'work/test-AAAA.ipynb'
self.jp_session = Test_Server().docker().jupyter_session().set_to_notebook(self.notebook)
self.result = None
def tearDown(self):
if self.jp_session.exists():
self.jp_session.delete()
if self.result is not None:
Dev.pprint(self.result)
def test_info(self):
info = self.jp_session.info()
assert set(info) == {'kernel', 'name', 'id', 'type', 'path'}
assert info.get('path') == self.notebook
assert info.get('type') == 'python3'
assert info.get('id') == self.jp_session.session_id
assert info.get('kernel').get('name') == 'python3'
def test_delete(self):
assert self.jp_session.delete() is True
assert self.jp_session.delete() is False
def test_rename(self):
name = Misc.random_string_and_numbers()
assert self.jp_session.rename(name).get('name') == name
assert self.jp_session.info ( ).get('name') == name
def test_sessions(self):
assert len(self.jp_session.sessions()) > 0
def test_sessions_ids(self):
assert len(self.jp_session.sessions_ids()) > 0
|
[
"pbx_gs_python_utils.utils.Dev.Dev.pprint",
"osbot_jupyter.helpers.Test_Server.Test_Server",
"pbx_gs_python_utils.utils.Misc.Misc.random_string_and_numbers"
] |
[((1179, 1211), 'pbx_gs_python_utils.utils.Misc.Misc.random_string_and_numbers', 'Misc.random_string_and_numbers', ([], {}), '()\n', (1209, 1211), False, 'from pbx_gs_python_utils.utils.Misc import Misc\n'), ((631, 654), 'pbx_gs_python_utils.utils.Dev.Dev.pprint', 'Dev.pprint', (['self.result'], {}), '(self.result)\n', (641, 654), False, 'from pbx_gs_python_utils.utils.Dev import Dev\n'), ((381, 394), 'osbot_jupyter.helpers.Test_Server.Test_Server', 'Test_Server', ([], {}), '()\n', (392, 394), False, 'from osbot_jupyter.helpers.Test_Server import Test_Server\n')]
|
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import numpy as np
import torch
from kamal.core.metrics.stream_metrics import Metric
from typing import Callable
__all__=['AverageMetric']
class AverageMetric(Metric):
def __init__(self, fn:Callable, attach_to=None):
super(AverageMetric, self).__init__(attach_to=attach_to)
self._fn = fn
self.reset()
@torch.no_grad()
def update(self, outputs, targets):
outputs, targets = self._attach(outputs, targets)
m = self._fn( outputs, targets )
if m.ndim > 1:
self._cnt += m.shape[0]
self._accum += m.sum(0)
else:
self._cnt += 1
self._accum += m
def get_results(self):
return (self._accum / self._cnt).detach().cpu()
def reset(self):
self._accum = 0.
self._cnt = 0.
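# Hedged usage sketch (the metric fn and tensors are hypothetical):
#   mae = AverageMetric(fn=lambda out, tgt: (out - tgt).abs().mean(0))
#   mae.update(torch.randn(8, 1), torch.randn(8, 1))
#   mae.get_results()  # running mean over everything seen since reset()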
|
[
"torch.no_grad"
] |
[((1001, 1016), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1014, 1016), False, 'import torch\n')]
|
#!/usr/bin/python
import sys, os, argparse, re, json
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
from signal import signal, SIGINT, SIG_IGN
#from Canvas import Line
DESCRIPTION="""
A utility to help parse results from the tgen traffic generator.
This script enables processing of tgen log files and storing processed
data in json format for plotting. It was written so that the log files
need never be stored on disk decompressed, which is useful when log file
sizes reach tens of gigabytes.
Use the help menu to understand usage:
$ python parse-tgen.py -h
The standard way to run the script is to give the path to a directory tree
under which one or several tgen log files exist:
$ python parse-tgen.py shadow.data/hosts/
$ python parse-tgen.py ./
This path will be searched for log files whose names match those created
by shadow; additional patterns can be added with the '-e' option.
A single tgen log file can also be passed on STDIN with the special '-' path:
$ cat tgen.log | python parse-tgen.py -
$ xzcat tgen.log.xz | python parse-tgen.py -
The default mode is to filter and parse the log files using a single
process; this will be done with multiple worker processes when passing
the '-m' option.
"""
TGENJSON="stats.tgen.json"
def main():
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawTextHelpFormatter)#ArgumentDefaultsHelpFormatter)
parser.add_argument(
help="""The PATH to search for tgen log files, which may be '-'
for STDIN; each log file may end in '.xz' to enable
inline xz decompression""",
metavar="PATH",
action="store", dest="searchpath")
parser.add_argument('-e', '--expression',
help="""Append a regex PATTERN to the list of strings used with
re.search to find tgen log file names in the search path""",
action="append", dest="patterns",
metavar="PATTERN",
default=["tgen.*\.log"])
parser.add_argument('-f', '--filter',
help="""Specify comma delimited list of substrings that must be found
in the filename in order to be considered""",
action="store", dest="filters_string",
metavar="FILTER",
default="")
parser.add_argument('-m', '--multiproc',
help="""Enable multiprocessing with N worker process, use '0'
to use the number of processor cores""",
metavar="N",
action="store", dest="nprocesses", type=type_nonnegative_integer,
default=1)
parser.add_argument('-p', '--prefix',
help="""A STRING directory path prefix where the processed data
files generated by this script will be written""",
metavar="STRING",
action="store", dest="prefix",
default=os.getcwd())
args = parser.parse_args()
args.searchpath = os.path.abspath(os.path.expanduser(args.searchpath))
args.prefix = os.path.abspath(os.path.expanduser(args.prefix))
if args.nprocesses == 0: args.nprocesses = cpu_count()
run(args)
def run(args):
logfilepaths = find_file_paths(args.searchpath, args.patterns, args.filters_string.split('|'))
print >> sys.stderr, "processing input from {0} files...".format(len(logfilepaths))
p = Pool(args.nprocesses)
r = []
try:
mr = p.map_async(process_tgen_log, logfilepaths)
p.close()
while not mr.ready(): mr.wait(1)
r = mr.get()
except KeyboardInterrupt:
print >> sys.stderr, "interrupted, terminating process pool"
p.terminate()
p.join()
sys.exit()
d = {'nodes':{}}
name_count, noname_count, success_count, error_count = 0, 0, 0, 0
for item in r:
if item is None:
continue
name, data = item[0], item[1]
if name is None:
noname_count += 1
continue
name_count += 1
d['nodes'][name] = data
success_count += item[2]
error_count += item[3]
print >> sys.stderr, "done processing input: {0} total successes, {1} total errors, {2} files with names, {3} files without names".format(success_count, error_count, name_count, noname_count)
print >> sys.stderr, "dumping stats in {0}".format(args.prefix)
dump(d, args.prefix, TGENJSON)
print >> sys.stderr, "all done!"
def process_tgen_log(filename):
signal(SIGINT, SIG_IGN) # ignore interrupts
source, xzproc = source_prepare(filename)
d = {'firstbyte':{}, 'lastbyte':{}, 'errors':{}}
name = None
success_count, error_count = 0, 0
for line in source:
try:
if name is None and re.search("Initializing traffic generator on host", line) is not None:
name = line.strip().split()[11]
elif re.search("transfer-complete", line) is not None or re.search("transfer-error", line) is not None:
parts = line.strip().split()
if len(parts) < 26: continue
sim_seconds = timestamp_to_seconds(parts[2])
second = int(sim_seconds)
ioparts = parts[13].split('=')
iodirection = ioparts[0]
if 'read' not in iodirection: continue #return None # this is a server, do we want its stats?
bytes = int(ioparts[1].split('/')[0])
if 'transfer-complete' in parts[6]:
success_count += 1
cmdtime = int(parts[21].split('=')[1])/1000000.0
rsptime = int(parts[22].split('=')[1])/1000000.0
fbtime = int(parts[23].split('=')[1])/1000000.0
lbtime = int(parts[24].split('=')[1])/1000000.0
chktime = int(parts[25].split('=')[1])/1000000.0
if bytes not in d['firstbyte']: d['firstbyte'][bytes] = {}
if second not in d['firstbyte'][bytes]: d['firstbyte'][bytes][second] = []
d['firstbyte'][bytes][second].append(fbtime-cmdtime)
if bytes not in d['lastbyte']: d['lastbyte'][bytes] = {}
if second not in d['lastbyte'][bytes]: d['lastbyte'][bytes][second] = []
d['lastbyte'][bytes][second].append(lbtime-cmdtime)
elif 'transfer-error' in parts[6]:
error_count += 1
code = parts[10].strip('()').split(',')[8].split('=')[1]
if code not in d['errors']: d['errors'][code] = {}
if second not in d['errors'][code]: d['errors'][code][second] = []
d['errors'][code][second].append(bytes)
except: continue # data format error
source_cleanup(filename, source, xzproc)
return [name, d, success_count, error_count]
def find_file_paths(searchpath, patterns, filters):
paths = []
if searchpath.endswith("/-"): paths.append("-")
else:
for root, dirs, files in os.walk(searchpath):
for name in files:
found = False
fpath = os.path.join(root, name)
fbase = os.path.basename(fpath)
for pattern in patterns:
if re.search(pattern, fbase) and not any(s not in fbase for s in filters):
found = True
if found: paths.append(fpath)
return paths
def type_nonnegative_integer(value):
i = int(value)
if i < 0: raise argparse.ArgumentTypeError("%s is an invalid non-negative int value" % value)
return i
def timestamp_to_seconds(stamp): # unix timestamp
return float(stamp)
def source_prepare(filename):
source, xzproc = None, None
if filename == '-':
source = sys.stdin
elif filename.endswith(".xz"):
xzproc = Popen(["xz", "--decompress", "--stdout", filename], stdout=PIPE)
source = xzproc.stdout
else:
source = open(filename, 'r')
return source, xzproc
def source_cleanup(filename, source, xzproc):
if xzproc is not None: xzproc.wait()
elif filename != '-': source.close()
def dump(data, prefix, filename, compress=True):
if not os.path.exists(prefix): os.makedirs(prefix)
if compress: # inline compression
with open("/dev/null", 'a') as nullf:
path = "{0}/{1}.xz".format(prefix, filename)
xzp = Popen(["xz", "--threads=3", "-"], stdin=PIPE, stdout=PIPE)
ddp = Popen(["dd", "of={0}".format(path)], stdin=xzp.stdout, stdout=nullf, stderr=nullf)
json.dump(data, xzp.stdin, sort_keys=True, separators=(',', ': '), indent=2)
xzp.stdin.close()
xzp.wait()
ddp.wait()
else: # no compression
path = "{0}/{1}".format(prefix, filename)
with open(path, 'w') as outf: json.dump(data, outf, sort_keys=True, separators=(',', ': '), indent=2)
if __name__ == '__main__': sys.exit(main())
|
[
"json.dump",
"subprocess.Popen",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.getcwd",
"os.path.basename",
"os.walk",
"argparse.ArgumentTypeError",
"os.path.exists",
"re.search",
"multiprocessing.Pool",
"signal.signal",
"os.path.expanduser",
"sys.exit",
"multiprocessing.cpu_count"
] |
[((1309, 1409), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION', 'formatter_class': 'argparse.\n RawTextHelpFormatter'}), '(description=DESCRIPTION, formatter_class=argparse.\n RawTextHelpFormatter)\n', (1332, 1409), False, 'import sys, os, argparse, re, json\n'), ((3231, 3252), 'multiprocessing.Pool', 'Pool', (['args.nprocesses'], {}), '(args.nprocesses)\n', (3235, 3252), False, 'from multiprocessing import Pool, cpu_count\n'), ((4332, 4355), 'signal.signal', 'signal', (['SIGINT', 'SIG_IGN'], {}), '(SIGINT, SIG_IGN)\n', (4338, 4355), False, 'from signal import signal, SIGINT, SIG_IGN\n'), ((2841, 2876), 'os.path.expanduser', 'os.path.expanduser', (['args.searchpath'], {}), '(args.searchpath)\n', (2859, 2876), False, 'import sys, os, argparse, re, json\n'), ((2912, 2943), 'os.path.expanduser', 'os.path.expanduser', (['args.prefix'], {}), '(args.prefix)\n', (2930, 2943), False, 'import sys, os, argparse, re, json\n'), ((2992, 3003), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (3001, 3003), False, 'from multiprocessing import Pool, cpu_count\n'), ((6894, 6913), 'os.walk', 'os.walk', (['searchpath'], {}), '(searchpath)\n', (6901, 6913), False, 'import sys, os, argparse, re, json\n'), ((7386, 7463), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is an invalid non-negative int value' % value)"], {}), "('%s is an invalid non-negative int value' % value)\n", (7412, 7463), False, 'import sys, os, argparse, re, json\n'), ((8077, 8099), 'os.path.exists', 'os.path.exists', (['prefix'], {}), '(prefix)\n', (8091, 8099), False, 'import sys, os, argparse, re, json\n'), ((8101, 8120), 'os.makedirs', 'os.makedirs', (['prefix'], {}), '(prefix)\n', (8112, 8120), False, 'import sys, os, argparse, re, json\n'), ((2758, 2769), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2767, 2769), False, 'import sys, os, argparse, re, json\n'), ((3556, 3566), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3564, 3566), False, 'import sys, os, argparse, re, json\n'), ((7718, 7782), 'subprocess.Popen', 'Popen', (["['xz', '--decompress', '--stdout', filename]"], {'stdout': 'PIPE'}), "(['xz', '--decompress', '--stdout', filename], stdout=PIPE)\n", (7723, 7782), False, 'from subprocess import Popen, PIPE\n'), ((8280, 8338), 'subprocess.Popen', 'Popen', (["['xz', '--threads=3', '-']"], {'stdin': 'PIPE', 'stdout': 'PIPE'}), "(['xz', '--threads=3', '-'], stdin=PIPE, stdout=PIPE)\n", (8285, 8338), False, 'from subprocess import Popen, PIPE\n'), ((8452, 8528), 'json.dump', 'json.dump', (['data', 'xzp.stdin'], {'sort_keys': '(True)', 'separators': "(',', ': ')", 'indent': '(2)'}), "(data, xzp.stdin, sort_keys=True, separators=(',', ': '), indent=2)\n", (8461, 8528), False, 'import sys, os, argparse, re, json\n'), ((8720, 8791), 'json.dump', 'json.dump', (['data', 'outf'], {'sort_keys': '(True)', 'separators': "(',', ': ')", 'indent': '(2)'}), "(data, outf, sort_keys=True, separators=(',', ': '), indent=2)\n", (8729, 8791), False, 'import sys, os, argparse, re, json\n'), ((7000, 7024), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (7012, 7024), False, 'import sys, os, argparse, re, json\n'), ((7049, 7072), 'os.path.basename', 'os.path.basename', (['fpath'], {}), '(fpath)\n', (7065, 7072), False, 'import sys, os, argparse, re, json\n'), ((4600, 4657), 're.search', 're.search', (['"""Initializing traffic generator on host"""', 'line'], {}), "('Initializing traffic generator on host', line)\n", (4609, 4657), False, 'import sys, os, argparse, re, json\n'), ((4736, 4772), 're.search', 're.search', (['"""transfer-complete"""', 'line'], {}), "('transfer-complete', line)\n", (4745, 4772), False, 'import sys, os, argparse, re, json\n'), ((4788, 4821), 're.search', 're.search', (['"""transfer-error"""', 'line'], {}), "('transfer-error', line)\n", (4797, 4821), False, 'import sys, os, argparse, re, json\n'), ((7137, 7162), 're.search', 're.search', (['pattern', 'fbase'], {}), '(pattern, fbase)\n', (7146, 7162), False, 'import sys, os, argparse, re, json\n')]
|
from django import forms
from .models import Quizark
from random import randint
def random_user():
adj = ["kul", "teit", "rar", "gul", "glittrende"]
sub = ["pølse", "ku", "gris", "ape", "sykkel", "sko", "esel"]
return adj[randint(0, len(adj)-1)].capitalize() + sub[randint(0, len(sub)-1)].capitalize()
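# Equivalent sketch with random.choice (an alternative, not the form's code):
#   return random.choice(adj).capitalize() + random.choice(sub).capitalize()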
class QuizCodeForm(forms.Form):
    # Must fix the bug here
code = forms.IntegerField(label='Kode: ', min_value=100000, max_value=999999)
    username = forms.CharField(required=False, max_length=100, initial=random_user)
class QuizCreateForm(forms.Form):
quizark = forms.ModelChoiceField(queryset=Quizark.objects.all())
|
[
"django.forms.CharField",
"django.forms.IntegerField"
] |
[((382, 452), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'label': '"""Kode: """', 'min_value': '(100000)', 'max_value': '(999999)'}), "(label='Kode: ', min_value=100000, max_value=999999)\n", (400, 452), False, 'from django import forms\n'), ((468, 538), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)', 'max_length': '"""100"""', 'initial': 'random_user'}), "(required=False, max_length='100', initial=random_user)\n", (483, 538), False, 'from django import forms\n')]
|
from datetime import datetime
import json
import os
from openpyxl import Workbook
from openpyxl.styles import Font
import re
import requests
import time
def timer(func):
"""
Print the runtime of the decorated function
:param func: function that we want to be timed
:return: value of function, but prints string of how long function ran
"""
import functools
import time
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.time()
value = func(*args, **kwargs)
end_time = time.time()
run_time = end_time - start_time
if run_time > 3659:
hours = int(run_time/3600)
print(f"Finished {func.__name__} in {hours:.0f} hours, {(run_time-hours*3600)/60:.0f} minutes and {run_time%60:.0f} secs")
elif run_time > 59:
print(f"Finished {func.__name__} in {run_time/60:.0f} minutes and {run_time%60:.0f} secs")
else:
print(f"Finished {func.__name__} in {run_time:.2f} secs")
return value
return wrapper_timer
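# Usage sketch for the timer decorator (illustrative only, not called below):
#
#     @timer
#     def crunch():
#         time.sleep(1.5)
#
#     crunch()  # prints e.g. "Finished crunch in 1.50 secs"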
def collection_call(next_page=1, page_call=''):
"""
    Makes an API call to fetch one page of the collection
    :param next_page: page number to fetch; 1 triggers the initial call
    :param page_call: URL string for the next page of the API call
    :return: list of details for collection items, pagination dict
"""
user_name = os.getenv('USER_NAME')
token = os.getenv('DISCOGS_TOKEN')
if next_page == 1:
get_url = 'https://api.discogs.com/users/{}/collection/folders/0/releases?token={}'.format(user_name, token)
else:
get_url = page_call
r = requests.get(get_url)
response = r.json()
return response.get("releases"), response.get("pagination")
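# The pagination dict returned by the Discogs API is roughly of this shape
# (illustrative, trimmed):
#
#     {"page": 1, "pages": 3, "per_page": 50, "items": 130,
#      "urls": {"next": "https://api.discogs.com/...", "last": "..."}}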
def collection_grab():
"""
Iterates over discogs collection API call to build a list() of titles
:return: completed list of all items in Discogs collection
"""
titles = list()
init_titles, pagination = collection_call()
titles += init_titles
page_tries = pagination.get("pages")
next_call = pagination.get("urls", {}).get("next")
if page_tries == 1:
print("Titles less than 50")
else:
for page in range(2, page_tries+1):
new_titles, new_page = collection_call(page, next_call)
titles += new_titles
next_call = new_page.get("urls", {}).get("next")
print("Total collection is {} items".format(len(titles)))
return titles
def title_flatten(title_dict):
"""
Flattens the title dictionary, removing some details
:param title_dict: dict() of json title response from Discogs
    :return: dict() of flattened title details, including lowest market price
"""
time.sleep(2)
title_flat = dict()
bi = title_dict.get("basic_information")
title_id = title_dict.get("id")
discog_artist = bi.get("artists", {})[0]["name"]
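    # Discogs disambiguates duplicate artist names with a numeric suffix,
    # e.g. "Nirvana (2)"; strip the trailing " (N)" to recover the plain name.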
    artist = re.sub(r'.(\d+\))', '', discog_artist).rstrip()
title = bi.get("title")
# IDs
title_flat["id"] = title_id
title_flat["master_id"] = bi.get("master_id")
# Time
date_long = title_dict.get("date_added").split("T")
title_flat["date_added"] = date_long[0]
title_flat["time_added"] = date_long[1]
# Release Details
title_flat["artist"] = artist
title_flat["artist_id"] = bi.get("artists", {})[0]["id"]
title_flat["title"] = title
title_flat["year"] = bi.get("year")
title_flat["format"] = bi.get("formats", {})[0]["name"]
title_flat["format_info"] = bi.get("formats", {})[0].get("text", "N/A")
title_flat["genres"] = str(bi.get("genres"))[1:-1]
title_flat["styles"] = str(bi.get("styles"))[1:-1]
# Collection Notes
title_flat["notes"] = title_dict.get("notes", {})[0]["value"]
# Get Lowest Price
rs_url = requests.get('https://api.discogs.com/marketplace/stats/{}?curr_abbr=USD&token={}'.format(title_id,
os.getenv('DISCOGS_TOKEN')))
release_stats = rs_url.json()
if not release_stats.get("num_for_sale"):
print("{} by {} has no copies for sale".format(title, artist))
lowest_price = 0.0
else:
lowest_price = release_stats.get("lowest_price", {}).get("value", 0.0)
title_flat["lowest_price"] = lowest_price
return title_flat
def collection_lowest_price(title_list):
"""
Prints out total value (based on lowest available price in market) for your collection
    :param title_list: list of flattened title dicts
    :return: None
"""
missing = list()
prices = list()
for title in title_list:
price = title.get("lowest_price")
if not price:
missing.append(title.get("title"))
else:
prices.append(price)
tot_value = round(sum(prices), 2)
str_missing = str(missing)[1:-1]
print("Total value is {} but prices are missing from {}".format(tot_value, str_missing))
return
def list_to_xl(title_list):
"""
    Iterates flattened title dicts and writes them to an Excel workbook saved in the current folder
    :param title_list: list of flattened title dicts
    :return: None
"""
# Creating Workbook
wb = Workbook()
ws = wb.active
    # Setting Up Headers
    header_names = ['ID', 'Artist', 'Title', 'Year', 'Format', 'Format Info',
                    'Genres', 'Styles', 'Lowest Price', 'Date Added',
                    'Time Added', 'Artist ID', 'Master ID', 'Notes']
    for col, name in enumerate(header_names, start=1):
        cell = ws.cell(row=1, column=col, value=name)
        cell.font = Font(size=14, bold=True)
for row_num, row in enumerate(title_list, 2):
ws['A{}'.format(row_num)] = row.get("id")
ws['B{}'.format(row_num)] = row.get('artist')
ws['C{}'.format(row_num)] = row.get('title')
ws['D{}'.format(row_num)] = row.get('year')
ws['E{}'.format(row_num)] = row.get('format')
ws['F{}'.format(row_num)] = row.get('format_info')
ws['G{}'.format(row_num)] = row.get('genres')
ws['H{}'.format(row_num)] = row.get('styles')
ws['I{}'.format(row_num)] = row.get('lowest_price')
ws['J{}'.format(row_num)] = row.get('date_added')
ws['K{}'.format(row_num)] = row.get('time_added')
ws['L{}'.format(row_num)] = row.get('artist_id')
ws['M{}'.format(row_num)] = row.get('master_id')
ws['N{}'.format(row_num)] = row.get('notes')
# Saving
    today = datetime.today().strftime('%Y_%m_%d')
wb.save("discogs_collection{}.xlsx".format(today))
return
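# Minimal entry-point sketch tying the steps together (an assumption, not part
# of the original script; requires USER_NAME and DISCOGS_TOKEN to be set):
if __name__ == '__main__':
    all_titles = collection_grab()
    flat_titles = [title_flatten(t) for t in all_titles]
    collection_lowest_price(flat_titles)
    list_to_xl(flat_titles)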
|
[
"openpyxl.Workbook",
"datetime.datetime.today",
"openpyxl.styles.Font",
"time.sleep",
"time.time",
"requests.get",
"functools.wraps",
"re.sub",
"os.getenv"
] |
[((404, 425), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (419, 425), False, 'import functools\n'), ((1347, 1369), 'os.getenv', 'os.getenv', (['"""USER_NAME"""'], {}), "('USER_NAME')\n", (1356, 1369), False, 'import os\n'), ((1382, 1408), 'os.getenv', 'os.getenv', (['"""DISCOGS_TOKEN"""'], {}), "('DISCOGS_TOKEN')\n", (1391, 1408), False, 'import os\n'), ((1596, 1617), 'requests.get', 'requests.get', (['get_url'], {}), '(get_url)\n', (1608, 1617), False, 'import requests\n'), ((2626, 2639), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2636, 2639), False, 'import time\n'), ((5041, 5051), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (5049, 5051), False, 'from openpyxl import Workbook\n'), ((487, 498), 'time.time', 'time.time', ([], {}), '()\n', (496, 498), False, 'import time\n'), ((556, 567), 'time.time', 'time.time', ([], {}), '()\n', (565, 567), False, 'import time\n'), ((5820, 5844), 'openpyxl.styles.Font', 'Font', ([], {'size': '(14)', 'bold': '(True)'}), '(size=14, bold=True)\n', (5824, 5844), False, 'from openpyxl.styles import Font\n'), ((2811, 2850), 're.sub', 're.sub', (['""".(\\\\d+\\\\))"""', '""""""', 'discog_artist'], {}), "('.(\\\\d+\\\\))', '', discog_artist)\n", (2817, 2850), False, 'import re\n'), ((3883, 3909), 'os.getenv', 'os.getenv', (['"""DISCOGS_TOKEN"""'], {}), "('DISCOGS_TOKEN')\n", (3892, 3909), False, 'import os\n'), ((6698, 6714), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (6712, 6714), False, 'from datetime import datetime\n')]
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Array/quality_mosaic.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Array/quality_mosaic.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Array/quality_mosaic.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Array/quality_mosaic.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Array-based quality mosaic.
# Returns a mosaic built by sorting each stack of pixels by the first band
# in descending order, and taking the highest quality pixel.
# function qualityMosaic(bands) {
def qualityMosaic(bands):
# Convert to an array, and declare names for the axes and indices along the
# band axis.
array = bands.toArray()
imageAxis = 0
bandAxis = 1
qualityIndex = 0
valuesIndex = 1
# Slice the quality and values off the main array, and sort the values by the
# quality in descending order.
quality = array.arraySlice(bandAxis, qualityIndex, qualityIndex + 1)
values = array.arraySlice(bandAxis, valuesIndex)
valuesByQuality = values.arraySort(quality.multiply(-1))
# Get an image where each pixel is the array of band values where the quality
# band is greatest. Note that while the array is 2-D, the first axis is
# length one.
best = valuesByQuality.arraySlice(imageAxis, 0, 1)
# Project the best 2D array down to a single dimension, and convert it back
# to a regular scalar image by naming each position along the axis. Note we
# provide the original band names, but slice off the first band since the
# quality band is not part of the result. Also note to get at the band names,
# we have to do some kind of reduction, but it won't really calculate pixels
# if we only access the band names.
bandNames = bands.min().bandNames().slice(1)
return best.arrayProject([bandAxis]).arrayFlatten([bandNames])
# }
# Load the l7_l1t collection for the year 2000, and make sure the first band
# is our quality measure, in this case the normalized difference values.
l7 = ee.ImageCollection('LANDSAT/LE07/C01/T1') \
.filterDate('2000-01-01', '2001-01-01')
withNd = l7.map(lambda image: image.normalizedDifference(['B4', 'B3']).addBands(image))
# Build a mosaic using the NDVI of bands 4 and 3, essentially showing the
# greenest pixels from the year 2000.
greenest = qualityMosaic(withNd)
# Select out the color bands to visualize. An interesting artifact of this
# approach is that clouds are greener than water. So all the water is white.
rgb = greenest.select(['B3', 'B2', 'B1'])
Map.addLayer(rgb, {'gain': [1.4, 1.4, 1.1]}, 'Greenest')
Map.setCenter(-90.08789, 16.38339, 11)
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
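# When run as a plain script (outside a notebook), the bare `Map` above does
# not render anything; the folium map can instead be written to HTML, e.g.:
# Map.save('quality_mosaic.html')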
|
[
"ee.ImageCollection",
"ee.Initialize",
"folium.Map"
] |
[((2130, 2145), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (2143, 2145), False, 'import ee\n'), ((2503, 2548), 'folium.Map', 'folium.Map', ([], {'location': '[40, -100]', 'zoom_start': '(4)'}), '(location=[40, -100], zoom_start=4)\n', (2513, 2548), False, 'import folium\n'), ((4265, 4306), 'ee.ImageCollection', 'ee.ImageCollection', (['"""LANDSAT/LE07/C01/T1"""'], {}), "('LANDSAT/LE07/C01/T1')\n", (4283, 4306), False, 'import ee\n')]
|
# -*- coding: utf-8 -*-
import io
import os
import shutil
import itertools
import gzip
import warnings
import tempfile
import atexit
import zarr
import h5py
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
from pytest import approx
from allel.io.vcf_read import (iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5,
vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv,
vcf_to_recarray, read_vcf_headers)
from allel.test.tools import compare_arrays
# needed for PY2/PY3 consistent behaviour
warnings.resetwarnings()
warnings.simplefilter('always')
# setup temp dir for testing
tempdir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tempdir)
def fixture_path(fn):
return os.path.join(os.path.dirname(__file__), os.pardir, 'data', fn)
def test_read_vcf_chunks():
vcf_path = fixture_path('sample.vcf')
fields, samples, headers, it = iter_vcf_chunks(vcf_path, fields='*', chunk_length=4,
buffer_size=100)
# check headers
assert 'q10' in headers.filters
assert 's50' in headers.filters
assert 'AA' in headers.infos
assert 'AC' in headers.infos
assert 'AF' in headers.infos
assert 'AN' in headers.infos
assert 'DB' in headers.infos
assert 'DP' in headers.infos
assert 'H2' in headers.infos
assert 'NS' in headers.infos
assert 'DP' in headers.formats
assert 'GQ' in headers.formats
assert 'GT' in headers.formats
assert 'HQ' in headers.formats
assert ['NA00001', 'NA00002', 'NA00003'] == headers.samples
assert ['NA00001', 'NA00002', 'NA00003'] == samples.tolist()
assert '1' == headers.infos['AA']['Number']
assert 'String' == headers.infos['AA']['Type']
assert 'Ancestral Allele' == headers.infos['AA']['Description']
assert '2' == headers.formats['HQ']['Number']
assert 'Integer' == headers.formats['HQ']['Type']
assert 'Haplotype Quality' == headers.formats['HQ']['Description']
# check chunk lengths
chunks = [chunk for chunk, _, _, _ in it]
assert 3 == len(chunks)
assert 4 == chunks[0]['variants/POS'].shape[0]
assert 4 == chunks[1]['variants/POS'].shape[0]
assert 1 == chunks[2]['variants/POS'].shape[0]
# check chunk contents
expected_fields = [
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/altlen',
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
'calldata/DP',
]
for chunk in chunks:
assert sorted(expected_fields) == sorted(chunk.keys())
def test_fields_all():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='*')
expected_fields = [
'samples',
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/altlen',
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
'calldata/DP',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_exclude():
vcf_path = fixture_path('sample.vcf')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
callset = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
expected_fields = [
'samples',
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_rename():
vcf_path = fixture_path('sample.vcf')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
callset = read_vcf(vcf_path, fields='*', rename_fields=rename)
print(sorted(callset.keys()))
expected_fields = [
'samples',
# fixed fields
'variants/chromosome',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'spam/eggs',
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'foo/bar',
'calldata/DP',
'calldata/GQ',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_rename_clash():
vcf_path = fixture_path('sample.vcf')
# rename two fields to the same path
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# rename two fields to the same path (case insensitive)
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'SPAM/EGGS'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'SPAM'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam',
'calldata/GT': 'SPAM/EGGS'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
def test_fields_default():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path)
expected_fields = [
'samples',
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'calldata/GT',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_all_variants():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='variants/*')
expected_fields = [
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/altlen',
'variants/numalt',
'variants/is_snp',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_info():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='INFO')
expected_fields = [
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_filter():
vcf_path = fixture_path('sample.vcf')
callset1 = read_vcf(vcf_path, fields='FILTER')
expected_fields = [
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
]
assert sorted(expected_fields) == sorted(callset1.keys())
# this has explicit PASS definition in header, shouldn't cause problems
vcf_path = fixture_path('test16.vcf')
callset2 = read_vcf(vcf_path, fields='FILTER')
expected_fields = [
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
]
assert sorted(expected_fields) == sorted(callset2.keys())
for k in callset1.keys():
assert_array_equal(callset1[k], callset2[k])
def test_fields_all_calldata():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='calldata/*')
expected_fields = [
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
'calldata/DP',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_selected():
vcf_path = fixture_path('sample.vcf')
# without samples
callset = read_vcf(vcf_path,
fields=['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT',
'calldata/HQ', 'FILTER_q10', 'variants/numalt'])
expected_fields = [
'variants/CHROM',
'variants/POS',
'variants/FILTER_q10',
'variants/AC',
'variants/AF',
'variants/numalt',
# FORMAT fields
'calldata/GT',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
# with samples
callset = read_vcf(vcf_path,
fields=['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT',
'calldata/HQ', 'FILTER_q10', 'variants/numalt', 'samples'],
chunk_length=4, buffer_size=100)
expected_fields = [
'samples',
'variants/CHROM',
'variants/POS',
'variants/FILTER_q10',
'variants/AC',
'variants/AF',
'variants/numalt',
# FORMAT fields
'calldata/GT',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_dups():
vcf_path = fixture_path('sample.vcf')
# silently collapse dups
callset = read_vcf(vcf_path,
fields=['CHROM', 'variants/CHROM', 'variants/AF', 'variants/AF',
'numalt', 'variants/numalt'])
expected_fields = [
'variants/CHROM',
'variants/AF',
'variants/numalt'
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_dups_case_insensitive():
vcf_path = fixture_path('altlen.vcf')
# allow case-insensitive dups here (but not in vcf_to_zarr)
callset = read_vcf(vcf_path, fields=['ALTLEN', 'altlen'])
expected_fields = [
'variants/ALTLEN',
'variants/altlen',
]
assert sorted(expected_fields) == sorted(callset.keys())
def _test_read_vcf_content(vcf, chunk_length, buffer_size):
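    # Shared assertions used by several tests below: `vcf` may be a file path
    # or a factory returning an open file-like object; the content checks run
    # twice, first with object-dtype strings, then with fixed-width S dtype.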
# object dtype for strings
if isinstance(vcf, str):
input_file = vcf
close = False
else:
input_file = vcf()
close = True
callset = read_vcf(input_file,
fields='*',
chunk_length=chunk_length,
buffer_size=buffer_size,
types={'calldata/DP': 'object'})
if close:
input_file.close()
# samples
assert (3,) == callset['samples'].shape
assert 'O' == callset['samples'].dtype.kind
assert ['NA00001', 'NA00002', 'NA00003'] == callset['samples'].tolist()
# fixed fields
assert (9,) == callset['variants/CHROM'].shape
assert np.dtype(object) == callset['variants/CHROM'].dtype
assert '19' == callset['variants/CHROM'][0]
assert (9,) == callset['variants/POS'].shape
assert 111 == callset['variants/POS'][0]
assert (9,) == callset['variants/ID'].shape
assert np.dtype(object) == callset['variants/ID'].dtype
assert 'rs6054257' == callset['variants/ID'][2]
assert (9,) == callset['variants/REF'].shape
assert np.dtype(object) == callset['variants/REF'].dtype
assert 'A' == callset['variants/REF'][0]
assert (9, 3) == callset['variants/ALT'].shape
assert np.dtype(object) == callset['variants/ALT'].dtype
assert 'ATG' == callset['variants/ALT'][8, 1]
assert (9,) == callset['variants/QUAL'].shape
assert 10.0 == callset['variants/QUAL'][1]
assert (9,) == callset['variants/FILTER_PASS'].shape
assert callset['variants/FILTER_PASS'][2]
assert not callset['variants/FILTER_PASS'][3]
assert (9,) == callset['variants/FILTER_q10'].shape
assert callset['variants/FILTER_q10'][3]
# INFO fields
assert 3 == callset['variants/NS'][2]
assert .5 == callset['variants/AF'][2, 0]
assert callset['variants/DB'][2]
assert (3, 1, -1) == tuple(callset['variants/AC'][6])
# test calldata content
assert (9, 3, 2) == callset['calldata/GT'].shape
assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
assert (9, 3, 2) == callset['calldata/HQ'].shape
assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
assert (9, 3) == callset['calldata/DP'].shape
assert np.dtype(object) == callset['calldata/DP'].dtype
assert ('4', '2', '3') == tuple(callset['calldata/DP'][6])
# String (S) dtype
if isinstance(vcf, str):
input_file = vcf
close = False
else:
input_file = vcf()
close = True
types = {'CHROM': 'S12', 'ID': 'S20', 'REF': 'S20', 'ALT': 'S20', 'calldata/DP': 'S3',
'samples': 'S20'}
callset = read_vcf(input_file, fields='*', chunk_length=chunk_length,
buffer_size=buffer_size, types=types)
if close:
input_file.close()
# samples
assert (3,) == callset['samples'].shape
assert 'S' == callset['samples'].dtype.kind
assert [b'NA00001', b'NA00002', b'NA00003'] == callset['samples'].tolist()
# fixed fields
assert (9,) == callset['variants/CHROM'].shape
assert 'S' == callset['variants/CHROM'].dtype.kind
assert b'19' == callset['variants/CHROM'][0]
assert (9,) == callset['variants/POS'].shape
assert 111 == callset['variants/POS'][0]
assert (9,) == callset['variants/ID'].shape
assert 'S' == callset['variants/ID'].dtype.kind
assert b'rs6054257' == callset['variants/ID'][2]
assert (9,) == callset['variants/REF'].shape
assert b'A' == callset['variants/REF'][0]
assert 'S' == callset['variants/REF'].dtype.kind
assert (9, 3) == callset['variants/ALT'].shape
assert b'ATG' == callset['variants/ALT'][8, 1]
assert 'S' == callset['variants/ALT'].dtype.kind
assert (9,) == callset['variants/QUAL'].shape
assert 10.0 == callset['variants/QUAL'][1]
assert (9,) == callset['variants/FILTER_PASS'].shape
assert callset['variants/FILTER_PASS'][2]
assert not callset['variants/FILTER_PASS'][3]
assert (9,) == callset['variants/FILTER_q10'].shape
assert callset['variants/FILTER_q10'][3]
# INFO fields
assert 3 == callset['variants/NS'][2]
assert .5 == callset['variants/AF'][2, 0]
assert callset['variants/DB'][2]
assert (3, 1, -1) == tuple(callset['variants/AC'][6])
# test calldata content
assert (9, 3, 2) == callset['calldata/GT'].shape
assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
assert (9, 3, 2) == callset['calldata/HQ'].shape
assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
assert (9, 3) == callset['calldata/DP'].shape
assert 'S' == callset['calldata/DP'].dtype.kind
assert (b'4', b'2', b'3') == tuple(callset['calldata/DP'][6])
def test_inputs():
vcf_path = fixture_path('sample.vcf')
with open(vcf_path, mode='rb') as f:
data = f.read(-1)
inputs = (vcf_path,
vcf_path + '.gz',
lambda: open(vcf_path, mode='rb'),
lambda: gzip.open(vcf_path + '.gz', mode='rb'),
lambda: io.BytesIO(data),
lambda: io.BytesIO(data.replace(b'\n', b'\r')),
lambda: io.BytesIO(data.replace(b'\n', b'\r\n')))
chunk_length = 3
buffer_size = 10
for i in inputs:
_test_read_vcf_content(i, chunk_length, buffer_size)
def test_chunk_lengths():
vcf_path = fixture_path('sample.vcf')
chunk_lengths = 1, 2, 3, 5, 10, 20
buffer_size = 10
for chunk_length in chunk_lengths:
_test_read_vcf_content(vcf_path, chunk_length, buffer_size)
def test_buffer_sizes():
vcf_path = fixture_path('sample.vcf')
chunk_length = 3
buffer_sizes = 1, 2, 4, 8, 16, 32, 64, 128, 256, 512
for buffer_size in buffer_sizes:
_test_read_vcf_content(vcf_path, chunk_length, buffer_size)
def test_utf8():
vcf_path = fixture_path('sample.utf8.vcf')
callset = read_vcf(vcf_path, fields='*')
# samples
assert (3,) == callset['samples'].shape
assert 'O' == callset['samples'].dtype.kind
assert [u'NA00001', u'Γεια σου κόσμε!', u'NA00003'] == callset['samples'].tolist()
# CHROM
assert (9,) == callset['variants/CHROM'].shape
assert np.dtype(object) == callset['variants/CHROM'].dtype
assert '19' == callset['variants/CHROM'][0]
assert u'Njatjeta Botë!' == callset['variants/CHROM'][-2]
# POS
assert (9,) == callset['variants/POS'].shape
assert 111 == callset['variants/POS'][0]
# ID
assert (9,) == callset['variants/ID'].shape
assert np.dtype(object) == callset['variants/ID'].dtype
assert 'foo' == callset['variants/ID'][0]
assert u'¡Hola mundo!' == callset['variants/ID'][1]
# REF
assert (9,) == callset['variants/REF'].shape
assert np.dtype(object) == callset['variants/REF'].dtype
assert 'A' == callset['variants/REF'][0]
# ALT
assert (9, 3) == callset['variants/ALT'].shape
assert np.dtype(object) == callset['variants/ALT'].dtype
assert 'ATG' == callset['variants/ALT'][8, 1]
# QUAL
assert (9,) == callset['variants/QUAL'].shape
assert 10.0 == callset['variants/QUAL'][1]
# FILTER
assert (9,) == callset['variants/FILTER_PASS'].shape
assert callset['variants/FILTER_PASS'][2]
assert not callset['variants/FILTER_PASS'][5]
assert (9,) == callset[u'variants/FILTER_Helló_világ!'].shape
assert not callset[u'variants/FILTER_Helló_világ!'][0]
assert callset[u'variants/FILTER_Helló_világ!'][5]
# INFO fields
assert u'foo' == callset['variants/TEXT'][0]
assert u'こんにちは世界' == callset['variants/TEXT'][4]
# calldata
assert (9, 3, 2) == callset['calldata/GT'].shape
assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
assert (9, 3, 2) == callset['calldata/HQ'].shape
assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
assert (9, 3) == callset['calldata/DP'].shape
assert (4, 2, 3) == tuple(callset['calldata/DP'][6])
assert (u'foo', u'Hej Världen!', u'.') == tuple(callset['calldata/GTXT'][0])
def test_truncation_chrom():
input_data = (b"#CHROM\n"
b"2L\n"
b"2R\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['CHROM', 'samples'],
types={'CHROM': string_type})
# check fields
expected_fields = ['variants/CHROM']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/CHROM']
assert 2 == len(a)
if string_type == 'S10':
assert b'2L' == a[0]
assert b'2R' == a[1]
else:
assert '2L' == a[0]
assert '2R' == a[1]
def test_truncation_pos():
input_data = (b"#CHROM\tPOS\n"
b"2L\t12\n"
b"2R\t34\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['POS', 'samples'])
# check fields
expected_fields = ['variants/POS']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/POS']
assert 2 == len(a)
assert 12 == a[0]
assert 34 == a[1]
def test_truncation_id():
input_data = (b"#CHROM\tPOS\tID\n"
b"2L\t12\tfoo\n"
b"2R\t34\tbar\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['ID', 'samples'],
types={'ID': string_type})
# check fields
expected_fields = ['variants/ID']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/ID']
assert 2 == len(a)
if string_type == 'S10':
assert b'foo' == a[0]
assert b'bar' == a[1]
else:
assert 'foo' == a[0]
assert 'bar' == a[1]
def test_truncation_ref():
input_data = (b"#CHROM\tPOS\tID\tREF\n"
b"2L\t12\tfoo\tA\n"
b"2R\t34\tbar\tC\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['REF', 'samples'],
types={'REF': string_type})
# check fields
expected_fields = ['variants/REF']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/REF']
assert 2 == len(a)
if string_type == 'S10':
assert b'A' == a[0]
assert b'C' == a[1]
else:
assert 'A' == a[0]
assert 'C' == a[1]
def test_truncation_alt():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\n"
b"2L\t12\tfoo\tA\tC\n"
b"2R\t34\tbar\tC\tG\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['ALT', 'samples'], numbers=dict(ALT=1),
types={'ALT': string_type})
# check fields
expected_fields = ['variants/ALT']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/ALT']
assert 2 == len(a)
if string_type == 'S10':
assert b'C' == a[0]
assert b'G' == a[1]
else:
assert 'C' == a[0]
assert 'G' == a[1]
def test_truncation_qual():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\n"
b"2L\t12\tfoo\tA\tC\t1.2\n"
b"2R\t34\tbar\tC\tG\t3.4\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['QUAL', 'samples'])
# check fields
expected_fields = ['variants/QUAL']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/QUAL']
assert 2 == len(a)
assert approx(1.2) == a[0]
assert approx(3.4) == a[1]
def test_truncation_filter():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['FILTER_PASS', 'FILTER_q10', 'FILTER_s50', 'samples'])
# check fields
expected_fields = ['variants/FILTER_PASS', 'variants/FILTER_q10',
'variants/FILTER_s50']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/FILTER_PASS']
assert 3 == len(a)
assert [False, True, False] == a.tolist()
a = callset['variants/FILTER_q10']
assert 3 == len(a)
assert [False, False, True] == a.tolist()
a = callset['variants/FILTER_s50']
assert 3 == len(a)
assert [False, False, True] == a.tolist()
def test_truncation_info():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['foo', 'bar', 'samples'],
types=dict(foo='Integer', bar='Float'))
# check fields
expected_fields = ['variants/foo', 'variants/bar']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/foo']
assert 3 == len(a)
assert 42 == a[0]
assert -1 == a[1]
assert -1 == a[2]
a = callset['variants/bar']
assert 3 == len(a)
assert approx(1.2) == a[0]
assert np.isnan(a[1])
assert np.isnan(a[2])
def test_truncation_format():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\tGT:GQ\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\t.\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\t\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['foo', 'bar', 'samples'],
types=dict(foo='Integer', bar='Float'))
# check fields
expected_fields = ['variants/foo', 'variants/bar']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/foo']
assert 3 == len(a)
assert 42 == a[0]
assert -1 == a[1]
assert -1 == a[2]
a = callset['variants/bar']
assert 3 == len(a)
assert approx(1.2) == a[0]
assert np.isnan(a[1])
assert np.isnan(a[2])
def test_truncation_calldata():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\tGT:GQ\t0/1:12\t1/2:34\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\tGT\t./.\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['calldata/GT', 'calldata/GQ', 'samples'],
types={'calldata/GT': 'i1', 'calldata/GQ': 'i2'})
# check fields
expected_fields = ['calldata/GT', 'calldata/GQ', 'samples']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
assert 2 == len(callset['samples'])
assert ['S2', 'S1'] == callset['samples'].tolist()
a = callset['calldata/GT']
assert (3, 2, 2) == a.shape
assert (0, 1) == tuple(a[0, 0])
assert (1, 2) == tuple(a[0, 1])
assert (-1, -1) == tuple(a[1, 0])
assert (-1, -1) == tuple(a[1, 1])
assert (-1, -1) == tuple(a[2, 0])
assert (-1, -1) == tuple(a[2, 1])
a = callset['calldata/GQ']
assert (3, 2) == a.shape
assert 12 == a[0, 0]
assert 34 == a[0, 1]
assert -1 == a[1, 0]
assert -1 == a[1, 1]
assert -1 == a[2, 0]
assert -1 == a[2, 1]
def test_info_types():
vcf_path = fixture_path('sample.vcf')
for dtype in ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8', 'S10',
'object'):
callset = read_vcf(vcf_path, fields=['variants/DP', 'variants/AC'],
types={'variants/DP': dtype, 'variants/AC': dtype},
numbers={'variants/AC': 3})
assert np.dtype(dtype) == callset['variants/DP'].dtype
assert (9,) == callset['variants/DP'].shape
assert (9, 3) == callset['variants/AC'].shape
def test_vcf_types():
input_data = (
b'##INFO=<ID=foo,Number=1,Type=String,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=bar\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype(object) == callset['variants/foo'].dtype
input_data = (
b'##INFO=<ID=foo,Number=1,Type=Integer,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=42\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype('i4') == callset['variants/foo'].dtype
input_data = (
b'##INFO=<ID=foo,Number=1,Type=Float,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=42.0\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype('f4') == callset['variants/foo'].dtype
input_data = (
b'##INFO=<ID=foo,Number=1,Type=Character,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=b\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype('S1') == callset['variants/foo'].dtype
def test_genotype_types():
vcf_path = fixture_path('sample.vcf')
for dtype in 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'S3', 'object':
callset = read_vcf(vcf_path, fields=['GT'], types={'GT': dtype},
numbers={'GT': 2})
assert np.dtype(dtype) == callset['calldata/GT'].dtype
assert (9, 3, 2) == callset['calldata/GT'].shape
# non-GT field with genotype dtype
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\tS3\n"
b"2L\t12\t.\tA\t.\t.\t.\t.\tCustomGT:CustomGQ\t0/0/0:11\t0/1/2:12\t././.:.\n"
b"2L\t34\t.\tC\tT\t.\t.\t.\tCustomGT:CustomGQ\t0/1/2:22\t3/3/.:33\t.\n"
b"3R\t45\t.\tG\tA,T\t.\t.\t.\tCustomGT:CustomGQ\t0/1:.\t5:12\t\n"
)
callset = read_vcf(io.BytesIO(input_data),
fields=['calldata/CustomGT', 'calldata/CustomGQ'],
numbers={'calldata/CustomGT': 3, 'calldata/CustomGQ': 1},
types={'calldata/CustomGT': 'genotype/i1',
'calldata/CustomGQ': 'i2'})
e = np.array([[[0, 0, 0], [0, 1, 2], [-1, -1, -1]],
[[0, 1, 2], [3, 3, -1], [-1, -1, -1]],
[[0, 1, -1], [5, -1, -1], [-1, -1, -1]]], dtype='i1')
a = callset['calldata/CustomGT']
assert_array_equal(e, a)
assert e.dtype == a.dtype
e = np.array([[11, 12, -1],
[22, 33, -1],
[-1, 12, -1]], dtype='i2')
a = callset['calldata/CustomGQ']
assert_array_equal(e, a)
assert e.dtype == a.dtype
def test_calldata_types():
vcf_path = fixture_path('sample.vcf')
for dtype in ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8', 'S10',
'object'):
callset = read_vcf(vcf_path, fields=['HQ'], types={'HQ': dtype},
numbers={'HQ': 2})
assert np.dtype(dtype) == callset['calldata/HQ'].dtype
assert (9, 3, 2) == callset['calldata/HQ'].shape
def test_genotype_ploidy():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=1))
gt = callset['calldata/GT']
assert (9, 3) == gt.shape
assert (0, 0, 0) == tuple(gt[8, :])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2))
gt = callset['calldata/GT']
assert (9, 3, 2) == gt.shape
assert (0, -1) == tuple(gt[8, 0])
assert (0, 1) == tuple(gt[8, 1])
assert (0, 2) == tuple(gt[8, 2])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=3))
gt = callset['calldata/GT']
assert (9, 3, 3) == gt.shape
assert (0, -1, -1) == tuple(gt[8, 0])
assert (0, 1, -1) == tuple(gt[8, 1])
assert (0, 2, -1) == tuple(gt[8, 2])
def test_fills_info():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1))
a = callset['variants/AN']
assert (9,) == a.shape
assert -1 == a[0]
assert -1 == a[1]
assert -1 == a[2]
callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1), fills=dict(AN=-2))
a = callset['variants/AN']
assert (9,) == a.shape
assert -2 == a[0]
assert -2 == a[1]
assert -2 == a[2]
callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1), fills=dict(AN=-1))
a = callset['variants/AN']
assert (9,) == a.shape
assert -1 == a[0]
assert -1 == a[1]
assert -1 == a[2]
def test_fills_genotype():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2))
gt = callset['calldata/GT']
assert (9, 3, 2) == gt.shape
assert (0, -1) == tuple(gt[8, 0])
assert (0, 1) == tuple(gt[8, 1])
assert (0, 2) == tuple(gt[8, 2])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2), fills=dict(GT=-2))
gt = callset['calldata/GT']
assert (9, 3, 2) == gt.shape
assert (0, -2) == tuple(gt[8, 0])
assert (0, 1) == tuple(gt[8, 1])
assert (0, 2) == tuple(gt[8, 2])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=3), fills=dict(GT=-1))
gt = callset['calldata/GT']
assert (9, 3, 3) == gt.shape
assert (0, -1, -1) == tuple(gt[8, 0])
assert (0, 1, -1) == tuple(gt[8, 1])
assert (0, 2, -1) == tuple(gt[8, 2])
def test_fills_calldata():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (-1, -1) == tuple(a[7, 0])
assert (-1, -1) == tuple(a[8, 0])
callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2), fills=dict(HQ=-2))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (-2, -2) == tuple(a[7, 0])
assert (-2, -2) == tuple(a[8, 0])
callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2), fills=dict(HQ=-1))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (-1, -1) == tuple(a[7, 0])
assert (-1, -1) == tuple(a[8, 0])
def test_numbers():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=1))
a = callset['variants/ALT']
assert (9,) == a.shape
assert 'A' == a[8]
callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=2),
types=dict(ALT='S4'))
a = callset['variants/ALT']
assert (9, 2) == a.shape
assert b'A' == a[8, 0]
assert b'ATG' == a[8, 1]
callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=3),
types=dict(ALT='S4'))
a = callset['variants/ALT']
assert (9, 3) == a.shape
assert b'A' == a[8, 0]
assert b'ATG' == a[8, 1]
assert b'C' == a[8, 2]
callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=0))
a = callset['variants/AC']
assert (9,) == a.shape
assert not a[0]
assert a[6]
callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=1))
a = callset['variants/AC']
assert (9,) == a.shape
assert -1 == a[0]
assert 3 == a[6]
callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=2))
a = callset['variants/AC']
assert (9, 2) == a.shape
assert -1 == a[0, 0]
assert -1 == a[0, 1]
assert 3 == a[6, 0]
assert 1 == a[6, 1]
callset = read_vcf(vcf_path, fields='AF', numbers=dict(AF=1))
a = callset['variants/AF']
assert (9,) == a.shape
assert 0.5 == a[2]
assert approx(0.333) == a[4]
callset = read_vcf(vcf_path, fields='AF', numbers=dict(AF=2))
a = callset['variants/AF']
assert (9, 2) == a.shape
assert 0.5 == a[2, 0]
assert np.isnan(a[2, 1])
assert approx(0.333) == a[4, 0]
assert approx(0.667) == a[4, 1]
callset = read_vcf(vcf_path, fields=['HQ'], numbers=dict(HQ=1))
a = callset['calldata/HQ']
assert (9, 3) == a.shape
assert 10 == a[0, 0]
assert 51 == a[2, 0]
assert -1 == a[6, 0]
callset = read_vcf(vcf_path, fields=['HQ'], numbers=dict(HQ=2))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (51, 51) == tuple(a[2, 0])
assert (-1, -1) == tuple(a[6, 0])
def test_alt_number():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=2)
a = callset['variants/ALT']
assert (9, 2) == a.shape
a = callset['variants/AC']
assert (9, 2) == a.shape
a = callset['variants/AF']
assert (9, 2) == a.shape
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=1)
a = callset['variants/ALT']
assert (9,) == a.shape
a = callset['variants/AC']
assert (9,) == a.shape
a = callset['variants/AF']
assert (9,) == a.shape
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=5)
a = callset['variants/ALT']
assert (9, 5) == a.shape
a = callset['variants/AC']
assert (9, 5) == a.shape
a = callset['variants/AF']
assert (9, 5) == a.shape
# can override
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'],
alt_number=5, numbers={'ALT': 2, 'AC': 4})
a = callset['variants/ALT']
assert (9, 2) == a.shape
a = callset['variants/AC']
assert (9, 4) == a.shape
a = callset['variants/AF']
assert (9, 5) == a.shape
def test_read_region():
for vcf_path in (fixture_path('sample.vcf.gz'),
fixture_path('sample.vcf')):
for tabix in 'tabix', None, 'foobar':
region = '19'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '19')
assert 2 == len(pos)
assert_array_equal([111, 112], pos)
region = '20'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 6 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 6 == len(pos)
assert_array_equal([14370, 17330, 1110696, 1230237, 1234567, 1235237], pos)
region = 'X'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 1 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == 'X')
assert 1 == len(pos)
assert_array_equal([10], pos)
region = 'Y'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
assert callset is None
region = '20:1-100000'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([14370, 17330], pos)
region = '20:1000000-1233000'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1110696, 1230237], pos)
region = '20:1233000-2000000'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1234567, 1235237], pos)
def test_read_region_unsorted():
# Test behaviour when data are not sorted by chromosome or position and tabix is
# not available.
fn = fixture_path('unsorted.vcf')
tabix = None
region = '19'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '19')
assert 2 == len(pos)
assert_array_equal([111, 112], pos)
region = '20'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 6 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 6 == len(pos)
assert_array_equal([14370, 1230237, 1234567, 1235237, 17330, 1110696], pos)
region = 'X'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 1 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == 'X')
assert 1 == len(pos)
assert_array_equal([10], pos)
region = 'Y'
callset = read_vcf(fn, region=region, tabix=tabix)
assert callset is None
region = '20:1-100000'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([14370, 17330], pos)
region = '20:1000000-1233000'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1230237, 1110696], pos)
region = '20:1233000-2000000'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1234567, 1235237], pos)
def test_read_samples():
vcf_path = fixture_path('sample.vcf')
for samples in ['NA00001', 'NA00003'], [0, 2], ['NA00003', 'NA00001'], [2, 'NA00001']:
callset = read_vcf(vcf_path, fields=['samples', 'GT'], samples=samples)
assert ['NA00001', 'NA00003'] == callset['samples'].astype('U').tolist()
gt = callset['calldata/GT']
assert (9, 2, 2) == gt.shape
assert (0, 0) == tuple(gt[2, 0])
assert (1, 1) == tuple(gt[2, 1])
assert (1, 2) == tuple(gt[4, 0])
assert (2, 2) == tuple(gt[4, 1])
for samples in ['NA00002'], [1]:
callset = read_vcf(vcf_path, fields=['samples', 'GT'], samples=samples)
assert ['NA00002'] == callset['samples'].astype('U').tolist()
gt = callset['calldata/GT']
assert (9, 1, 2) == gt.shape
assert (1, 0) == tuple(gt[2, 0])
assert (2, 1) == tuple(gt[4, 0])
def test_read_empty():
vcf_path = fixture_path('empty.vcf')
callset = read_vcf(vcf_path)
assert callset is None
def test_ann():
vcf_path = fixture_path('ann.vcf')
# all ANN fields
callset = read_vcf(vcf_path, fields=['ANN'], transformers=[ANNTransformer()])
expect_keys = sorted(['variants/ANN_Allele',
'variants/ANN_Annotation',
'variants/ANN_Annotation_Impact',
'variants/ANN_Gene_Name',
'variants/ANN_Gene_ID',
'variants/ANN_Feature_Type',
'variants/ANN_Feature_ID',
'variants/ANN_Transcript_BioType',
'variants/ANN_Rank',
'variants/ANN_HGVS_c',
'variants/ANN_HGVS_p',
'variants/ANN_cDNA_pos',
'variants/ANN_cDNA_length',
'variants/ANN_CDS_pos',
'variants/ANN_CDS_length',
'variants/ANN_AA_pos',
'variants/ANN_AA_length',
'variants/ANN_Distance'])
assert expect_keys == sorted(callset.keys())
a = callset['variants/ANN_Allele']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['T', '', 'T'], a)
a = callset['variants/ANN_Annotation']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['intergenic_region', '', 'missense_variant'], a)
a = callset['variants/ANN_Annotation_Impact']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['MODIFIER', '', 'MODERATE'], a)
a = callset['variants/ANN_Gene_Name']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['AGAP004677', '', 'AGAP005273'], a)
a = callset['variants/ANN_Gene_ID']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['AGAP004677', '', 'AGAP005273'], a)
a = callset['variants/ANN_Feature_Type']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['intergenic_region', '', 'transcript'], a)
a = callset['variants/ANN_Feature_ID']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['AGAP004677', '', 'AGAP005273-RA'], a)
a = callset['variants/ANN_Transcript_BioType']
assert np.dtype('object') == a.dtype
assert (3,) == a.shape
assert_array_equal(['', '', 'VectorBase'], a)
assert np.dtype('object') == a.dtype
a = callset['variants/ANN_Rank']
assert (3,) == a.shape
assert np.dtype('int8') == a.dtype
assert_array_equal([-1, -1, 1], a[:])
a = callset['variants/ANN_HGVS_c']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['', '', '17A>T'], a)
a = callset['variants/ANN_HGVS_p']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['', '', 'Asp6Val'], a)
a = callset['variants/ANN_cDNA_pos']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 17], a)
a = callset['variants/ANN_cDNA_length']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 4788], a)
a = callset['variants/ANN_CDS_pos']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 17], a)
a = callset['variants/ANN_CDS_length']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 4788], a)
a = callset['variants/ANN_AA_pos']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 6], a)
a = callset['variants/ANN_AA_length']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 1596], a)
a = callset['variants/ANN_Distance']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([3000, -1, -1], a)
# numbers=2
callset = read_vcf(vcf_path, fields=['ANN'], numbers={'ANN': 2},
transformers=[ANNTransformer()])
a = callset['variants/ANN_Allele']
assert (3, 2) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['T', ''], a[0])
assert_array_equal(['', ''], a[1])
assert_array_equal(['T', 'G'], a[2])
a = callset['variants/ANN_cDNA_pos']
assert (3, 2) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 17], a[:, 0])
assert_array_equal([-1, -1, 12], a[:, 1])
a = callset['variants/ANN_cDNA_length']
assert (3, 2) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 4788], a[:, 0])
assert_array_equal([-1, -1, 4768], a[:, 1])
# choose fields and types
transformers = [
ANNTransformer(
fields=['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos'],
types={'Allele': 'S12',
'ANN_HGVS_c': 'S20',
'variants/ANN_cDNA_pos': 'i8'})
]
callset = read_vcf(vcf_path, fields=['ANN'], transformers=transformers)
assert (sorted(['variants/ANN_Allele', 'variants/ANN_HGVS_c',
'variants/ANN_cDNA_pos']) == sorted(callset.keys()))
a = callset['variants/ANN_Allele']
assert (3,) == a.shape
assert np.dtype('S12') == a.dtype
assert_array_equal([b'T', b'', b'T'], a)
a = callset['variants/ANN_HGVS_c']
assert (3,) == a.shape
assert np.dtype('S20') == a.dtype
assert_array_equal([b'', b'', b'17A>T'], a)
a = callset['variants/ANN_cDNA_pos']
assert (3,) == a.shape
assert np.dtype('i8') == a.dtype
assert_array_equal([-1, -1, 17], a)
def test_format_inconsistencies():
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\t.\tGT:GQ\t0/1:12\t1/2\t2/3:34:67,89\t\n"
b"2R\t34\tbar\tC\tG\t3.4\t.\t.\tGT\t./.\t\t3/3:45\t1/2:11:55,67\n"
)
input_file = io.BytesIO(input_data)
callset = read_vcf(input_file, fields=['calldata/GT', 'calldata/GQ'])
gt = callset['calldata/GT']
assert (2, 4, 2) == gt.shape
assert_array_equal([[0, 1], [1, 2], [2, 3], [-1, -1]], gt[0])
assert_array_equal([[-1, -1], [-1, -1], [3, 3], [1, 2]], gt[1])
gq = callset['calldata/GQ']
assert (2, 4) == gq.shape
assert_array_equal([12, -1, 34, -1], gq[0])
assert_array_equal([-1, -1, -1, -1], gq[1])
# noinspection PyTypeChecker
def test_warnings():
warnings.resetwarnings()
warnings.simplefilter('error')
# empty CHROM
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"\t12\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# empty POS
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy POS
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\taaa\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy POS
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12aaa\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy QUAL
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\taaa\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy QUAL
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t1.2aaa\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# empty QUAL - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t\t.\t.\t.\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# empty FILTER - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t\t.\t.\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# empty INFO - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t\t.\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# empty FORMAT - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\t\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# dodgy calldata (integer)
input_data = (
b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tGT\t0/1\taa/bb\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
# dodgy calldata (integer)
input_data = (
b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tGT\t0/1\t12aa/22\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
# dodgy calldata (float)
input_data = (
b'##FORMAT=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tMQ\t.\t12.3\taaa\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/MQ'])
# dodgy calldata (float)
input_data = (
b'##FORMAT=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tMQ\t.\t12.3\t34.5aaa\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/MQ'])
# dodgy INFO (missing key)
input_data = (
b'##INFO=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\tfoo=qux;MQ=12\t.\t.\t.\t.\t.\n"
b"2L\t34\t.\t.\t.\t.\t.\tfoo=bar;=34;baz\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['variants/MQ'])
# INFO not declared in header
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['variants/foo'])
# FORMAT not declared in header
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GQ'])
warnings.resetwarnings()
warnings.simplefilter('always')
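# Fields missing from the VCF header default to string typing and can be
# overridden per-field via the types argument.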
def test_missing_headers():
vcf_path = fixture_path('test14.vcf')
# INFO DP not declared
callset = read_vcf(vcf_path, fields=['DP'], types={'DP': 'String'})
a = callset['variants/DP']
assert '14' == a[2] # default type is string
callset = read_vcf(vcf_path, fields=['DP'], types={'DP': 'Integer'})
a = callset['variants/DP']
assert 14 == a[2]
# what about a field which isn't present at all?
callset = read_vcf(vcf_path, fields=['FOO'])
assert '' == callset['variants/FOO'][2] # default missing value for string field
# FORMAT field DP not declared in VCF header
callset = read_vcf(vcf_path, fields=['calldata/DP'],
types={'calldata/DP': 'Integer'})
assert 1 == callset['calldata/DP'][2, 0]
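# Rows carrying more calldata columns than declared samples should warn, and
# the surplus columns should be dropped from the parsed output.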
def test_extra_samples():
# more calldata samples than samples declared in header
path = fixture_path('test48b.vcf')
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tGT:GQ\t0/0:34\t0/1:45\t1/1:56\t1/2:99\t2/3:101\n"
)
warnings.resetwarnings()
warnings.simplefilter('error')
with pytest.warns(UserWarning):
read_vcf(path)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT', 'calldata/GQ'])
warnings.resetwarnings()
warnings.simplefilter('always')
# try again without raising warnings to check data
callset = read_vcf(io.BytesIO(input_data), fields=['calldata/GT', 'calldata/GQ'])
assert (1, 4, 2) == callset['calldata/GT'].shape
callset = read_vcf(path)
assert (9, 2, 2) == callset['calldata/GT'].shape
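# With no samples declared in the header, calldata fields and 'samples' are
# omitted from the output while variants fields are still returned.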
# noinspection PyTypeChecker
def test_no_samples():
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
)
callset = read_vcf(io.BytesIO(input_data),
fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
assert 'variants/POS' in callset
assert 'samples' not in callset
assert 'calldata/GT' not in callset
assert 'calldata/GQ' not in callset
h5_path = os.path.join(tempdir, 'sample.h5')
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(io.BytesIO(input_data), h5_path,
fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
with h5py.File(h5_path, mode='r') as callset:
assert 'variants/POS' in callset
assert 'samples' not in callset
assert 'calldata/GT' not in callset
assert 'calldata/GQ' not in callset
zarr_path = os.path.join(tempdir, 'sample.zarr')
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(io.BytesIO(input_data), zarr_path,
fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
callset = zarr.open_group(zarr_path, mode='r')
assert 'variants/POS' in callset
assert 'samples' not in callset
assert 'calldata/GT' not in callset
assert 'calldata/GQ' not in callset
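# Computed fields (padded ALT, numalt, altlen, is_snp) are derived from
# REF/ALT and should honour the requested ALT number, including ALT=1.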
def test_computed_fields():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t2\t.\t.\t.\t.\t.\t.\t.\n"
b"2L\t4\t.\t.\tG\t.\t.\t.\t.\n"
b"2L\t12\t.\tA\t.\t.\t.\t.\t.\n"
b"2L\t34\t.\tC\tT\t.\t.\t.\t.\n"
b"3R\t45\t.\tG\tA,T\t.\t.\t.\t.\n"
b"3R\t47\t.\tG\tC,T,*\t.\t.\t.\t.\n"
b"3R\t56\t.\tG\tA,GTAC\t.\t.\t.\t.\n"
b"3R\t56\t.\tCATG\tC,GATG\t.\t.\t.\t.\n"
b"3R\t56\t.\tGTAC\tATAC,GTACTACTAC,G,GTACA,GTA\t.\t.\t.\t.\n")
for string_dtype in 'S20', 'object':
callset = read_vcf(io.BytesIO(input_data),
fields='*',
numbers={'ALT': 5},
types={'REF': string_dtype, 'ALT': string_dtype})
a = callset['variants/ALT']
assert (9, 5) == a.shape
e = np.array([[b'', b'', b'', b'', b''],
[b'G', b'', b'', b'', b''],
[b'', b'', b'', b'', b''],
[b'T', b'', b'', b'', b''],
[b'A', b'T', b'', b'', b''],
[b'C', b'T', b'*', b'', b''],
[b'A', b'GTAC', b'', b'', b''],
[b'C', b'GATG', b'', b'', b''],
[b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']])
if a.dtype.kind == 'O':
e = e.astype('U').astype(object)
assert_array_equal(e, a)
a = callset['variants/numalt']
assert (9,) == a.shape
assert_array_equal([0, 1, 0, 1, 2, 3, 2, 2, 5], a)
a = callset['variants/altlen']
assert (9, 5) == a.shape
e = np.array([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1, 0, 0],
[0, 3, 0, 0, 0],
[-3, 0, 0, 0, 0],
[0, 6, -3, 1, -1]])
assert_array_equal(e, a)
a = callset['variants/is_snp']
assert (9,) == a.shape
assert np.dtype(bool) == a.dtype
assert_array_equal([False, False, False, True, True, False, False, False, False],
a)
# test is_snp with reduced ALT number
callset = read_vcf(io.BytesIO(input_data),
fields='*',
numbers={'ALT': 1},
types={'REF': string_dtype, 'ALT': string_dtype})
a = callset['variants/ALT']
assert (9,) == a.shape
e = np.array([b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC'])
if a.dtype.kind == 'O':
e = e.astype('U').astype(object)
assert_array_equal(e, a)
a = callset['variants/numalt']
assert (9,) == a.shape
assert_array_equal([0, 1, 0, 1, 2, 3, 2, 2, 5], a)
a = callset['variants/altlen']
assert (9,) == a.shape
e = np.array([0, 1, 0, 0, 0, 0, 0, -3, 0])
assert_array_equal(e, a)
a = callset['variants/is_snp']
assert (9,) == a.shape
assert np.dtype(bool) == a.dtype
assert_array_equal([False, False, False, True, True, False, False, False, False],
a)
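# The 'genotype_ac/<dtype>' type requests conversion of genotype calls into
# per-sample allele counts, which also normalises mixed-ploidy calldata.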
def test_genotype_ac():
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\tS3\n"
b"2L\t12\t.\tA\t.\t.\t.\t.\tGT:GQ\t0/0/0:11\t0/1/2:12\t././.:.\n"
b"2L\t34\t.\tC\tT\t.\t.\t.\tGT:GQ\t0/1/2:22\t3/3/.:33\t.\n"
b"3R\t45\t.\tG\tA,T\t.\t.\t.\tGT:GQ\t0/1:.\t3:12\t\n"
b"X\t55\t.\tG\tA,T\t.\t.\t.\tGT:GQ\t0/1/1/3/4:.\t1/1/2/2/4/4/5:12\t0/0/1/2/3/./4\n"
)
for t in 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8':
callset = read_vcf(io.BytesIO(input_data),
fields=['calldata/GT'],
numbers={'calldata/GT': 4},
types={'calldata/GT': 'genotype_ac/' + t})
e = np.array([[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]],
[[1, 1, 1, 0], [0, 0, 0, 2], [0, 0, 0, 0]],
[[1, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
[[1, 2, 0, 1], [0, 2, 2, 0], [2, 1, 1, 1]]], dtype=t)
a = callset['calldata/GT']
assert e.dtype == a.dtype
assert_array_equal(e, a)
vcf_path = fixture_path('test63.vcf')
callset = read_vcf(vcf_path, fields='GT', numbers={'GT': 3},
types={'GT': 'genotype_ac/i1'})
e = np.array([
[(2, 0, 0), (3, 0, 0), (1, 0, 0)],
[(0, 1, 0), (1, 1, 0), (1, 1, 1)],
[(0, 0, 0), (0, 0, 0), (0, 0, 0)],
[(0, 0, 0), (0, 0, 0), (0, 0, 0)],
])
a = callset['calldata/GT']
assert_array_equal(e, a)
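# Region queries should return only variants inside the requested interval,
# both with and without tabix assistance.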
def test_region_truncate():
vcf_path = fixture_path('test54.vcf.gz')
for tabix in 'tabix', None:
callset = read_vcf(vcf_path, region='chr1:10-100', tabix=tabix)
pos = callset['variants/POS']
assert 2 == pos.shape[0]
assert_array_equal([20, 30], pos)
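# Unusable inputs (a directory, missing files, header-less content) should
# raise rather than warn.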
def test_errors():
# try to open a directory
path = '.'
with pytest.raises(OSError):
read_vcf(path)
# try to open a file that doesn't exist
path = 'doesnotexist.vcf'
with pytest.raises(FileNotFoundError):
read_vcf(path)
# try to open a file that doesn't exist
path = 'doesnotexist.vcf.gz'
with pytest.raises(FileNotFoundError):
read_vcf(path)
# file is nothing like a VCF (has no header)
path = fixture_path('test48a.vcf')
with pytest.raises(RuntimeError):
read_vcf(path)
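# Duplicate FILTER/INFO/FORMAT header lines are tolerated but reported via
# UserWarning.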
def test_dup_headers():
warnings.resetwarnings()
warnings.simplefilter('error')
# dup FILTER
input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT test1 test2 test3 test4
chr1 1 . A G . PASS DP=2 GT:AD 0:1,0 .:1,0 0:0,0 .:0,0
chr1 2 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 0:1,0 0:0,0 .:0,0
chr1 3 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 1:1,0 . ./.
"""
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dup INFO
input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT test1 test2 test3 test4
chr1 1 . A G . PASS DP=2 GT:AD 0:1,0 .:1,0 0:0,0 .:0,0
chr1 2 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 0:1,0 0:0,0 .:0,0
chr1 3 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 1:1,0 . ./.
"""
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dup FORMAT
input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT test1 test2 test3 test4
chr1 1 . A G . PASS DP=2 GT:AD 0:1,0 .:1,0 0:0,0 .:0,0
chr1 2 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 0:1,0 0:0,0 .:0,0
chr1 3 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 1:1,0 . ./.
"""
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
warnings.resetwarnings()
warnings.simplefilter('always')
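# An explicit types override takes precedence over the type declared in the
# VCF header.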
def test_override_vcf_type():
vcf_path = fixture_path('test4.vcf')
callset = read_vcf(vcf_path, fields=['MQ0FractionTest'])
assert 0 == callset['variants/MQ0FractionTest'][2]
callset = read_vcf(vcf_path, fields=['MQ0FractionTest'],
types={'MQ0FractionTest': 'Float'})
assert approx(0.03) == callset['variants/MQ0FractionTest'][2]
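# Conversely, a header declaration (GQ declared as Float here) overrides the
# default type that would otherwise be assumed for a well-known field.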
def test_header_overrides_default_vcf_type():
vcf_path = fixture_path('test176.vcf')
callset = read_vcf(vcf_path, fields='*')
gq = callset['calldata/GQ']
assert 'f' == gq.dtype.kind
assert np.isnan(gq[0, 0])
assert approx(48.2) == gq[2, 0]
assert approx(48.1) == gq[2, 1]
assert approx(43.9) == gq[2, 2]
assert approx(49.) == gq[3, 0]
assert approx(3.) == gq[3, 1]
assert approx(41.) == gq[3, 2]
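# Missing or truncated calldata entries should come back as the missing
# value (-1), and values from a previous row must not leak into cleared cells.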
def test_missing_calldata():
vcf_path = fixture_path('test1.vcf')
callset = read_vcf(vcf_path, fields='calldata/*', numbers={'AD': 2})
gt = callset['calldata/GT']
ad = callset['calldata/AD']
assert (-1, -1) == tuple(gt[0, 1])
assert (1, 0) == tuple(ad[0, 1])
assert (-1, -1) == tuple(gt[2, 2])
assert (-1, -1) == tuple(ad[2, 2])
assert (-1, -1) == tuple(gt[2, 3])
assert (-1, -1) == tuple(ad[2, 3])
def test_calldata_cleared():
vcf_path = fixture_path('test32.vcf')
callset = read_vcf(vcf_path, fields=['calldata/GT', 'calldata/DP', 'calldata/GQ'])
gt = callset['calldata/GT']
dp = callset['calldata/DP']
gq = callset['calldata/GQ']
assert (0, 0) == tuple(gt[0, 3])
assert 8 == dp[0, 3]
assert 3 == gq[0, 3]
assert (-1, -1) == tuple(gt[1, 3])
assert -1 == dp[1, 3]
assert -1 == gq[1, 3]
def test_calldata_quirks():
vcf_path = fixture_path('test1.vcf')
callset = read_vcf(vcf_path, fields=['AD', 'GT'], numbers={'AD': 2})
gt = callset['calldata/GT']
ad = callset['calldata/AD']
e = np.array([[-1, -1], [0, -1], [1, -1]])
assert_array_equal(e, gt[:, 1])
e = np.array([[1, 0], [1, 0], [1, 0]])
assert_array_equal(e, ad[:, 1])
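# NPZ export should round-trip against read_vcf across the full
# region/tabix/samples/string-type parameter matrix (floats compared
# approximately).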
def test_vcf_to_npz():
vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
npz_path = os.path.join(tempdir, 'sample.npz')
region_values = None, '20', '20:10000-20000', 'Y'
tabix_values = 'tabix', None
samples_values = None, ['NA00001', 'NA00003']
string_type_values = 'S10', 'object'
param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
samples_values, string_type_values)
for vcf_path, region, tabix, samples, string_type in param_matrix:
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
tabix=tabix, samples=samples, types=types)
if os.path.exists(npz_path):
os.remove(npz_path)
vcf_to_npz(vcf_path, npz_path, fields='*', chunk_length=2, alt_number=2,
region=region, tabix=tabix, samples=samples, types=types)
if expected is None:
assert not os.path.exists(npz_path)
else:
actual = np.load(npz_path, allow_pickle=True)
for key in expected.keys():
if expected[key].dtype.kind == 'f':
assert_array_almost_equal(expected[key], actual[key])
else:
assert_array_equal(expected[key], actual[key])
for key in actual.keys():
assert key in expected
actual.close()
def test_vcf_to_npz_exclude():
vcf_path = fixture_path('sample.vcf')
npz_path = os.path.join(tempdir, 'sample.npz')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
if os.path.exists(npz_path):
os.remove(npz_path)
vcf_to_npz(vcf_path, npz_path, fields='*', exclude_fields=exclude)
actual = np.load(npz_path, allow_pickle=True)
for key in expected.keys():
if expected[key].dtype.kind == 'f':
assert_array_almost_equal(expected[key], actual[key])
else:
assert_array_equal(expected[key], actual[key])
for key in actual.keys():
assert key in expected
actual.close()
def test_vcf_to_npz_rename():
vcf_path = fixture_path('sample.vcf')
npz_path = os.path.join(tempdir, 'sample.npz')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
if os.path.exists(npz_path):
os.remove(npz_path)
vcf_to_npz(vcf_path, npz_path, fields='*', rename_fields=rename)
actual = np.load(npz_path, allow_pickle=True)
for key in expected.keys():
if expected[key].dtype.kind == 'f':
assert_array_almost_equal(expected[key], actual[key])
else:
assert_array_equal(expected[key], actual[key])
for key in actual.keys():
assert key in expected
actual.close()
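# Zarr export: same parameter matrix as the NPZ test, plus a check that field
# Description metadata from the header is stored as group attributes.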
def test_vcf_to_zarr():
vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
zarr_path = os.path.join(tempdir, 'sample.zarr')
region_values = None, '20', '20:10000-20000', 'Y'
tabix_values = 'tabix', None
samples_values = None, ['NA00001', 'NA00003']
string_type_values = 'S10', 'object'
param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
samples_values, string_type_values)
for vcf_path, region, tabix, samples, string_type in param_matrix:
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
tabix=tabix, samples=samples, types=types)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
region=region, tabix=tabix, samples=samples, types=types)
if expected is None:
assert not os.path.exists(zarr_path)
else:
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
e = expected[key]
a = actual[key][:]
compare_arrays(e, a)
assert (actual['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_zarr_exclude():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', exclude_fields=exclude)
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
e = expected[key]
a = actual[key][:]
compare_arrays(e, a)
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_zarr_rename():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
e = expected[key]
a = actual[key][:]
compare_arrays(e, a)
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
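# Rename mappings whose targets collide (duplicate targets, or one target
# being a parent group of another) should raise ValueError.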
def test_vcf_to_zarr_rename_clash():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
# dup values
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam'}
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
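# Requesting fields that differ only by case must raise, presumably to avoid
# collisions on case-insensitive stores; renaming one of them is allowed.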
def test_vcf_to_zarr_dup_fields_case_insensitive():
vcf_path = fixture_path('altlen.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'])
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields=['variants/ALTLEN', 'variants/altlen'])
# should be fine if renamed
vcf_to_zarr(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'],
rename_fields={'altlen': 'variants/spam'})
def test_vcf_to_zarr_group():
vcf_path = fixture_path('sample.vcf.gz')
zarr_path = os.path.join(tempdir, 'sample.zarr')
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
chroms = ['19', '20', 'X']
for chrom in chroms:
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
region=chrom, group=chrom)
actual = zarr.open_group(zarr_path, mode='r')
assert chroms == sorted(actual)
for chrom in chroms:
assert ['calldata', 'samples', 'variants'] == sorted(actual[chrom])
expect = read_vcf(vcf_path, fields='*', alt_number=2, region=chrom)
for key in expect.keys():
e = expect[key]
a = actual[chrom][key][:]
compare_arrays(e, a)
assert (actual[chrom]['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual[chrom]['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
def test_vcf_to_zarr_string_codec():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
types = {'CHROM': object, 'ALT': object, 'samples': object}
expect = read_vcf(vcf_path, fields='*', alt_number=2, types=types)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
types=types)
actual = zarr.open_group(zarr_path, mode='r')
for key in expect.keys():
e = expect[key]
a = actual[key][:]
compare_arrays(e, a)
def test_vcf_to_zarr_ann():
vcf_path = fixture_path('ann.vcf')
zarr_path = os.path.join(tempdir, 'ann.zarr')
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
expected = read_vcf(vcf_path, fields='*', alt_number=2, types=types,
transformers=transformers)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
types=types, transformers=transformers)
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
def test_vcf_to_zarr_empty():
vcf_path = fixture_path('empty.vcf')
zarr_path = os.path.join(tempdir, 'empty.zarr')
vcf_to_zarr(vcf_path, zarr_path)
assert not os.path.exists(zarr_path)
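# HDF5 export mirrors the zarr checks, including the Description attributes.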
def test_vcf_to_hdf5():
vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
h5_path = os.path.join(tempdir, 'sample.h5')
region_values = None, '20', '20:10000-20000', 'Y'
tabix_values = 'tabix', None
samples_values = None, ['NA00001', 'NA00003']
string_type_values = 'S10', 'object'
param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
samples_values, string_type_values)
for vcf_path, region, tabix, samples, string_type in param_matrix:
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
tabix=tabix, samples=samples, types=types)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,
region=region, tabix=tabix, samples=samples, types=types)
if expected is None:
assert not os.path.exists(h5_path)
else:
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
assert (actual['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_hdf5_exclude():
vcf_path = fixture_path('sample.vcf')
h5_path = os.path.join(tempdir, 'sample.h5')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', exclude_fields=exclude)
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_hdf5_rename():
vcf_path = fixture_path('sample.vcf')
h5_path = os.path.join(tempdir, 'sample.h5')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', rename_fields=rename)
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_hdf5_group():
vcf_path = fixture_path('sample.vcf.gz')
h5_path = os.path.join(tempdir, 'sample.h5')
if os.path.exists(h5_path):
os.remove(h5_path)
chroms = ['19', '20', 'X']
for chrom in chroms:
vcf_to_hdf5(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,
region=chrom, group=chrom)
with h5py.File(h5_path, mode='r') as actual:
assert chroms == sorted(actual)
for chrom in chroms:
assert ['calldata', 'samples', 'variants'] == sorted(actual[chrom])
expect = read_vcf(vcf_path, fields='*', alt_number=2, region=chrom)
for key in expect.keys():
e = expect[key]
a = actual[chrom][key][:]
compare_arrays(e, a)
assert (actual[chrom]['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual[chrom]['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
def test_vcf_to_hdf5_ann():
vcf_path = fixture_path('ann.vcf')
h5_path = os.path.join(tempdir, 'ann.h5')
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
expected = read_vcf(vcf_path, fields='*', types=types, transformers=transformers)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', chunk_length=2, types=types,
transformers=transformers)
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
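# With vlen=False, object-dtype strings are stored as fixed-length bytes, so
# expectations are cast to 'S' before comparison.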
def test_vcf_to_hdf5_vlen():
vcf_path = fixture_path('sample.vcf')
h5_path = os.path.join(tempdir, 'sample.h5')
fields = ['CHROM', 'ID', 'samples']
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ID': string_type, 'samples': string_type}
expect = read_vcf(vcf_path, fields=fields, alt_number=2, types=types)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields=fields, alt_number=2, chunk_length=3,
types=types, vlen=False)
with h5py.File(h5_path, mode='r') as actual:
for key in expect.keys():
if expect[key].dtype.kind == 'f':
assert_array_almost_equal(expect[key], actual[key][:])
elif expect[key].dtype.kind == 'O':
# strings always stored as fixed length if vlen=False
assert 'S' == actual[key].dtype.kind
assert_array_equal(expect[key].astype('S'), actual[key][:])
else:
assert_array_equal(expect[key], actual[key][:])
def test_vcf_to_hdf5_empty():
vcf_path = fixture_path('empty.vcf')
h5_path = os.path.join(tempdir, 'empty.h5')
vcf_to_hdf5(vcf_path, h5_path)
assert not os.path.exists(h5_path)
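# Helpers for the pandas/CSV round-trip tests below; check_dataframe splits
# 2-D callset fields into the numbered columns produced for a DataFrame.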
def to_pandas_expectation(e):
# expect that all string fields end up as objects with nans for missing
if e.dtype.kind == 'S':
e = e.astype('U').astype(object)
if e.dtype == object:
e[e == ''] = np.nan
return e
def check_dataframe(callset, df):
for k in callset:
if k.startswith('variants/'):
group, name = k.split('/')
e = to_pandas_expectation(callset[k])
if e.ndim == 1:
compare_arrays(e, df[name].values)
elif e.ndim == 2:
for i in range(e.shape[1]):
compare_arrays(e[:, i], df['%s_%s' % (name, i + 1)])
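# DataFrame export: multi-value fields expand to numbered columns (ALT_1,
# ALT_2, ...) and string columns are always object dtype.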
def test_vcf_to_dataframe():
vcf_path = fixture_path('sample.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
numbers = {'AC': 3}
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
df = vcf_to_dataframe(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1', 'AC_2', 'AC_3'] ==
df.columns.tolist())
# always convert strings to object dtype for pandas
assert np.dtype(object) == df['CHROM'].dtype
assert np.dtype(object) == df['ALT_1'].dtype
check_dataframe(callset, df)
def test_vcf_to_dataframe_all():
vcf_path = fixture_path('sample.vcf')
fields = '*'
numbers = {'AC': 3}
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
df = vcf_to_dataframe(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
for k in ['CHROM', 'POS', 'ID', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1',
'AC_2', 'AC_3']:
assert k in df.columns.tolist()
# always convert strings to object dtype for pandas
assert np.dtype(object) == df['CHROM'].dtype
assert np.dtype(object) == df['ALT_1'].dtype
check_dataframe(callset, df)
def test_vcf_to_dataframe_exclude():
vcf_path = fixture_path('sample.vcf')
fields = '*'
exclude = ['ALT', 'ID']
df = vcf_to_dataframe(vcf_path, fields=fields, exclude_fields=exclude)
for k in ['CHROM', 'POS', 'REF', 'DP', 'AC_1', 'AC_2', 'AC_3']:
assert k in df.columns.tolist()
for k in ['ALT_1', 'ALT_2', 'ID']:
assert k not in df.columns.tolist()
def test_vcf_to_dataframe_ann():
vcf_path = fixture_path('ann.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'ANN', 'DP', 'AC', 'GT']
numbers = {'AC': 2, 'ALT': 2}
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
callset = read_vcf(vcf_path, fields=fields, numbers=numbers, types=types,
transformers=transformers)
df = vcf_to_dataframe(vcf_path, fields=fields, numbers=numbers, chunk_length=2,
types=types, transformers=transformers)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'ANN_Allele', 'ANN_HGVS_c',
'ANN_AA_pos', 'ANN_AA_length', 'DP', 'AC_1', 'AC_2'] ==
df.columns.tolist())
# always convert strings to object dtype for pandas
assert np.dtype(object) == df['CHROM'].dtype
assert np.dtype(object) == df['ALT_1'].dtype
check_dataframe(callset, df)
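# CSV export should round-trip through pandas.read_csv with the same columns
# and values as the corresponding DataFrame.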
def test_vcf_to_csv():
vcf_path = fixture_path('sample.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
numbers = {'AC': 3}
for string_type in 'S20', 'object':
types = {'REF': string_type, 'ALT': string_type}
df = vcf_to_dataframe(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types, chunk_length=2)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields, alt_number=2, numbers=numbers,
types=types, chunk_length=2)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
for k in df.columns:
compare_arrays(df[k].values, adf[k].values)
def test_vcf_to_csv_all():
vcf_path = fixture_path('sample.vcf')
fields = '*'
df = vcf_to_dataframe(vcf_path, fields=fields)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
for k in df.columns:
compare_arrays(df[k].values, adf[k].values)
def test_vcf_to_csv_exclude():
vcf_path = fixture_path('sample.vcf')
fields = '*'
exclude = ['ALT', 'ID']
df = vcf_to_dataframe(vcf_path, fields=fields, exclude_fields=exclude)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields, exclude_fields=exclude)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
def test_vcf_to_csv_ann():
vcf_path = fixture_path('ann.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'ANN', 'GT']
numbers = {'AC': 2, 'ALT': 2}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
df = vcf_to_dataframe(vcf_path, fields=fields, numbers=numbers, types=types,
chunk_length=2, transformers=transformers)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields, numbers=numbers, types=types,
chunk_length=2, transformers=transformers)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
for k in df.columns:
compare_arrays(df[k].values, adf[k].values)
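# Recarray export mirrors the DataFrame layout but preserves the requested
# string dtype (e.g. S20) rather than coercing to object.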
def test_vcf_to_recarray():
vcf_path = fixture_path('sample.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
numbers = {'AC': 3}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
a = vcf_to_recarray(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1', 'AC_2', 'AC_3']
== list(a.dtype.names))
assert np.dtype(string_type) == a['CHROM'].dtype
for k in callset:
if k.startswith('variants/'):
group, name = k.split('/')
e = callset[k]
if e.ndim == 1:
assert_array_equal(e, a[name])
elif e.ndim == 2:
for i in range(e.shape[1]):
assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
else:
assert False, (k, e.ndim)
def test_vcf_to_recarray_all():
vcf_path = fixture_path('sample.vcf')
fields = '*'
numbers = {'AC': 3}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
a = vcf_to_recarray(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
for k in ['CHROM', 'POS', 'ID', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1',
'AC_2', 'AC_3']:
assert k in a.dtype.names
assert np.dtype(string_type) == a['CHROM'].dtype
for k in callset:
if k.startswith('variants/'):
group, name = k.split('/')
e = callset[k]
if e.ndim == 1:
assert_array_equal(e, a[name])
elif e.ndim == 2:
for i in range(e.shape[1]):
assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
else:
assert False, (k, e.ndim)
def test_vcf_to_recarray_exclude():
vcf_path = fixture_path('sample.vcf')
fields = '*'
exclude = ['ALT', 'ID']
a = vcf_to_recarray(vcf_path, fields=fields, exclude_fields=exclude)
for k in ['CHROM', 'POS', 'REF', 'DP', 'AC_1', 'AC_2', 'AC_3']:
assert k in a.dtype.names
for k in 'ALT_1', 'ALT_2', 'ALT', 'ID':
assert k not in a.dtype.names
def test_vcf_to_recarray_ann():
vcf_path = fixture_path('ann.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'ANN', 'DP', 'AC', 'GT']
numbers = {'AC': 2, 'ALT': 2}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
callset = read_vcf(vcf_path, fields=fields, numbers=numbers, types=types,
transformers=transformers)
a = vcf_to_recarray(vcf_path, fields=fields, numbers=numbers, chunk_length=2,
types=types, transformers=transformers)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'ANN_Allele', 'ANN_HGVS_c',
'ANN_AA_pos', 'ANN_AA_length', 'DP', 'AC_1', 'AC_2'] ==
list(a.dtype.names))
assert np.dtype(string_type) == a['CHROM'].dtype
assert np.dtype(string_type) == a['ALT_1'].dtype
for k in callset:
group, name = k.split('/')
if group == 'variants':
e = callset[k]
if e.ndim == 1:
assert_array_equal(e, a[name])
elif e.ndim == 2:
for i in range(e.shape[1]):
assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
else:
assert False, (k, e.ndim)
else:
assert name not in a.dtype.names
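# read_vcf_headers exposes the FILTER/INFO/FORMAT declarations and the
# sample list parsed from the header.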
def test_read_vcf_headers():
vcf_path = fixture_path('sample.vcf')
headers = read_vcf_headers(vcf_path)
# check headers
assert 'q10' in headers.filters
assert 's50' in headers.filters
assert 'AA' in headers.infos
assert 'AC' in headers.infos
assert 'AF' in headers.infos
assert 'AN' in headers.infos
assert 'DB' in headers.infos
assert 'DP' in headers.infos
assert 'H2' in headers.infos
assert 'NS' in headers.infos
assert 'DP' in headers.formats
assert 'GQ' in headers.formats
assert 'GT' in headers.formats
assert 'HQ' in headers.formats
assert ['NA00001', 'NA00002', 'NA00003'] == headers.samples
assert '1' == headers.infos['AA']['Number']
assert 'String' == headers.infos['AA']['Type']
assert 'Ancestral Allele' == headers.infos['AA']['Description']
assert '2' == headers.formats['HQ']['Number']
assert 'Integer' == headers.formats['HQ']['Type']
assert 'Haplotype Quality' == headers.formats['HQ']['Description']
[
"atexit.register",
"numpy.load",
"os.remove",
"pandas.read_csv",
"numpy.isnan",
"allel.io.vcf_read.read_vcf",
"allel.io.vcf_read.vcf_to_dataframe",
"shutil.rmtree",
"numpy.testing.assert_array_almost_equal",
"os.path.join",
"zarr.open_group",
"allel.io.vcf_read.vcf_to_csv",
"allel.io.vcf_read.vcf_to_hdf5",
"allel.io.vcf_read.vcf_to_recarray",
"warnings.simplefilter",
"pytest.warns",
"os.path.dirname",
"os.path.exists",
"pytest.raises",
"tempfile.mkdtemp",
"itertools.product",
"io.BytesIO",
"h5py.File",
"numpy.testing.assert_array_equal",
"allel.io.vcf_read.iter_vcf_chunks",
"allel.test.tools.compare_arrays",
"allel.io.vcf_read.ANNTransformer",
"pytest.approx",
"allel.io.vcf_read.read_vcf_headers",
"numpy.all",
"gzip.open",
"warnings.resetwarnings",
"numpy.dtype",
"allel.io.vcf_read.vcf_to_npz",
"numpy.array",
"allel.io.vcf_read.vcf_to_zarr"
]
[((617, 641), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (639, 641), False, 'import warnings\n'), ((642, 673), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (663, 673), False, 'import warnings\n'), ((715, 733), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (731, 733), False, 'import tempfile\n'), ((734, 773), 'atexit.register', 'atexit.register', (['shutil.rmtree', 'tempdir'], {}), '(shutil.rmtree, tempdir)\n', (749, 773), False, 'import atexit\n'), ((980, 1050), 'allel.io.vcf_read.iter_vcf_chunks', 'iter_vcf_chunks', (['vcf_path'], {'fields': '"""*"""', 'chunk_length': '(4)', 'buffer_size': '(100)'}), "(vcf_path, fields='*', chunk_length=4, buffer_size=100)\n", (995, 1050), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((3251, 3281), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""'}), "(vcf_path, fields='*')\n", (3259, 3281), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((4232, 4286), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, fields='*', exclude_fields=exclude)\n", (4240, 4286), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((5240, 5292), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (5248, 5292), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((7842, 7860), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {}), '(vcf_path)\n', (7850, 7860), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((8262, 8301), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""variants/*"""'}), "(vcf_path, fields='variants/*')\n", (8270, 8301), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((9059, 9092), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""INFO"""'}), "(vcf_path, fields='INFO')\n", (9067, 9092), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((9475, 9510), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""FILTER"""'}), "(vcf_path, fields='FILTER')\n", (9483, 9510), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((9831, 9866), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""FILTER"""'}), "(vcf_path, fields='FILTER')\n", (9839, 9866), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, 
vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((10226, 10265), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""calldata/*"""'}), "(vcf_path, fields='calldata/*')\n", (10234, 10265), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((10558, 10689), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT', 'calldata/HQ',\n 'FILTER_q10', 'variants/numalt']"}), "(vcf_path, fields=['CHROM', 'variants/POS', 'AC', 'variants/AF',\n 'GT', 'calldata/HQ', 'FILTER_q10', 'variants/numalt'])\n", (10566, 10689), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((11089, 11268), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT', 'calldata/HQ',\n 'FILTER_q10', 'variants/numalt', 'samples']", 'chunk_length': '(4)', 'buffer_size': '(100)'}), "(vcf_path, fields=['CHROM', 'variants/POS', 'AC', 'variants/AF',\n 'GT', 'calldata/HQ', 'FILTER_q10', 'variants/numalt', 'samples'],\n chunk_length=4, buffer_size=100)\n", (11097, 11268), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((11784, 11901), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['CHROM', 'variants/CHROM', 'variants/AF', 'variants/AF', 'numalt',\n 'variants/numalt']"}), "(vcf_path, fields=['CHROM', 'variants/CHROM', 'variants/AF',\n 'variants/AF', 'numalt', 'variants/numalt'])\n", (11792, 11901), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((12282, 12329), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['ALTLEN', 'altlen']"}), "(vcf_path, fields=['ALTLEN', 'altlen'])\n", (12290, 12329), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((12719, 12841), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': '"""*"""', 'chunk_length': 'chunk_length', 'buffer_size': 'buffer_size', 'types': "{'calldata/DP': 'object'}"}), "(input_file, fields='*', chunk_length=chunk_length, buffer_size=\n buffer_size, types={'calldata/DP': 'object'})\n", (12727, 12841), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((15297, 15399), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': '"""*"""', 'chunk_length': 'chunk_length', 'buffer_size': 'buffer_size', 'types': 'types'}), "(input_file, fields='*', chunk_length=chunk_length, buffer_size=\n buffer_size, types=types)\n", (15305, 15399), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((18622, 18652), 'allel.io.vcf_read.read_vcf', 'read_vcf', 
(['vcf_path'], {'fields': '"""*"""'}), "(vcf_path, fields='*')\n", (18630, 18652), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((33588, 33732), 'numpy.array', 'np.array', (['[[[0, 0, 0], [0, 1, 2], [-1, -1, -1]], [[0, 1, 2], [3, 3, -1], [-1, -1, -1]\n ], [[0, 1, -1], [5, -1, -1], [-1, -1, -1]]]'], {'dtype': '"""i1"""'}), "([[[0, 0, 0], [0, 1, 2], [-1, -1, -1]], [[0, 1, 2], [3, 3, -1], [-1,\n -1, -1]], [[0, 1, -1], [5, -1, -1], [-1, -1, -1]]], dtype='i1')\n", (33596, 33732), True, 'import numpy as np\n'), ((33806, 33830), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (33824, 33830), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((33870, 33934), 'numpy.array', 'np.array', (['[[11, 12, -1], [22, 33, -1], [-1, 12, -1]]'], {'dtype': '"""i2"""'}), "([[11, 12, -1], [22, 33, -1], [-1, 12, -1]], dtype='i2')\n", (33878, 33934), True, 'import numpy as np\n'), ((34012, 34036), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (34030, 34036), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((39220, 39237), 'numpy.isnan', 'np.isnan', (['a[2, 1]'], {}), '(a[2, 1])\n', (39228, 39237), True, 'import numpy as np\n'), ((39842, 39902), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['ALT', 'AC', 'AF']", 'alt_number': '(2)'}), "(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=2)\n", (39850, 39902), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((40099, 40159), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['ALT', 'AC', 'AF']", 'alt_number': '(1)'}), "(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=1)\n", (40107, 40159), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((40350, 40410), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['ALT', 'AC', 'AF']", 'alt_number': '(5)'}), "(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=5)\n", (40358, 40410), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((40626, 40719), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['ALT', 'AC', 'AF']", 'alt_number': '(5)', 'numbers': "{'ALT': 2, 'AC': 4}"}), "(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=5, numbers={'ALT':\n 2, 'AC': 4})\n", (40634, 40719), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((43888, 43928), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (43896, 43928), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((44080, 44101), 'numpy.all', 'np.all', (["(chrom == '19')"], {}), "(chrom == '19')\n", (44086, 44101), True, 'import numpy as np\n'), ((44131, 
44166), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[111, 112]', 'pos'], {}), '([111, 112], pos)\n', (44149, 44166), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((44200, 44240), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (44208, 44240), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((44392, 44413), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (44398, 44413), True, 'import numpy as np\n'),
((44443, 44518), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[14370, 1230237, 1234567, 1235237, 17330, 1110696]', 'pos'], {}), '([14370, 1230237, 1234567, 1235237, 17330, 1110696], pos)\n', (44461, 44518), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((44551, 44591), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (44559, 44591), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((44743, 44763), 'numpy.all', 'np.all', (["(chrom == 'X')"], {}), "(chrom == 'X')\n", (44749, 44763), True, 'import numpy as np\n'),
((44793, 44822), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[10]', 'pos'], {}), '([10], pos)\n', (44811, 44822), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((44855, 44895), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (44863, 44895), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((44965, 45005), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (44973, 45005), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((45157, 45178), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (45163, 45178), True, 'import numpy as np\n'),
((45208, 45247), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[14370, 17330]', 'pos'], {}), '([14370, 17330], pos)\n', (45226, 45247), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((45297, 45337), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (45305, 45337), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((45489, 45510), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (45495, 45510), True, 'import numpy as np\n'),
((45540, 45583), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[1230237, 1110696]', 'pos'], {}), '([1230237, 1110696], pos)\n', (45558, 45583), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((45633, 45673), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['fn'], {'region': 'region', 'tabix': 'tabix'}), '(fn, region=region, tabix=tabix)\n', (45641, 45673), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((45825, 45846), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (45831, 45846), True, 'import numpy as np\n'),
((45876, 45919), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[1234567, 1235237]', 'pos'], {}), '([1234567, 1235237], pos)\n', (45894, 45919), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((46902, 46920), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {}), '(vcf_path)\n', (46910, 46920), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((48208, 48245), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['T', '', 'T']", 'a'], {}), "(['T', '', 'T'], a)\n", (48226, 48245), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((48361, 48429), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['intergenic_region', '', 'missense_variant']", 'a'], {}), "(['intergenic_region', '', 'missense_variant'], a)\n", (48379, 48429), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((48552, 48603), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['MODIFIER', '', 'MODERATE']", 'a'], {}), "(['MODIFIER', '', 'MODERATE'], a)\n", (48570, 48603), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((48718, 48773), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['AGAP004677', '', 'AGAP005273']", 'a'], {}), "(['AGAP004677', '', 'AGAP005273'], a)\n", (48736, 48773), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((48886, 48941), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['AGAP004677', '', 'AGAP005273']", 'a'], {}), "(['AGAP004677', '', 'AGAP005273'], a)\n", (48904, 48941), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((49059, 49121), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['intergenic_region', '', 'transcript']", 'a'], {}), "(['intergenic_region', '', 'transcript'], a)\n", (49077, 49121), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((49237, 49295), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['AGAP004677', '', 'AGAP005273-RA']", 'a'], {}), "(['AGAP004677', '', 'AGAP005273-RA'], a)\n", (49255, 49295), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((49419, 49464), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['', '', 'VectorBase']", 'a'], {}), "(['', '', 'VectorBase'], a)\n", (49437, 49464), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((49613, 49650), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 1]', 'a[:]'], {}), '([-1, -1, 1], a[:])\n', (49631, 49650), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((49762, 49802), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['', '', '17A>T']", 'a'], {}), "(['', '', '17A>T'], a)\n", (49780, 49802), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((49914, 49956), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['', '', 'Asp6Val']", 'a'], {}), "(['', '', 'Asp6Val'], a)\n", (49932, 49956), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50069, 50104), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 17]', 'a'], {}), '([-1, -1, 17], a)\n', (50087, 50104), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50220, 50257), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 4788]', 'a'], {}), '([-1, -1, 4788], a)\n', (50238, 50257), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50369, 50404), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 17]', 'a'], {}), '([-1, -1, 17], a)\n', (50387, 50404), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50519, 50556), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 4788]', 'a'], {}), '([-1, -1, 4788], a)\n', (50537, 50556), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50667, 50701), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 6]', 'a'], {}), '([-1, -1, 6], a)\n', (50685, 50701), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50815, 50852), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 1596]', 'a'], {}), '([-1, -1, 1596], a)\n', (50833, 50852), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((50965, 51002), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[3000, -1, -1]', 'a'], {}), '([3000, -1, -1], a)\n', (50983, 51002), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51258, 51293), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['T', '']", 'a[0]'], {}), "(['T', ''], a[0])\n", (51276, 51293), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51298, 51332), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['', '']", 'a[1]'], {}), "(['', ''], a[1])\n", (51316, 51332), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51337, 51373), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["['T', 'G']", 'a[2]'], {}), "(['T', 'G'], a[2])\n", (51355, 51373), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51488, 51529), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 17]', 'a[:, 0]'], {}), '([-1, -1, 17], a[:, 0])\n', (51506, 51529), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51534, 51575), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 12]', 'a[:, 1]'], {}), '([-1, -1, 12], a[:, 1])\n', (51552, 51575), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51693, 51736), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 4788]', 'a[:, 0]'], {}), '([-1, -1, 4788], a[:, 0])\n', (51711, 51736), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((51741, 51784), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 4768]', 'a[:, 1]'], {}), '([-1, -1, 4768], a[:, 1])\n', (51759, 51784), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((52078, 52139), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['ANN']", 'transformers': 'transformers'}), "(vcf_path, fields=['ANN'], transformers=transformers)\n", (52086, 52139), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((52387, 52427), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["[b'T', b'', b'T']", 'a'], {}), "([b'T', b'', b'T'], a)\n", (52405, 52427), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((52536, 52579), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["[b'', b'', b'17A>T']", 'a'], {}), "([b'', b'', b'17A>T'], a)\n", (52554, 52579), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((52689, 52724), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, 17]', 'a'], {}), '([-1, -1, 17], a)\n', (52707, 52724), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((53042, 53064), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (53052, 53064), False, 'import io\n'),
((53079, 53138), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['calldata/GT', 'calldata/GQ']"}), "(input_file, fields=['calldata/GT', 'calldata/GQ'])\n", (53087, 53138), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((53208, 53269), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[[0, 1], [1, 2], [2, 3], [-1, -1]]', 'gt[0]'], {}), '([[0, 1], [1, 2], [2, 3], [-1, -1]], gt[0])\n', (53226, 53269), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((53274, 53337), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[[-1, -1], [-1, -1], [3, 3], [1, 2]]', 'gt[1]'], {}), '([[-1, -1], [-1, -1], [3, 3], [1, 2]], gt[1])\n', (53292, 53337), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((53404, 53447), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[12, -1, 34, -1]', 'gq[0]'], {}), '([12, -1, 34, -1], gq[0])\n', (53422, 53447), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((53452, 53495), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[-1, -1, -1, -1]', 'gq[1]'], {}), '([-1, -1, -1, -1], gq[1])\n', (53470, 53495), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((53553, 53577), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (53575, 53577), False, 'import warnings\n'),
((53582, 53612), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (53603, 53612), False, 'import warnings\n'),
((58804, 58828), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (58826, 58828), False, 'import warnings\n'),
((58833, 58864), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (58854, 58864), False, 'import warnings\n'),
((58979, 59036), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['DP']", 'types': "{'DP': 'String'}"}), "(vcf_path, fields=['DP'], types={'DP': 'String'})\n", (58987, 59036), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((59132, 59190), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['DP']", 'types': "{'DP': 'Integer'}"}), "(vcf_path, fields=['DP'], types={'DP': 'Integer'})\n", (59140, 59190), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((59311, 59345), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['FOO']"}), "(vcf_path, fields=['FOO'])\n", (59319, 59345), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((59496, 59572), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['calldata/DP']", 'types': "{'calldata/DP': 'Integer'}"}), "(vcf_path, fields=['calldata/DP'], types={'calldata/DP': 'Integer'})\n", (59504, 59572), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((59967, 59991), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (59989, 59991), False, 'import warnings\n'),
((59996, 60026), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (60017, 60026), False, 'import warnings\n'),
((60207, 60231), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (60229, 60231), False, 'import warnings\n'),
((60236, 60267), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (60257, 60267), False, 'import warnings\n'),
((60476, 60490), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (60484, 60490), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((61075, 61109), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.h5"""'], {}), "(tempdir, 'sample.h5')\n", (61087, 61109), False, 'import os\n'),
((61117, 61140), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (61131, 61140), False, 'import os\n'),
((61527, 61563), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (61539, 61563), False, 'import os\n'),
((61571, 61596), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (61585, 61596), False, 'import os\n'),
((61769, 61805), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (61784, 61805), False, 'import zarr\n'),
((66499, 66585), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""GT"""', 'numbers': "{'GT': 3}", 'types': "{'GT': 'genotype_ac/i1'}"}), "(vcf_path, fields='GT', numbers={'GT': 3}, types={'GT':\n 'genotype_ac/i1'})\n", (66507, 66585), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((66613, 66768), 'numpy.array', 'np.array', (['[[(2, 0, 0), (3, 0, 0), (1, 0, 0)], [(0, 1, 0), (1, 1, 0), (1, 1, 1)], [(0,\n 0, 0), (0, 0, 0), (0, 0, 0)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)]]'], {}), '([[(2, 0, 0), (3, 0, 0), (1, 0, 0)], [(0, 1, 0), (1, 1, 0), (1, 1, \n 1)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)], [(0, 0, 0), (0, 0, 0), (0, 0, 0)]])\n', (66621, 66768), True, 'import numpy as np\n'),
((66838, 66862), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (66856, 66862), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((67744, 67768), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (67766, 67768), False, 'import warnings\n'),
((67773, 67803), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (67794, 67803), False, 'import warnings\n'),
((70107, 70131), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (70129, 70131), False, 'import warnings\n'),
((70136, 70167), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (70157, 70167), False, 'import warnings\n'),
((70255, 70301), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['MQ0FractionTest']"}), "(vcf_path, fields=['MQ0FractionTest'])\n", (70263, 70301), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((70371, 70457), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['MQ0FractionTest']", 'types': "{'MQ0FractionTest': 'Float'}"}), "(vcf_path, fields=['MQ0FractionTest'], types={'MQ0FractionTest':\n 'Float'})\n", (70379, 70457), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((70648, 70678), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""'}), "(vcf_path, fields='*')\n", (70656, 70678), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((70754, 70772), 'numpy.isnan', 'np.isnan', (['gq[0, 0]'], {}), '(gq[0, 0])\n', (70762, 70772), True, 'import numpy as np\n'),
((71071, 71129), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""calldata/*"""', 'numbers': "{'AD': 2}"}), "(vcf_path, fields='calldata/*', numbers={'AD': 2})\n", (71079, 71129), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((71513, 71585), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['calldata/GT', 'calldata/DP', 'calldata/GQ']"}), "(vcf_path, fields=['calldata/GT', 'calldata/DP', 'calldata/GQ'])\n", (71521, 71585), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((71945, 72003), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['AD', 'GT']", 'numbers': "{'AD': 2}"}), "(vcf_path, fields=['AD', 'GT'], numbers={'AD': 2})\n", (71953, 72003), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((72076, 72114), 'numpy.array', 'np.array', (['[[-1, -1], [0, -1], [1, -1]]'], {}), '([[-1, -1], [0, -1], [1, -1]])\n', (72084, 72114), True, 'import numpy as np\n'),
((72119, 72150), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'gt[:, 1]'], {}), '(e, gt[:, 1])\n', (72137, 72150), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((72159, 72193), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [1, 0]]'], {}), '([[1, 0], [1, 0], [1, 0]])\n', (72167, 72193), True, 'import numpy as np\n'),
((72198, 72229), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'ad[:, 1]'], {}), '(e, ad[:, 1])\n', (72216, 72229), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((72345, 72380), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.npz"""'], {}), "(tempdir, 'sample.npz')\n", (72357, 72380), False, 'import os\n'),
((72578, 72675), 'itertools.product', 'itertools.product', (['vcf_paths', 'region_values', 'tabix_values', 'samples_values', 'string_type_values'], {}), '(vcf_paths, region_values, tabix_values, samples_values,\n string_type_values)\n', (72595, 72675), False, 'import itertools\n'),
((73838, 73873), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.npz"""'], {}), "(tempdir, 'sample.npz')\n", (73850, 73873), False, 'import os\n'),
((73944, 73998), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, fields='*', exclude_fields=exclude)\n", (73952, 73998), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((74006, 74030), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (74020, 74030), False, 'import os\n'),
((74064, 74130), 'allel.io.vcf_read.vcf_to_npz', 'vcf_to_npz', (['vcf_path', 'npz_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, npz_path, fields='*', exclude_fields=exclude)\n", (74074, 74130), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((74144, 74180), 'numpy.load', 'np.load', (['npz_path'], {'allow_pickle': '(True)'}), '(npz_path, allow_pickle=True)\n', (74151, 74180), True, 'import numpy as np\n'),
((74565, 74600), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.npz"""'], {}), "(tempdir, 'sample.npz')\n", (74577, 74600), False, 'import os\n'),
((74748, 74800), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (74756, 74800), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((74808, 74832), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (74822, 74832), False, 'import os\n'),
((74866, 74930), 'allel.io.vcf_read.vcf_to_npz', 'vcf_to_npz', (['vcf_path', 'npz_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, npz_path, fields='*', rename_fields=rename)\n", (74876, 74930), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((74944, 74980), 'numpy.load', 'np.load', (['npz_path'], {'allow_pickle': '(True)'}), '(npz_path, allow_pickle=True)\n', (74951, 74980), True, 'import numpy as np\n'),
((75393, 75429), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (75405, 75429), False, 'import os\n'),
((75627, 75724), 'itertools.product', 'itertools.product', (['vcf_paths', 'region_values', 'tabix_values', 'samples_values', 'string_type_values'], {}), '(vcf_paths, region_values, tabix_values, samples_values,\n string_type_values)\n', (75644, 75724), False, 'import itertools\n'),
((77268, 77304), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (77280, 77304), False, 'import os\n'),
((77375, 77429), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, fields='*', exclude_fields=exclude)\n", (77383, 77429), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((77437, 77462), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (77451, 77462), False, 'import os\n'),
((77501, 77569), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, zarr_path, fields='*', exclude_fields=exclude)\n", (77512, 77569), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((77583, 77619), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (77598, 77619), False, 'import zarr\n'),
((78112, 78148), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (78124, 78148), False, 'import os\n'),
((78296, 78348), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (78304, 78348), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((78356, 78381), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (78370, 78381), False, 'import os\n'),
((78420, 78486), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (78431, 78486), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((78500, 78536), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (78515, 78536), False, 'import zarr\n'),
((78922, 78958), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (78934, 78958), False, 'import os\n'),
((79854, 79890), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (79866, 79890), False, 'import os\n'),
((80157, 80266), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': "['ALTLEN', 'altlen']", 'rename_fields': "{'altlen': 'variants/spam'}"}), "(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'], rename_fields\n ={'altlen': 'variants/spam'})\n", (80168, 80266), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((80371, 80407), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (80383, 80407), False, 'import os\n'),
((80415, 80440), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (80429, 80440), False, 'import os\n'),
((80674, 80710), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (80689, 80710), False, 'import zarr\n'),
((81391, 81427), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.zarr"""'], {}), "(tempdir, 'sample.zarr')\n", (81403, 81427), False, 'import os\n'),
((81505, 81562), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, types=types)\n", (81513, 81562), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((81570, 81595), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (81584, 81595), False, 'import os\n'),
((81634, 81725), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'types': 'types'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n types=types)\n", (81645, 81725), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((81751, 81787), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (81766, 81787), False, 'import zarr\n'),
((81983, 82016), 'os.path.join', 'os.path.join', (['tempdir', '"""ann.zarr"""'], {}), "(tempdir, 'ann.zarr')\n", (81995, 82016), False, 'import os\n'),
((82941, 82976), 'os.path.join', 'os.path.join', (['tempdir', '"""empty.zarr"""'], {}), "(tempdir, 'empty.zarr')\n", (82953, 82976), False, 'import os\n'),
((82981, 83013), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {}), '(vcf_path, zarr_path)\n', (82992, 83013), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((83170, 83204), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.h5"""'], {}), "(tempdir, 'sample.h5')\n", (83182, 83204), False, 'import os\n'),
((83402, 83499), 'itertools.product', 'itertools.product', (['vcf_paths', 'region_values', 'tabix_values', 'samples_values', 'string_type_values'], {}), '(vcf_paths, region_values, tabix_values, samples_values,\n string_type_values)\n', (83419, 83499), False, 'import itertools\n'),
((85020, 85054), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.h5"""'], {}), "(tempdir, 'sample.h5')\n", (85032, 85054), False, 'import os\n'),
((85125, 85179), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, fields='*', exclude_fields=exclude)\n", (85133, 85179), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((85187, 85210), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (85201, 85210), False, 'import os\n'),
((85243, 85309), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'exclude_fields': 'exclude'}), "(vcf_path, h5_path, fields='*', exclude_fields=exclude)\n", (85254, 85309), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((85857, 85891), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.h5"""'], {}), "(tempdir, 'sample.h5')\n", (85869, 85891), False, 'import os\n'),
((86039, 86091), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (86047, 86091), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((86099, 86122), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (86113, 86122), False, 'import os\n'),
((86155, 86219), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, h5_path, fields='*', rename_fields=rename)\n", (86166, 86219), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((86644, 86678), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.h5"""'], {}), "(tempdir, 'sample.h5')\n", (86656, 86678), False, 'import os\n'),
((86686, 86709), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (86700, 86709), False, 'import os\n'),
((87685, 87716), 'os.path.join', 'os.path.join', (['tempdir', '"""ann.h5"""'], {}), "(tempdir, 'ann.h5')\n", (87697, 87716), False, 'import os\n'),
((88580, 88614), 'os.path.join', 'os.path.join', (['tempdir', '"""sample.h5"""'], {}), "(tempdir, 'sample.h5')\n", (88592, 88614), False, 'import os\n'),
((89707, 89740), 'os.path.join', 'os.path.join', (['tempdir', '"""empty.h5"""'], {}), "(tempdir, 'empty.h5')\n", (89719, 89740), False, 'import os\n'),
((89745, 89775), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {}), '(vcf_path, h5_path)\n', (89756, 89775), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((92308, 92373), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'exclude_fields': 'exclude'}), '(vcf_path, fields=fields, exclude_fields=exclude)\n', (92324, 92373), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((94717, 94758), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields'}), '(vcf_path, fields=fields)\n', (94733, 94758), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((94774, 94807), 'os.path.join', 'os.path.join', (['tempdir', '"""test.csv"""'], {}), "(tempdir, 'test.csv')\n", (94786, 94807), False, 'import os\n'),
((94815, 94839), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (94829, 94839), False, 'import os\n'),
((94873, 94918), 'allel.io.vcf_read.vcf_to_csv', 'vcf_to_csv', (['vcf_path', 'csv_path'], {'fields': 'fields'}), '(vcf_path, csv_path, fields=fields)\n', (94883, 94918), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((94947, 94988), 'pandas.read_csv', 'pandas.read_csv', (['csv_path'], {'na_filter': '(True)'}), '(csv_path, na_filter=True)\n', (94962, 94988), False, 'import pandas\n'),
((95250, 95315), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'exclude_fields': 'exclude'}), '(vcf_path, fields=fields, exclude_fields=exclude)\n', (95266, 95315), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((95331, 95364), 'os.path.join', 'os.path.join', (['tempdir', '"""test.csv"""'], {}), "(tempdir, 'test.csv')\n", (95343, 95364), False, 'import os\n'),
((95372, 95396), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (95386, 95396), False, 'import os\n'),
((95430, 95499), 'allel.io.vcf_read.vcf_to_csv', 'vcf_to_csv', (['vcf_path', 'csv_path'], {'fields': 'fields', 'exclude_fields': 'exclude'}), '(vcf_path, csv_path, fields=fields, exclude_fields=exclude)\n', (95440, 95499), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((95528, 95569), 'pandas.read_csv', 'pandas.read_csv', (['csv_path'], {'na_filter': '(True)'}), '(csv_path, na_filter=True)\n', (95543, 95569), False, 'import pandas\n'),
((99255, 99319), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'exclude_fields': 'exclude'}), '(vcf_path, fields=fields, exclude_fields=exclude)\n', (99270, 99319), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((101212, 101238), 'allel.io.vcf_read.read_vcf_headers', 'read_vcf_headers', (['vcf_path'], {}), '(vcf_path)\n', (101228, 101238), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((822, 847), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (837, 847), False, 'import os\n'),
((6393, 6418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6406, 6418), False, 'import pytest\n'),
((6428, 6480), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (6436, 6480), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((6685, 6710), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6698, 6710), False, 'import pytest\n'),
((6720, 6772), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (6728, 6772), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((6931, 6956), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6944, 6956), False, 'import pytest\n'),
((6966, 7018), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (6974, 7018), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((7177, 7202), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7190, 7202), False, 'import pytest\n'),
((7212, 7264), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (7220, 7264), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((7423, 7448), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7436, 7448), False, 'import pytest\n'),
((7458, 7510), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (7466, 7510), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((7669, 7694), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7682, 7694), False, 'import pytest\n'),
((7704, 7756), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, fields='*', rename_fields=rename)\n", (7712, 7756), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((10091, 10135), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['callset1[k]', 'callset2[k]'], {}), '(callset1[k], callset2[k])\n', (10109, 10135), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((13235, 13251), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (13243, 13251), True, 'import numpy as np\n'),
((13488, 13504), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (13496, 13504), True, 'import numpy as np\n'),
((13649, 13665), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (13657, 13665), True, 'import numpy as np\n'),
((13806, 13822), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (13814, 13822), True, 'import numpy as np\n'),
((14889, 14905), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (14897, 14905), True, 'import numpy as np\n'),
((18922, 18938), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (18930, 18938), True, 'import numpy as np\n'),
((19258, 19274), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (19266, 19274), True, 'import numpy as np\n'),
((19480, 19496), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (19488, 19496), True, 'import numpy as np\n'),
((19648, 19664), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (19656, 19664), True, 'import numpy as np\n'),
((21992, 22008), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (22002, 22008), False, 'import io\n'),
((22027, 22074), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['POS', 'samples']"}), "(input_file, fields=['POS', 'samples'])\n", (22035, 22074), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((25388, 25404), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (25398, 25404), False, 'import io\n'),
((25423, 25471), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['QUAL', 'samples']"}), "(input_file, fields=['QUAL', 'samples'])\n", (25431, 25471), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((26139, 26155), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (26149, 26155), False, 'import io\n'),
((26174, 26261), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['FILTER_PASS', 'FILTER_q10', 'FILTER_s50', 'samples']"}), "(input_file, fields=['FILTER_PASS', 'FILTER_q10', 'FILTER_s50',\n 'samples'])\n", (26182, 26261), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((27284, 27300), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (27294, 27300), False, 'import io\n'),
((27900, 27914), 'numpy.isnan', 'np.isnan', (['a[1]'], {}), '(a[1])\n', (27908, 27914), True, 'import numpy as np\n'),
((27930, 27944), 'numpy.isnan', 'np.isnan', (['a[2]'], {}), '(a[2])\n', (27938, 27944), True, 'import numpy as np\n'),
((28362, 28378), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (28372, 28378), False, 'import io\n'),
((28978, 28992), 'numpy.isnan', 'np.isnan', (['a[1]'], {}), '(a[1])\n', (28986, 28992), True, 'import numpy as np\n'),
((29008, 29022), 'numpy.isnan', 'np.isnan', (['a[2]'], {}), '(a[2])\n', (29016, 29022), True, 'import numpy as np\n'),
((29470, 29486), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (29480, 29486), False, 'import io\n'),
((29505, 29629), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['calldata/GT', 'calldata/GQ', 'samples']", 'types': "{'calldata/GT': 'i1', 'calldata/GQ': 'i2'}"}), "(input_file, fields=['calldata/GT', 'calldata/GQ', 'samples'],\n types={'calldata/GT': 'i1', 'calldata/GQ': 'i2'})\n", (29513, 29629), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((30732, 30874), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['variants/DP', 'variants/AC']", 'types': "{'variants/DP': dtype, 'variants/AC': dtype}", 'numbers': "{'variants/AC': 3}"}), "(vcf_path, fields=['variants/DP', 'variants/AC'], types={\n 'variants/DP': dtype, 'variants/AC': dtype}, numbers={'variants/AC': 3})\n", (30740, 30874), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((31357, 31379), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (31367, 31379), False, 'import io\n'),
((31408, 31424), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (31416, 31424), True, 'import numpy as np\n'),
((31698, 31720), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (31708, 31720), False, 'import io\n'),
((31749, 31763), 'numpy.dtype', 'np.dtype', (['"""i4"""'], {}), "('i4')\n", (31757, 31763), True, 'import numpy as np\n'),
((32037, 32059), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (32047, 32059), False, 'import io\n'),
((32088, 32102), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (32096, 32102), True, 'import numpy as np\n'),
((32377, 32399), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (32387, 32399), False, 'import io\n'),
((32428, 32442), 'numpy.dtype', 'np.dtype', (['"""S1"""'], {}), "('S1')\n", (32436, 32442), True, 'import numpy as np\n'),
((32647, 32720), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['GT']", 'types': "{'GT': dtype}", 'numbers': "{'GT': 2}"}), "(vcf_path, fields=['GT'], types={'GT': dtype}, numbers={'GT': 2})\n", (32655, 32720), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((33276, 33298), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (33286, 33298), False, 'import io\n'),
((34271, 34344), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['HQ']", 'types': "{'HQ': dtype}", 'numbers': "{'HQ': 2}"}), "(vcf_path, fields=['HQ'], types={'HQ': dtype}, numbers={'HQ': 2})\n", (34279, 34344), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((39034, 39047), 'pytest.approx', 'approx', (['(0.333)'], {}), '(0.333)\n', (39040, 39047), False, 'from pytest import approx\n'),
((39249, 39262), 'pytest.approx', 'approx', (['(0.333)'], {}), '(0.333)\n', (39255, 39262), False, 'from pytest import approx\n'),
((39285, 39298), 'pytest.approx', 'approx', (['(0.667)'], {}), '(0.667)\n', (39291, 39298), False, 'from pytest import approx\n'),
((46099, 46160), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['samples', 'GT']", 'samples': 'samples'}), "(vcf_path, fields=['samples', 'GT'], samples=samples)\n", (46107, 46160), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((46535, 46596), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['samples', 'GT']", 'samples': 'samples'}), "(vcf_path, fields=['samples', 'GT'], samples=samples)\n", (46543, 46596), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((48174, 48192), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48182, 48192), True, 'import numpy as np\n'),
((48327, 48345), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48335, 48345), True, 'import numpy as np\n'),
((48518, 48536), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48526, 48536), True, 'import numpy as np\n'),
((48684, 48702), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48692, 48702), True, 'import numpy as np\n'),
((48852, 48870), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48860, 48870), True, 'import numpy as np\n'),
((49025, 49043), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49033, 49043), True, 'import numpy as np\n'),
((49203, 49221), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49211, 49221), True, 'import numpy as np\n'),
((49358, 49376), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49366, 49376), True, 'import numpy as np\n'),
((49476, 49494), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49484, 49494), True, 'import numpy as np\n'),
((49581, 49597), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (49589, 49597), True, 'import numpy as np\n'),
((49728, 49746), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49736, 49746), True, 'import numpy as np\n'),
((49880, 49898), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49888, 49898), True, 'import numpy as np\n'),
((50036, 50053), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50044, 50053), True, 'import numpy as np\n'),
((50187, 50204), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50195, 50204), True, 'import numpy as np\n'),
((50336, 50353), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50344, 50353), True, 'import numpy as np\n'),
((50486, 50503), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50494, 50503), True, 'import numpy as np\n'),
((50634, 50651), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50642, 50651), True, 'import numpy as np\n'),
((50782, 50799), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50790, 50799), True, 'import numpy as np\n'),
((50932, 50949), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50940, 50949), True, 'import numpy as np\n'),
((51224, 51242), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (51232, 51242), True, 'import numpy as np\n'),
((51455, 51472), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (51463, 51472), True, 'import numpy as np\n'),
((51660, 51677), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (51668, 51677), True, 'import numpy as np\n'),
((51845, 52003), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos']", 'types': "{'Allele': 'S12', 'ANN_HGVS_c': 'S20', 'variants/ANN_cDNA_pos': 'i8'}"}), "(fields=['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos'],\n types={'Allele': 'S12', 'ANN_HGVS_c': 'S20', 'variants/ANN_cDNA_pos': 'i8'}\n )\n", (51859, 52003), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((52356, 52371), 'numpy.dtype', 'np.dtype', (['"""S12"""'], {}), "('S12')\n", (52364, 52371), True, 'import numpy as np\n'),
((52505, 52520), 'numpy.dtype', 'np.dtype', (['"""S20"""'], {}), "('S20')\n", (52513, 52520), True, 'import numpy as np\n'),
((52659, 52673), 'numpy.dtype', 'np.dtype', (['"""i8"""'], {}), "('i8')\n", (52667, 52673), True, 'import numpy as np\n'),
((53800, 53825), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (53812, 53825), False, 'import pytest\n'),
((54053, 54078), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54065, 54078), False, 'import pytest\n'),
((54309, 54334), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54321, 54334), False, 'import pytest\n'),
((54567, 54592), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54579, 54592), False, 'import pytest\n'),
((54825, 54850), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54837, 54850), False, 'import pytest\n'),
((55086, 55111), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (55098, 55111), False, 'import pytest\n'),
((55358, 55380), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55368, 55380), False, 'import io\n'),
((55588, 55610), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55598, 55610), False, 'import io\n'),
((55816, 55838), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55826, 55838), False, 'import io\n'),
((56046, 56068), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (56056, 56068), False, 'import io\n'),
((56353, 56378), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (56365, 56378), False, 'import pytest\n'),
((56730, 56755), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (56742, 56755), False, 'import pytest\n'),
((57108, 57133), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (57120, 57133), False, 'import pytest\n'),
((57490, 57515), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (57502, 57515), False, 'import pytest\n'),
((57941, 57966), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (57953, 57966), False, 'import pytest\n'),
((58272, 58297), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (58284, 58297), False, 'import pytest\n'),
((58606, 58631), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (58618, 58631), False, 'import pytest\n'),
((58707, 58732), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (58719, 58732), False, 'import pytest\n'),
((60036, 60061), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (60048, 60061), False, 'import pytest\n'),
((60071, 60085), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (60079, 60085), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((60095, 60120), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (60107, 60120), False, 'import pytest\n'),
((60346, 60368), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (60356, 60368), False, 'import io\n'),
((60802, 60824), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (60812, 60824), False, 'import io\n'),
((61150, 61168), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (61159, 61168), False, 'import os\n'),
((61185, 61207), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (61195, 61207), False, 'import io\n'),
((61300, 61328), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (61309, 61328), False, 'import h5py\n'),
((61606, 61630), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (61619, 61630), False, 'import shutil\n'),
((61647, 61669), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (61657, 61669), False, 'import io\n'),
((62912, 63217), 'numpy.array', 'np.array', (["[[b'', b'', b'', b'', b''], [b'G', b'', b'', b'', b''], [b'', b'', b'', b'',\n b''], [b'T', b'', b'', b'', b''], [b'A', b'T', b'', b'', b''], [b'C',\n b'T', b'*', b'', b''], [b'A', b'GTAC', b'', b'', b''], [b'C', b'GATG',\n b'', b'', b''], [b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']]"], {}), "([[b'', b'', b'', b'', b''], [b'G', b'', b'', b'', b''], [b'', b'',\n b'', b'', b''], [b'T', b'', b'', b'', b''], [b'A', b'T', b'', b'', b''],\n [b'C', b'T', b'*', b'', b''], [b'A', b'GTAC', b'', b'', b''], [b'C',\n b'GATG', b'', b'', b''], [b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']])\n", (62920, 63217), True, 'import numpy as np\n'),
((63467, 63491), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (63485, 63491), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((63571, 63621), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[0, 1, 0, 1, 2, 3, 2, 2, 5]', 'a'], {}), '([0, 1, 0, 1, 2, 3, 2, 2, 5], a)\n', (63589, 63621), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((63707, 63883), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0], [0, 0, -1, 0, 0], [0, 3, 0, 0, 0], [-3, 0, 0, 0, 0], [0, 6, -\n 3, 1, -1]]'], {}), '([[0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0], [0, 0, -1, 0, 0], [0, 3, 0, 0, 0], [-3, 0, 0, 0, 0],\n [0, 6, -3, 1, -1]])\n', (63715, 63883), True, 'import numpy as np\n'),
((64059, 64083), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (64077, 64083), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((64204, 64293), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[False, False, False, True, True, False, False, False, False]', 'a'], {}), '([False, False, False, True, True, False, False, False, \n False], a)\n', (64222, 64293), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((64657, 64722), 'numpy.array', 'np.array', (["[b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC']"], {}), "([b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC'])\n", (64665, 64722), True, 'import numpy as np\n'),
((64808, 64832), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (64826, 64832), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((64912, 64962), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[0, 1, 0, 1, 2, 3, 2, 2, 5]', 'a'], {}), '([0, 1, 0, 1, 2, 3, 2, 2, 5], a)\n', (64930, 64962), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((65046, 65084), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 0, 0, -3, 0]'], {}), '([0, 1, 0, 0, 0, 0, 0, -3, 0])\n', (65054, 65084), True, 'import numpy as np\n'),
((65093, 65117), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (65111, 65117), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((65238, 65327), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[False, False, False, True, True, False, False, False, False]', 'a'], {}), '([False, False, False, True, True, False, False, False, \n False], a)\n', (65256, 65327), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((66078, 66282), 'numpy.array', 'np.array', (['[[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], [[1, 1, 1, 0], [0, 0, 0, 2], [\n 0, 0, 0, 0]], [[1, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]], [[1, \n 2, 0, 1],\n [0, 2, 2, 0], [2, 1, 1, 1]]]'], {'dtype': 't'}), '([[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], [[1, 1, 1, 0], [0, 0,\n 0, 2], [0, 0, 0, 0]], [[1, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]], [[1, \n 2, 0, 1], [0, 2, 2, 0], [2, 1, 1, 1]]], dtype=t)\n', (66086, 66282), True, 'import numpy as np\n'),
((66417, 66441), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (66435, 66441), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((66988, 67041), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': '"""chr1:10-100"""', 'tabix': 'tabix'}), "(vcf_path, region='chr1:10-100', tabix=tabix)\n", (66996, 67041), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((67121, 67154), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[20, 30]', 'pos'], {}), '([20, 30], pos)\n', (67139, 67154), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'),
((67231, 67253), 'pytest.raises', 'pytest.raises', (['OSError'], {}), '(OSError)\n', (67244, 67253), False, 'import pytest\n'),
((67263, 67277), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67271, 67277), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((67362, 67394), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (67375, 67394), False, 'import pytest\n'),
((67404, 67418), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67412, 67418), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((67506, 67538), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (67519, 67538), False, 'import pytest\n'),
((67548, 67562), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67556, 67562), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((67661, 67688), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (67674, 67688), False, 'import pytest\n'),
((67698, 67712), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67706, 67712), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((68504, 68529), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (68516, 68529), False, 'import pytest\n'),
((69266, 69291), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (69278, 69291), False, 'import pytest\n'),
((70034, 70059), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (70046, 70059), False, 'import pytest\n'),
((70488, 70500), 'pytest.approx', 'approx', (['(0.03)'], {}), '(0.03)\n', (70494, 70500), False, 'from pytest import approx\n'),
((70784, 70796), 'pytest.approx', 'approx', (['(48.2)'], {}), '(48.2)\n', (70790, 70796), False, 'from pytest import approx\n'),
((70820, 70832), 'pytest.approx', 'approx', (['(48.1)'], {}), '(48.1)\n', (70826, 70832), False, 'from pytest import approx\n'),
((70856, 70868), 'pytest.approx', 'approx', (['(43.9)'], {}), '(43.9)\n', (70862, 70868), False, 'from pytest import approx\n'),
((70892, 70904), 'pytest.approx', 'approx', (['(49.0)'], {}), '(49.0)\n', (70898, 70904), False, 'from pytest import approx\n'),
((70927, 70938), 'pytest.approx', 'approx', (['(3.0)'], {}), '(3.0)\n', (70933, 70938), False, 'from pytest import approx\n'),
((70961, 70973), 'pytest.approx', 'approx', (['(41.0)'], {}), '(41.0)\n', (70967, 70973), False, 'from pytest import approx\n'),
((72882, 72988), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, region=region, tabix=tabix,\n samples=samples, types=types)\n", (72890, 72988), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((73024, 73048), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (73038, 73048), False, 'import os\n'),
((73090, 73224), 'allel.io.vcf_read.vcf_to_npz', 'vcf_to_npz', (['vcf_path', 'npz_path'], {'fields': '"""*"""', 'chunk_length': '(2)', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, npz_path, fields='*', chunk_length=2, alt_number=2,\n region=region, tabix=tabix, samples=samples, types=types)\n", (73100, 73224), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((74040, 74059), 'os.remove', 'os.remove', (['npz_path'], {}), '(npz_path)\n', (74049, 74059), False, 'import os\n'),
((74842, 74861), 'os.remove', 'os.remove', (['npz_path'], {}), '(npz_path)\n', (74851, 74861), False, 'import os\n'),
((75931, 76037), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, region=region, tabix=tabix,\n samples=samples, types=types)\n", (75939, 76037), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((76073, 76098), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (76087, 76098), False, 'import os\n'),
((76145, 76281), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n region=region, tabix=tabix, samples=samples, types=types)\n", (76156, 76281), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((77472, 77496), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (77485, 77496), False, 'import shutil\n'),
((77713, 77733), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (77727, 77733), False, 'from allel.test.tools import compare_arrays\n'),
((78391, 78415), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (78404, 78415), False, 'import shutil\n'),
((78630, 78650), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (78644, 78650), False, 'from allel.test.tools import compare_arrays\n'),
((79120, 79145), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79133, 79145), False, 'import pytest\n'),
((79155, 79221), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (79166, 79221), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((79380, 79405), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79393, 79405), False, 'import pytest\n'),
((79415, 79481), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (79426, 79481), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((79640, 79665), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79653, 79665), False, 'import pytest\n'),
((79675, 79741), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (79686, 79741), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((79900, 79925), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79913, 79925), False, 'import pytest\n'),
((79935, 79996), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': "['ALTLEN', 'altlen']"}), "(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'])\n", (79946, 79996), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((80006, 80031), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (80019, 80031), False, 'import pytest\n'),
((80041, 80120), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': "['variants/ALTLEN', 'variants/altlen']"}), "(vcf_path, zarr_path, fields=['variants/ALTLEN', 'variants/altlen'])\n", (80052, 80120), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((80450, 80474), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (80463, 80474), False, 'import shutil\n'),
((80539, 80644), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'chrom', 'group': 'chrom'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n region=chrom, group=chrom)\n", (80550, 80644), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((80865, 80923), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'chrom'}), "(vcf_path, fields='*', alt_number=2, region=chrom)\n", (80873, 80923), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((81605, 81629), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (81618, 81629), False, 'import shutil\n'),
((81877, 81897), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (81891, 81897), False, 'from allel.test.tools import compare_arrays\n'),
((82373, 82462), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, fields='*', alt_number=2, types=types, transformers=\n transformers)\n", (82381, 82462), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((82497, 82522), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (82511, 82522), False, 'import os\n'),
((82569, 82687), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n types=types, transformers=transformers)\n", (82580, 82687), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((82721, 82757), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (82736, 82757), False, 'import zarr\n'),
((83029, 83054), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (83043, 83054), False, 'import os\n'),
((83706, 83812), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, region=region, tabix=tabix,\n samples=samples, types=types)\n", (83714, 83812), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((83848, 83871), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (83862, 83871), False, 'import os\n'),
((83912, 84046), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,\n region=region, tabix=tabix, samples=samples, types=types)\n", (83923, 84046), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'),
((85220, 85238), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (85229, 85238), False, 'import os\n'),
((85319,
85347), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (85328, 85347), False, 'import h5py\n'), ((86132, 86150), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (86141, 86150), False, 'import os\n'), ((86229, 86257), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (86238, 86257), False, 'import h5py\n'), ((86719, 86737), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (86728, 86737), False, 'import os\n'), ((86802, 86905), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'chrom', 'group': 'chrom'}), "(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,\n region=chrom, group=chrom)\n", (86813, 86905), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((86931, 86959), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (86940, 86959), False, 'import h5py\n'), ((88073, 88143), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, fields='*', types=types, transformers=transformers)\n", (88081, 88143), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88155, 88178), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (88169, 88178), False, 'import os\n'), ((88219, 88321), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, h5_path, fields='*', chunk_length=2, types=types,\n transformers=transformers)\n", (88230, 88321), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88794, 88854), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, types=types)\n', (88802, 88854), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88866, 88889), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (88880, 88889), False, 'import os\n'), ((88930, 89034), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': 'fields', 'alt_number': '(2)', 'chunk_length': '(3)', 'types': 'types', 'vlen': '(False)'}), '(vcf_path, h5_path, fields=fields, alt_number=2, chunk_length=3,\n types=types, vlen=False)\n', (88941, 89034), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((89791, 89814), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (89805, 89814), False, 'import os\n'), ((90746, 90823), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, 
types=types)\n', (90754, 90823), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((90864, 90969), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (90880, 90969), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((91562, 91639), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, types=types)\n', (91570, 91639), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((91680, 91785), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (91696, 91785), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((93073, 93167), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, types=types,\n transformers=transformers)\n', (93081, 93167), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((93204, 93322), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, chunk_length=2,\n types=types, transformers=transformers)\n', (93220, 93322), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((94011, 94116), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types', 'chunk_length': '(2)'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n types=types, chunk_length=2)\n', (94027, 94116), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((94162, 94195), 'os.path.join', 'os.path.join', (['tempdir', '"""test.csv"""'], {}), "(tempdir, 'test.csv')\n", (94174, 94195), False, 'import os\n'), ((94207, 94231), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (94221, 94231), False, 'import os\n'), ((94273, 94382), 'allel.io.vcf_read.vcf_to_csv', 'vcf_to_csv', (['vcf_path', 'csv_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 
'types': 'types', 'chunk_length': '(2)'}), '(vcf_path, csv_path, fields=fields, alt_number=2, numbers=numbers,\n types=types, chunk_length=2)\n', (94283, 94382), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((94434, 94475), 'pandas.read_csv', 'pandas.read_csv', (['csv_path'], {'na_filter': '(True)'}), '(csv_path, na_filter=True)\n', (94449, 94475), False, 'import pandas\n'), ((94849, 94868), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (94858, 94868), False, 'import os\n'), ((95077, 95120), 'allel.test.tools.compare_arrays', 'compare_arrays', (['df[k].values', 'adf[k].values'], {}), '(df[k].values, adf[k].values)\n', (95091, 95120), False, 'from allel.test.tools import compare_arrays\n'), ((95406, 95425), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (95415, 95425), False, 'import os\n'), ((96142, 96260), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'chunk_length': '(2)', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, types=types,\n chunk_length=2, transformers=transformers)\n', (96158, 96260), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((96306, 96339), 'os.path.join', 'os.path.join', (['tempdir', '"""test.csv"""'], {}), "(tempdir, 'test.csv')\n", (96318, 96339), False, 'import os\n'), ((96351, 96375), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (96365, 96375), False, 'import os\n'), ((96417, 96539), 'allel.io.vcf_read.vcf_to_csv', 'vcf_to_csv', (['vcf_path', 'csv_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'chunk_length': '(2)', 'transformers': 'transformers'}), '(vcf_path, csv_path, fields=fields, numbers=numbers, types=types,\n chunk_length=2, transformers=transformers)\n', (96427, 96539), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((96591, 96632), 'pandas.read_csv', 'pandas.read_csv', (['csv_path'], {'na_filter': '(True)'}), '(csv_path, na_filter=True)\n', (96606, 96632), False, 'import pandas\n'), ((97072, 97149), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, types=types)\n', (97080, 97149), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((97189, 97293), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (97204, 97293), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((98212, 98289), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 
'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, types=types)\n', (98220, 98289), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((98329, 98433), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (98344, 98433), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((100031, 100125), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, types=types,\n transformers=transformers)\n', (100039, 100125), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((100161, 100278), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, chunk_length=2,\n types=types, transformers=transformers)\n', (100176, 100278), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((17719, 17757), 'gzip.open', 'gzip.open', (["(vcf_path + '.gz')"], {'mode': '"""rb"""'}), "(vcf_path + '.gz', mode='rb')\n", (17728, 17757), False, 'import gzip\n'), ((17781, 17797), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (17791, 17797), False, 'import io\n'), ((21147, 21163), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (21157, 21163), False, 'import io\n'), ((21186, 21265), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['CHROM', 'samples']", 'types': "{'CHROM': string_type}"}), "(input_file, fields=['CHROM', 'samples'], types={'CHROM': string_type})\n", (21194, 21265), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((22654, 22670), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (22664, 22670), False, 'import io\n'), ((22693, 22766), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['ID', 'samples']", 'types': "{'ID': string_type}"}), "(input_file, fields=['ID', 'samples'], types={'ID': string_type})\n", (22701, 22766), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((23564, 23580), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (23574, 23580), False, 'import io\n'), ((23603, 23678), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['REF', 'samples']", 'types': "{'REF': string_type}"}), "(input_file, fields=['REF', 'samples'], types={'REF': string_type})\n", (23611, 23678), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, 
vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((24481, 24497), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (24491, 24497), False, 'import io\n'), ((25714, 25725), 'pytest.approx', 'approx', (['(1.2)'], {}), '(1.2)\n', (25720, 25725), False, 'from pytest import approx\n'), ((25749, 25760), 'pytest.approx', 'approx', (['(3.4)'], {}), '(3.4)\n', (25755, 25760), False, 'from pytest import approx\n'), ((27865, 27876), 'pytest.approx', 'approx', (['(1.2)'], {}), '(1.2)\n', (27871, 27876), False, 'from pytest import approx\n'), ((28943, 28954), 'pytest.approx', 'approx', (['(1.2)'], {}), '(1.2)\n', (28949, 28954), False, 'from pytest import approx\n'), ((30939, 30954), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (30947, 30954), True, 'import numpy as np\n'), ((32763, 32778), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (32771, 32778), True, 'import numpy as np\n'), ((34387, 34402), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (34395, 34402), True, 'import numpy as np\n'), ((41144, 41190), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (41152, 41190), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((41382, 41403), 'numpy.all', 'np.all', (["(chrom == '19')"], {}), "(chrom == '19')\n", (41388, 41403), True, 'import numpy as np\n'), ((41449, 41484), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[111, 112]', 'pos'], {}), '([111, 112], pos)\n', (41467, 41484), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((41534, 41580), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (41542, 41580), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((41772, 41793), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (41778, 41793), True, 'import numpy as np\n'), ((41839, 41914), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[14370, 17330, 1110696, 1230237, 1234567, 1235237]', 'pos'], {}), '([14370, 17330, 1110696, 1230237, 1234567, 1235237], pos)\n', (41857, 41914), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((41963, 42009), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (41971, 42009), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((42201, 42221), 'numpy.all', 'np.all', (["(chrom == 'X')"], {}), "(chrom == 'X')\n", (42207, 42221), True, 'import numpy as np\n'), ((42267, 42296), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[10]', 'pos'], {}), '([10], pos)\n', (42285, 42296), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((42345, 42391), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (42353, 42391), False, 'from allel.io.vcf_read import iter_vcf_chunks, 
read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((42485, 42531), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (42493, 42531), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((42723, 42744), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (42729, 42744), True, 'import numpy as np\n'), ((42790, 42829), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[14370, 17330]', 'pos'], {}), '([14370, 17330], pos)\n', (42808, 42829), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((42895, 42941), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (42903, 42941), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((43133, 43154), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (43139, 43154), True, 'import numpy as np\n'), ((43200, 43243), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[1110696, 1230237]', 'pos'], {}), '([1110696, 1230237], pos)\n', (43218, 43243), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((43309, 43355), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (43317, 43355), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((43547, 43568), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (43553, 43568), True, 'import numpy as np\n'), ((43614, 43657), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[1234567, 1235237]', 'pos'], {}), '([1234567, 1235237], pos)\n', (43632, 43657), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((53844, 53866), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (53854, 53866), False, 'import io\n'), ((54097, 54119), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54107, 54119), False, 'import io\n'), ((54353, 54375), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54363, 54375), False, 'import io\n'), ((54611, 54633), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54621, 54633), False, 'import io\n'), ((54869, 54891), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54879, 54891), False, 'import io\n'), ((55130, 55152), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55140, 55152), False, 'import io\n'), ((56397, 56419), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (56407, 56419), False, 'import io\n'), ((56774, 56796), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (56784, 56796), False, 'import io\n'), ((57152, 57174), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (57162, 57174), False, 'import io\n'), ((57534, 57556), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', 
(57544, 57556), False, 'import io\n'), ((57985, 58007), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (57995, 58007), False, 'import io\n'), ((58316, 58338), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (58326, 58338), False, 'import io\n'), ((58650, 58672), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (58660, 58672), False, 'import io\n'), ((58751, 58773), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (58761, 58773), False, 'import io\n'), ((60139, 60161), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (60149, 60161), False, 'import io\n'), ((62643, 62665), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (62653, 62665), False, 'import io\n'), ((64170, 64184), 'numpy.dtype', 'np.dtype', (['bool'], {}), '(bool)\n', (64178, 64184), True, 'import numpy as np\n'), ((64390, 64412), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (64400, 64412), False, 'import io\n'), ((65204, 65218), 'numpy.dtype', 'np.dtype', (['bool'], {}), '(bool)\n', (65212, 65218), True, 'import numpy as np\n'), ((65866, 65888), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (65876, 65888), False, 'import io\n'), ((68548, 68570), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (68558, 68570), False, 'import io\n'), ((69310, 69332), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (69320, 69332), False, 'import io\n'), ((70078, 70100), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (70088, 70100), False, 'import io\n'), ((73062, 73081), 'os.remove', 'os.remove', (['npz_path'], {}), '(npz_path)\n', (73071, 73081), False, 'import os\n'), ((73352, 73388), 'numpy.load', 'np.load', (['npz_path'], {'allow_pickle': '(True)'}), '(npz_path, allow_pickle=True)\n', (73359, 73388), True, 'import numpy as np\n'), ((74269, 74322), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (74294, 74322), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((74349, 74395), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (74367, 74395), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((75069, 75122), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (75094, 75122), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((75149, 75195), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (75167, 75195), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((76112, 76136), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (76125, 76136), False, 'import shutil\n'), ((76411, 76447), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (76426, 76447), False, 'import zarr\n'), ((81036, 81056), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (81050, 81056), False, 'from allel.test.tools import compare_arrays\n'), ((82164, 82271), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': 
"{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (82178, 82271), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((82536, 82560), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (82549, 82560), False, 'import shutil\n'), ((82806, 82851), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (82820, 82851), False, 'from allel.test.tools import compare_arrays\n'), ((83885, 83903), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (83894, 83903), False, 'import os\n'), ((85407, 85452), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (85421, 85452), False, 'from allel.test.tools import compare_arrays\n'), ((86317, 86362), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (86331, 86362), False, 'from allel.test.tools import compare_arrays\n'), ((87141, 87199), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'chrom'}), "(vcf_path, fields='*', alt_number=2, region=chrom)\n", (87149, 87199), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((87864, 87971), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (87878, 87971), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88192, 88210), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (88201, 88210), False, 'import os\n'), ((88351, 88379), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (88360, 88379), False, 'import h5py\n'), ((88903, 88921), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (88912, 88921), False, 'import os\n'), ((89064, 89092), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (89073, 89092), False, 'import h5py\n'), ((91199, 91215), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (91207, 91215), True, 'import numpy as np\n'), ((91252, 91268), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (91260, 91268), True, 'import numpy as np\n'), ((92045, 92061), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (92053, 92061), True, 'import numpy as np\n'), ((92098, 92114), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (92106, 92114), True, 'import numpy as np\n'), ((92865, 92972), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (92879, 92972), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, 
ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((93620, 93636), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (93628, 93636), True, 'import numpy as np\n'), ((93673, 93689), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (93681, 93689), True, 'import numpy as np\n'), ((94245, 94264), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (94254, 94264), False, 'import os\n'), ((94576, 94619), 'allel.test.tools.compare_arrays', 'compare_arrays', (['df[k].values', 'adf[k].values'], {}), '(df[k].values, adf[k].values)\n', (94590, 94619), False, 'from allel.test.tools import compare_arrays\n'), ((95939, 96046), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (95953, 96046), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((96389, 96408), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (96398, 96408), False, 'import os\n'), ((96733, 96776), 'allel.test.tools.compare_arrays', 'compare_arrays', (['df[k].values', 'adf[k].values'], {}), '(df[k].values, adf[k].values)\n', (96747, 96776), False, 'from allel.test.tools import compare_arrays\n'), ((97461, 97482), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (97469, 97482), True, 'import numpy as np\n'), ((98625, 98646), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (98633, 98646), True, 'import numpy as np\n'), ((99823, 99930), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (99837, 99930), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((100514, 100535), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (100522, 100535), True, 'import numpy as np\n'), ((100571, 100592), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (100579, 100592), True, 'import numpy as np\n'), ((47090, 47106), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {}), '()\n', (47104, 47106), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((51126, 51142), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {}), '()\n', (51140, 51142), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((73292, 73316), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (73306, 73316), False, 'import os\n'), ((76350, 76375), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (76364, 76375), False, 'import os\n'), ((76573, 76593), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (76587, 76593), False, 'from allel.test.tools import compare_arrays\n'), ((84115, 84138), 'os.path.exists', 
'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (84129, 84138), False, 'import os\n'), ((84170, 84198), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (84179, 84198), False, 'import h5py\n'), ((87328, 87348), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (87342, 87348), False, 'from allel.test.tools import compare_arrays\n'), ((88447, 88492), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (88461, 88492), False, 'from allel.test.tools import compare_arrays\n'), ((90288, 90322), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'df[name].values'], {}), '(e, df[name].values)\n', (90302, 90322), False, 'from allel.test.tools import compare_arrays\n'), ((73501, 73554), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (73526, 73554), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((73597, 73643), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (73615, 73643), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((84274, 84319), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (84288, 84319), False, 'from allel.test.tools import compare_arrays\n'), ((89212, 89266), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expect[key]', 'actual[key][:]'], {}), '(expect[key], actual[key][:])\n', (89237, 89266), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((97697, 97727), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a[name]'], {}), '(e, a[name])\n', (97715, 97727), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((98861, 98891), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a[name]'], {}), '(e, a[name])\n', (98879, 98891), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((100797, 100827), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a[name]'], {}), '(e, a[name])\n', (100815, 100827), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((89572, 89619), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expect[key]', 'actual[key][:]'], {}), '(expect[key], actual[key][:])\n', (89590, 89619), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((90417, 90469), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e[:, i]', "df['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], df['%s_%s' % (name, i + 1)])\n", (90431, 90469), False, 'from allel.test.tools import compare_arrays\n'), ((97834, 97889), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e[:, i]', "a['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], a['%s_%s' % (name, i + 1)])\n", (97852, 97889), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((98998, 99053), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e[:, i]', "a['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], a['%s_%s' % (name, i + 1)])\n", (99016, 99053), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((100934, 100989), 
'numpy.testing.assert_array_equal', 'assert_array_equal', (['e[:, i]', "a['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], a['%s_%s' % (name, i + 1)])\n", (100952, 100989), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n')]
|
# -*- coding: utf-8 -*-
import os
import unittest
from datetime import datetime
from AbstractHandle.Utils.TokenCache import TokenCache
class TokenCacheTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.token = os.environ.get('KB_AUTH_TOKEN', None)
CACHE_EXPIRE_TIME = 300
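        # TokenCache(maxsize, ttl) is assumed here: keep at most 1000
        # entries, each for CACHE_EXPIRE_TIME seconds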
cls.token_cache = TokenCache(1000, CACHE_EXPIRE_TIME)
@classmethod
def tearDownClass(cls):
cls.token_cache.clear()
print('Finished testing TokenCacheUtil')
def test_token_cache(self):
        # empty token cache at the beginning
self.assertIsNone(self.token_cache.get(self.token))
self.assertEqual(len(self.token_cache), 0)
# insert token into cache
current_time = int(datetime.utcnow().timestamp()*1000)
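        # 'expires' is an epoch timestamp in milliseconds; a value in the
        # future keeps the entry retrievable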
token_info = {'customroles': 'test_<PASSWORD>',
'expires': current_time + 1000}
self.token_cache[self.token] = token_info
# test inserted token info
self.assertCountEqual(self.token_cache.keys(), [self.token])
fetched_token_info = self.token_cache.get(self.token)
self.assertDictEqual(token_info, fetched_token_info)
        # test expired token: a stale entry should no longer be retrievable
current_time = int(datetime.utcnow().timestamp()*1000)
token_info = {'customroles': '<PASSWORD>',
'expires': current_time - 1000}
self.token_cache[self.token] = token_info
self.assertIsNone(self.token_cache.get(self.token))
|
[
"os.environ.get",
"datetime.datetime.utcnow",
"AbstractHandle.Utils.TokenCache.TokenCache"
] |
[((242, 279), 'os.environ.get', 'os.environ.get', (['"""KB_AUTH_TOKEN"""', 'None'], {}), "('KB_AUTH_TOKEN', None)\n", (256, 279), False, 'import os\n'), ((338, 373), 'AbstractHandle.Utils.TokenCache.TokenCache', 'TokenCache', (['(1000)', 'CACHE_EXPIRE_TIME'], {}), '(1000, CACHE_EXPIRE_TIME)\n', (348, 373), False, 'from AbstractHandle.Utils.TokenCache import TokenCache\n'), ((752, 769), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (767, 769), False, 'from datetime import datetime\n'), ((1233, 1250), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1248, 1250), False, 'from datetime import datetime\n')]
|
import os
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase
from ralph.accounts.models import Region
from ralph.assets.models.assets import (
AssetModel,
Environment,
Service,
ServiceEnvironment
)
from ralph.assets.models.choices import ObjectModelType
from ralph.back_office.models import BackOfficeAsset, Warehouse
from ralph.data_center.models.physical import DataCenter, Rack, ServerRoom
from ralph.data_center.tests.factories import DataCenterFactory
from ralph.data_importer.management.commands import importer
from ralph.data_importer.models import ImportedObjects
from ralph.data_importer.resources import AssetModelResource
class DataImporterTestCase(TestCase):
"""TestCase data importer command."""
def setUp(self): # noqa
self.base_dir = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
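        # Create fixture objects and register their old primary keys in
        # ImportedObjects so the sample CSVs can reference them by old id.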
asset_model = AssetModel()
asset_model.name = "asset_model_1"
asset_model.type = ObjectModelType.back_office
asset_model.save()
asset_content_type = ContentType.objects.get_for_model(AssetModel)
ImportedObjects.objects.create(
content_type=asset_content_type,
object_pk=asset_model.pk,
old_object_pk=1
)
warehouse = Warehouse()
warehouse.name = "warehouse_1"
warehouse.save()
warehouse_content_type = ContentType.objects.get_for_model(Warehouse)
ImportedObjects.objects.create(
content_type=warehouse_content_type,
object_pk=warehouse.pk,
old_object_pk=1
)
environment = Environment()
environment.name = "environment_1"
environment.save()
service = Service()
service.name = "service_1"
service.save()
service_environment = ServiceEnvironment()
service_environment.environment = environment
service_environment.service = service
service_environment.save()
region = Region(name='region_1')
region.save()
region_content_type = ContentType.objects.get_for_model(region)
ImportedObjects.objects.create(
content_type=region_content_type,
object_pk=region.pk,
old_object_pk=1
)
user_model = get_user_model()
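        # users referenced by tests/samples/regions.csv (membership is
        # asserted in test_importer_command_regions)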
for user in ('iron.man', 'superman', 'james.bond', 'sherlock.holmes'):
user_model.objects.create(username=user)
def test_get_resource(self):
"""Test get resources method."""
asset_model_resource = importer.get_resource('AssetModel')
self.assertIsInstance(asset_model_resource, AssetModelResource)
def test_importer_command_warehouse(self):
"""Test importer management command with Warehouse model."""
warehouse_csv = os.path.join(
self.base_dir,
'tests/samples/warehouses.csv'
)
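        # map_imported_id_to_new_id=True should record old-id -> new-pk
        # mappings in ImportedObjects for the imported rows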
management.call_command(
'importer',
warehouse_csv,
type='file',
model_name='Warehouse',
map_imported_id_to_new_id=True
)
self.assertTrue(Warehouse.objects.filter(
name="Poznań"
).exists())
def test_importer_command_back_office_asset(self):
"""Test importer management command with BackOfficeAsset model."""
back_office_csv = os.path.join(
self.base_dir,
'tests/samples/back_office_assets.csv'
)
management.call_command(
'importer',
back_office_csv,
type='file',
model_name='BackOfficeAsset',
map_imported_id_to_new_id=True
)
self.assertTrue(BackOfficeAsset.objects.filter(
sn="bo_asset_sn"
).exists())
back_office_asset = BackOfficeAsset.objects.get(sn="bo_asset_sn")
self.assertEqual(
back_office_asset.warehouse.name,
"warehouse_1"
)
self.assertEqual(
back_office_asset.model.name,
"asset_model_1"
)
self.assertEqual(
back_office_asset.service_env.service.name,
"service_1"
)
def test_importer_command_regions(self):
"""Test importer management command with BackOfficeAsset model."""
old_regions_count = Region.objects.count()
regions_csv = os.path.join(
self.base_dir,
'tests/samples/regions.csv'
)
management.call_command(
'importer',
regions_csv,
type='file',
model_name='Region',
map_imported_id_to_new_id=True
)
self.assertEqual(Region.objects.count(), old_regions_count + 2)
region_1 = Region.objects.get(name='USA')
for user in ('iron.man', 'superman'):
self.assertIn(
user, region_1.users.values_list('username', flat=True)
)
def test_importer_command_with_tab(self):
"""Test importer management command with Warehouse model and
        a tab-separated file
"""
warehouse_csv = os.path.join(
self.base_dir,
'tests/samples/warehouses_tab.csv'
)
management.call_command(
'importer',
warehouse_csv,
type='file',
model_name='Warehouse',
delimiter='\t',
map_imported_id_to_new_id=True
)
self.assertTrue(Warehouse.objects.filter(
name="Barcelona"
).exists())
def test_importer_command_with_skipid(self):
"""Test importer management command with Warehouse model and
        the --skipid option on a comma-separated file
"""
warehouse_csv = os.path.join(
self.base_dir,
'tests/samples/warehouses_skipid.csv'
)
management.call_command(
'importer',
warehouse_csv,
'--skipid',
type='file',
model_name='Warehouse',
delimiter=',',
map_imported_id_to_new_id=True
)
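        # with --skipid the CSV id column (200) must not become the new
        # primary key, but the old id is still tracked in ImportedObjects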
warehouse = Warehouse.objects.filter(name="Cupertino").first()
self.assertNotEqual(warehouse.pk, 200)
warehouse_content_type = ContentType.objects.get_for_model(Warehouse)
warehouse_exists = ImportedObjects.objects.filter(
content_type=warehouse_content_type,
old_object_pk=200
).exists()
self.assertTrue(warehouse_exists)
def test_importer_command_with_semicolon(self):
"""Test importer management command with Warehouse model and
        a semicolon-separated file
"""
warehouse_csv = os.path.join(
self.base_dir,
'tests/samples/warehouses_semicolon.csv'
)
management.call_command(
'importer',
warehouse_csv,
type='file',
model_name='Warehouse',
delimiter=';',
map_imported_id_to_new_id=True
)
self.assertTrue(Warehouse.objects.filter(
name="Berlin"
).exists())
def test_imported_object(self):
"""Test importer management command with ImportedObjects model."""
data_center = DataCenterFactory(name='CSV_test')
data_center_content_type = ContentType.objects.get_for_model(
DataCenter
)
ImportedObjects.objects.create(
content_type=data_center_content_type,
object_pk=data_center.pk,
old_object_pk=1
)
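        # register the new DataCenter under old id 1 so rows in
        # server_room.csv can resolve it by that old id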
server_room_csv = os.path.join(
self.base_dir,
'tests/samples/server_room.csv'
)
rack_csv = os.path.join(
self.base_dir,
'tests/samples/rack.csv'
)
management.call_command(
'importer',
server_room_csv,
type='file',
model_name='ServerRoom',
delimiter=',',
map_imported_id_to_new_id=True
)
content_type = ContentType.objects.get_for_model(ServerRoom)
imported_object_exists = ImportedObjects.objects.filter(
content_type=content_type,
old_object_pk=1
).exists()
self.assertTrue(imported_object_exists)
management.call_command(
'importer',
rack_csv,
type='file',
model_name='Rack',
delimiter=',',
map_imported_id_to_new_id=True
)
self.assertTrue(Rack.objects.filter(
name="Rack_csv_test"
).exists())
def test_from_dir_command(self):
warehouse_dir = os.path.join(
self.base_dir,
'tests/samples/warehouses'
)
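        # type='dir' should import every sample file in the directory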
management.call_command(
'importer',
warehouse_dir,
type='dir',
map_imported_id_to_new_id=True
)
self.assertTrue(Warehouse.objects.filter(
name="From dir Warszawa"
).exists())
self.assertTrue(Warehouse.objects.filter(
name="From dir London"
).exists())
def test_from_zipfile_command(self):
warehouse_zip = os.path.join(
self.base_dir,
'tests/samples/warehouses.zip'
)
management.call_command(
'importer',
warehouse_zip,
type='zip',
map_imported_id_to_new_id=True
)
self.assertTrue(Warehouse.objects.filter(
name="From zip Warszawa"
).exists())
self.assertTrue(Warehouse.objects.filter(
name="From zip London"
).exists())
|
[
"ralph.assets.models.assets.ServiceEnvironment",
"ralph.accounts.models.Region.objects.get",
"ralph.assets.models.assets.AssetModel",
"os.path.join",
"os.path.abspath",
"ralph.data_importer.models.ImportedObjects.objects.filter",
"ralph.back_office.models.Warehouse",
"django.core.management.call_command",
"ralph.assets.models.assets.Service",
"ralph.data_center.tests.factories.DataCenterFactory",
"ralph.back_office.models.BackOfficeAsset.objects.get",
"ralph.back_office.models.Warehouse.objects.filter",
"ralph.accounts.models.Region",
"ralph.data_importer.management.commands.importer.get_resource",
"ralph.assets.models.assets.Environment",
"ralph.accounts.models.Region.objects.count",
"ralph.data_importer.models.ImportedObjects.objects.create",
"django.contrib.auth.get_user_model",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"ralph.back_office.models.BackOfficeAsset.objects.filter",
"ralph.data_center.models.physical.Rack.objects.filter"
] |
[((1017, 1029), 'ralph.assets.models.assets.AssetModel', 'AssetModel', ([], {}), '()\n', (1027, 1029), False, 'from ralph.assets.models.assets import AssetModel, Environment, Service, ServiceEnvironment\n'), ((1184, 1229), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['AssetModel'], {}), '(AssetModel)\n', (1217, 1229), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((1238, 1349), 'ralph.data_importer.models.ImportedObjects.objects.create', 'ImportedObjects.objects.create', ([], {'content_type': 'asset_content_type', 'object_pk': 'asset_model.pk', 'old_object_pk': '(1)'}), '(content_type=asset_content_type, object_pk=\n asset_model.pk, old_object_pk=1)\n', (1268, 1349), False, 'from ralph.data_importer.models import ImportedObjects\n'), ((1412, 1423), 'ralph.back_office.models.Warehouse', 'Warehouse', ([], {}), '()\n', (1421, 1423), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((1522, 1566), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['Warehouse'], {}), '(Warehouse)\n', (1555, 1566), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((1575, 1687), 'ralph.data_importer.models.ImportedObjects.objects.create', 'ImportedObjects.objects.create', ([], {'content_type': 'warehouse_content_type', 'object_pk': 'warehouse.pk', 'old_object_pk': '(1)'}), '(content_type=warehouse_content_type,\n object_pk=warehouse.pk, old_object_pk=1)\n', (1605, 1687), False, 'from ralph.data_importer.models import ImportedObjects\n'), ((1753, 1766), 'ralph.assets.models.assets.Environment', 'Environment', ([], {}), '()\n', (1764, 1766), False, 'from ralph.assets.models.assets import AssetModel, Environment, Service, ServiceEnvironment\n'), ((1856, 1865), 'ralph.assets.models.assets.Service', 'Service', ([], {}), '()\n', (1863, 1865), False, 'from ralph.assets.models.assets import AssetModel, Environment, Service, ServiceEnvironment\n'), ((1955, 1975), 'ralph.assets.models.assets.ServiceEnvironment', 'ServiceEnvironment', ([], {}), '()\n', (1973, 1975), False, 'from ralph.assets.models.assets import AssetModel, Environment, Service, ServiceEnvironment\n'), ((2129, 2152), 'ralph.accounts.models.Region', 'Region', ([], {'name': '"""region_1"""'}), "(name='region_1')\n", (2135, 2152), False, 'from ralph.accounts.models import Region\n'), ((2205, 2246), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['region'], {}), '(region)\n', (2238, 2246), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((2255, 2362), 'ralph.data_importer.models.ImportedObjects.objects.create', 'ImportedObjects.objects.create', ([], {'content_type': 'region_content_type', 'object_pk': 'region.pk', 'old_object_pk': '(1)'}), '(content_type=region_content_type, object_pk=\n region.pk, old_object_pk=1)\n', (2285, 2362), False, 'from ralph.data_importer.models import ImportedObjects\n'), ((2426, 2442), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2440, 2442), False, 'from django.contrib.auth import get_user_model\n'), ((2681, 2716), 'ralph.data_importer.management.commands.importer.get_resource', 'importer.get_resource', (['"""AssetModel"""'], {}), "('AssetModel')\n", (2702, 2716), False, 'from ralph.data_importer.management.commands import importer\n'), ((2930, 2989), 'os.path.join', 'os.path.join', (['self.base_dir', 
'"""tests/samples/warehouses.csv"""'], {}), "(self.base_dir, 'tests/samples/warehouses.csv')\n", (2942, 2989), False, 'import os\n'), ((3032, 3156), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'warehouse_csv'], {'type': '"""file"""', 'model_name': '"""Warehouse"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', warehouse_csv, type='file', model_name=\n 'Warehouse', map_imported_id_to_new_id=True)\n", (3055, 3156), False, 'from django.core import management\n'), ((3475, 3542), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/back_office_assets.csv"""'], {}), "(self.base_dir, 'tests/samples/back_office_assets.csv')\n", (3487, 3542), False, 'import os\n'), ((3585, 3716), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'back_office_csv'], {'type': '"""file"""', 'model_name': '"""BackOfficeAsset"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', back_office_csv, type='file',\n model_name='BackOfficeAsset', map_imported_id_to_new_id=True)\n", (3608, 3716), False, 'from django.core import management\n'), ((3916, 3961), 'ralph.back_office.models.BackOfficeAsset.objects.get', 'BackOfficeAsset.objects.get', ([], {'sn': '"""bo_asset_sn"""'}), "(sn='bo_asset_sn')\n", (3943, 3961), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((4441, 4463), 'ralph.accounts.models.Region.objects.count', 'Region.objects.count', ([], {}), '()\n', (4461, 4463), False, 'from ralph.accounts.models import Region\n'), ((4486, 4542), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/regions.csv"""'], {}), "(self.base_dir, 'tests/samples/regions.csv')\n", (4498, 4542), False, 'import os\n'), ((4585, 4704), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'regions_csv'], {'type': '"""file"""', 'model_name': '"""Region"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', regions_csv, type='file', model_name=\n 'Region', map_imported_id_to_new_id=True)\n", (4608, 4704), False, 'from django.core import management\n'), ((4861, 4891), 'ralph.accounts.models.Region.objects.get', 'Region.objects.get', ([], {'name': '"""USA"""'}), "(name='USA')\n", (4879, 4891), False, 'from ralph.accounts.models import Region\n'), ((5231, 5294), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/warehouses_tab.csv"""'], {}), "(self.base_dir, 'tests/samples/warehouses_tab.csv')\n", (5243, 5294), False, 'import os\n'), ((5337, 5477), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'warehouse_csv'], {'type': '"""file"""', 'model_name': '"""Warehouse"""', 'delimiter': '"""\t"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', warehouse_csv, type='file', model_name=\n 'Warehouse', delimiter='\\t', map_imported_id_to_new_id=True)\n", (5360, 5477), False, 'from django.core import management\n'), ((5837, 5903), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/warehouses_skipid.csv"""'], {}), "(self.base_dir, 'tests/samples/warehouses_skipid.csv')\n", (5849, 5903), False, 'import os\n'), ((5946, 6096), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'warehouse_csv', '"""--skipid"""'], {'type': '"""file"""', 'model_name': '"""Warehouse"""', 'delimiter': '""","""', 'map_imported_id_to_new_id': '(True)'}), "('importer', warehouse_csv, '--skipid', type='file',\n model_name='Warehouse', delimiter=',', map_imported_id_to_new_id=True)\n", 
(5969, 6096), False, 'from django.core import management\n'), ((6339, 6383), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['Warehouse'], {}), '(Warehouse)\n', (6372, 6383), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((6775, 6844), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/warehouses_semicolon.csv"""'], {}), "(self.base_dir, 'tests/samples/warehouses_semicolon.csv')\n", (6787, 6844), False, 'import os\n'), ((6887, 7026), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'warehouse_csv'], {'type': '"""file"""', 'model_name': '"""Warehouse"""', 'delimiter': '""";"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', warehouse_csv, type='file', model_name=\n 'Warehouse', delimiter=';', map_imported_id_to_new_id=True)\n", (6910, 7026), False, 'from django.core import management\n'), ((7334, 7368), 'ralph.data_center.tests.factories.DataCenterFactory', 'DataCenterFactory', ([], {'name': '"""CSV_test"""'}), "(name='CSV_test')\n", (7351, 7368), False, 'from ralph.data_center.tests.factories import DataCenterFactory\n'), ((7404, 7449), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['DataCenter'], {}), '(DataCenter)\n', (7437, 7449), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((7480, 7596), 'ralph.data_importer.models.ImportedObjects.objects.create', 'ImportedObjects.objects.create', ([], {'content_type': 'data_center_content_type', 'object_pk': 'data_center.pk', 'old_object_pk': '(1)'}), '(content_type=data_center_content_type,\n object_pk=data_center.pk, old_object_pk=1)\n', (7510, 7596), False, 'from ralph.data_importer.models import ImportedObjects\n'), ((7665, 7725), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/server_room.csv"""'], {}), "(self.base_dir, 'tests/samples/server_room.csv')\n", (7677, 7725), False, 'import os\n'), ((7779, 7832), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/rack.csv"""'], {}), "(self.base_dir, 'tests/samples/rack.csv')\n", (7791, 7832), False, 'import os\n'), ((7875, 8016), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'server_room_csv'], {'type': '"""file"""', 'model_name': '"""ServerRoom"""', 'delimiter': '""","""', 'map_imported_id_to_new_id': '(True)'}), "('importer', server_room_csv, type='file',\n model_name='ServerRoom', delimiter=',', map_imported_id_to_new_id=True)\n", (7898, 8016), False, 'from django.core import management\n'), ((8119, 8164), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['ServerRoom'], {}), '(ServerRoom)\n', (8152, 8164), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((8373, 8502), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'rack_csv'], {'type': '"""file"""', 'model_name': '"""Rack"""', 'delimiter': '""","""', 'map_imported_id_to_new_id': '(True)'}), "('importer', rack_csv, type='file', model_name=\n 'Rack', delimiter=',', map_imported_id_to_new_id=True)\n", (8396, 8502), False, 'from django.core import management\n'), ((8740, 8795), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/warehouses"""'], {}), "(self.base_dir, 'tests/samples/warehouses')\n", (8752, 8795), False, 'import os\n'), ((8838, 8936), 'django.core.management.call_command', 
'management.call_command', (['"""importer"""', 'warehouse_dir'], {'type': '"""dir"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', warehouse_dir, type='dir',\n map_imported_id_to_new_id=True)\n", (8861, 8936), False, 'from django.core import management\n'), ((9270, 9329), 'os.path.join', 'os.path.join', (['self.base_dir', '"""tests/samples/warehouses.zip"""'], {}), "(self.base_dir, 'tests/samples/warehouses.zip')\n", (9282, 9329), False, 'import os\n'), ((9372, 9470), 'django.core.management.call_command', 'management.call_command', (['"""importer"""', 'warehouse_zip'], {'type': '"""zip"""', 'map_imported_id_to_new_id': '(True)'}), "('importer', warehouse_zip, type='zip',\n map_imported_id_to_new_id=True)\n", (9395, 9470), False, 'from django.core import management\n'), ((4795, 4817), 'ralph.accounts.models.Region.objects.count', 'Region.objects.count', ([], {}), '()\n', (4815, 4817), False, 'from ralph.accounts.models import Region\n'), ((957, 982), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (972, 982), False, 'import os\n'), ((6207, 6249), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""Cupertino"""'}), "(name='Cupertino')\n", (6231, 6249), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((6411, 6501), 'ralph.data_importer.models.ImportedObjects.objects.filter', 'ImportedObjects.objects.filter', ([], {'content_type': 'warehouse_content_type', 'old_object_pk': '(200)'}), '(content_type=warehouse_content_type,\n old_object_pk=200)\n', (6441, 6501), False, 'from ralph.data_importer.models import ImportedObjects\n'), ((8198, 8272), 'ralph.data_importer.models.ImportedObjects.objects.filter', 'ImportedObjects.objects.filter', ([], {'content_type': 'content_type', 'old_object_pk': '(1)'}), '(content_type=content_type, old_object_pk=1)\n', (8228, 8272), False, 'from ralph.data_importer.models import ImportedObjects\n'), ((3246, 3285), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""Poznań"""'}), "(name='Poznań')\n", (3270, 3285), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((3807, 3855), 'ralph.back_office.models.BackOfficeAsset.objects.filter', 'BackOfficeAsset.objects.filter', ([], {'sn': '"""bo_asset_sn"""'}), "(sn='bo_asset_sn')\n", (3837, 3855), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((5579, 5621), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""Barcelona"""'}), "(name='Barcelona')\n", (5603, 5621), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((7128, 7167), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""Berlin"""'}), "(name='Berlin')\n", (7152, 7167), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((8604, 8645), 'ralph.data_center.models.physical.Rack.objects.filter', 'Rack.objects.filter', ([], {'name': '"""Rack_csv_test"""'}), "(name='Rack_csv_test')\n", (8623, 8645), False, 'from ralph.data_center.models.physical import DataCenter, Rack, ServerRoom\n'), ((9016, 9066), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""From dir Warszawa"""'}), "(name='From dir Warszawa')\n", (9040, 9066), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((9123, 9171), 'ralph.back_office.models.Warehouse.objects.filter', 
'Warehouse.objects.filter', ([], {'name': '"""From dir London"""'}), "(name='From dir London')\n", (9147, 9171), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((9550, 9600), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""From zip Warszawa"""'}), "(name='From zip Warszawa')\n", (9574, 9600), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n'), ((9658, 9706), 'ralph.back_office.models.Warehouse.objects.filter', 'Warehouse.objects.filter', ([], {'name': '"""From zip London"""'}), "(name='From zip London')\n", (9682, 9706), False, 'from ralph.back_office.models import BackOfficeAsset, Warehouse\n')]
|
import torch
import torch.nn as nn
class VGG16(nn.Module):
def __init__(self, input_shape: tuple, output_dim: int):
super().__init__()
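        # five 2x2 max-pool stages each halve H and W, so the flattened
        # feature map entering the classifier has (H/32) * (W/32) * 512 values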
        fc_dim = int((input_shape[1] * 0.5 ** 5) *
                     (input_shape[2] * 0.5 ** 5) * 512)
self.maxpool = nn.MaxPool2d((2, 2), 2)
self.relu = nn.ReLU()
self.conv_1_1 = nn.Conv2d(
input_shape[0], 64, kernel_size=(3, 3), padding=1)
self.conv_1_2 = nn.Conv2d(
64, 64, kernel_size=(3, 3), padding=1)
self.conv_2_1 = nn.Conv2d(
64, 128, kernel_size=(3, 3), padding=1)
self.conv_2_2 = nn.Conv2d(
128, 128, kernel_size=(3, 3), padding=1)
self.conv_3_1 = nn.Conv2d(
128, 256, kernel_size=(3, 3), padding=1)
self.conv_3_2 = nn.Conv2d(
256, 256, kernel_size=(3, 3), padding=1)
self.conv_3_3 = nn.Conv2d(
256, 256, kernel_size=(3, 3), padding=1)
self.conv_4_1 = nn.Conv2d(
256, 512, kernel_size=(3, 3), padding=1)
self.conv_4_2 = nn.Conv2d(
512, 512, kernel_size=(3, 3), padding=1)
self.conv_4_3 = nn.Conv2d(
512, 512, kernel_size=(3, 3), padding=1)
self.conv_5_1 = nn.Conv2d(
512, 512, kernel_size=(3, 3), padding=1)
self.conv_5_2 = nn.Conv2d(
512, 512, kernel_size=(3, 3), padding=1)
self.conv_5_3 = nn.Conv2d(
512, 512, kernel_size=(3, 3), padding=1)
self.fc1 = nn.Linear(
fc_dim, 4096)
self.fc2 = nn.Linear(
4096, 4096)
self.fc3 = nn.Linear(
4096, output_dim)
def forward(self, x):
x = self.relu(self.conv_1_1(x))
x = self.relu(self.conv_1_2(x))
x = self.maxpool(x)
x = self.relu(self.conv_2_1(x))
x = self.relu(self.conv_2_2(x))
x = self.maxpool(x)
x = self.relu(self.conv_3_1(x))
x = self.relu(self.conv_3_2(x))
x = self.relu(self.conv_3_3(x))
x = self.maxpool(x)
x = self.relu(self.conv_4_1(x))
x = self.relu(self.conv_4_2(x))
x = self.relu(self.conv_4_3(x))
x = self.maxpool(x)
x = self.relu(self.conv_5_1(x))
x = self.relu(self.conv_5_2(x))
x = self.relu(self.conv_5_3(x))
x = self.maxpool(x)
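        # flatten the (batch, 512, H/32, W/32) feature map for the linear head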
x = torch.flatten(x, 1)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
        x = self.fc3(x)
        # no ReLU on the output layer; softmax turns the logits into class
        # probabilities (use the raw logits instead with nn.CrossEntropyLoss)
        x = nn.functional.softmax(x, dim=1)
return x
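# A minimal usage sketch, not part of the original file: the input shape
# (3, 224, 224) and the 10 output classes are assumptions for illustration.
if __name__ == "__main__":
    model = VGG16(input_shape=(3, 224, 224), output_dim=10)
    probs = model(torch.randn(2, 3, 224, 224))
    print(probs.shape)  # torch.Size([2, 10]); each row sums to 1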
|
[
"torch.flatten",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
] |
[((277, 300), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2)'], {}), '((2, 2), 2)\n', (289, 300), True, 'import torch.nn as nn\n'), ((321, 330), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (328, 330), True, 'import torch.nn as nn\n'), ((355, 415), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_shape[0]', '(64)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(input_shape[0], 64, kernel_size=(3, 3), padding=1)\n', (364, 415), True, 'import torch.nn as nn\n'), ((453, 501), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(64, 64, kernel_size=(3, 3), padding=1)\n', (462, 501), True, 'import torch.nn as nn\n'), ((539, 588), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(64, 128, kernel_size=(3, 3), padding=1)\n', (548, 588), True, 'import torch.nn as nn\n'), ((626, 676), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(128, 128, kernel_size=(3, 3), padding=1)\n', (635, 676), True, 'import torch.nn as nn\n'), ((714, 764), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(128, 256, kernel_size=(3, 3), padding=1)\n', (723, 764), True, 'import torch.nn as nn\n'), ((802, 852), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(256, 256, kernel_size=(3, 3), padding=1)\n', (811, 852), True, 'import torch.nn as nn\n'), ((890, 940), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(256, 256, kernel_size=(3, 3), padding=1)\n', (899, 940), True, 'import torch.nn as nn\n'), ((978, 1028), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(256, 512, kernel_size=(3, 3), padding=1)\n', (987, 1028), True, 'import torch.nn as nn\n'), ((1066, 1116), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(512, 512, kernel_size=(3, 3), padding=1)\n', (1075, 1116), True, 'import torch.nn as nn\n'), ((1154, 1204), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(512, 512, kernel_size=(3, 3), padding=1)\n', (1163, 1204), True, 'import torch.nn as nn\n'), ((1242, 1292), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(512, 512, kernel_size=(3, 3), padding=1)\n', (1251, 1292), True, 'import torch.nn as nn\n'), ((1330, 1380), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(512, 512, kernel_size=(3, 3), padding=1)\n', (1339, 1380), True, 'import torch.nn as nn\n'), ((1418, 1468), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3, 3)', 'padding': '(1)'}), '(512, 512, kernel_size=(3, 3), padding=1)\n', (1427, 1468), True, 'import torch.nn as nn\n'), ((1501, 1524), 'torch.nn.Linear', 'nn.Linear', (['fc_dim', '(4096)'], {}), '(fc_dim, 4096)\n', (1510, 1524), True, 'import torch.nn as nn\n'), ((1557, 1578), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (1566, 1578), True, 'import torch.nn as nn\n'), ((1611, 1638), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'output_dim'], {}), '(4096, output_dim)\n', (1620, 1638), True, 'import torch.nn as nn\n'), ((2351, 2370), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2364, 2370), False, 'import torch\n'), ((2488, 2519), 'torch.nn.functional.softmax', 
'nn.functional.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (2509, 2519), True, 'import torch.nn as nn\n')]
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api.utils import log_utils
from federatedml.nn.hetero_nn.hetero_nn_model import HeteroNNGuestModel
from federatedml.nn.hetero_nn.hetero_nn_model import HeteroNNHostModel
from federatedml.nn.hetero_nn.model.interactive_layer import InterActiveGuestDenseLayer
from federatedml.nn.hetero_nn.model.interactive_layer import InteractiveHostDenseLayer
from federatedml.nn.homo_nn import nn_model
from federatedml.nn.hetero_nn.backend.tf_keras.data_generator import KerasSequenceDataConverter
from federatedml.nn.hetero_nn.model.hetero_nn_bottom_model import HeteroNNBottomModel
from federatedml.nn.hetero_nn.model.hetero_nn_top_model import HeteroNNTopModel
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNModelMeta
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import OptimizerParam
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNModelParam
import json
LOGGER = log_utils.getLogger()
class HeteroNNKerasGuestModel(HeteroNNGuestModel):
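    # Guest side of the hetero (vertically partitioned) NN: the guest holds
    # the labels, the top model and the interactive layer.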
def __init__(self, hetero_nn_param):
super(HeteroNNKerasGuestModel, self).__init__()
self.bottom_model = None
self.interactive_model = None
self.top_model = None
self.bottom_nn_define = None
self.top_nn_define = None
self.interactive_layer_define = None
self.config_type = None
self.optimizer = None
self.loss = None
self.metrics = None
self.hetero_nn_param = None
self.transfer_variable = None
self.model_builder = None
self.bottom_model_input_shape = 0
self.top_model_input_shape = None
self.is_empty = False
self.set_nn_meta(hetero_nn_param)
self.model_builder = nn_model.get_nn_builder(config_type=self.config_type)
self.data_converter = KerasSequenceDataConverter()
def set_nn_meta(self, hetero_nn_param):
self.bottom_nn_define = hetero_nn_param.bottom_nn_define
self.top_nn_define = hetero_nn_param.top_nn_define
self.interactive_layer_define = hetero_nn_param.interactive_layer_define
self.config_type = hetero_nn_param.config_type
self.optimizer = hetero_nn_param.optimizer
self.loss = hetero_nn_param.loss
# self.metrics = hetero_nn_param.metrics
self.hetero_nn_param = hetero_nn_param
def set_empty(self):
self.is_empty = True
def train(self, x, y, epoch, batch_idx):
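        # one mini-batch step: run the bottom model on the guest features,
        # merge with the host output in the interactive layer, train the top
        # model on the merged activations, then backpropagate downwards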
if not self.is_empty:
if self.bottom_model is None:
self.bottom_model_input_shape = x.shape[1]
self._build_bottom_model()
guest_bottom_output = self.bottom_model.forward(x)
else:
guest_bottom_output = None
if self.interactive_model is None:
self._build_interactive_model()
interactive_output = self.interactive_model.forward(guest_bottom_output, epoch, batch_idx)
if self.top_model is None:
self.top_model_input_shape = int(interactive_output.shape[1])
self._build_top_model()
gradients = self.top_model.train_and_get_backward_gradient(interactive_output, y)
guest_backward = self.interactive_model.backward(gradients, epoch, batch_idx)
if not self.is_empty:
self.bottom_model.backward(x, guest_backward)
def predict(self, x):
if not self.is_empty:
guest_bottom_output = self.bottom_model.predict(x)
else:
guest_bottom_output = None
interactive_output = self.interactive_model.forward(guest_bottom_output)
preds = self.top_model.predict(interactive_output)
return preds
def evaluate(self, x, y, epoch, batch):
if not self.is_empty:
guest_bottom_output = self.bottom_model.predict(x)
else:
guest_bottom_output = None
interactive_output = self.interactive_model.forward(guest_bottom_output, epoch, batch)
metrics = self.top_model.evaluate(interactive_output, y)
return metrics
def get_hetero_nn_model_param(self):
model_param = HeteroNNModelParam()
model_param.is_empty = self.is_empty
if not self.is_empty:
model_param.bottom_saved_model_bytes = self.bottom_model.export_model()
model_param.top_saved_model_bytes = self.top_model.export_model()
model_param.interactive_layer_param.CopyFrom(self.interactive_model.export_model())
model_param.bottom_model_input_shape = self.bottom_model_input_shape
model_param.top_model_input_shape = self.top_model_input_shape
return model_param
def set_hetero_nn_model_param(self, model_param):
self.is_empty = model_param.is_empty
self.top_model_input_shape = model_param.top_model_input_shape
self.bottom_model_input_shape = model_param.bottom_model_input_shape
if not self.is_empty:
self._restore_bottom_model(model_param.bottom_saved_model_bytes)
self._restore_interactive_model(model_param.interactive_layer_param)
self._restore_top_model(model_param.top_saved_model_bytes)
def get_hetero_nn_model_meta(self):
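        # serialize the network definitions and optimizer settings to protobuf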
model_meta = HeteroNNModelMeta()
model_meta.config_type = self.config_type
if self.config_type == "nn":
for layer in self.bottom_nn_define:
model_meta.bottom_nn_define.append(json.dumps(layer))
for layer in self.top_nn_define:
model_meta.top_nn_define.append(json.dumps(layer))
elif self.config_type == "keras":
model_meta.bottom_nn_define.append(json.dumps(self.bottom_nn_define))
model_meta.top_nn_define.append(json.dumps(self.top_nn_define))
model_meta.interactive_layer_define = json.dumps(self.interactive_layer_define)
model_meta.interactive_layer_lr = self.hetero_nn_param.interactive_layer_lr
model_meta.loss = self.loss
"""
for metric in self.metrics:
model_meta.metrics.append(metric)
"""
optimizer_param = OptimizerParam()
optimizer_param.optimizer = self.optimizer.optimizer
optimizer_param.kwargs = json.dumps(self.optimizer.kwargs)
model_meta.optimizer_param.CopyFrom(optimizer_param)
return model_meta
def set_hetero_nn_model_meta(self, model_meta):
self.config_type = model_meta.config_type
if self.config_type == "nn":
self.bottom_nn_define = []
self.top_nn_define = []
for layer in model_meta.bottom_nn_define:
self.bottom_nn_define.append(json.loads(layer))
for layer in model_meta.top_nn_define:
self.top_nn_define.append(json.loads(layer))
elif self.config_type == 'keras':
self.bottom_nn_define = json.loads(model_meta.bottom_nn_define[0])
self.top_nn_define = json.loads(model_meta.top_nn_define[0])
self.interactive_layer_define = json.loads(model_meta.interactive_layer_define)
self.loss = model_meta.loss
"""
self.metrics = []
for metric in self.metrics:
self.metrics.append(metric)
"""
self.optimizer.optimizer = model_meta.optimizer_param.optimizer
self.optimizer.kwargs = json.loads(model_meta.optimizer_param.kwargs)
def set_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
def set_partition(self, partition=1):
self.partition = partition
if self.interactive_model is not None:
self.interactive_model.set_partition(self.partition)
def _build_bottom_model(self):
self.bottom_model = HeteroNNBottomModel(input_shape=self.bottom_model_input_shape,
optimizer=self.optimizer,
layer_config=self.bottom_nn_define,
model_builder=self.model_builder)
self.bottom_model.set_data_converter(self.data_converter)
def _restore_bottom_model(self, model_bytes):
self._build_bottom_model()
self.bottom_model.restore_model(model_bytes)
def _build_top_model(self):
self.top_model = HeteroNNTopModel(input_shape=self.top_model_input_shape,
optimizer=self.optimizer,
layer_config=self.top_nn_define,
loss=self.loss,
metrics=self.metrics,
model_builder=self.model_builder)
self.top_model.set_data_converter(self.data_converter)
def _restore_top_model(self, model_bytes):
self._build_top_model()
self.top_model.restore_model(model_bytes)
def _build_interactive_model(self):
self.interactive_model = InterActiveGuestDenseLayer(self.hetero_nn_param,
self.interactive_layer_define,
model_builder=self.model_builder)
self.interactive_model.set_transfer_variable(self.transfer_variable)
self.interactive_model.set_partition(self.partition)
def _restore_interactive_model(self, interactive_model_param):
self._build_interactive_model()
self.interactive_model.restore_model(interactive_model_param)
class HeteroNNKerasHostModel(HeteroNNHostModel):
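    # Host side: holds only a bottom model and exchanges activations and
    # gradients with the guest through the interactive layer.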
def __init__(self, hetero_nn_param):
super(HeteroNNKerasHostModel, self).__init__()
self.bottom_model_input_shape = None
self.bottom_model = None
self.interactive_model = None
self.bottom_nn_define = None
self.config_type = None
self.optimizer = None
self.hetero_nn_param = None
self.set_nn_meta(hetero_nn_param)
self.model_builder = nn_model.get_nn_builder(config_type=self.config_type)
self.data_converter = KerasSequenceDataConverter()
self.transfer_variable = None
def set_nn_meta(self, hetero_nn_param):
self.bottom_nn_define = hetero_nn_param.bottom_nn_define
self.config_type = hetero_nn_param.config_type
self.optimizer = hetero_nn_param.optimizer
self.hetero_nn_param = hetero_nn_param
def _build_bottom_model(self):
self.bottom_model = HeteroNNBottomModel(input_shape=self.bottom_model_input_shape,
optimizer=self.optimizer,
layer_config=self.bottom_nn_define,
model_builder=self.model_builder)
self.bottom_model.set_data_converter(self.data_converter)
def _restore_bottom_model(self, model_bytes):
self._build_bottom_model()
self.bottom_model.restore_model(model_bytes)
def _build_interactive_model(self):
self.interactive_model = InteractiveHostDenseLayer(self.hetero_nn_param)
self.interactive_model.set_transfer_variable(self.transfer_variable)
self.interactive_model.set_partition(self.partition)
def _restore_interactive_model(self, interactive_layer_param):
self._build_interactive_model()
self.interactive_model.restore_model(interactive_layer_param)
def set_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
def set_partition(self, partition=1):
self.partition = partition
if self.interactive_model is not None:
self.interactive_model.set_partition(self.partition)
LOGGER.debug("set_partition, partition num is {}".format(self.partition))
def get_hetero_nn_model_meta(self):
model_meta = HeteroNNModelMeta()
model_meta.config_type = self.config_type
if self.config_type == "nn":
for layer in self.bottom_nn_define:
model_meta.bottom_nn_define.append(json.dumps(layer))
elif self.config_type == "keras":
model_meta.bottom_nn_define.append(json.dumps(self.bottom_nn_define))
model_meta.interactive_layer_lr = self.hetero_nn_param.interactive_layer_lr
optimizer_param = OptimizerParam()
optimizer_param.optimizer = self.optimizer.optimizer
optimizer_param.kwargs = json.dumps(self.optimizer.kwargs)
model_meta.optimizer_param.CopyFrom(optimizer_param)
return model_meta
def set_hetero_nn_model_meta(self, model_meta):
self.config_type = model_meta.config_type
if self.config_type == "nn":
self.bottom_nn_define = []
for layer in model_meta.bottom_nn_define:
self.bottom_nn_define.append(json.loads(layer))
elif self.config_type == 'keras':
self.bottom_nn_define = json.loads(model_meta.bottom_nn_define[0])
self.optimizer.optimizer = model_meta.optimizer_param.optimizer
self.optimizer.kwargs = json.loads(model_meta.optimizer_param.kwargs)
def set_hetero_nn_model_param(self, model_param):
self.bottom_model_input_shape = model_param.bottom_model_input_shape
self._restore_bottom_model(model_param.bottom_saved_model_bytes)
self._restore_interactive_model(model_param.interactive_layer_param)
def get_hetero_nn_model_param(self):
model_param = HeteroNNModelParam()
model_param.bottom_saved_model_bytes = self.bottom_model.export_model()
model_param.interactive_layer_param.CopyFrom(self.interactive_model.export_model())
return model_param
def train(self, x, epoch, batch_idx):
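        # forward the local features through the bottom model, push the
        # output through the interactive layer, and apply the gradient the
        # guest sends back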
if self.bottom_model is None:
self.bottom_model_input_shape = x.shape[1]
self._build_bottom_model()
self._build_interactive_model()
host_bottom_output = self.bottom_model.forward(x)
self.interactive_model.forward(host_bottom_output, epoch, batch_idx)
host_gradient = self.interactive_model.backward(epoch, batch_idx)
self.bottom_model.backward(x, host_gradient)
    def predict(self, x):
        host_bottom_output = self.bottom_model.predict(x)
        self.interactive_model.forward(host_bottom_output)
    def evaluate(self, x, epoch, batch_idx):
        host_bottom_output = self.bottom_model.predict(x)
        self.interactive_model.forward(host_bottom_output, epoch, batch_idx)
|
[
"arch.api.utils.log_utils.getLogger",
"json.loads",
"federatedml.nn.hetero_nn.model.hetero_nn_bottom_model.HeteroNNBottomModel",
"federatedml.nn.hetero_nn.model.hetero_nn_top_model.HeteroNNTopModel",
"federatedml.nn.hetero_nn.model.interactive_layer.InterActiveGuestDenseLayer",
"federatedml.protobuf.generated.hetero_nn_model_meta_pb2.HeteroNNModelMeta",
"federatedml.protobuf.generated.hetero_nn_model_meta_pb2.OptimizerParam",
"json.dumps",
"federatedml.protobuf.generated.hetero_nn_model_param_pb2.HeteroNNModelParam",
"federatedml.nn.homo_nn.nn_model.get_nn_builder",
"federatedml.nn.hetero_nn.model.interactive_layer.InteractiveHostDenseLayer",
"federatedml.nn.hetero_nn.backend.tf_keras.data_generator.KerasSequenceDataConverter"
] |
[((1557, 1578), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1576, 1578), False, 'from arch.api.utils import log_utils\n'), ((2357, 2410), 'federatedml.nn.homo_nn.nn_model.get_nn_builder', 'nn_model.get_nn_builder', ([], {'config_type': 'self.config_type'}), '(config_type=self.config_type)\n', (2380, 2410), False, 'from federatedml.nn.homo_nn import nn_model\n'), ((2441, 2469), 'federatedml.nn.hetero_nn.backend.tf_keras.data_generator.KerasSequenceDataConverter', 'KerasSequenceDataConverter', ([], {}), '()\n', (2467, 2469), False, 'from federatedml.nn.hetero_nn.backend.tf_keras.data_generator import KerasSequenceDataConverter\n'), ((4732, 4752), 'federatedml.protobuf.generated.hetero_nn_model_param_pb2.HeteroNNModelParam', 'HeteroNNModelParam', ([], {}), '()\n', (4750, 4752), False, 'from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNModelParam\n'), ((5818, 5837), 'federatedml.protobuf.generated.hetero_nn_model_meta_pb2.HeteroNNModelMeta', 'HeteroNNModelMeta', ([], {}), '()\n', (5835, 5837), False, 'from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNModelMeta\n'), ((6404, 6445), 'json.dumps', 'json.dumps', (['self.interactive_layer_define'], {}), '(self.interactive_layer_define)\n', (6414, 6445), False, 'import json\n'), ((6701, 6717), 'federatedml.protobuf.generated.hetero_nn_model_meta_pb2.OptimizerParam', 'OptimizerParam', ([], {}), '()\n', (6715, 6717), False, 'from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import OptimizerParam\n'), ((6812, 6845), 'json.dumps', 'json.dumps', (['self.optimizer.kwargs'], {}), '(self.optimizer.kwargs)\n', (6822, 6845), False, 'import json\n'), ((7618, 7665), 'json.loads', 'json.loads', (['model_meta.interactive_layer_define'], {}), '(model_meta.interactive_layer_define)\n', (7628, 7665), False, 'import json\n'), ((7934, 7979), 'json.loads', 'json.loads', (['model_meta.optimizer_param.kwargs'], {}), '(model_meta.optimizer_param.kwargs)\n', (7944, 7979), False, 'import json\n'), ((8342, 8510), 'federatedml.nn.hetero_nn.model.hetero_nn_bottom_model.HeteroNNBottomModel', 'HeteroNNBottomModel', ([], {'input_shape': 'self.bottom_model_input_shape', 'optimizer': 'self.optimizer', 'layer_config': 'self.bottom_nn_define', 'model_builder': 'self.model_builder'}), '(input_shape=self.bottom_model_input_shape, optimizer=\n self.optimizer, layer_config=self.bottom_nn_define, model_builder=self.\n model_builder)\n', (8361, 8510), False, 'from federatedml.nn.hetero_nn.model.hetero_nn_bottom_model import HeteroNNBottomModel\n'), ((8909, 9106), 'federatedml.nn.hetero_nn.model.hetero_nn_top_model.HeteroNNTopModel', 'HeteroNNTopModel', ([], {'input_shape': 'self.top_model_input_shape', 'optimizer': 'self.optimizer', 'layer_config': 'self.top_nn_define', 'loss': 'self.loss', 'metrics': 'self.metrics', 'model_builder': 'self.model_builder'}), '(input_shape=self.top_model_input_shape, optimizer=self.\n optimizer, layer_config=self.top_nn_define, loss=self.loss, metrics=\n self.metrics, model_builder=self.model_builder)\n', (8925, 9106), False, 'from federatedml.nn.hetero_nn.model.hetero_nn_top_model import HeteroNNTopModel\n'), ((9575, 9693), 'federatedml.nn.hetero_nn.model.interactive_layer.InterActiveGuestDenseLayer', 'InterActiveGuestDenseLayer', (['self.hetero_nn_param', 'self.interactive_layer_define'], {'model_builder': 'self.model_builder'}), '(self.hetero_nn_param, self.\n interactive_layer_define, model_builder=self.model_builder)\n', (9601, 9693), False, 'from 
federatedml.nn.hetero_nn.model.interactive_layer import InterActiveGuestDenseLayer\n'), ((10600, 10653), 'federatedml.nn.homo_nn.nn_model.get_nn_builder', 'nn_model.get_nn_builder', ([], {'config_type': 'self.config_type'}), '(config_type=self.config_type)\n', (10623, 10653), False, 'from federatedml.nn.homo_nn import nn_model\n'), ((10684, 10712), 'federatedml.nn.hetero_nn.backend.tf_keras.data_generator.KerasSequenceDataConverter', 'KerasSequenceDataConverter', ([], {}), '()\n', (10710, 10712), False, 'from federatedml.nn.hetero_nn.backend.tf_keras.data_generator import KerasSequenceDataConverter\n'), ((11079, 11247), 'federatedml.nn.hetero_nn.model.hetero_nn_bottom_model.HeteroNNBottomModel', 'HeteroNNBottomModel', ([], {'input_shape': 'self.bottom_model_input_shape', 'optimizer': 'self.optimizer', 'layer_config': 'self.bottom_nn_define', 'model_builder': 'self.model_builder'}), '(input_shape=self.bottom_model_input_shape, optimizer=\n self.optimizer, layer_config=self.bottom_nn_define, model_builder=self.\n model_builder)\n', (11098, 11247), False, 'from federatedml.nn.hetero_nn.model.hetero_nn_bottom_model import HeteroNNBottomModel\n'), ((11662, 11709), 'federatedml.nn.hetero_nn.model.interactive_layer.InteractiveHostDenseLayer', 'InteractiveHostDenseLayer', (['self.hetero_nn_param'], {}), '(self.hetero_nn_param)\n', (11687, 11709), False, 'from federatedml.nn.hetero_nn.model.interactive_layer import InteractiveHostDenseLayer\n'), ((12470, 12489), 'federatedml.protobuf.generated.hetero_nn_model_meta_pb2.HeteroNNModelMeta', 'HeteroNNModelMeta', ([], {}), '()\n', (12487, 12489), False, 'from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNModelMeta\n'), ((12934, 12950), 'federatedml.protobuf.generated.hetero_nn_model_meta_pb2.OptimizerParam', 'OptimizerParam', ([], {}), '()\n', (12948, 12950), False, 'from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import OptimizerParam\n'), ((13045, 13078), 'json.dumps', 'json.dumps', (['self.optimizer.kwargs'], {}), '(self.optimizer.kwargs)\n', (13055, 13078), False, 'import json\n'), ((13694, 13739), 'json.loads', 'json.loads', (['model_meta.optimizer_param.kwargs'], {}), '(model_meta.optimizer_param.kwargs)\n', (13704, 13739), False, 'import json\n'), ((14086, 14106), 'federatedml.protobuf.generated.hetero_nn_model_param_pb2.HeteroNNModelParam', 'HeteroNNModelParam', ([], {}), '()\n', (14104, 14106), False, 'from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNModelParam\n'), ((7461, 7503), 'json.loads', 'json.loads', (['model_meta.bottom_nn_define[0]'], {}), '(model_meta.bottom_nn_define[0])\n', (7471, 7503), False, 'import json\n'), ((7537, 7576), 'json.loads', 'json.loads', (['model_meta.top_nn_define[0]'], {}), '(model_meta.top_nn_define[0])\n', (7547, 7576), False, 'import json\n'), ((13546, 13588), 'json.loads', 'json.loads', (['model_meta.bottom_nn_define[0]'], {}), '(model_meta.bottom_nn_define[0])\n', (13556, 13588), False, 'import json\n'), ((6025, 6042), 'json.dumps', 'json.dumps', (['layer'], {}), '(layer)\n', (6035, 6042), False, 'import json\n'), ((6138, 6155), 'json.dumps', 'json.dumps', (['layer'], {}), '(layer)\n', (6148, 6155), False, 'import json\n'), ((6246, 6279), 'json.dumps', 'json.dumps', (['self.bottom_nn_define'], {}), '(self.bottom_nn_define)\n', (6256, 6279), False, 'import json\n'), ((6325, 6355), 'json.dumps', 'json.dumps', (['self.top_nn_define'], {}), '(self.top_nn_define)\n', (6335, 6355), False, 'import json\n'), ((7251, 7268), 'json.loads', 'json.loads', 
(['layer'], {}), '(layer)\n', (7261, 7268), False, 'import json\n'), ((7364, 7381), 'json.loads', 'json.loads', (['layer'], {}), '(layer)\n', (7374, 7381), False, 'import json\n'), ((12678, 12695), 'json.dumps', 'json.dumps', (['layer'], {}), '(layer)\n', (12688, 12695), False, 'import json\n'), ((12787, 12820), 'json.dumps', 'json.dumps', (['self.bottom_nn_define'], {}), '(self.bottom_nn_define)\n', (12797, 12820), False, 'import json\n'), ((13448, 13465), 'json.loads', 'json.loads', (['layer'], {}), '(layer)\n', (13458, 13465), False, 'import json\n')]
|
#!/usr/bin/env python
import urllib2
import zipfile
import os
import sys
import shutil
import fnmatch
import json
import tempfile
import re
import subprocess
import requests
import urlparse
import hashlib
import yaml
from argparse import ArgumentParser
from contextlib import contextmanager
import lunr
from lunr import trimmer
from markdown import Markdown
from markdown import Extension
from markdown.util import etree, AtomicString
from markdown.inlinepatterns import Pattern
SHA1 = {}
DOCS_ZIP = "doc-master.zip"
EXAMPLES_ZIP = "examples-master.zip"
CODEPAD_ZIP = "codepad-master.zip"
AWESOME_ZIP = "awesome-defold-master.zip"
ASSETINDEX_JSON = os.path.join("_data", "assetindex.json")
GAMES_JSON = os.path.join("_data", "games.json")
AUTHORINDEX_JSON = os.path.join("_data", "authorindex.json")
TAGINDEX_JSON = os.path.join("_data", "tagindex.json")
PLATFORMINDEX_JSON = os.path.join("_data", "platformindex.json")
REF_DATA_DIR = os.path.join("_data", "ref")
ASSET_MD_FRONTMATTER = """---
layout: asset
asset: {}
title: {}
description: {}
---
"""
AUTHOR_MD_FRONTMATTER = """---
layout: author
author: {}
title: {}
---
"""
TAG_MD_FRONTMATTER = """---
layout: assetportal
tag: {}
title: {}
---
"""
TAG_SORT_MD_FRONTMATTER = """---
layout: assetportal
tag: {}
title: {}
sort: {}
---
"""
REFDOC_MD_FRONTMATTER = """---
layout: api
branch: {}
ref: {}
type: {}
title: API reference ({})
---
"""
REFDOC_MD_BODY = "{% include anchor_headings.html html=content %}"
@contextmanager
def tmpdir():
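    # context manager yielding a scratch directory that is removed on exit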
name = tempfile.mkdtemp()
try:
yield name
finally:
shutil.rmtree(name)
def rmtree(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def rmmkdir(dir):
rmtree(dir)
os.mkdir(dir)
def rmcopytree(src, dst):
rmtree(dst)
shutil.copytree(src, dst)
def copytree(src, dst, overwrite = False):
if os.path.isdir(src):
if not os.path.isdir(dst):
os.makedirs(dst)
for f in os.listdir(src):
copytree(os.path.join(src, f), os.path.join(dst, f), overwrite)
elif overwrite or not os.path.exists(dst):
shutil.copyfile(src, dst)
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def call(args):
print(args)
ret = os.system(args)
if ret != 0:
sys.exit(1)
def unzip(filename, destination):
print("Unpacking {}".format(filename))
zip_ref = zipfile.ZipFile(filename, 'r')
zip_ref.extractall(destination)
zip_ref.close()
def download_file(url, destination, filename=None):
if not filename:
filename = url.rsplit('/', 1)[-1]
path = os.path.join(destination, filename)
if os.path.exists(path):
print("File %s already exists" % (path))
sys.exit(1)
print("Downloading {} to {}".format(url, path))
try:
f = urllib2.urlopen(url)
with open(path, 'wb') as output:
output.write(f.read())
    except Exception:
        path = None
    return path
def github_request(url, token):
try:
response = requests.get(url, headers={"Authorization": "token %s" % (token)})
response.raise_for_status()
return response.json()
except Exception as err:
print(err)
def download_string(url):
response = requests.get(url)
if response.status_code != 200:
print("Unable to download from %s (%d)" % (url, response.status_code))
return None
return response.text
def download_json(url):
response = requests.get(url)
if response.status_code != 200:
print("Unable to download from %s (%d)" % (url, response.status_code))
return None
return response.json()
def get_sha1(branch = "stable"):
global SHA1
if not SHA1.get(branch):
info = download_json("https://d.defold.com/{}/info.json".format(branch))
SHA1[branch] = info["sha1"]
return SHA1[branch]
def get_bob_filename(sha1):
return "bob_{}.jar".format(sha1)
def download_bob(sha1):
bob_filename = get_bob_filename(sha1)
if not os.path.exists(bob_filename):
download_file("http://d.defold.com/archive/{}/bob/bob.jar".format(sha1), ".", bob_filename)
def find_files(root_dir, file_pattern):
matches = []
for root, dirnames, filenames in os.walk(root_dir):
for filename in filenames:
fullname = os.path.join(root, filename)
if fnmatch.fnmatch(filename, file_pattern):
matches.append(os.path.join(root, filename))
return matches
def read_as_string(filename):
with open(filename) as f:
return f.read()
def read_as_json(filename):
with open(filename) as f:
return json.load(f)
def write_as_json(filename, data):
with open(filename, "w") as f:
json.dump(data, f, indent=4, sort_keys=True)
def replace_in_file(filename, old, new, flags=None):
with open(filename) as f:
if flags is None:
content = re.sub(old, new, f.read())
else:
content = re.sub(old, new, f.read(), flags=flags)
with open(filename, "w") as f:
f.write(content)
def append_to_file(filename, s):
with open(filename, "a") as f:
f.write(s)
def process_doc_file(file, language):
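    # escape Liquid braces, turn ::: sidenote/important blocks into divs and
    # rewrite image/include paths so the Markdown renders correctly under Jekyll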
replace_in_file(file, r"({{{?)(.*?)(}}}?)", r"{% raw %}\1\2\3{% endraw %}")
replace_in_file(file, r"{% raw %}({{{?)(.*?include\..*?)(}}}?){% endraw %}", r"\1\2\3")
replace_in_file(file, r"{\s*srcset=.*?}", r"")
replace_in_file(file, r"::: sidenote(.*?):::", r"<div class='sidenote' markdown='1'>\1</div>", flags=re.DOTALL)
replace_in_file(file, r"::: important(.*?):::", r"<div class='important' markdown='1'>\1</div>", flags=re.DOTALL)
replace_in_file(file, r"\((.*?)#_(.*?)\)", r"(\1#\2)")
replace_in_file(file, r":\[.*?\]\(\.\.\/(.*?)\)", r"{% include \1 %}")
replace_in_file(file, r"\!\[.*?\]\((.*?)\){width=(.*) \.left}", r"<img src='../\1' width='\2'/>")
replace_in_file(file, r"{\.left}", r"")
replace_in_file(file, r"{\.icon}", r"")
replace_in_file(file, r"{\.inline}", r"")
replace_in_file(file, r"{\.mark}", r"")
# replace_in_file(file, r"\!\[(.*?)\]\((.*?)\)\{\.inline\}", r"<span style='display: inline'></span>")
replace_in_file(file, r"\(images\/", r"(../images/")
replace_in_file(file, r"\(\.\.\/shared\/", r"(/shared/")
replace_in_file(file, r"\{\% include shared\/(.*?)\.md(.*?)\%\}", r"{}".format("{% include shared/" + language + "/\\1.md\\2%}"))
def get_language_specific_dir(language, dir):
if language != "en":
dir = os.path.join(language, dir)
return dir
def process_docs(download = False):
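    # unpack doc-master.zip, rewrite the manuals/FAQ/tutorials for every
    # active language and regenerate the indices under _data/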
if download:
if os.path.exists(DOCS_ZIP):
os.remove(DOCS_ZIP)
download_file("https://github.com/defold/doc/archive/master.zip", ".", DOCS_ZIP)
if not os.path.exists(DOCS_ZIP):
print("File {} does not exists".format(DOCS_ZIP))
sys.exit(1)
    DOC_DIR = os.environ.get('DM_DOC_DIR', None)
with tmpdir() as tmp_dir:
if not DOC_DIR:
shutil.copyfile(DOCS_ZIP, os.path.join(tmp_dir, DOCS_ZIP))
unzip(os.path.join(tmp_dir, DOCS_ZIP), tmp_dir)
DOC_DIR = os.path.join(tmp_dir, "doc-master")
print("Processing docs")
print("...languages")
languages = read_as_json(os.path.join(DOC_DIR, "docs", "languages.json"))
language_list = []
for language in languages["languages"].keys():
language_data = languages["languages"][language]
if language_data["active"]:
language_data["language"] = language
if language == "en":
language_data["urlprefix"] = ""
else:
language_data["urlprefix"] = language
language_list.append(language_data)
write_as_json(os.path.join("_data", "languageindex.json"), language_list)
print("...index")
index_file = os.path.join("_data", "learnindex.json")
if os.path.exists(index_file):
os.remove(index_file)
shutil.copyfile(os.path.join(DOC_DIR, "docs", "en", "en.json"), index_file)
index = read_as_json(index_file)
for language in languages["languages"].keys():
print("...manuals ({})".format(language))
manuals_src_dir = os.path.join(DOC_DIR, "docs", language, "manuals")
if os.path.exists(manuals_src_dir):
manuals_dst_dir = get_language_specific_dir(language, "manuals")
rmcopytree(manuals_src_dir, manuals_dst_dir)
for filename in find_files(manuals_dst_dir, "*.md"):
process_doc_file(filename, language)
replace_in_file(filename, r"title\:", r"layout: manual\ntitle:")
replace_in_file(filename, r"title\:", r"language: {}\ntitle:".format(language))
replace_in_file(filename, r"title\:", r"github: {}\ntitle:".format("https://github.com/defold/doc"))
if language != "en":
replace_in_file(filename, r"\/manuals\/", r"/{}/manuals/".format(language))
replace_in_file(filename, r"\.\.\/images\/", r"/manuals/images/".format(language))
replace_in_file(filename, r"\.\.\/assets\/", r"/manuals/assets/".format(language))
print("...faq ({})".format(language))
faq_src_dir = os.path.join(DOC_DIR, "docs", language, "faq")
if os.path.exists(faq_src_dir):
faq_dst_dir = get_language_specific_dir(language, "faq")
rmcopytree(faq_src_dir, faq_dst_dir)
for filename in find_files(faq_dst_dir, "*.md"):
process_doc_file(filename, language)
replace_in_file(filename, r"title\:", r"language: {}\ntitle:".format(language))
replace_in_file(filename, r"title\:", r"layout: faq\ntitle:")
if language != "en":
replace_in_file(filename, r"\/manuals\/", r"/{}/manuals/".format(language))
replace_in_file(filename, r"\.\.\/images\/", r"/manuals/images/".format(language))
replace_in_file(filename, r"\.\.\/assets\/", r"/manuals/assets/".format(language))
for language in languages["languages"].keys():
print("...shared includes ({})".format(language))
shared_includes_src_dir_en = os.path.join(DOC_DIR, "docs", "en", "shared")
shared_includes_src_dir = os.path.join(DOC_DIR, "docs", language, "shared")
shared_includes_dst_dir = os.path.join("_includes", "shared", language)
rmcopytree(shared_includes_src_dir_en, shared_includes_dst_dir)
if os.path.exists(shared_includes_src_dir):
copytree(shared_includes_src_dir, shared_includes_dst_dir, overwrite = True)
shutil.rmtree(os.path.join(shared_includes_dst_dir, "images"))
for filename in find_files(shared_includes_dst_dir, "*.md"):
process_doc_file(filename, language)
print("...tutorials")
tutorials_src_dir = os.path.join(DOC_DIR, "docs", "en", "tutorials")
tutorials_dst_dir = "tutorials"
rmcopytree(tutorials_src_dir, tutorials_dst_dir)
for filename in find_files(tutorials_dst_dir, "*.md"):
process_doc_file(filename, "en")
replace_in_file(filename, r"title\:", r"layout: tutorial\ntitle:")
# figure out in which languages each manual exists
print("...index (incl. languages)")
for section in index["navigation"]["manuals"]:
for item in section["items"]:
item["languages"] = []
if not item["path"].startswith("http"):
path = item["path"][1:]
for language in languages["languages"].keys():
if os.path.exists(get_language_specific_dir(language, path + ".md")):
item["languages"].append(language)
write_as_json(index_file, index)
print("...shared images")
shared_images_src_dir = os.path.join(DOC_DIR, "docs", "en", "shared", "images")
shared_images_dst_dir = os.path.join("shared", "images")
rmcopytree(shared_images_src_dir, shared_images_dst_dir)
print("Done")
def process_extension(extension_name, download = False):
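    # fetch the extension repo, copy its docs/ into the site and convert the
    # YAML .script_api reference into the JSON consumed by the API pages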
    md = Markdown(extensions=['markdown.extensions.fenced_code', 'markdown.extensions.def_list', 'markdown.extensions.codehilite', 'markdown.extensions.tables'])
extension_zip = extension_name + ".zip"
github_url = "https://github.com/defold/{}".format(extension_name)
if download:
if os.path.exists(extension_zip):
os.remove(extension_zip)
for branch in ("master", "main"):
url = github_url + "/archive/" + branch + ".zip"
if download_file(url, ".", extension_zip):
break
if not os.path.exists(extension_zip):
print("File {} does not exist".format(extension_zip))
sys.exit(1)
print("Processing %s" % extension_zip)
with tmpdir() as tmp_dir:
shutil.copyfile(extension_zip, os.path.join(tmp_dir, extension_zip))
unzip(os.path.join(tmp_dir, extension_zip), tmp_dir)
unzipped_extension_dir = None
for suffix in ("master", "main"):
unzipped_extension_dir = os.path.join(tmp_dir, extension_name + "-" + suffix)
if os.path.exists(unzipped_extension_dir):
break
extension_dir = extension_name
rmmkdir(extension_dir)
# copy the documentation
docs_dir = os.path.join(unzipped_extension_dir, "docs")
rmcopytree(docs_dir, extension_dir)
index = os.path.join(extension_dir, "index.md")
append_to_file(index, "[API Reference](/{}/api)".format(extension_name))
replace_in_file(index, r"title\:", r"layout: manual\ntitle:")
replace_in_file(index, r"title\:", r"language: en\ntitle:")
replace_in_file(index, r"title\:", r"github: {}\ntitle:".format(github_url))
process_doc_file(index, "en")
# generate a dummy markdown page with some front matter for the api doc
with open(os.path.join(extension_dir, "api.html"), "w") as f:
f.write(REFDOC_MD_FRONTMATTER.format("stable", extension_name, "extension", extension_name) + REFDOC_MD_BODY)
# create a json data file for the API reference
# we generate it from the .script_api file (YAML format)
refdoc = {}
elements = []
refdoc["elements"] = elements
info = {}
refdoc["info"] = info
for filename in find_files(unzipped_extension_dir, "*.script_api"):
api = yaml.safe_load(read_as_string(filename))[0]
info["group"] = "EXTENSIONS"
info["description"] = api.get("desc", "")
info["namespace"] = api.get("name", "")
info["name"] = api.get("name", "")
info["brief"] = api.get("name", "")
for m in api["members"]:
element = {}
element["parameters"] = []
for p in m.get("parameters", []):
param = {}
param["name"] = p.get("name", "")
param["doc"] = p.get("desc", "")
element["parameters"].append(param)
element["returnvalues"] = []
for r in m.get("returns", []):
ret = {}
ret["name"] = r.get("type", "")
ret["doc"] = r.get("desc", "")
element["returnvalues"].append(ret)
element["description"] = m.get("desc", "")
                member_type = m.get("type", "").upper()
                if member_type == "NUMBER":
                    member_type = "VARIABLE"
                element["type"] = member_type
element["name"] = m.get("name", "")
examples = []
for e in m.get("examples", []):
desc = e.get("desc", "")
examples.append(md.convert(desc))
element["examples"] = "".join(examples)
elements.append(element)
            break  # only the first .script_api file per extension is processed
# write the json data file
extension_data_dir = os.path.join("_data", "extensions")
makedirs(extension_data_dir)
extension_data_file = os.path.join(extension_data_dir, extension_name + ".json")
write_as_json(extension_data_file, refdoc)
def process_examples(download = False):
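    # build the examples project for HTML5 with bob.jar, copy the bundle plus
    # Markdown/images/scripts into the site and emit _data/examplesindex.json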
if download:
if os.path.exists(EXAMPLES_ZIP):
os.remove(EXAMPLES_ZIP)
download_file("https://github.com/defold/examples/archive/master.zip", ".", EXAMPLES_ZIP)
download_bob(get_sha1())
if not os.path.exists(EXAMPLES_ZIP):
print("File {} does not exist".format(EXAMPLES_ZIP))
sys.exit(1)
bob_jar = get_bob_filename(get_sha1())
if not os.path.exists(bob_jar):
print("File {} does not exist".format(bob_jar))
sys.exit(1)
print("Processing examples")
with tmpdir() as tmp_dir:
shutil.copyfile(EXAMPLES_ZIP, os.path.join(tmp_dir, EXAMPLES_ZIP))
unzip(os.path.join(tmp_dir, EXAMPLES_ZIP), tmp_dir)
unzipped_examples_dir = os.path.join(tmp_dir, "examples-master", "examples")
print("..building app")
shutil.copyfile(bob_jar, os.path.join(tmp_dir, bob_jar))
input_dir = os.path.join(tmp_dir, "examples-master")
subprocess.call([ "java", "-jar", os.path.join(tmp_dir, bob_jar), "--archive", "--platform", "js-web", "resolve", "build", "bundle" ], cwd=input_dir)
print("...copying app")
examples_dir = "examples"
rmcopytree(os.path.join(input_dir, "build", "default", "Defold-examples", "archive"), os.path.join(examples_dir, "archive"))
for file in ["Defoldexamples_asmjs.js", "Defoldexamples_wasm.js", "Defoldexamples.wasm", "dmloader.js", "index.html"]:
shutil.copyfile(os.path.join(input_dir, "build", "default", "Defold-examples", file), os.path.join(examples_dir, file))
print("...processing index.html")
replace_in_file(os.path.join(examples_dir, "index.html"), "\<\!DOCTYPE html\>.*\<body\>", "", flags=re.DOTALL)
replace_in_file(os.path.join(examples_dir, "index.html"), "\<\/body\>.*", "", flags=re.DOTALL)
replace_in_file(os.path.join(examples_dir, "index.html"), "resize_game_canvas\(\)\;", "")
replace_in_file(os.path.join(examples_dir, "index.html"), "window.addEventListener.*", "")
replace_in_file(os.path.join(examples_dir, "index.html"), 'width=\"720\" height=\"720\"', 'width="680" height="680" style="max-width:100%"')
replace_in_file(os.path.join(examples_dir, "index.html"), 'dmloader.js', '/examples/dmloader.js')
replace_in_file(os.path.join(examples_dir, "index.html"), '\"archive\"', '"/examples/archive"')
replace_in_file(os.path.join(examples_dir, "index.html"), 'EngineLoader\.load\(\"canvas\", \"Defoldexamples\"\)', 'EngineLoader.load("canvas", "/examples/Defoldexamples")')
replace_in_file(os.path.join(examples_dir, "index.html"), "engine_arguments: \[", "engine_arguments: [ '--config=examples.start={{ page.collection }}'")
os.rename(os.path.join(examples_dir, "index.html"), "_includes/example.html")
print("...copying markdown")
examplesindex = []
for filename in find_files(unzipped_examples_dir, "*.md"):
basename = os.path.basename(filename)
collection = filename.replace(tmp_dir, "").replace("/examples-master/examples/", "").replace("/" + basename, "")
permalink = "examples/" + collection + "/"
examplesindex.append({
"collection": collection,
"category": collection.split("/")[0].upper(),
"name": collection.split("/")[1].replace("_", " ").capitalize(),
"path": collection
})
replace_in_file(filename, "title:", "layout: example\npermalink: {}\ncollection: {}\ntitle:".format(permalink, collection))
md_file = os.path.join(examples_dir, filename.replace(unzipped_examples_dir, "")[1:])
makedirs(os.path.dirname(md_file))
shutil.copyfile(filename, md_file)
print("...copying images")
for filename in find_files(unzipped_examples_dir, "*.png"):
png_file = os.path.join(examples_dir, filename.replace(unzipped_examples_dir, "")[1:])
makedirs(os.path.dirname(png_file))
shutil.copyfile(filename, png_file)
for filename in find_files(unzipped_examples_dir, "*.jpg"):
jpg_file = os.path.join(examples_dir, filename.replace(unzipped_examples_dir, "")[1:])
makedirs(os.path.dirname(jpg_file))
shutil.copyfile(filename, jpg_file)
print("...copying scripts")
includes_dir = "_includes/examples"
rmmkdir(includes_dir)
for filename in find_files(unzipped_examples_dir, "*.script"):
script_file = os.path.join(includes_dir, filename.replace(unzipped_examples_dir, "")[1:]).replace(".script", "_script.md")
makedirs(os.path.dirname(script_file))
shutil.copyfile(filename, script_file)
for filename in find_files(unzipped_examples_dir, "*.gui_script"):
script_file = os.path.join(includes_dir, filename.replace(unzipped_examples_dir, "")[1:]).replace(".gui_script", "_gui_script.md")
makedirs(os.path.dirname(script_file))
shutil.copyfile(filename, script_file)
print("...generating index")
index_file = os.path.join("_data", "examplesindex.json")
if os.path.exists(index_file):
os.remove(index_file)
write_as_json(os.path.join("_data", "examplesindex.json"), examplesindex)
def process_codepad(download = False):
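    # build the CodePad project for HTML5 with bob.jar and copy the bundle
    # into the codepad/ directory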
if download:
if os.path.exists(CODEPAD_ZIP):
os.remove(CODEPAD_ZIP)
download_file("https://github.com/defold/codepad/archive/master.zip", ".", CODEPAD_ZIP)
download_bob(get_sha1())
if not os.path.exists(CODEPAD_ZIP):
print("File {} does not exist".format(CODEPAD_ZIP))
sys.exit(1)
bob_jar = get_bob_filename(get_sha1())
if not os.path.exists(bob_jar):
print("File {} does not exist".format(bob_jar))
sys.exit(1)
with tmpdir() as tmp_dir:
shutil.copyfile(CODEPAD_ZIP, os.path.join(tmp_dir, CODEPAD_ZIP))
unzip(os.path.join(tmp_dir, CODEPAD_ZIP), tmp_dir)
shutil.copyfile(bob_jar, os.path.join(tmp_dir, bob_jar))
input_dir = os.path.join(tmp_dir, "codepad-master")
subprocess.call([ "java", "-jar", os.path.join(tmp_dir, bob_jar), "--archive", "--platform", "js-web", "resolve", "distclean", "build", "bundle" ], cwd=input_dir)
codepad_dir = "codepad"
rmcopytree(os.path.join(input_dir, "build", "default", "DefoldCodePad"), codepad_dir)
def fix_tags_case(tags):
    # normalize tag casing: known acronyms stay upper-case,
    # all-lower-case tags are capitalized
    if tags:
        for i, v in enumerate(tags):
            if v.lower() == "gui":
                tags[i] = "GUI"
            elif v.lower() == "ai":
                tags[i] = "AI"
            elif v.islower():
                tags[i] = v.capitalize()
    return tags
def fix_platforms_case(platforms):
if platforms:
for i,platform in enumerate(platforms):
if platform.lower() == "ios":
platforms[i] = "iOS"
elif platform.lower() == "html5":
platforms[i] = "HTML5"
elif platform.lower() == "macos":
platforms[i] = "macOS"
else:
platforms[i] = platform.capitalize()
return platforms
def process_assets(tmp_dir):
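    # build the asset portal: per-asset, author and tag data files, Jekyll
    # collections and the index JSON files under _data/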
# Jekyll assets collection
asset_collection_dir = "assets"
rmmkdir(asset_collection_dir)
# Jekyll authors collection
author_collection_dir = "authors"
rmmkdir(author_collection_dir)
# Jekyll asset data
asset_data_dir = os.path.join("_data", "assets")
rmmkdir(asset_data_dir)
# Jekyll author data
author_data_dir = os.path.join("_data", "authors")
rmmkdir(author_data_dir)
# Jekyll tag data
tag_data_dir = os.path.join("_data", "tags")
rmmkdir(tag_data_dir)
# image data
image_dir = os.path.join("images", "assets")
rmcopytree(os.path.join(tmp_dir, "awesome-defold-master", "assets", "images"), image_dir)
assetindex = []
authorindex = {}
tagindex = {}
platformindex = {}
for filename in find_files(os.path.join(tmp_dir, "awesome-defold-master", "assets"), "*.json"):
basename = os.path.basename(filename)
print("Processing asset: {}".format(basename))
asset_id = basename.replace(".json", "")
# copy the data file as-is
asset_file = os.path.join(asset_data_dir, basename)
shutil.copyfile(filename, asset_file)
# read asset and add additional data
asset = read_as_json(asset_file)
fix_tags_case(asset["tags"])
fix_platforms_case(asset["platforms"])
author_name = asset["author"].encode('utf-8')
author_id = hashlib.md5(author_name).hexdigest()
asset["author_id"] = author_id
asset["asset_url"] = "https://github.com/defold/awesome-defold/blob/master/assets/%s.json" % asset_id
write_as_json(asset_file, asset)
# build asset index
assetindex.append({
"id": asset_id,
"tags": asset["tags"],
"platforms": asset["platforms"],
"stars": asset.get("stars") or 0,
"timestamp": asset.get("timestamp") or 0
})
# build tag index
for tag in asset["tags"]:
if not tag in tagindex:
tagindex[tag] = {
"id": tag.lower().replace(" ", ""),
"name": tag,
"assets": []
}
tagindex[tag]["assets"].append({
"id": asset_id,
"stars": asset.get("stars") or 0,
"timestamp": asset.get("timestamp") or 0
})
tagindex[tag]["assets"].sort(key=lambda x: x.get("id").lower())
# build platform index
for platform in asset["platforms"]:
if not platform in platformindex:
platformindex[platform] = {
"id": platform.lower().replace(" ", ""),
"name": platform,
"assets": []
}
platformindex[platform]["assets"].append({
"id": asset_id,
"stars": asset.get("stars") or 0
})
platformindex[platform]["assets"].sort(key=lambda x: x.get("id").lower())
# build author index
if not author_id in authorindex:
authorindex[author_id] = {
"id": author_id,
"name": author_name,
"assets": []
}
authorindex[author_id]["assets"].append({
"id": asset_id,
"stars": asset.get("stars") or 0
})
authorindex[author_id]["assets"].sort(key=lambda x: x.get("id").lower())
# generate a dummy markdown page with some front matter for each asset
with open(os.path.join(asset_collection_dir, basename.replace(".json", ".md")), "w") as f:
f.write(ASSET_MD_FRONTMATTER.format(asset_id, asset["name"], asset["description"].encode('utf-8').strip()))
# write asset index
assetindex.sort(key=lambda x: x.get("id").lower())
write_as_json(ASSETINDEX_JSON, assetindex)
# write author index
authorlist = authorindex.values()
authorlist.sort(key=lambda x: x.get("name").lower())
write_as_json(AUTHORINDEX_JSON, authorlist)
# write author data and a dummy markdown page with front matter
for author in authorlist:
author["assets"].sort(key=lambda x: x.get("id"))
filename = os.path.join(author_data_dir, author["id"] + ".json")
with open(filename, "w") as f:
f.write(json.dumps(author, indent=2, sort_keys=True))
with open(os.path.join(author_collection_dir, author["id"] + ".md"), "w") as f:
f.write(AUTHOR_MD_FRONTMATTER.format(author["id"], author["name"]))
# write tag index
taglist = tagindex.values()
taglist.sort(key=lambda x: x.get("id").lower())
write_as_json(TAGINDEX_JSON, taglist)
# write platform index
# platformlist = platformindex.values()
# platformlist.sort(key=lambda x: x.get("id").lower())
# write_as_json(PLATFORMINDEX_JSON, platformlist)
# Jekyll tags collection (one subdirectory per sort order)
tag_collection_dir = "tags"
sort_orders = ["stars", "timestamp"]
for sort_order in sort_orders:
rmmkdir(os.path.join(tag_collection_dir, sort_order))
# write tag data
for tag in taglist:
tag["assets"].sort(key=lambda x: x.get("id"))
# _data/tags
filename = os.path.join(tag_data_dir, tag["id"] + ".json")
with open(filename, "w") as f:
f.write(json.dumps(tag, indent=2, sort_keys=True))
# tags/stars, tags/timestamp
for sort_order in sort_orders:
with open(os.path.join(tag_collection_dir, sort_order, tag["id"] + ".md"), "w") as f:
f.write(TAG_SORT_MD_FRONTMATTER.format(tag["id"], tag["name"], sort_order))
def process_games(tmp_dir):
# image data
image_dir = os.path.join("images", "games")
rmcopytree(os.path.join(tmp_dir, "awesome-defold-master", "games", "images"), image_dir)
# update existing games with new info (except show+placement)
# maintain existing order
# add new games last
games = read_as_json(GAMES_JSON)
# read new games
for filename in find_files(os.path.join(tmp_dir, "awesome-defold-master", "games"), "*.json"):
basename = os.path.basename(filename)
print("Processing game: {}".format(basename))
# read new game and add additional data
game_id = basename.replace(".json", "")
new_game = read_as_json(filename)
new_game["id"] = game_id
# try to find game in existing list of games
found = False
for game in games:
if game.get("id") == new_game.get("id"):
found = True
# copy data from new game
# we do this to maintain the order of games in games.json
for k,v in new_game.items():
game[k] = v
# append new games last
if not found:
new_game["games"] = "half"
games.append(new_game)
write_as_json(GAMES_JSON, games)
def process_awesome(download = False):
if download:
if os.path.exists(AWESOME_ZIP):
os.remove(AWESOME_ZIP)
download_file("https://github.com/defold/awesome-defold/archive/master.zip", ".", AWESOME_ZIP)
if not os.path.exists(AWESOME_ZIP):
print("File {} does not exist".format(AWESOME_ZIP))
sys.exit(1)
with tmpdir() as tmp_dir:
shutil.copyfile(AWESOME_ZIP, os.path.join(tmp_dir, AWESOME_ZIP))
unzip(os.path.join(tmp_dir, AWESOME_ZIP), tmp_dir)
process_assets(tmp_dir)
process_games(tmp_dir)
LUA_APIS = [ "base", "bit", "coroutine", "debug", "io", "math", "os", "package", "socket", "string", "table" ]
def process_refdoc(download = False):
refindex = []
branchindex = [ "alpha", "beta", "stable" ]
ref_root_dir = "ref"
rmmkdir(ref_root_dir)
for branch in branchindex:
REFDOC_ZIP = "refdoc_{}.zip".format(branch)
REF_DATA_DIR = os.path.join("_data", "ref", branch)
REF_PAGE_DIR = os.path.join(ref_root_dir, branch)
if download:
if os.path.exists(REFDOC_ZIP):
os.remove(REFDOC_ZIP)
download_file("http://d.defold.com/archive/{}/engine/share/ref-doc.zip".format(get_sha1(branch)), ".", REFDOC_ZIP)
if not os.path.exists(REFDOC_ZIP):
print("File {} does not exist".format(REFDOC_ZIP))
sys.exit(1)
with tmpdir() as tmp_dir:
shutil.copyfile(REFDOC_ZIP, os.path.join(tmp_dir, REFDOC_ZIP))
unzip(os.path.join(tmp_dir, REFDOC_ZIP), tmp_dir)
# Jekyll page and data dir
rmmkdir(REF_PAGE_DIR)
rmmkdir(REF_DATA_DIR)
for file in os.listdir(os.path.join(tmp_dir, "doc")):
if file.endswith(".json"):
api = read_as_json(os.path.join(tmp_dir, "doc", file))
# ignore empty APIs (such as those moved to extensions)
if len(api["elements"]) > 0:
json_out_name = file.replace("_doc.json", "")
json_out_file = json_out_name + ".json"
# copy and rename file
shutil.copyfile(os.path.join(tmp_dir, "doc", file), os.path.join(REF_DATA_DIR, json_out_file))
namespace = api["info"]["namespace"]
type = "defold"
if namespace in LUA_APIS:
type = "lua"
elif namespace.startswith("dm"):
type = "c"
print("REFDOC " + json_out_name + " type: " + type)
# generate a dummy markdown page with some front matter for each ref doc
with open(os.path.join(REF_PAGE_DIR, file.replace("_doc.json", ".md")), "w") as f:
f.write(REFDOC_MD_FRONTMATTER.format(branch, json_out_name, type, json_out_name) + REFDOC_MD_BODY)
# build refdoc index
refindex.append({
"namespace": namespace,
"filename": json_out_name,
"url": "/ref/" + branch + "/" + json_out_name,
"branch": branch,
"type": type
})
# add extensions
for filename in os.listdir("."):
if filename.startswith("extension-") and os.path.isdir(filename):
refindex.append({
"namespace": filename,
"url": "/" + filename + "/api",
"branch": branch,
"type": "extension"
})
# copy stable files to ref/ for backwards compatibility
for item in os.listdir(os.path.join("ref", "stable")):
s = os.path.join("ref", "stable", item)
d = os.path.join("ref", item)
if not os.path.isdir(s):
shutil.copy2(s, d)
# write refdoc index
write_as_json(os.path.join("_data", "refindex.json"), refindex)
# write branch index
write_as_json(os.path.join("_data", "branchindex.json"), branchindex)
def process_file_for_indexing(filename):
with open(filename, 'r') as file:
data = file.read().replace('\n', ' ')
# replace the math notations
    data = re.sub(r"\$\$.*?\$\$", " ", data)
    # remove the html tags (but leave the text behind)
    # note: the replacement must be a raw string, otherwise "\2" is the
    # control character \x02 instead of a backreference to the tag body
    data = re.sub(r"<(.*?)>(.*?)</\1>", r"\2", data)
    # remove closed html tags "<tag />"
    data = re.sub(r"<\w+?\s.*?/>", " ", data)
    # Cleanup markdown links, keeping only the link text (raw "\1" for the same reason)
    data = re.sub(r"\[(.*?)\]\(.*?\)", r"\1", data)
# finally, remove certain characters
# (do this last, so that any regexp above won't break)
#data = re.sub(r"(=|\.|\(|\))+", " ", data)
data = re.sub(r"(=|\(|\))+", " ", data)
return data.decode('utf-8')
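# Hedged worked example with an assumed input (not from the source):
#   "See [the manual](/manuals/intro), <code>go.get_position</code> and $$x^2$$"
# comes out of the substitutions above as roughly
#   "See the manual, go.get_position and  "
# i.e. markup is stripped while the searchable words survive for the index.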
def generate_searchindex():
searchindex = []
def append_ref_doc(filename, data):
searchindex.append({
"id": filename.replace("_data/", "").replace(".json", ""),
"type": "refdoc",
"data": data
})
def append_manual(filename, data):
searchindex.append({
"id": filename.replace("_", "").replace(".md", ""),
"type": "manual",
"data": data
})
def append_asset(filename, data):
searchindex.append({
"id": filename.replace("_data/", "").replace(".json", ""),
"type": "asset",
"data": data
})
for filename in find_files("manuals", "*.md"):
data = process_file_for_indexing(filename)
append_manual(filename, data)
for filename in find_files(os.path.join("_data", "assets"), "*.json"):
r = read_as_json(filename)
id = os.path.basename(filename).replace(".json", "")
append_asset(filename, id + " " + r["name"] + " " + r["description"])
for filename in find_files(os.path.join("_data", "ref", "stable"), "*.json"):
r = read_as_json(filename)
for element in r["elements"]:
name = element["name"]
append_ref_doc(filename, name)
if "." in name:
for part in name.split("."):
append_ref_doc(filename, part)
elif "::" in name:
for part in name.split("::"):
append_ref_doc(filename, part)
# manually create a builder without stemming, stop words etc
# if we use the standard builder pipeline functions we will end up
# with partial search words like go.get_posit instead of go.get_position
builder = lunr.builder.Builder()
builder.pipeline.add(trimmer.trimmer)
builder.ref("id")
for field in ('type', 'data'):
if isinstance(field, dict):
builder.field(**field)
else:
builder.field(field)
for document in searchindex:
if isinstance(document, (tuple, list)):
builder.add(document[0], attributes=document[1])
else:
builder.add(document)
lunrindex = builder.build()
# lunrindex = lunr.lunr(ref='id', fields=('type', 'data'), documents=searchindex)
write_as_json("searchindex.json", lunrindex.serialize())
def commit_changes(githubtoken):
if githubtoken is None:
print("You must specific a GitHub token")
sys.exit(1)
call("git config --global user.name '<EMAIL>'")
call("git config --global user.email '<EMAIL>'")
call("git add -A")
# only commit if the diff isn't empty, ie there is a change
# https://stackoverflow.com/a/8123841/1266551
call("git diff-index --quiet HEAD || git commit -m 'Site changes [skip-ci]'")
call("git push 'https://%s@github.com/defold/defold.github.io.git' HEAD:master" % (githubtoken))
ALL_COMMANDS = [ "docs", "refdoc", "awesome", "examples", "codepad", "commit", "searchindex", "extensions" ]
ALL_COMMANDS.sort()
parser = ArgumentParser()
parser.add_argument('commands', nargs="+", help='Commands (' + ', '.join(ALL_COMMANDS) + ', all, help)')
parser.add_argument("--githubtoken", dest="githubtoken", help="Authentication token for GitHub API and ")
parser.add_argument("--extension", dest="extensions", action='append', help="Which extension to process")
parser.add_argument("--download", dest="download", action='store_true', help="Download updated content for the command(s) in question")
args = parser.parse_args()
help = """
COMMANDS:
docs = Process the docs (manuals, tutorials and faq)
refdoc = Process the API reference
awesome = Process the awesome assets and games lists (from awesome-defold)
examples = Build the examples
codepad = Build the Defold CodePad
commit = Commit changed files (requires --githubtoken)
searchindex = Update the static Lunr search index
extensions = Process the docs for official extensions (use --extension to specify which extensions to process)
all = Run all of the above commands
help = Show this help
"""
if "all" in args.commands:
args.commands.remove("all")
commands = []
commands.extend(ALL_COMMANDS)
commands.extend(args.commands)
args.commands = commands
for command in args.commands:
if command == "help":
parser.print_help()
print(help)
sys.exit(0)
elif command == "docs":
process_docs(download = args.download)
elif command == "extensions":
for extension in args.extensions:
process_extension(extension, download = args.download)
elif command == "examples":
process_examples(download = args.download)
elif command == "refdoc":
process_refdoc(download = args.download)
elif command == "awesome":
process_awesome(download = args.download)
elif command == "codepad":
process_codepad(download = args.download)
elif command == "searchindex":
generate_searchindex()
elif command == "commit":
commit_changes(args.githubtoken)
else:
print("Unknown command {}".format(command))
|
[
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"os.walk",
"json.dumps",
"shutil.rmtree",
"os.path.join",
"urllib2.urlopen",
"os.path.dirname",
"os.path.exists",
"tempfile.mkdtemp",
"requests.get",
"markdown.Markdown",
"shutil.copyfile",
"re.sub",
"json.dump",
"hashlib.md5",
"os.path.basename",
"shutil.copy2",
"os.system",
"os.listdir",
"sys.exit",
"json.load",
"zipfile.ZipFile",
"os.makedirs",
"os.path.isdir",
"os.environ.get",
"lunr.builder.Builder",
"shutil.copytree",
"fnmatch.fnmatch"
] |
[((655, 695), 'os.path.join', 'os.path.join', (['"""_data"""', '"""assetindex.json"""'], {}), "('_data', 'assetindex.json')\n", (667, 695), False, 'import os\n'), ((709, 744), 'os.path.join', 'os.path.join', (['"""_data"""', '"""games.json"""'], {}), "('_data', 'games.json')\n", (721, 744), False, 'import os\n'), ((764, 805), 'os.path.join', 'os.path.join', (['"""_data"""', '"""authorindex.json"""'], {}), "('_data', 'authorindex.json')\n", (776, 805), False, 'import os\n'), ((822, 860), 'os.path.join', 'os.path.join', (['"""_data"""', '"""tagindex.json"""'], {}), "('_data', 'tagindex.json')\n", (834, 860), False, 'import os\n'), ((882, 925), 'os.path.join', 'os.path.join', (['"""_data"""', '"""platformindex.json"""'], {}), "('_data', 'platformindex.json')\n", (894, 925), False, 'import os\n'), ((942, 970), 'os.path.join', 'os.path.join', (['"""_data"""', '"""ref"""'], {}), "('_data', 'ref')\n", (954, 970), False, 'import os\n'), ((38836, 38852), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (38850, 38852), False, 'from argparse import ArgumentParser\n'), ((1515, 1533), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1531, 1533), False, 'import tempfile\n'), ((1628, 1647), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1642, 1647), False, 'import os\n'), ((1715, 1728), 'os.mkdir', 'os.mkdir', (['dir'], {}), '(dir)\n', (1723, 1728), False, 'import os\n'), ((1776, 1801), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (1791, 1801), False, 'import shutil\n'), ((1853, 1871), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (1866, 1871), False, 'import os\n'), ((2251, 2266), 'os.system', 'os.system', (['args'], {}), '(args)\n', (2260, 2266), False, 'import os\n'), ((2396, 2426), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2411, 2426), False, 'import zipfile\n'), ((2611, 2646), 'os.path.join', 'os.path.join', (['destination', 'filename'], {}), '(destination, filename)\n', (2623, 2646), False, 'import os\n'), ((2654, 2674), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2668, 2674), False, 'import os\n'), ((3250, 3267), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3262, 3267), False, 'import requests\n'), ((3469, 3486), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3481, 3486), False, 'import requests\n'), ((4242, 4259), 'os.walk', 'os.walk', (['root_dir'], {}), '(root_dir)\n', (4249, 4259), False, 'import os\n'), ((6919, 6953), 'os.environ.get', 'os.environ.get', (['"""DM_DOC_DIR"""', 'None'], {}), "('DM_DOC_DIR', None)\n", (6933, 6953), False, 'import os\n'), ((12436, 12596), 'markdown.Markdown', 'Markdown', ([], {'extensions': "['markdown.extensions.fenced_code', 'markdown.extensions.def_list',\n 'markdown.extensions.codehilite', 'markdown.extensions.tables']"}), "(extensions=['markdown.extensions.fenced_code',\n 'markdown.extensions.def_list', 'markdown.extensions.codehilite',\n 'markdown.extensions.tables'])\n", (12444, 12596), False, 'from markdown import Markdown\n'), ((24083, 24114), 'os.path.join', 'os.path.join', (['"""_data"""', '"""assets"""'], {}), "('_data', 'assets')\n", (24095, 24114), False, 'import os\n'), ((24191, 24223), 'os.path.join', 'os.path.join', (['"""_data"""', '"""authors"""'], {}), "('_data', 'authors')\n", (24203, 24223), False, 'import os\n'), ((24295, 24324), 'os.path.join', 'os.path.join', (['"""_data"""', '"""tags"""'], {}), "('_data', 'tags')\n", (24307, 24324), False, 'import os\n'), 
((24385, 24417), 'os.path.join', 'os.path.join', (['"""images"""', '"""assets"""'], {}), "('images', 'assets')\n", (24397, 24417), False, 'import os\n'), ((29556, 29587), 'os.path.join', 'os.path.join', (['"""images"""', '"""games"""'], {}), "('images', 'games')\n", (29568, 29587), False, 'import os\n'), ((37526, 37548), 'lunr.builder.Builder', 'lunr.builder.Builder', ([], {}), '()\n', (37546, 37548), False, 'import lunr\n'), ((1583, 1602), 'shutil.rmtree', 'shutil.rmtree', (['name'], {}), '(name)\n', (1596, 1602), False, 'import shutil\n'), ((1657, 1675), 'shutil.rmtree', 'shutil.rmtree', (['dir'], {}), '(dir)\n', (1670, 1675), False, 'import shutil\n'), ((1954, 1969), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (1964, 1969), False, 'import os\n'), ((2160, 2180), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2174, 2180), False, 'import os\n'), ((2190, 2207), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2201, 2207), False, 'import os\n'), ((2292, 2303), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2300, 2303), False, 'import sys\n'), ((2733, 2744), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2741, 2744), False, 'import sys\n'), ((2818, 2838), 'urllib2.urlopen', 'urllib2.urlopen', (['url'], {}), '(url)\n', (2833, 2838), False, 'import urllib2\n'), ((3025, 3089), 'requests.get', 'requests.get', (['url'], {'headers': "{'Authorization': 'token %s' % token}"}), "(url, headers={'Authorization': 'token %s' % token})\n", (3037, 3089), False, 'import requests\n'), ((4016, 4044), 'os.path.exists', 'os.path.exists', (['bob_filename'], {}), '(bob_filename)\n', (4030, 4044), False, 'import os\n'), ((4643, 4655), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4652, 4655), False, 'import json\n'), ((4736, 4780), 'json.dump', 'json.dump', (['data', 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), '(data, f, indent=4, sort_keys=True)\n', (4745, 4780), False, 'import json\n'), ((6534, 6561), 'os.path.join', 'os.path.join', (['language', 'dir'], {}), '(language, dir)\n', (6546, 6561), False, 'import os\n'), ((6643, 6667), 'os.path.exists', 'os.path.exists', (['DOCS_ZIP'], {}), '(DOCS_ZIP)\n', (6657, 6667), False, 'import os\n'), ((6802, 6826), 'os.path.exists', 'os.path.exists', (['DOCS_ZIP'], {}), '(DOCS_ZIP)\n', (6816, 6826), False, 'import os\n'), ((6894, 6905), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6902, 6905), False, 'import sys\n'), ((7932, 7972), 'os.path.join', 'os.path.join', (['"""_data"""', '"""learnindex.json"""'], {}), "('_data', 'learnindex.json')\n", (7944, 7972), False, 'import os\n'), ((7984, 8010), 'os.path.exists', 'os.path.exists', (['index_file'], {}), '(index_file)\n', (7998, 8010), False, 'import os\n'), ((11154, 11202), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', '"""en"""', '"""tutorials"""'], {}), "(DOC_DIR, 'docs', 'en', 'tutorials')\n", (11166, 11202), False, 'import os\n'), ((12159, 12214), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', '"""en"""', '"""shared"""', '"""images"""'], {}), "(DOC_DIR, 'docs', 'en', 'shared', 'images')\n", (12171, 12214), False, 'import os\n'), ((12247, 12279), 'os.path.join', 'os.path.join', (['"""shared"""', '"""images"""'], {}), "('shared', 'images')\n", (12259, 12279), False, 'import os\n'), ((12731, 12760), 'os.path.exists', 'os.path.exists', (['extension_zip'], {}), '(extension_zip)\n', (12745, 12760), False, 'import os\n'), ((12992, 13021), 'os.path.exists', 'os.path.exists', (['extension_zip'], {}), '(extension_zip)\n', (13006, 13021), False, 'import 
os\n'), ((13093, 13104), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13101, 13104), False, 'import sys\n'), ((13688, 13732), 'os.path.join', 'os.path.join', (['unzipped_extension_dir', '"""docs"""'], {}), "(unzipped_extension_dir, 'docs')\n", (13700, 13732), False, 'import os\n'), ((13793, 13832), 'os.path.join', 'os.path.join', (['extension_dir', '"""index.md"""'], {}), "(extension_dir, 'index.md')\n", (13805, 13832), False, 'import os\n'), ((16345, 16380), 'os.path.join', 'os.path.join', (['"""_data"""', '"""extensions"""'], {}), "('_data', 'extensions')\n", (16357, 16380), False, 'import os\n'), ((16448, 16506), 'os.path.join', 'os.path.join', (['extension_data_dir', "(extension_name + '.json')"], {}), "(extension_data_dir, extension_name + '.json')\n", (16460, 16506), False, 'import os\n'), ((16628, 16656), 'os.path.exists', 'os.path.exists', (['EXAMPLES_ZIP'], {}), '(EXAMPLES_ZIP)\n', (16642, 16656), False, 'import os\n'), ((16837, 16865), 'os.path.exists', 'os.path.exists', (['EXAMPLES_ZIP'], {}), '(EXAMPLES_ZIP)\n', (16851, 16865), False, 'import os\n'), ((16936, 16947), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16944, 16947), False, 'import sys\n'), ((17003, 17026), 'os.path.exists', 'os.path.exists', (['bob_jar'], {}), '(bob_jar)\n', (17017, 17026), False, 'import os\n'), ((17092, 17103), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17100, 17103), False, 'import sys\n'), ((17335, 17387), 'os.path.join', 'os.path.join', (['tmp_dir', '"""examples-master"""', '"""examples"""'], {}), "(tmp_dir, 'examples-master', 'examples')\n", (17347, 17387), False, 'import os\n'), ((17506, 17546), 'os.path.join', 'os.path.join', (['tmp_dir', '"""examples-master"""'], {}), "(tmp_dir, 'examples-master')\n", (17518, 17546), False, 'import os\n'), ((21734, 21777), 'os.path.join', 'os.path.join', (['"""_data"""', '"""examplesindex.json"""'], {}), "('_data', 'examplesindex.json')\n", (21746, 21777), False, 'import os\n'), ((21789, 21815), 'os.path.exists', 'os.path.exists', (['index_file'], {}), '(index_file)\n', (21803, 21815), False, 'import os\n'), ((22002, 22029), 'os.path.exists', 'os.path.exists', (['CODEPAD_ZIP'], {}), '(CODEPAD_ZIP)\n', (22016, 22029), False, 'import os\n'), ((22207, 22234), 'os.path.exists', 'os.path.exists', (['CODEPAD_ZIP'], {}), '(CODEPAD_ZIP)\n', (22221, 22234), False, 'import os\n'), ((22304, 22315), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (22312, 22315), False, 'import sys\n'), ((22371, 22394), 'os.path.exists', 'os.path.exists', (['bob_jar'], {}), '(bob_jar)\n', (22385, 22394), False, 'import os\n'), ((22460, 22471), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (22468, 22471), False, 'import sys\n'), ((22722, 22761), 'os.path.join', 'os.path.join', (['tmp_dir', '"""codepad-master"""'], {}), "(tmp_dir, 'codepad-master')\n", (22734, 22761), False, 'import os\n'), ((24433, 24499), 'os.path.join', 'os.path.join', (['tmp_dir', '"""awesome-defold-master"""', '"""assets"""', '"""images"""'], {}), "(tmp_dir, 'awesome-defold-master', 'assets', 'images')\n", (24445, 24499), False, 'import os\n'), ((24626, 24682), 'os.path.join', 'os.path.join', (['tmp_dir', '"""awesome-defold-master"""', '"""assets"""'], {}), "(tmp_dir, 'awesome-defold-master', 'assets')\n", (24638, 24682), False, 'import os\n'), ((24714, 24740), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (24730, 24740), False, 'import os\n'), ((24902, 24940), 'os.path.join', 'os.path.join', (['asset_data_dir', 'basename'], {}), '(asset_data_dir, basename)\n', (24914, 
24940), False, 'import os\n'), ((24949, 24986), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'asset_file'], {}), '(filename, asset_file)\n', (24964, 24986), False, 'import shutil\n'), ((28041, 28094), 'os.path.join', 'os.path.join', (['author_data_dir', "(author['id'] + '.json')"], {}), "(author_data_dir, author['id'] + '.json')\n", (28053, 28094), False, 'import os\n'), ((29077, 29124), 'os.path.join', 'os.path.join', (['tag_data_dir', "(tag['id'] + '.json')"], {}), "(tag_data_dir, tag['id'] + '.json')\n", (29089, 29124), False, 'import os\n'), ((29603, 29668), 'os.path.join', 'os.path.join', (['tmp_dir', '"""awesome-defold-master"""', '"""games"""', '"""images"""'], {}), "(tmp_dir, 'awesome-defold-master', 'games', 'images')\n", (29615, 29668), False, 'import os\n'), ((29893, 29948), 'os.path.join', 'os.path.join', (['tmp_dir', '"""awesome-defold-master"""', '"""games"""'], {}), "(tmp_dir, 'awesome-defold-master', 'games')\n", (29905, 29948), False, 'import os\n'), ((29980, 30006), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (29996, 30006), False, 'import os\n'), ((30846, 30873), 'os.path.exists', 'os.path.exists', (['AWESOME_ZIP'], {}), '(AWESOME_ZIP)\n', (30860, 30873), False, 'import os\n'), ((31025, 31052), 'os.path.exists', 'os.path.exists', (['AWESOME_ZIP'], {}), '(AWESOME_ZIP)\n', (31039, 31052), False, 'import os\n'), ((31122, 31133), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (31130, 31133), False, 'import sys\n'), ((31736, 31772), 'os.path.join', 'os.path.join', (['"""_data"""', '"""ref"""', 'branch'], {}), "('_data', 'ref', branch)\n", (31748, 31772), False, 'import os\n'), ((31796, 31830), 'os.path.join', 'os.path.join', (['ref_root_dir', 'branch'], {}), '(ref_root_dir, branch)\n', (31808, 31830), False, 'import os\n'), ((34215, 34230), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (34225, 34230), False, 'import os\n'), ((34624, 34653), 'os.path.join', 'os.path.join', (['"""ref"""', '"""stable"""'], {}), "('ref', 'stable')\n", (34636, 34653), False, 'import os\n'), ((34668, 34703), 'os.path.join', 'os.path.join', (['"""ref"""', '"""stable"""', 'item'], {}), "('ref', 'stable', item)\n", (34680, 34703), False, 'import os\n'), ((34716, 34741), 'os.path.join', 'os.path.join', (['"""ref"""', 'item'], {}), "('ref', item)\n", (34728, 34741), False, 'import os\n'), ((34851, 34889), 'os.path.join', 'os.path.join', (['"""_data"""', '"""refindex.json"""'], {}), "('_data', 'refindex.json')\n", (34863, 34889), False, 'import os\n'), ((34945, 34986), 'os.path.join', 'os.path.join', (['"""_data"""', '"""branchindex.json"""'], {}), "('_data', 'branchindex.json')\n", (34957, 34986), False, 'import os\n'), ((35181, 35217), 're.sub', 're.sub', (['"""\\\\$\\\\$.*?\\\\$\\\\$"""', '""" """', 'data'], {}), "('\\\\$\\\\$.*?\\\\$\\\\$', ' ', data)\n", (35187, 35217), False, 'import re\n'), ((35289, 35331), 're.sub', 're.sub', (['"""<(.*?)>(.*?)</\\\\1>"""', '"""\x02"""', 'data'], {}), "('<(.*?)>(.*?)</\\\\1>', '\\x02', data)\n", (35295, 35331), False, 'import re\n'), ((35390, 35425), 're.sub', 're.sub', (['"""<\\\\w+?\\\\s.*?/>"""', '""" """', 'data'], {}), "('<\\\\w+?\\\\s.*?/>', ' ', data)\n", (35396, 35425), False, 'import re\n'), ((35474, 35518), 're.sub', 're.sub', (['"""\\\\[(.*?)\\\\]\\\\(.*?\\\\)"""', '"""\x01"""', 'data'], {}), "('\\\\[(.*?)\\\\]\\\\(.*?\\\\)', '\\x01', data)\n", (35480, 35518), False, 'import re\n'), ((35689, 35722), 're.sub', 're.sub', (['"""(=|\\\\(|\\\\))+"""', '""" """', 'data'], {}), "('(=|\\\\(|\\\\))+', ' ', 
data)\n", (35695, 35722), False, 'import re\n'), ((36593, 36624), 'os.path.join', 'os.path.join', (['"""_data"""', '"""assets"""'], {}), "('_data', 'assets')\n", (36605, 36624), False, 'import os\n'), ((36843, 36881), 'os.path.join', 'os.path.join', (['"""_data"""', '"""ref"""', '"""stable"""'], {}), "('_data', 'ref', 'stable')\n", (36855, 36881), False, 'import os\n'), ((38257, 38268), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (38265, 38268), False, 'import sys\n'), ((40151, 40162), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (40159, 40162), False, 'import sys\n'), ((1888, 1906), 'os.path.isdir', 'os.path.isdir', (['dst'], {}), '(dst)\n', (1901, 1906), False, 'import os\n'), ((1920, 1936), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (1931, 1936), False, 'import os\n'), ((2102, 2127), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dst'], {}), '(src, dst)\n', (2117, 2127), False, 'import shutil\n'), ((4319, 4347), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4331, 4347), False, 'import os\n'), ((4363, 4402), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['filename', 'file_pattern'], {}), '(filename, file_pattern)\n', (4378, 4402), False, 'import fnmatch\n'), ((6681, 6700), 'os.remove', 'os.remove', (['DOCS_ZIP'], {}), '(DOCS_ZIP)\n', (6690, 6700), False, 'import os\n'), ((7162, 7197), 'os.path.join', 'os.path.join', (['tmp_dir', '"""doc-master"""'], {}), "(tmp_dir, 'doc-master')\n", (7174, 7197), False, 'import os\n'), ((7296, 7343), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', '"""languages.json"""'], {}), "(DOC_DIR, 'docs', 'languages.json')\n", (7308, 7343), False, 'import os\n'), ((7824, 7867), 'os.path.join', 'os.path.join', (['"""_data"""', '"""languageindex.json"""'], {}), "('_data', 'languageindex.json')\n", (7836, 7867), False, 'import os\n'), ((8024, 8045), 'os.remove', 'os.remove', (['index_file'], {}), '(index_file)\n', (8033, 8045), False, 'import os\n'), ((8070, 8116), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', '"""en"""', '"""en.json"""'], {}), "(DOC_DIR, 'docs', 'en', 'en.json')\n", (8082, 8116), False, 'import os\n'), ((8311, 8361), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', 'language', '"""manuals"""'], {}), "(DOC_DIR, 'docs', language, 'manuals')\n", (8323, 8361), False, 'import os\n'), ((8377, 8408), 'os.path.exists', 'os.path.exists', (['manuals_src_dir'], {}), '(manuals_src_dir)\n', (8391, 8408), False, 'import os\n'), ((9416, 9462), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', 'language', '"""faq"""'], {}), "(DOC_DIR, 'docs', language, 'faq')\n", (9428, 9462), False, 'import os\n'), ((9478, 9505), 'os.path.exists', 'os.path.exists', (['faq_src_dir'], {}), '(faq_src_dir)\n', (9492, 9505), False, 'import os\n'), ((10451, 10496), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', '"""en"""', '"""shared"""'], {}), "(DOC_DIR, 'docs', 'en', 'shared')\n", (10463, 10496), False, 'import os\n'), ((10535, 10584), 'os.path.join', 'os.path.join', (['DOC_DIR', '"""docs"""', 'language', '"""shared"""'], {}), "(DOC_DIR, 'docs', language, 'shared')\n", (10547, 10584), False, 'import os\n'), ((10623, 10668), 'os.path.join', 'os.path.join', (['"""_includes"""', '"""shared"""', 'language'], {}), "('_includes', 'shared', language)\n", (10635, 10668), False, 'import os\n'), ((10760, 10799), 'os.path.exists', 'os.path.exists', (['shared_includes_src_dir'], {}), '(shared_includes_src_dir)\n', (10774, 10799), False, 'import os\n'), ((12774, 12798), 'os.remove', 
'os.remove', (['extension_zip'], {}), '(extension_zip)\n', (12783, 12798), False, 'import os\n'), ((13218, 13254), 'os.path.join', 'os.path.join', (['tmp_dir', 'extension_zip'], {}), '(tmp_dir, extension_zip)\n', (13230, 13254), False, 'import os\n'), ((13270, 13306), 'os.path.join', 'os.path.join', (['tmp_dir', 'extension_zip'], {}), '(tmp_dir, extension_zip)\n', (13282, 13306), False, 'import os\n'), ((13434, 13486), 'os.path.join', 'os.path.join', (['tmp_dir', "(extension_name + '-' + suffix)"], {}), "(tmp_dir, extension_name + '-' + suffix)\n", (13446, 13486), False, 'import os\n'), ((13502, 13540), 'os.path.exists', 'os.path.exists', (['unzipped_extension_dir'], {}), '(unzipped_extension_dir)\n', (13516, 13540), False, 'import os\n'), ((16670, 16693), 'os.remove', 'os.remove', (['EXAMPLES_ZIP'], {}), '(EXAMPLES_ZIP)\n', (16679, 16693), False, 'import os\n'), ((17206, 17241), 'os.path.join', 'os.path.join', (['tmp_dir', 'EXAMPLES_ZIP'], {}), '(tmp_dir, EXAMPLES_ZIP)\n', (17218, 17241), False, 'import os\n'), ((17257, 17292), 'os.path.join', 'os.path.join', (['tmp_dir', 'EXAMPLES_ZIP'], {}), '(tmp_dir, EXAMPLES_ZIP)\n', (17269, 17292), False, 'import os\n'), ((17454, 17484), 'os.path.join', 'os.path.join', (['tmp_dir', 'bob_jar'], {}), '(tmp_dir, bob_jar)\n', (17466, 17484), False, 'import os\n'), ((17791, 17864), 'os.path.join', 'os.path.join', (['input_dir', '"""build"""', '"""default"""', '"""Defold-examples"""', '"""archive"""'], {}), "(input_dir, 'build', 'default', 'Defold-examples', 'archive')\n", (17803, 17864), False, 'import os\n'), ((17866, 17903), 'os.path.join', 'os.path.join', (['examples_dir', '"""archive"""'], {}), "(examples_dir, 'archive')\n", (17878, 17903), False, 'import os\n'), ((18231, 18271), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18243, 18271), False, 'import os\n'), ((18350, 18390), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18362, 18390), False, 'import os\n'), ((18453, 18493), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18465, 18493), False, 'import os\n'), ((18551, 18591), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18563, 18591), False, 'import os\n'), ((18650, 18690), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18662, 18690), False, 'import os\n'), ((18799, 18839), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18811, 18839), False, 'import os\n'), ((18905, 18945), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (18917, 18945), False, 'import os\n'), ((19009, 19049), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (19021, 19049), False, 'import os\n'), ((19190, 19230), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (19202, 19230), False, 'import os\n'), ((19345, 19385), 'os.path.join', 'os.path.join', (['examples_dir', '"""index.html"""'], {}), "(examples_dir, 'index.html')\n", (19357, 19385), False, 'import os\n'), ((19568, 19594), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (19584, 19594), False, 'import os\n'), ((20339, 20373), 
'shutil.copyfile', 'shutil.copyfile', (['filename', 'md_file'], {}), '(filename, md_file)\n', (20354, 20373), False, 'import shutil\n'), ((20637, 20672), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'png_file'], {}), '(filename, png_file)\n', (20652, 20672), False, 'import shutil\n'), ((20900, 20935), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'jpg_file'], {}), '(filename, jpg_file)\n', (20915, 20935), False, 'import shutil\n'), ((21316, 21354), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'script_file'], {}), '(filename, script_file)\n', (21331, 21354), False, 'import shutil\n'), ((21636, 21674), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'script_file'], {}), '(filename, script_file)\n', (21651, 21674), False, 'import shutil\n'), ((21829, 21850), 'os.remove', 'os.remove', (['index_file'], {}), '(index_file)\n', (21838, 21850), False, 'import os\n'), ((21873, 21916), 'os.path.join', 'os.path.join', (['"""_data"""', '"""examplesindex.json"""'], {}), "('_data', 'examplesindex.json')\n", (21885, 21916), False, 'import os\n'), ((22043, 22065), 'os.remove', 'os.remove', (['CODEPAD_ZIP'], {}), '(CODEPAD_ZIP)\n', (22052, 22065), False, 'import os\n'), ((22540, 22574), 'os.path.join', 'os.path.join', (['tmp_dir', 'CODEPAD_ZIP'], {}), '(tmp_dir, CODEPAD_ZIP)\n', (22552, 22574), False, 'import os\n'), ((22590, 22624), 'os.path.join', 'os.path.join', (['tmp_dir', 'CODEPAD_ZIP'], {}), '(tmp_dir, CODEPAD_ZIP)\n', (22602, 22624), False, 'import os\n'), ((22669, 22699), 'os.path.join', 'os.path.join', (['tmp_dir', 'bob_jar'], {}), '(tmp_dir, bob_jar)\n', (22681, 22699), False, 'import os\n'), ((22985, 23045), 'os.path.join', 'os.path.join', (['input_dir', '"""build"""', '"""default"""', '"""DefoldCodePad"""'], {}), "(input_dir, 'build', 'default', 'DefoldCodePad')\n", (22997, 23045), False, 'import os\n'), ((28890, 28934), 'os.path.join', 'os.path.join', (['tag_collection_dir', 'sort_order'], {}), '(tag_collection_dir, sort_order)\n', (28902, 28934), False, 'import os\n'), ((30887, 30909), 'os.remove', 'os.remove', (['AWESOME_ZIP'], {}), '(AWESOME_ZIP)\n', (30896, 30909), False, 'import os\n'), ((31202, 31236), 'os.path.join', 'os.path.join', (['tmp_dir', 'AWESOME_ZIP'], {}), '(tmp_dir, AWESOME_ZIP)\n', (31214, 31236), False, 'import os\n'), ((31252, 31286), 'os.path.join', 'os.path.join', (['tmp_dir', 'AWESOME_ZIP'], {}), '(tmp_dir, AWESOME_ZIP)\n', (31264, 31286), False, 'import os\n'), ((31868, 31894), 'os.path.exists', 'os.path.exists', (['REFDOC_ZIP'], {}), '(REFDOC_ZIP)\n', (31882, 31894), False, 'import os\n'), ((32077, 32103), 'os.path.exists', 'os.path.exists', (['REFDOC_ZIP'], {}), '(REFDOC_ZIP)\n', (32091, 32103), False, 'import os\n'), ((32180, 32191), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (32188, 32191), False, 'import sys\n'), ((34757, 34773), 'os.path.isdir', 'os.path.isdir', (['s'], {}), '(s)\n', (34770, 34773), False, 'import os\n'), ((34787, 34805), 'shutil.copy2', 'shutil.copy2', (['s', 'd'], {}), '(s, d)\n', (34799, 34805), False, 'import shutil\n'), ((1992, 2012), 'os.path.join', 'os.path.join', (['src', 'f'], {}), '(src, f)\n', (2004, 2012), False, 'import os\n'), ((2014, 2034), 'os.path.join', 'os.path.join', (['dst', 'f'], {}), '(dst, f)\n', (2026, 2034), False, 'import os\n'), ((2073, 2092), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (2087, 2092), False, 'import os\n'), ((7047, 7078), 'os.path.join', 'os.path.join', (['tmp_dir', 'DOCS_ZIP'], {}), '(tmp_dir, DOCS_ZIP)\n', (7059, 7078), False, 'import os\n'), ((7098, 
7129), 'os.path.join', 'os.path.join', (['tmp_dir', 'DOCS_ZIP'], {}), '(tmp_dir, DOCS_ZIP)\n', (7110, 7129), False, 'import os\n'), ((10920, 10967), 'os.path.join', 'os.path.join', (['shared_includes_dst_dir', '"""images"""'], {}), "(shared_includes_dst_dir, 'images')\n", (10932, 10967), False, 'import os\n'), ((14274, 14313), 'os.path.join', 'os.path.join', (['extension_dir', '"""api.html"""'], {}), "(extension_dir, 'api.html')\n", (14286, 14313), False, 'import os\n'), ((17589, 17619), 'os.path.join', 'os.path.join', (['tmp_dir', 'bob_jar'], {}), '(tmp_dir, bob_jar)\n', (17601, 17619), False, 'import os\n'), ((18060, 18128), 'os.path.join', 'os.path.join', (['input_dir', '"""build"""', '"""default"""', '"""Defold-examples"""', 'file'], {}), "(input_dir, 'build', 'default', 'Defold-examples', file)\n", (18072, 18128), False, 'import os\n'), ((18130, 18162), 'os.path.join', 'os.path.join', (['examples_dir', 'file'], {}), '(examples_dir, file)\n', (18142, 18162), False, 'import os\n'), ((20301, 20325), 'os.path.dirname', 'os.path.dirname', (['md_file'], {}), '(md_file)\n', (20316, 20325), False, 'import os\n'), ((20598, 20623), 'os.path.dirname', 'os.path.dirname', (['png_file'], {}), '(png_file)\n', (20613, 20623), False, 'import os\n'), ((20861, 20886), 'os.path.dirname', 'os.path.dirname', (['jpg_file'], {}), '(jpg_file)\n', (20876, 20886), False, 'import os\n'), ((21274, 21302), 'os.path.dirname', 'os.path.dirname', (['script_file'], {}), '(script_file)\n', (21289, 21302), False, 'import os\n'), ((21594, 21622), 'os.path.dirname', 'os.path.dirname', (['script_file'], {}), '(script_file)\n', (21609, 21622), False, 'import os\n'), ((22804, 22834), 'os.path.join', 'os.path.join', (['tmp_dir', 'bob_jar'], {}), '(tmp_dir, bob_jar)\n', (22816, 22834), False, 'import os\n'), ((25232, 25256), 'hashlib.md5', 'hashlib.md5', (['author_name'], {}), '(author_name)\n', (25243, 25256), False, 'import hashlib\n'), ((28154, 28198), 'json.dumps', 'json.dumps', (['author'], {'indent': '(2)', 'sort_keys': '(True)'}), '(author, indent=2, sort_keys=True)\n', (28164, 28198), False, 'import json\n'), ((28218, 28275), 'os.path.join', 'os.path.join', (['author_collection_dir', "(author['id'] + '.md')"], {}), "(author_collection_dir, author['id'] + '.md')\n", (28230, 28275), False, 'import os\n'), ((29184, 29225), 'json.dumps', 'json.dumps', (['tag'], {'indent': '(2)', 'sort_keys': '(True)'}), '(tag, indent=2, sort_keys=True)\n', (29194, 29225), False, 'import json\n'), ((31912, 31933), 'os.remove', 'os.remove', (['REFDOC_ZIP'], {}), '(REFDOC_ZIP)\n', (31921, 31933), False, 'import os\n'), ((32267, 32300), 'os.path.join', 'os.path.join', (['tmp_dir', 'REFDOC_ZIP'], {}), '(tmp_dir, REFDOC_ZIP)\n', (32279, 32300), False, 'import os\n'), ((32320, 32353), 'os.path.join', 'os.path.join', (['tmp_dir', 'REFDOC_ZIP'], {}), '(tmp_dir, REFDOC_ZIP)\n', (32332, 32353), False, 'import os\n'), ((32508, 32536), 'os.path.join', 'os.path.join', (['tmp_dir', '"""doc"""'], {}), "(tmp_dir, 'doc')\n", (32520, 32536), False, 'import os\n'), ((34285, 34308), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (34298, 34308), False, 'import os\n'), ((36685, 36711), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (36701, 36711), False, 'import os\n'), ((4435, 4463), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4447, 4463), False, 'import os\n'), ((29326, 29389), 'os.path.join', 'os.path.join', (['tag_collection_dir', 'sort_order', "(tag['id'] + '.md')"], 
{}), "(tag_collection_dir, sort_order, tag['id'] + '.md')\n", (29338, 29389), False, 'import os\n'), ((32621, 32655), 'os.path.join', 'os.path.join', (['tmp_dir', '"""doc"""', 'file'], {}), "(tmp_dir, 'doc', file)\n", (32633, 32655), False, 'import os\n'), ((33004, 33038), 'os.path.join', 'os.path.join', (['tmp_dir', '"""doc"""', 'file'], {}), "(tmp_dir, 'doc', file)\n", (33016, 33038), False, 'import os\n'), ((33040, 33081), 'os.path.join', 'os.path.join', (['REF_DATA_DIR', 'json_out_file'], {}), '(REF_DATA_DIR, json_out_file)\n', (33052, 33081), False, 'import os\n')]
|
from itertools import combinations
import sys
def func(arr):
answer = []
for i in arr:
if sum(i) - max(i) > max(i):
if len(answer) == 0:
answer += i
elif sum(answer) < sum(i):
answer = i
if len(answer) == 0:
return "-1"
else:
return " ".join(map(str, sorted(answer)))
n = int(input())
array = list(map(int, sys.stdin.readline().split()))
arr = combinations(array,3)
print(func(arr))
|
[
"itertools.combinations",
"sys.stdin.readline"
] |
[((443, 465), 'itertools.combinations', 'combinations', (['array', '(3)'], {}), '(array, 3)\n', (455, 465), False, 'from itertools import combinations\n'), ((406, 426), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (424, 426), False, 'import sys\n')]
|
import discord
from discord.ext import commands
from discord.ext.commands.context import Context
from discord.commands import slash_command
from discord.commands import Option
class SlashExample(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(
guild_ids=[...], name="ping", description="check the latency of the bot!"
)
async def ping(self, ctx):
"""
ephemeral makes "Only you can see this" message
        `await ctx.respond(f"{round(self.bot.latency * 1000)}ms", ephemeral=True)`
        """
        # the cog stores the bot as self.bot in __init__; self.client would raise AttributeError
        return await ctx.respond(f"{round(self.bot.latency * 1000)}ms")
@ping.error
async def ping_error(self, ctx: Context, error):
return await ctx.respond(
error, ephemeral=True
) # ephemeral makes "Only you can see this" message
def setup(bot):
bot.add_cog(SlashExample(bot))
|
[
"discord.commands.slash_command"
] |
[((281, 374), 'discord.commands.slash_command', 'slash_command', ([], {'guild_ids': '[...]', 'name': '"""ping"""', 'description': '"""check the latency of the bot!"""'}), "(guild_ids=[...], name='ping', description=\n 'check the latency of the bot!')\n", (294, 374), False, 'from discord.commands import slash_command\n')]
|
from torch import nn
from torchvision.models import MobileNetV2
class MobileNetV2Encoder(MobileNetV2):
"""
    MobileNetV2Encoder inherits from torchvision's official MobileNetV2. It is
    modified to use dilation in the last blocks so the output stride stays at 16,
    and the classifier block originally used for classification is deleted. The
    forward method additionally returns the feature maps at all resolutions for
    the decoder's use.
"""
def __init__(self, in_channels, norm_layer=None):
super().__init__()
# Replace first conv layer if in_channels doesn't match.
if in_channels != 3:
self.features[0][0] = nn.Conv2d(in_channels, 32, 3, 2, 1, bias=False)
# Remove last block
self.features = self.features[:-1]
# Change to use dilation to maintain output stride = 16
self.features[14].conv[1][0].stride = (1, 1)
for feature in self.features[15:]:
feature.conv[1][0].dilation = (2, 2)
feature.conv[1][0].padding = (2, 2)
# Delete classifier
del self.classifier
def forward(self, x):
x0 = x # 1/1
x = self.features[0](x)
x = self.features[1](x)
x1 = x # 1/2
x = self.features[2](x)
x = self.features[3](x)
x2 = x # 1/4
x = self.features[4](x)
x = self.features[5](x)
x = self.features[6](x)
x3 = x # 1/8
x = self.features[7](x)
x = self.features[8](x)
x = self.features[9](x)
x = self.features[10](x)
x = self.features[11](x)
x = self.features[12](x)
x = self.features[13](x)
x = self.features[14](x)
x = self.features[15](x)
x = self.features[16](x)
x = self.features[17](x)
x4 = x # 1/16
return x4, x3, x2, x1, x0
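# Hedged usage sketch, not part of the original file: with the stride and
# dilation tweaks above, the deepest feature map x4 stays at 1/16 of the
# input resolution (320 channels for the default width multiplier).
if __name__ == "__main__":
    import torch
    encoder = MobileNetV2Encoder(in_channels=3)
    x4, x3, x2, x1, x0 = encoder(torch.randn(1, 3, 224, 224))
    print(x4.shape)  # torch.Size([1, 320, 14, 14]) -- output stride 16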
|
[
"torch.nn.Conv2d"
] |
[((682, 729), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(32)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(in_channels, 32, 3, 2, 1, bias=False)\n', (691, 729), False, 'from torch import nn\n')]
|
import requests
import threading
from multiprocessing.pool import ThreadPool
import time
import sys
URL = "http://192.168.219.158:80/facerec"
webcamStreamURL="192.168.219.142:8090/?action=snapshot"
def faceAuthRequest(requestParams, retJson):
    res = requests.get(URL, params=requestParams, timeout=20)
    res.raise_for_status()  # fail loudly on HTTP errors instead of just touching status_code
    # append into the caller's list: rebinding the local name would never
    # reach the caller, so the result would be lost
    retJson.append(res.json())
    return
jsonParams = {'username':'parkjaehyun','stream_url':webcamStreamURL}
if __name__ == '__main__':
    retJson = []  # shared mutable container the worker thread fills in
getRequestThread = threading.Thread(target=faceAuthRequest,args=(jsonParams,retJson,))
getRequestThread.start()
getRequestThread.join(timeout=21.0)
    if getRequestThread.is_alive():  # still running after the 21 s join timeout
print("face Recognize Failed, please reload Face Recgnize Module..")
sys.exit(1)
print(retJson)
sys.exit(0)
|
[
"threading.Thread",
"sys.exit",
"requests.get"
] |
[((250, 301), 'requests.get', 'requests.get', (['URL'], {'params': 'requestParams', 'timeout': '(20)'}), '(URL, params=requestParams, timeout=20)\n', (262, 301), False, 'import requests\n'), ((476, 544), 'threading.Thread', 'threading.Thread', ([], {'target': 'faceAuthRequest', 'args': '(jsonParams, retJson)'}), '(target=faceAuthRequest, args=(jsonParams, retJson))\n', (492, 544), False, 'import threading\n'), ((764, 775), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (772, 775), False, 'import sys\n'), ((735, 746), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (743, 746), False, 'import sys\n')]
|
"""
Tests module image_io
# Author: <NAME>
# $Id:$
"""
from __future__ import unicode_literals
from __future__ import print_function
__version__ = "$Revision:$"
from copy import copy, deepcopy
import pickle
import os.path
import unittest
import numpy
import numpy.testing as np_test
import scipy
from pyto.io.image_io import ImageIO
class TestImageIO(np_test.TestCase):
"""
Tests class ImageIO
"""
def setUp(self):
"""
Sets absolute path to this file directory and saves it as self.dir
"""
# set absolute path to current dir
working_dir = os.getcwd()
file_dir, name = os.path.split(__file__)
self.dir = os.path.join(working_dir, file_dir)
# make raw file
self.raw_shape = (4,3,2)
self.raw_dtype = 'int16'
self.raw_data = numpy.arange(
24, dtype=self.raw_dtype).reshape(self.raw_shape)
raw = ImageIO()
self.raw_file_name = 'data.raw'
raw.write(file=self.raw_file_name, data=self.raw_data)
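        # The raw format written above has no header, so reads of
        # self.raw_file_name must pass dataType and shape explicitly
        # (as testRead does below).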
def testRead(self):
"""
Tests reading EM and MRC files
"""
# EM tomo
em = ImageIO()
em.read(file=os.path.join(self.dir, "bin-2.em"))
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(em.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(em.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(em.byteOrder, '<')
np_test.assert_equal(em.arrayOrder, 'F')
np_test.assert_equal(em.dataType, 'float32')
np_test.assert_equal(em.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(em.memmap, False)
# EM tomo with memory map
em.read(file=os.path.join(self.dir, "bin-2.em"), memmap=True)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(em.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(em.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(em.byteOrder, '<')
np_test.assert_equal(em.arrayOrder, 'F')
np_test.assert_equal(em.dataType, 'float32')
np_test.assert_equal(em.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(em.memmap, True)
# EM, big-endian
em = ImageIO()
em.read(file=os.path.join(self.dir, "mac-file.em"))
np_test.assert_equal(em.byteOrder, '>')
# EM, little-endian
em = ImageIO()
em.read(file=os.path.join(self.dir, "pc-file.em"))
np_test.assert_equal(em.byteOrder, '<')
em.read(file=os.path.join(self.dir, "pc-file.em"), memmap=True)
np_test.assert_equal(em.byteOrder, '<')
# MRC tomo
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2.mrc"))
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, False)
# MRC tomo with memmap
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2.mrc"), memmap=True)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, True)
# MRC tomo with extended header
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2_ext.mrc"), memmap=False)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, False)
np_test.assert_equal(mrc.extendedHeaderLength, 5120)
# MRC tomo with extended header and with memmap
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2_ext.mrc"), memmap=True)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, True)
np_test.assert_equal(mrc.extendedHeaderLength, 5120)
# another MRC tomo (generated by and)
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "and-tomo.mrc"))
expected = numpy.array([[-0.0329, -0.0006, -0.0698],
[-0.0101, -0.1196, -0.1295],
[0.0844, -0.0400, -0.0716]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0019, -0.0085, 0.0036],
[0.0781, 0.0279, -0.0365],
[0.0210, -0.0193, -0.0355]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 60], expected,
decimal=4)
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, False)
# another MRC tomo (generated by and) with memmap
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "and-tomo.mrc"), memmap=True)
expected = numpy.array([[-0.0329, -0.0006, -0.0698],
[-0.0101, -0.1196, -0.1295],
[0.0844, -0.0400, -0.0716]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0019, -0.0085, 0.0036],
[0.0781, 0.0279, -0.0365],
[0.0210, -0.0193, -0.0355]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 60], expected,
decimal=4)
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, True)
# mrc with the opposite byte order
mrc2 = ImageIO()
mrc2.read(file=os.path.join(self.dir, "swapped_byte_order.mrc"))
expected = numpy.array(
[[ 0.000, 0.000],
[-0.341, -6.702],
[0.782, -11.780],
[0.327, -14.298],
[-0.691, -17.411],
[-0.337, -18.076],
[-0.669, -19.157],
[-0.799, -20.400],
[-0.793, -21.286],
[-1.008, -21.386]])
np_test.assert_almost_equal(mrc2.data[:,:,0], expected, decimal=3)
np_test.assert_equal(mrc2.memmap, False)
raised = False
try:
mrc2.read(
file=os.path.join(self.dir, "swapped_byte_order.mrc"),
memmap=True)
except ValueError:
raised = True
np_test.assert_equal(raised, True)
np_test.assert_equal(mrc2.memmap, True)
# new style header mrc
mrc_new = ImageIO()
mrc_new.read(file=os.path.join(self.dir, 'new-head_int16.mrc'))
np_test.assert_equal(mrc_new.dataType, 'int16')
np_test.assert_equal(mrc_new.data.dtype, numpy.dtype('int16'))
np_test.assert_equal(mrc_new.byteOrder, '<')
np_test.assert_equal(mrc_new.arrayOrder, 'F')
np_test.assert_equal(mrc_new.shape, (40,30,20))
np_test.assert_equal(mrc_new.pixel, [0.4, 0.4, 0.4])
np_test.assert_equal(mrc_new.pixelsize, 0.4)
np_test.assert_equal(mrc_new.data[14,8,10], -14)
np_test.assert_equal(mrc_new.data[15,23,12], 10)
np_test.assert_equal(mrc_new.data[23,29,16], 2)
np_test.assert_equal(mrc_new.memmap, False)
# new style header mrc
mrc_new = ImageIO()
mrc_new.read(
file=os.path.join(self.dir, 'new-head_int16.mrc'), memmap=True)
np_test.assert_equal(mrc_new.dataType, 'int16')
np_test.assert_equal(mrc_new.data.dtype, numpy.dtype('int16'))
np_test.assert_equal(mrc_new.byteOrder, '<')
np_test.assert_equal(mrc_new.arrayOrder, 'F')
np_test.assert_equal(mrc_new.shape, (40,30,20))
np_test.assert_equal(mrc_new.pixel, [0.4, 0.4, 0.4])
np_test.assert_equal(mrc_new.pixelsize, 0.4)
np_test.assert_equal(mrc_new.data[14,8,10], -14)
np_test.assert_equal(mrc_new.data[15,23,12], 10)
np_test.assert_equal(mrc_new.data[23,29,16], 2)
np_test.assert_equal(mrc_new.memmap, True)
np_test.assert_equal(mrc_new.n_labels, 9)
np_test.assert_equal(len(mrc_new.labels), 9)
desired = (
b"COMBINEFFT: Combined FFT from two tomograms "
+ b"07-Oct-13 17:15:24" )
np_test.assert_equal(len(mrc_new.labels[3]), 80)
np_test.assert_equal(mrc_new.labels[3][:len(desired)], desired)
desired = (
b"NEWSTACK: Images copied 10-Oct-13 18:00:03")
np_test.assert_equal(len(mrc_new.labels[6]), 80)
np_test.assert_equal(mrc_new.labels[6][:len(desired)], desired)
# test raw file
raw = ImageIO()
raw.read(
file=self.raw_file_name, dataType=self.raw_dtype,
shape=self.raw_shape)
np_test.assert_equal(raw.data, self.raw_data)
np_test.assert_equal(raw.memmap, False)
# test raw file with memmap
raw = ImageIO()
raw.read(
file=self.raw_file_name, dataType=self.raw_dtype,
shape=self.raw_shape, memmap=True)
np_test.assert_equal(raw.data, self.raw_data)
np_test.assert_equal(raw.memmap, True)
def testWrite(self):
"""
Tests write (and implicitly read), for em, mrc and raw format.
"""
# arrays
ar_uint8 = numpy.array([54, 200, 5, 7, 45, 123],
dtype='uint8').reshape((3,1,2))
        ar_int8 = numpy.array([54, 2, -5, 7, 45, 123],
                            dtype='int8').reshape((3,1,2))  # int8, not uint8: the array holds -5
ar_uint16 = numpy.array([1034, 546, 248, 40000, 2345, 365, 4876, 563],
dtype='uint16').reshape((2,2,2))
ar_int16 = numpy.array([1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16').reshape((2,2,2))
ar_int32 = numpy.array([1034, 56546, -223448, 156,
2345, 2**31-10, -884876, 563],
dtype='int32').reshape((2,2,2))
ar_uint32 = numpy.array([1034, 56546, 223448, 156,
2345, 365, 884876, 2**32-10],
dtype='uint32').reshape((2,2,2))
ar_int8_2 = numpy.arange(24, dtype='int8').reshape((4,3,2))
ar_int16_2 = numpy.arange(24, dtype='int16').reshape((4,3,2))
ar2_int16 = numpy.array([1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16').reshape((2,4))
ar_int16_f = numpy.array(
[1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16', order='F').reshape((2,2,2))
ar_int16_c = numpy.array(
[1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16', order='C').reshape((2,2,2))
# em uint8
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.em'), data=ar_uint8)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint8')
np_test.assert_equal(file_in.data, ar_uint8)
# em uint16
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.em'), data=ar_uint16)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint16')
np_test.assert_equal(file_in.data, ar_uint16)
        # em int16 converted to int32, safe casting
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.em'),
            data=ar_int16, dataType='int32', casting='safe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.em'))
        np_test.assert_equal(file_in.dataType, 'int32')
        np_test.assert_equal(file_in.data, ar_int16)

        # em int16, safe casting
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.em'),
               'data': ar_int16, 'casting': 'safe'})

        # em int16 converted to uint16, unsafe casting
        file_out = ImageIO()
        print("int16 to uint16")
        file_out.write(file=os.path.join(self.dir, '_test.em'),
                       data=ar_int16, dataType='uint16', casting='unsafe')
        print("int16 to uint16 end")
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.em'))
        np_test.assert_equal(file_in.dataType, 'uint16')
        np_test.assert_equal(file_in.data.dtype, numpy.dtype('uint16'))
        np_test.assert_equal(file_in.data[0,1,0] == ar_int16[0,1,0], False)

        # em int16 to uint16, safe casting
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.em'),
               'data': ar_int16, 'dataType': 'uint16', 'casting': 'safe'})

        # em uint16 to int16, unsafe casting
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.em'),
               'data': ar_uint16, 'dataType': 'int16', 'casting': 'unsafe'})

        # em uint32 to int32, safe casting
        print("uint32 to int32 safe")
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.em'),
               'data': ar_uint32, 'dataType': 'int32', 'casting': 'safe'})

        # em uint32 converted to int32, unsafe casting
        print("uint32 to int32")
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.em'),
            data=ar_uint32, dataType='int32', casting='unsafe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.em'))
        np_test.assert_equal(file_in.dataType, 'int32')
        #np_test.assert_equal(file_in.data, ar_uint32) should fail
        np_test.assert_equal(file_in.data[0,0,0] == ar_uint32[0,0,0], True)
        np_test.assert_equal(file_in.data[1,1,1] == ar_uint32[1,1,1], False)

        # em uint32 to float32, safe casting
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.em'),
               'data': ar_uint32, 'dataType': 'float32', 'casting': 'safe'})

        # em uint32 to float32, unsafe casting
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.em'),
            data=ar_uint32, dataType='float32', casting='unsafe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.em'))
        np_test.assert_equal(file_in.dataType, 'float32')
        #np_test.assert_almost_equal(file_in.data, ar_uint32) should fail
        np_test.assert_equal(
            file_in.data[0,0,0] == ar_uint32[0,0,0], True)
        np_test.assert_equal(
            file_in.data[1,1,1] == ar_uint32[1,1,1], False)

        # em int32 to float32, unsafe casting
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.em'),
            data=ar_int32, dataType='float32', casting='unsafe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.em'))
        np_test.assert_equal(file_in.dataType, 'float32')
        #np_test.assert_almost_equal(file_in.data, ar_int32) should fail
        np_test.assert_equal(
            file_in.data[0,0,0] == ar_int32[0,0,0], True)
        np_test.assert_equal(
            file_in.data[1,0,1] == ar_int32[1,0,1], False)

        # em int32 to float64, safe casting
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.em'),
            data=ar_int32, dataType='float64', casting='safe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.em'))
        np_test.assert_equal(file_in.dataType, 'float64')
        np_test.assert_almost_equal(file_in.data, ar_int32)
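
        # For MRC files, dataType and shape can come from four places:
        # explicit write() arguments, data previously passed to setData(),
        # attributes set on the ImageIO instance, or the data array itself.
        # The four blocks below exercise each source in turn.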
        # mrc data type and shape from args
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int8_2, shape=(2,3,4), dataType='int16')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.shape, (2,3,4))

        # mrc data type and shape from previously given data
        file_out = ImageIO()
        file_out.setData(ar_int16_2)
        file_out.write(file=os.path.join(self.dir, '_test.mrc'))
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.shape, (4,3,2))

        # mrc data type and shape from attributes
        file_out = ImageIO()
        file_out.data = ar_int8_2
        file_out.shape = (2,3,4)
        file_out.dataType = 'int16'
        file_out.write(file=os.path.join(self.dir, '_test.mrc'))
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.shape, (2,3,4))

        # mrc data type and shape from data
        file_out = ImageIO()
        file_out.write(file=os.path.join(self.dir, '_test.mrc'),
                       data=ar_int16_2)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.shape, (4,3,2))

        # mrc uint8, same as ubyte
        file_out = ImageIO()
        file_out.write(file=os.path.join(self.dir, '_test.mrc'), data=ar_uint8)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'ubyte')
        np_test.assert_almost_equal(file_in.data, ar_uint8)
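
        # As the assert_raises below show, this MRC writer rejects dtypes
        # it has no mode mapping for (uint16, int32, ...); such data has to
        # be cast to a supported dtype first.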
        # mrc uint16
        file_out = ImageIO()
        np_test.assert_raises(
            (KeyError, TypeError),
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.mrc'), 'data': ar_uint16})

        # mrc uint16 to ubyte, safe casting
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.mrc'),
               'data': ar_uint16, 'dataType': 'ubyte', 'casting': 'safe'})

        # mrc uint16 to int16, unsafe casting
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_uint16, dataType='int16', casting='unsafe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        #np_test.assert_almost_equal(file_in.data, ar_uint16) should fail
        np_test.assert_equal(file_in.data[0,0,0] == ar_uint16[0,0,0], True)
        np_test.assert_equal(file_in.data[0,1,1] == ar_uint16[0,1,1], False)

        # mrc int16
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'), data=ar_int16, pixel=2.3)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.data, ar_int16)
        np_test.assert_equal(file_in.pixel, [2.3, 2.3, 2.3])
        np_test.assert_equal(file_in.pixelsize, 2.3)

        # mrc int16 2D
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'), data=ar2_int16, pixel=3.4)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.data[:,:,0], ar2_int16)
        np_test.assert_equal(file_in.pixelsize, 3.4)

        # mrc int8 to int16
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'), data=ar_int8,
            dataType='int16', casting='safe')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.data, ar_int8)

        # mrc int32
        file_out = ImageIO()
        np_test.assert_raises(
            (KeyError, TypeError),
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.mrc'), 'data': ar_int32})

        # mrc int32 to int16
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.mrc'),
               'data': ar_int32, 'dataType': 'int16', 'casting': 'safe'})

        # mrc int32 to float32
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.mrc'),
               'data': ar_int32, 'dataType': 'float32', 'casting': 'safe'})

        # mrc int32 to complex64
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.mrc'),
               'data': ar_int32, 'dataType': 'complex64', 'casting': 'safe'})
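
        # Raw files are written without a header, so dataType and shape
        # must be supplied explicitly when reading them back.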
        # raw int16
        file_out = ImageIO()
        file_out.write(file=os.path.join(self.dir, '_test.raw'), data=ar_int16)
        file_in = ImageIO()
        file_in.read(
            file=os.path.join(self.dir, '_test.raw'),
            dataType='int16', shape=(2,2,2))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.data, ar_int16)

        # raw int8 to int16
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.raw'),
            data=ar_int8, dataType='int16')
        file_in = ImageIO()
        file_in.read(
            file=os.path.join(self.dir, '_test.raw'),
            dataType='int16', shape=(3,1,2))
        np_test.assert_equal(file_in.dataType, 'int16')
        np_test.assert_equal(file_in.data, ar_int8)

        # raw int16 to int8
        file_out = ImageIO()
        np_test.assert_raises(
            TypeError,
            file_out.write,
            **{'file': os.path.join(self.dir, '_test.raw'),
               'data': ar_int16, 'dataType': 'int8', 'casting': 'safe'})

        # explain error messages printed before
        print("It's fine if a few error messages were printed just before " +
              "this line, because they have been caught.")

        # shape param
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int16, dataType='int16')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
        np_test.assert_equal(file_in.data.shape, (2,2,2))
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int16, dataType='int16', shape=(1,4,2))
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
        np_test.assert_equal(file_in.data.shape, (1,4,2))
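        # The shape argument overrides the shape of the data itself: 2D
        # data written as (4,2) reads back as (4,2,1), and an explicit
        # shape on read can reinterpret the same file as (2,2,2).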
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int16, dataType='int16', shape=(4,2))
        file_in.readHeader(file=os.path.join(self.dir, '_test.mrc'))
        file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
        np_test.assert_equal(file_in.data.shape, (4,2,1))
        file_in.read(
            file=os.path.join(self.dir, '_test.mrc'),
            dataType='int16', shape=(2,2,2))
        np_test.assert_equal(file_in.data.shape, (2,2,2))
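
        # Array order round-trips: data from C- and F-ordered arrays should
        # read back unchanged; arrayOrder only affects the on-disk layout.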
        # array order C, read write default (F)
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'), data=ar_int16_c)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.data, ar_int16_c)

        # array order C, read write C
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int16_c, arrayOrder='C')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'), arrayOrder='C')
        np_test.assert_equal(file_in.data, ar_int16_c)

        # array order F, read write default (F)
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'), data=ar_int16_f)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_equal(file_in.data, ar_int16_f)

        # array order F, read write F
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int16_f, arrayOrder='F')
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'), arrayOrder='F')
        np_test.assert_equal(file_in.data, ar_int16_f)

    def testPixelSize(self):
        """
        Tests pixel size in read and write
        """

        # arrays
        #ar_int8_2 = numpy.arange(24, dtype='int8').reshape((4,3,2))
        ar_int16_2 = numpy.arange(24, dtype='int16').reshape((4,3,2))

        # write with an explicit pixel size and read it back
        file_out = ImageIO()
        file_out.write(
            file=os.path.join(self.dir, '_test.mrc'),
            data=ar_int16_2, pixel=2.1)
        file_in = ImageIO()
        file_in.read(file=os.path.join(self.dir, '_test.mrc'))
        np_test.assert_almost_equal(file_in.pixel, 2.1)

    def tearDown(self):
        """
        Remove temporary files
        """
        try:
            os.remove(os.path.join(self.dir, '_test.em'))
        except OSError:
            pass
        try:
            os.remove(os.path.join(self.dir, '_test.mrc'))
        except OSError:
            pass
        try:
            os.remove(os.path.join(self.dir, '_test.raw'))
        except OSError:
            pass
        try:
            os.remove(os.path.join(self.dir, self.raw_file_name))
        except OSError:
            pass

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestImageIO)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"numpy.testing.assert_almost_equal",
"numpy.dtype",
"pyto.io.image_io.ImageIO",
"numpy.array",
"unittest.TestLoader",
"numpy.testing.assert_equal",
"numpy.arange"
] |
[((928, 937), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (935, 937), False, 'from pyto.io.image_io import ImageIO\n'), ((1170, 1179), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (1177, 1179), False, 'from pyto.io.image_io import ImageIO\n'), ((1256, 1354), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (1267, 1354), False, 'import numpy\n'), ((1440, 1517), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(em.data[50:53, 120:123, 40], expected, decimal=4)\n', (1467, 1517), True, 'import numpy.testing as np_test\n'), ((1573, 1670), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (1584, 1670), False, 'import numpy\n'), ((1757, 1834), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(em.data[150:153, 20:23, 10], expected, decimal=4)\n', (1784, 1834), True, 'import numpy.testing as np_test\n'), ((1879, 1918), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (1899, 1918), True, 'import numpy.testing as np_test\n'), ((1927, 1967), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.arrayOrder', '"""F"""'], {}), "(em.arrayOrder, 'F')\n", (1947, 1967), True, 'import numpy.testing as np_test\n'), ((1976, 2020), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.dataType', '"""float32"""'], {}), "(em.dataType, 'float32')\n", (1996, 2020), True, 'import numpy.testing as np_test\n'), ((2097, 2135), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.memmap', '(False)'], {}), '(em.memmap, False)\n', (2117, 2135), True, 'import numpy.testing as np_test\n'), ((2260, 2358), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (2271, 2358), False, 'import numpy\n'), ((2444, 2521), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(em.data[50:53, 120:123, 40], expected, decimal=4)\n', (2471, 2521), True, 'import numpy.testing as np_test\n'), ((2577, 2674), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (2588, 2674), False, 'import numpy\n'), ((2761, 2838), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(em.data[150:153, 20:23, 10], expected, decimal=4)\n', (2788, 2838), True, 'import numpy.testing as np_test\n'), ((2883, 2922), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (2903, 2922), True, 'import numpy.testing as np_test\n'), ((2931, 2971), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.arrayOrder', '"""F"""'], {}), "(em.arrayOrder, 'F')\n", (2951, 2971), True, 'import numpy.testing as np_test\n'), ((2980, 3024), 
'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.dataType', '"""float32"""'], {}), "(em.dataType, 'float32')\n", (3000, 3024), True, 'import numpy.testing as np_test\n'), ((3101, 3138), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.memmap', '(True)'], {}), '(em.memmap, True)\n', (3121, 3138), True, 'import numpy.testing as np_test\n'), ((3178, 3187), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (3185, 3187), False, 'from pyto.io.image_io import ImageIO\n'), ((3256, 3295), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '""">"""'], {}), "(em.byteOrder, '>')\n", (3276, 3295), True, 'import numpy.testing as np_test\n'), ((3338, 3347), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (3345, 3347), False, 'from pyto.io.image_io import ImageIO\n'), ((3415, 3454), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (3435, 3454), True, 'import numpy.testing as np_test\n'), ((3535, 3574), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (3555, 3574), True, 'import numpy.testing as np_test\n'), ((3609, 3618), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (3616, 3618), False, 'from pyto.io.image_io import ImageIO\n'), ((3697, 3795), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (3708, 3795), False, 'import numpy\n'), ((3881, 3959), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (3908, 3959), True, 'import numpy.testing as np_test\n'), ((4015, 4112), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (4026, 4112), False, 'import numpy\n'), ((4199, 4277), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (4226, 4277), True, 'import numpy.testing as np_test\n'), ((4322, 4362), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (4342, 4362), True, 'import numpy.testing as np_test\n'), ((4371, 4412), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (4391, 4412), True, 'import numpy.testing as np_test\n'), ((4421, 4466), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (4441, 4466), True, 'import numpy.testing as np_test\n'), ((4544, 4583), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(False)'], {}), '(mrc.memmap, False)\n', (4564, 4583), True, 'import numpy.testing as np_test\n'), ((4630, 4639), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (4637, 4639), False, 'from pyto.io.image_io import ImageIO\n'), ((4731, 4829), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (4742, 4829), 
False, 'import numpy\n'), ((4915, 4993), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (4942, 4993), True, 'import numpy.testing as np_test\n'), ((5049, 5146), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (5060, 5146), False, 'import numpy\n'), ((5233, 5311), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (5260, 5311), True, 'import numpy.testing as np_test\n'), ((5356, 5396), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (5376, 5396), True, 'import numpy.testing as np_test\n'), ((5405, 5446), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (5425, 5446), True, 'import numpy.testing as np_test\n'), ((5455, 5500), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (5475, 5500), True, 'import numpy.testing as np_test\n'), ((5578, 5616), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(True)'], {}), '(mrc.memmap, True)\n', (5598, 5616), True, 'import numpy.testing as np_test\n'), ((5672, 5681), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (5679, 5681), False, 'from pyto.io.image_io import ImageIO\n'), ((5778, 5876), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (5789, 5876), False, 'import numpy\n'), ((5962, 6040), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (5989, 6040), True, 'import numpy.testing as np_test\n'), ((6096, 6193), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (6107, 6193), False, 'import numpy\n'), ((6280, 6358), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (6307, 6358), True, 'import numpy.testing as np_test\n'), ((6403, 6443), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (6423, 6443), True, 'import numpy.testing as np_test\n'), ((6452, 6493), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (6472, 6493), True, 'import numpy.testing as np_test\n'), ((6502, 6547), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (6522, 6547), True, 'import numpy.testing as np_test\n'), ((6625, 6664), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(False)'], {}), '(mrc.memmap, False)\n', (6645, 6664), True, 'import numpy.testing as 
np_test\n'), ((6673, 6725), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.extendedHeaderLength', '(5120)'], {}), '(mrc.extendedHeaderLength, 5120)\n', (6693, 6725), True, 'import numpy.testing as np_test\n'), ((6797, 6806), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (6804, 6806), False, 'from pyto.io.image_io import ImageIO\n'), ((6902, 7000), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (6913, 7000), False, 'import numpy\n'), ((7086, 7164), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (7113, 7164), True, 'import numpy.testing as np_test\n'), ((7220, 7317), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (7231, 7317), False, 'import numpy\n'), ((7404, 7482), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (7431, 7482), True, 'import numpy.testing as np_test\n'), ((7527, 7567), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (7547, 7567), True, 'import numpy.testing as np_test\n'), ((7576, 7617), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (7596, 7617), True, 'import numpy.testing as np_test\n'), ((7626, 7671), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (7646, 7671), True, 'import numpy.testing as np_test\n'), ((7749, 7787), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(True)'], {}), '(mrc.memmap, True)\n', (7769, 7787), True, 'import numpy.testing as np_test\n'), ((7796, 7848), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.extendedHeaderLength', '(5120)'], {}), '(mrc.extendedHeaderLength, 5120)\n', (7816, 7848), True, 'import numpy.testing as np_test\n'), ((7910, 7919), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (7917, 7919), False, 'from pyto.io.image_io import ImageIO\n'), ((8001, 8103), 'numpy.array', 'numpy.array', (['[[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [0.0844, -0.04, \n -0.0716]]'], {}), '([[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [\n 0.0844, -0.04, -0.0716]])\n', (8012, 8103), False, 'import numpy\n'), ((8185, 8263), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (8212, 8263), True, 'import numpy.testing as np_test\n'), ((8319, 8418), 'numpy.array', 'numpy.array', (['[[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021, -0.0193, -\n 0.0355]]'], {}), '([[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021,\n -0.0193, -0.0355]])\n', (8330, 8418), False, 'import numpy\n'), ((8502, 8580), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 60]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 60], expected, 
decimal=4)\n', (8529, 8580), True, 'import numpy.testing as np_test\n'), ((8625, 8670), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (8645, 8670), True, 'import numpy.testing as np_test\n'), ((8748, 8787), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(False)'], {}), '(mrc.memmap, False)\n', (8768, 8787), True, 'import numpy.testing as np_test\n'), ((8861, 8870), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (8868, 8870), False, 'from pyto.io.image_io import ImageIO\n'), ((8965, 9067), 'numpy.array', 'numpy.array', (['[[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [0.0844, -0.04, \n -0.0716]]'], {}), '([[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [\n 0.0844, -0.04, -0.0716]])\n', (8976, 9067), False, 'import numpy\n'), ((9149, 9227), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (9176, 9227), True, 'import numpy.testing as np_test\n'), ((9283, 9382), 'numpy.array', 'numpy.array', (['[[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021, -0.0193, -\n 0.0355]]'], {}), '([[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021,\n -0.0193, -0.0355]])\n', (9294, 9382), False, 'import numpy\n'), ((9466, 9544), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 60]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 60], expected, decimal=4)\n', (9493, 9544), True, 'import numpy.testing as np_test\n'), ((9589, 9634), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (9609, 9634), True, 'import numpy.testing as np_test\n'), ((9712, 9750), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(True)'], {}), '(mrc.memmap, True)\n', (9732, 9750), True, 'import numpy.testing as np_test\n'), ((9810, 9819), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (9817, 9819), False, 'from pyto.io.image_io import ImageIO\n'), ((9912, 10112), 'numpy.array', 'numpy.array', (['[[0.0, 0.0], [-0.341, -6.702], [0.782, -11.78], [0.327, -14.298], [-0.691, \n -17.411], [-0.337, -18.076], [-0.669, -19.157], [-0.799, -20.4], [-\n 0.793, -21.286], [-1.008, -21.386]]'], {}), '([[0.0, 0.0], [-0.341, -6.702], [0.782, -11.78], [0.327, -14.298\n ], [-0.691, -17.411], [-0.337, -18.076], [-0.669, -19.157], [-0.799, -\n 20.4], [-0.793, -21.286], [-1.008, -21.386]])\n', (9923, 10112), False, 'import numpy\n'), ((10262, 10330), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc2.data[:, :, 0]', 'expected'], {'decimal': '(3)'}), '(mrc2.data[:, :, 0], expected, decimal=3)\n', (10289, 10330), True, 'import numpy.testing as np_test\n'), ((10337, 10377), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc2.memmap', '(False)'], {}), '(mrc2.memmap, False)\n', (10357, 10377), True, 'import numpy.testing as np_test\n'), ((10599, 10633), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raised', '(True)'], {}), '(raised, True)\n', (10619, 10633), True, 'import numpy.testing as np_test\n'), ((10642, 10681), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc2.memmap', '(True)'], {}), '(mrc2.memmap, True)\n', (10662, 10681), True, 'import numpy.testing as np_test\n'), ((10733, 10742), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', 
(10740, 10742), False, 'from pyto.io.image_io import ImageIO\n'), ((10823, 10870), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.dataType', '"""int16"""'], {}), "(mrc_new.dataType, 'int16')\n", (10843, 10870), True, 'import numpy.testing as np_test\n'), ((10950, 10994), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.byteOrder', '"""<"""'], {}), "(mrc_new.byteOrder, '<')\n", (10970, 10994), True, 'import numpy.testing as np_test\n'), ((11003, 11048), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.arrayOrder', '"""F"""'], {}), "(mrc_new.arrayOrder, 'F')\n", (11023, 11048), True, 'import numpy.testing as np_test\n'), ((11057, 11106), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.shape', '(40, 30, 20)'], {}), '(mrc_new.shape, (40, 30, 20))\n', (11077, 11106), True, 'import numpy.testing as np_test\n'), ((11113, 11165), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixel', '[0.4, 0.4, 0.4]'], {}), '(mrc_new.pixel, [0.4, 0.4, 0.4])\n', (11133, 11165), True, 'import numpy.testing as np_test\n'), ((11174, 11218), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixelsize', '(0.4)'], {}), '(mrc_new.pixelsize, 0.4)\n', (11194, 11218), True, 'import numpy.testing as np_test\n'), ((11227, 11277), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[14, 8, 10]', '(-14)'], {}), '(mrc_new.data[14, 8, 10], -14)\n', (11247, 11277), True, 'import numpy.testing as np_test\n'), ((11284, 11334), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[15, 23, 12]', '(10)'], {}), '(mrc_new.data[15, 23, 12], 10)\n', (11304, 11334), True, 'import numpy.testing as np_test\n'), ((11341, 11390), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[23, 29, 16]', '(2)'], {}), '(mrc_new.data[23, 29, 16], 2)\n', (11361, 11390), True, 'import numpy.testing as np_test\n'), ((11397, 11440), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.memmap', '(False)'], {}), '(mrc_new.memmap, False)\n', (11417, 11440), True, 'import numpy.testing as np_test\n'), ((11491, 11500), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (11498, 11500), False, 'from pyto.io.image_io import ImageIO\n'), ((11607, 11654), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.dataType', '"""int16"""'], {}), "(mrc_new.dataType, 'int16')\n", (11627, 11654), True, 'import numpy.testing as np_test\n'), ((11734, 11778), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.byteOrder', '"""<"""'], {}), "(mrc_new.byteOrder, '<')\n", (11754, 11778), True, 'import numpy.testing as np_test\n'), ((11787, 11832), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.arrayOrder', '"""F"""'], {}), "(mrc_new.arrayOrder, 'F')\n", (11807, 11832), True, 'import numpy.testing as np_test\n'), ((11841, 11890), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.shape', '(40, 30, 20)'], {}), '(mrc_new.shape, (40, 30, 20))\n', (11861, 11890), True, 'import numpy.testing as np_test\n'), ((11897, 11949), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixel', '[0.4, 0.4, 0.4]'], {}), '(mrc_new.pixel, [0.4, 0.4, 0.4])\n', (11917, 11949), True, 'import numpy.testing as np_test\n'), ((11958, 12002), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixelsize', '(0.4)'], {}), '(mrc_new.pixelsize, 0.4)\n', (11978, 12002), True, 'import numpy.testing as np_test\n'), ((12011, 12061), 'numpy.testing.assert_equal', 
'np_test.assert_equal', (['mrc_new.data[14, 8, 10]', '(-14)'], {}), '(mrc_new.data[14, 8, 10], -14)\n', (12031, 12061), True, 'import numpy.testing as np_test\n'), ((12068, 12118), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[15, 23, 12]', '(10)'], {}), '(mrc_new.data[15, 23, 12], 10)\n', (12088, 12118), True, 'import numpy.testing as np_test\n'), ((12125, 12174), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[23, 29, 16]', '(2)'], {}), '(mrc_new.data[23, 29, 16], 2)\n', (12145, 12174), True, 'import numpy.testing as np_test\n'), ((12181, 12223), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.memmap', '(True)'], {}), '(mrc_new.memmap, True)\n', (12201, 12223), True, 'import numpy.testing as np_test\n'), ((12232, 12273), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.n_labels', '(9)'], {}), '(mrc_new.n_labels, 9)\n', (12252, 12273), True, 'import numpy.testing as np_test\n'), ((12868, 12877), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (12875, 12877), False, 'from pyto.io.image_io import ImageIO\n'), ((13001, 13046), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.data', 'self.raw_data'], {}), '(raw.data, self.raw_data)\n', (13021, 13046), True, 'import numpy.testing as np_test\n'), ((13055, 13094), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.memmap', '(False)'], {}), '(raw.memmap, False)\n', (13075, 13094), True, 'import numpy.testing as np_test\n'), ((13146, 13155), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (13153, 13155), False, 'from pyto.io.image_io import ImageIO\n'), ((13292, 13337), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.data', 'self.raw_data'], {}), '(raw.data, self.raw_data)\n', (13312, 13337), True, 'import numpy.testing as np_test\n'), ((13346, 13384), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.memmap', '(True)'], {}), '(raw.memmap, True)\n', (13366, 13384), True, 'import numpy.testing as np_test\n'), ((15038, 15047), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15045, 15047), False, 'from pyto.io.image_io import ImageIO\n'), ((15145, 15154), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15152, 15154), False, 'from pyto.io.image_io import ImageIO\n'), ((15225, 15272), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""uint8"""'], {}), "(file_in.dataType, 'uint8')\n", (15245, 15272), True, 'import numpy.testing as np_test\n'), ((15281, 15325), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_uint8'], {}), '(file_in.data, ar_uint8)\n', (15301, 15325), True, 'import numpy.testing as np_test\n'), ((15366, 15375), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15373, 15375), False, 'from pyto.io.image_io import ImageIO\n'), ((15474, 15483), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15481, 15483), False, 'from pyto.io.image_io import ImageIO\n'), ((15554, 15602), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""uint16"""'], {}), "(file_in.dataType, 'uint16')\n", (15574, 15602), True, 'import numpy.testing as np_test\n'), ((15611, 15656), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_uint16'], {}), '(file_in.data, ar_uint16)\n', (15631, 15656), True, 'import numpy.testing as np_test\n'), ((15729, 15738), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15736, 15738), False, 'from pyto.io.image_io import ImageIO\n'), ((15895, 15904), 
'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15902, 15904), False, 'from pyto.io.image_io import ImageIO\n'), ((15975, 16022), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int32"""'], {}), "(file_in.dataType, 'int32')\n", (15995, 16022), True, 'import numpy.testing as np_test\n'), ((16031, 16075), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16'], {}), '(file_in.data, ar_int16)\n', (16051, 16075), True, 'import numpy.testing as np_test\n'), ((16130, 16139), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16137, 16139), False, 'from pyto.io.image_io import ImageIO\n'), ((16406, 16415), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16413, 16415), False, 'from pyto.io.image_io import ImageIO\n'), ((16643, 16652), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16650, 16652), False, 'from pyto.io.image_io import ImageIO\n'), ((16723, 16771), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""uint16"""'], {}), "(file_in.dataType, 'uint16')\n", (16743, 16771), True, 'import numpy.testing as np_test\n'), ((16852, 16923), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 1, 0] == ar_int16[0, 1, 0])', '(False)'], {}), '(file_in.data[0, 1, 0] == ar_int16[0, 1, 0], False)\n', (16872, 16923), True, 'import numpy.testing as np_test\n'), ((16984, 16993), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16991, 16993), False, 'from pyto.io.image_io import ImageIO\n'), ((17272, 17281), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (17279, 17281), False, 'from pyto.io.image_io import ImageIO\n'), ((17598, 17607), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (17605, 17607), False, 'from pyto.io.image_io import ImageIO\n'), ((17928, 17937), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (17935, 17937), False, 'from pyto.io.image_io import ImageIO\n'), ((18097, 18106), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18104, 18106), False, 'from pyto.io.image_io import ImageIO\n'), ((18177, 18224), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int32"""'], {}), "(file_in.dataType, 'int32')\n", (18197, 18224), True, 'import numpy.testing as np_test\n'), ((18300, 18371), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 0] == ar_uint32[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_uint32[0, 0, 0], True)\n', (18320, 18371), True, 'import numpy.testing as np_test\n'), ((18376, 18448), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1])', '(False)'], {}), '(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1], False)\n', (18396, 18448), True, 'import numpy.testing as np_test\n'), ((18511, 18520), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18518, 18520), False, 'from pyto.io.image_io import ImageIO\n'), ((18802, 18811), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18809, 18811), False, 'from pyto.io.image_io import ImageIO\n'), ((18973, 18982), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18980, 18982), False, 'from pyto.io.image_io import ImageIO\n'), ((19053, 19102), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""float32"""'], {}), "(file_in.dataType, 'float32')\n", (19073, 19102), True, 'import numpy.testing as np_test\n'), ((19186, 19257), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 
0] == ar_uint32[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_uint32[0, 0, 0], True)\n', (19206, 19257), True, 'import numpy.testing as np_test\n'), ((19275, 19347), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1])', '(False)'], {}), '(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1], False)\n', (19295, 19347), True, 'import numpy.testing as np_test\n'), ((19423, 19432), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (19430, 19432), False, 'from pyto.io.image_io import ImageIO\n'), ((19593, 19602), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (19600, 19602), False, 'from pyto.io.image_io import ImageIO\n'), ((19673, 19722), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""float32"""'], {}), "(file_in.dataType, 'float32')\n", (19693, 19722), True, 'import numpy.testing as np_test\n'), ((19805, 19875), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 0] == ar_int32[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_int32[0, 0, 0], True)\n', (19825, 19875), True, 'import numpy.testing as np_test\n'), ((19893, 19964), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[1, 0, 1] == ar_int32[1, 0, 1])', '(False)'], {}), '(file_in.data[1, 0, 1] == ar_int32[1, 0, 1], False)\n', (19913, 19964), True, 'import numpy.testing as np_test\n'), ((20038, 20047), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20045, 20047), False, 'from pyto.io.image_io import ImageIO\n'), ((20206, 20215), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20213, 20215), False, 'from pyto.io.image_io import ImageIO\n'), ((20286, 20335), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""float64"""'], {}), "(file_in.dataType, 'float64')\n", (20306, 20335), True, 'import numpy.testing as np_test\n'), ((20344, 20395), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['file_in.data', 'ar_int32'], {}), '(file_in.data, ar_int32)\n', (20371, 20395), True, 'import numpy.testing as np_test\n'), ((20460, 20469), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20467, 20469), False, 'from pyto.io.image_io import ImageIO\n'), ((20627, 20636), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20634, 20636), False, 'from pyto.io.image_io import ImageIO\n'), ((20708, 20755), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (20728, 20755), True, 'import numpy.testing as np_test\n'), ((20764, 20810), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(2, 3, 4)'], {}), '(file_in.shape, (2, 3, 4))\n', (20784, 20810), True, 'import numpy.testing as np_test\n'), ((20890, 20899), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20897, 20899), False, 'from pyto.io.image_io import ImageIO\n'), ((21020, 21029), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21027, 21029), False, 'from pyto.io.image_io import ImageIO\n'), ((21101, 21148), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (21121, 21148), True, 'import numpy.testing as np_test\n'), ((21157, 21203), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(4, 3, 2)'], {}), '(file_in.shape, (4, 3, 2))\n', (21177, 21203), True, 'import numpy.testing as np_test\n'), ((21272, 21281), 'pyto.io.image_io.ImageIO', 'ImageIO', 
([], {}), '()\n', (21279, 21281), False, 'from pyto.io.image_io import ImageIO\n'), ((21468, 21477), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21475, 21477), False, 'from pyto.io.image_io import ImageIO\n'), ((21549, 21596), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (21569, 21596), True, 'import numpy.testing as np_test\n'), ((21605, 21651), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(2, 3, 4)'], {}), '(file_in.shape, (2, 3, 4))\n', (21625, 21651), True, 'import numpy.testing as np_test\n'), ((21714, 21723), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21721, 21723), False, 'from pyto.io.image_io import ImageIO\n'), ((21847, 21856), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21854, 21856), False, 'from pyto.io.image_io import ImageIO\n'), ((21928, 21975), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (21948, 21975), True, 'import numpy.testing as np_test\n'), ((21984, 22030), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(4, 3, 2)'], {}), '(file_in.shape, (4, 3, 2))\n', (22004, 22030), True, 'import numpy.testing as np_test\n'), ((22084, 22093), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22091, 22093), False, 'from pyto.io.image_io import ImageIO\n'), ((22192, 22201), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22199, 22201), False, 'from pyto.io.image_io import ImageIO\n'), ((22273, 22320), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""ubyte"""'], {}), "(file_in.dataType, 'ubyte')\n", (22293, 22320), True, 'import numpy.testing as np_test\n'), ((22329, 22380), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['file_in.data', 'ar_uint8'], {}), '(file_in.data, ar_uint8)\n', (22356, 22380), True, 'import numpy.testing as np_test\n'), ((22423, 22432), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22430, 22432), False, 'from pyto.io.image_io import ImageIO\n'), ((22669, 22678), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22676, 22678), False, 'from pyto.io.image_io import ImageIO\n'), ((22958, 22967), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22965, 22967), False, 'from pyto.io.image_io import ImageIO\n'), ((23128, 23137), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (23135, 23137), False, 'from pyto.io.image_io import ImageIO\n'), ((23209, 23256), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (23229, 23256), True, 'import numpy.testing as np_test\n'), ((23340, 23411), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 0] == ar_uint16[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_uint16[0, 0, 0], True)\n', (23360, 23411), True, 'import numpy.testing as np_test\n'), ((23416, 23488), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 1, 1] == ar_uint16[0, 1, 1])', '(False)'], {}), '(file_in.data[0, 1, 1] == ar_uint16[0, 1, 1], False)\n', (23436, 23488), True, 'import numpy.testing as np_test\n'), ((23525, 23534), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (23532, 23534), False, 'from pyto.io.image_io import ImageIO\n'), ((23657, 23666), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (23664, 23666), False, 'from 
pyto.io.image_io import ImageIO\n'), ((23738, 23785), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (23758, 23785), True, 'import numpy.testing as np_test\n'), ((23794, 23838), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16'], {}), '(file_in.data, ar_int16)\n', (23814, 23838), True, 'import numpy.testing as np_test\n'), ((23847, 23899), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.pixel', '[2.3, 2.3, 2.3]'], {}), '(file_in.pixel, [2.3, 2.3, 2.3])\n', (23867, 23899), True, 'import numpy.testing as np_test\n'), ((23908, 23952), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.pixelsize', '(2.3)'], {}), '(file_in.pixelsize, 2.3)\n', (23928, 23952), True, 'import numpy.testing as np_test\n'), ((23996, 24005), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24003, 24005), False, 'from pyto.io.image_io import ImageIO\n'), ((24129, 24138), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24136, 24138), False, 'from pyto.io.image_io import ImageIO\n'), ((24210, 24257), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (24230, 24257), True, 'import numpy.testing as np_test\n'), ((24266, 24320), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data[:, :, 0]', 'ar2_int16'], {}), '(file_in.data[:, :, 0], ar2_int16)\n', (24286, 24320), True, 'import numpy.testing as np_test\n'), ((24335, 24379), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.pixelsize', '(3.4)'], {}), '(file_in.pixelsize, 3.4)\n', (24355, 24379), True, 'import numpy.testing as np_test\n'), ((24428, 24437), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24435, 24437), False, 'from pyto.io.image_io import ImageIO\n'), ((24594, 24603), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24601, 24603), False, 'from pyto.io.image_io import ImageIO\n'), ((24675, 24722), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (24695, 24722), True, 'import numpy.testing as np_test\n'), ((24731, 24774), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int8'], {}), '(file_in.data, ar_int8)\n', (24751, 24774), True, 'import numpy.testing as np_test\n'), ((24816, 24825), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24823, 24825), False, 'from pyto.io.image_io import ImageIO\n'), ((25046, 25055), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25053, 25055), False, 'from pyto.io.image_io import ImageIO\n'), ((25319, 25328), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25326, 25328), False, 'from pyto.io.image_io import ImageIO\n'), ((25596, 25605), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25603, 25605), False, 'from pyto.io.image_io import ImageIO\n'), ((25862, 25871), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25869, 25871), False, 'from pyto.io.image_io import ImageIO\n'), ((25970, 25979), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25977, 25979), False, 'from pyto.io.image_io import ImageIO\n'), ((26109, 26156), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (26129, 26156), True, 'import numpy.testing as np_test\n'), ((26165, 26209), 'numpy.testing.assert_equal', 
'np_test.assert_equal', (['file_in.data', 'ar_int16'], {}), '(file_in.data, ar_int16)\n', (26185, 26209), True, 'import numpy.testing as np_test\n'), ((26258, 26267), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (26265, 26267), False, 'from pyto.io.image_io import ImageIO\n'), ((26408, 26417), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (26415, 26417), False, 'from pyto.io.image_io import ImageIO\n'), ((26547, 26594), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (26567, 26594), True, 'import numpy.testing as np_test\n'), ((26603, 26646), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int8'], {}), '(file_in.data, ar_int8)\n', (26623, 26646), True, 'import numpy.testing as np_test\n'), ((26695, 26704), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (26702, 26704), False, 'from pyto.io.image_io import ImageIO\n'), ((27143, 27152), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27150, 27152), False, 'from pyto.io.image_io import ImageIO\n'), ((27294, 27303), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27301, 27303), False, 'from pyto.io.image_io import ImageIO\n'), ((27393, 27444), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(2, 2, 2)'], {}), '(file_in.data.shape, (2, 2, 2))\n', (27413, 27444), True, 'import numpy.testing as np_test\n'), ((27462, 27471), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27469, 27471), False, 'from pyto.io.image_io import ImageIO\n'), ((27628, 27637), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27635, 27637), False, 'from pyto.io.image_io import ImageIO\n'), ((27727, 27778), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(1, 4, 2)'], {}), '(file_in.data.shape, (1, 4, 2))\n', (27747, 27778), True, 'import numpy.testing as np_test\n'), ((28071, 28122), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(4, 2, 1)'], {}), '(file_in.data.shape, (4, 2, 1))\n', (28091, 28122), True, 'import numpy.testing as np_test\n'), ((28250, 28301), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(2, 2, 2)'], {}), '(file_in.data.shape, (2, 2, 2))\n', (28270, 28301), True, 'import numpy.testing as np_test\n'), ((28368, 28377), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28375, 28377), False, 'from pyto.io.image_io import ImageIO\n'), ((28491, 28500), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28498, 28500), False, 'from pyto.io.image_io import ImageIO\n'), ((28572, 28618), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_c'], {}), '(file_in.data, ar_int16_c)\n', (28592, 28618), True, 'import numpy.testing as np_test\n'), ((28677, 28686), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28684, 28686), False, 'from pyto.io.image_io import ImageIO\n'), ((28828, 28837), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28835, 28837), False, 'from pyto.io.image_io import ImageIO\n'), ((28925, 28971), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_c'], {}), '(file_in.data, ar_int16_c)\n', (28945, 28971), True, 'import numpy.testing as np_test\n'), ((29040, 29049), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29047, 29049), False, 'from pyto.io.image_io import ImageIO\n'), ((29163, 29172), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', 
(29170, 29172), False, 'from pyto.io.image_io import ImageIO\n'), ((29244, 29290), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_f'], {}), '(file_in.data, ar_int16_f)\n', (29264, 29290), True, 'import numpy.testing as np_test\n'), ((29349, 29358), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29356, 29358), False, 'from pyto.io.image_io import ImageIO\n'), ((29500, 29509), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29507, 29509), False, 'from pyto.io.image_io import ImageIO\n'), ((29597, 29643), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_f'], {}), '(file_in.data, ar_int16_f)\n', (29617, 29643), True, 'import numpy.testing as np_test\n'), ((29928, 29937), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29935, 29937), False, 'from pyto.io.image_io import ImageIO\n'), ((30074, 30083), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (30081, 30083), False, 'from pyto.io.image_io import ImageIO\n'), ((30155, 30202), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['file_in.pixel', '(2.1)'], {}), '(file_in.pixel, 2.1)\n', (30182, 30202), True, 'import numpy.testing as np_test\n'), ((2065, 2087), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (2076, 2087), False, 'import numpy\n'), ((3069, 3091), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (3080, 3091), False, 'import numpy\n'), ((4512, 4534), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (4523, 4534), False, 'import numpy\n'), ((5546, 5568), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (5557, 5568), False, 'import numpy\n'), ((6593, 6615), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (6604, 6615), False, 'import numpy\n'), ((7717, 7739), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (7728, 7739), False, 'import numpy\n'), ((8716, 8738), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (8727, 8738), False, 'import numpy\n'), ((9680, 9702), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (9691, 9702), False, 'import numpy\n'), ((10920, 10940), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (10931, 10940), False, 'import numpy\n'), ((11704, 11724), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (11715, 11724), False, 'import numpy\n'), ((16821, 16842), 'numpy.dtype', 'numpy.dtype', (['"""uint16"""'], {}), "('uint16')\n", (16832, 16842), False, 'import numpy\n'), ((30782, 30803), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (30801, 30803), False, 'import unittest\n'), ((30843, 30879), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (30866, 30879), False, 'import unittest\n'), ((838, 876), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': 'self.raw_dtype'}), '(24, dtype=self.raw_dtype)\n', (850, 876), False, 'import numpy\n'), ((13544, 13596), 'numpy.array', 'numpy.array', (['[54, 200, 5, 7, 45, 123]'], {'dtype': '"""uint8"""'}), "([54, 200, 5, 7, 45, 123], dtype='uint8')\n", (13555, 13596), False, 'import numpy\n'), ((13664, 13715), 'numpy.array', 'numpy.array', (['[54, 2, -5, 7, 45, 123]'], {'dtype': '"""uint8"""'}), "([54, 2, -5, 7, 45, 123], dtype='uint8')\n", (13675, 13715), False, 'import numpy\n'), ((13785, 13859), 'numpy.array', 'numpy.array', (['[1034, 546, 248, 40000, 2345, 365, 4876, 563]'], {'dtype': '"""uint16"""'}), "([1034, 546, 248, 40000, 2345, 365, 4876, 563], dtype='uint16')\n", (13796, 13859), False, 'import numpy\n'), ((13927, 14000), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16')\n", (13938, 14000), False, 'import numpy\n'), ((14068, 14161), 'numpy.array', 'numpy.array', (['[1034, 56546, -223448, 156, 2345, 2 ** 31 - 10, -884876, 563]'], {'dtype': '"""int32"""'}), "([1034, 56546, -223448, 156, 2345, 2 ** 31 - 10, -884876, 563],\n dtype='int32')\n", (14079, 14161), False, 'import numpy\n'), ((14255, 14347), 'numpy.array', 'numpy.array', (['[1034, 56546, 223448, 156, 2345, 365, 884876, 2 ** 32 - 10]'], {'dtype': '"""uint32"""'}), "([1034, 56546, 223448, 156, 2345, 365, 884876, 2 ** 32 - 10],\n dtype='uint32')\n", (14266, 14347), False, 'import numpy\n'), ((14442, 14472), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': '"""int8"""'}), "(24, dtype='int8')\n", (14454, 14472), False, 'import numpy\n'), ((14511, 14542), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': '"""int16"""'}), "(24, dtype='int16')\n", (14523, 14542), False, 'import numpy\n'), ((14580, 14653), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16')\n", (14591, 14653), False, 'import numpy\n'), ((14722, 14810), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""', 'order': '"""F"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16',\n order='F')\n", (14733, 14810), False, 'import numpy\n'), ((14870, 14958), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""', 'order': '"""C"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16',\n order='C')\n", (14881, 14958), False, 'import numpy\n'), ((29849, 29880), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': '"""int16"""'}), "(24, dtype='int16')\n", (29861, 29880), False, 'import numpy\n')]
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from sam.models import Tag, Post, SiteImage
from django.core.cache import cache
from django.db.models import Q
def education(request):
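    # serve the cached payload when present; cache.set below stores it with Django's default timeout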
education = cache.get("education")
if not education:
tag = Tag.objects.filter(tag="top_education")
post = None
images = []
if tag:
posts = list(Post.objects.filter(tags=tag))
if posts:
post = posts[-1]
for image in post.images.values():
images.append(SiteImage.objects.get(image=image['image']))
images.reverse()
public = Q(private=False)
education = Q(tags__tag="education")
school = Q(tags__tag="school")
homework = Q(tags__tag="homework")
posts = Post.objects.filter(education | school | homework)
posts = list(set(posts.filter(public)))
if len(posts) > 2:
posts = posts[len(posts) - 2:]
posts.reverse()
full_post_stub = False
cache_obj = {
"post": post,
"posts": posts,
"images": images,
"full_post_stub": full_post_stub
}
cache.set("education", cache_obj)
else:
post = education['post']
posts = education['posts']
images = education['images']
full_post_stub = education['full_post_stub']
return render_to_response('education.html', {
"post": post,
"posts": posts,
"images": images,
"full_post_stub": full_post_stub
}, context_instance=RequestContext(request))
|
[
"sam.models.SiteImage.objects.get",
"django.core.cache.cache.set",
"django.db.models.Q",
"django.core.cache.cache.get",
"django.template.RequestContext",
"sam.models.Tag.objects.filter",
"sam.models.Post.objects.filter"
] |
[((244, 266), 'django.core.cache.cache.get', 'cache.get', (['"""education"""'], {}), "('education')\n", (253, 266), False, 'from django.core.cache import cache\n'), ((303, 342), 'sam.models.Tag.objects.filter', 'Tag.objects.filter', ([], {'tag': '"""top_education"""'}), "(tag='top_education')\n", (321, 342), False, 'from sam.models import Tag, Post, SiteImage\n'), ((691, 707), 'django.db.models.Q', 'Q', ([], {'private': '(False)'}), '(private=False)\n', (692, 707), False, 'from django.db.models import Q\n'), ((728, 752), 'django.db.models.Q', 'Q', ([], {'tags__tag': '"""education"""'}), "(tags__tag='education')\n", (729, 752), False, 'from django.db.models import Q\n'), ((770, 791), 'django.db.models.Q', 'Q', ([], {'tags__tag': '"""school"""'}), "(tags__tag='school')\n", (771, 791), False, 'from django.db.models import Q\n'), ((811, 834), 'django.db.models.Q', 'Q', ([], {'tags__tag': '"""homework"""'}), "(tags__tag='homework')\n", (812, 834), False, 'from django.db.models import Q\n'), ((852, 902), 'sam.models.Post.objects.filter', 'Post.objects.filter', (['(education | school | homework)'], {}), '(education | school | homework)\n', (871, 902), False, 'from sam.models import Tag, Post, SiteImage\n'), ((1246, 1279), 'django.core.cache.cache.set', 'cache.set', (['"""education"""', 'cache_obj'], {}), "('education', cache_obj)\n", (1255, 1279), False, 'from django.core.cache import cache\n'), ((1636, 1659), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1650, 1659), False, 'from django.template import RequestContext\n'), ((424, 453), 'sam.models.Post.objects.filter', 'Post.objects.filter', ([], {'tags': 'tag'}), '(tags=tag)\n', (443, 453), False, 'from sam.models import Tag, Post, SiteImage\n'), ((595, 638), 'sam.models.SiteImage.objects.get', 'SiteImage.objects.get', ([], {'image': "image['image']"}), "(image=image['image'])\n", (616, 638), False, 'from sam.models import Tag, Post, SiteImage\n')]
|
#
# imitation_frames.py, doom-net
#
# Created by <NAME> on 01/21/17.
#
import os
import time
import h5py
import torch
import torch.nn as nn
import torch.optim as optim
from device import device
import argparse
from doom_instance import *
from aac import BaseModel
def data_generator(args, screens, variables, labels, episodes, step_size):
# remove short episodes
episode_min_size = args.episode_size*step_size
episodes = episodes[episodes[:, 1]-episodes[:, 0] > episode_min_size]
episodes_num = len(episodes)
#
step_idx = episodes[:, 0].copy() + np.random.randint(step_size, size=episodes_num)
step_screens = np.ndarray(shape=(args.batch_size, *screens.shape[1:]), dtype=np.float32)
step_variables = np.ndarray(shape=(args.batch_size, *variables.shape[1:]), dtype=np.float32)
step_labels = np.ndarray(shape=(args.batch_size,), dtype=np.int)
step_terminals = np.ones(shape=(args.batch_size,), dtype=np.float32)
# select episodes for the initial batch
batch_episodes = np.random.randint(episodes_num, size=args.batch_size)
while True:
for i in range(args.batch_size):
idx = batch_episodes[i]
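            # normalize inputs: 8-bit screen pixels to [-1, 1], game variables to roughly unit scale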
step_screens[i, :] = screens[step_idx[idx]] / 127.5 - 1.0
step_variables[i, :] = variables[step_idx[idx]] / 100
step_labels[i] = labels[step_idx[idx]]
step_idx[idx] += step_size
if step_idx[idx] > episodes[idx][1]:
step_idx[idx] = episodes[idx][0] + np.random.randint(step_size)
step_terminals[i] = 0
# reached terminal state, select a new episode
batch_episodes[i] = np.random.randint(episodes_num)
else:
step_terminals[i] = 1
yield torch.from_numpy(step_screens), \
torch.from_numpy(step_variables), \
torch.from_numpy(step_labels), \
torch.from_numpy(step_terminals)
def train(args):
data_file = h5py.File(args.h5_path, 'r')
screens = data_file['screens']
variables = data_file['variables']
labels = data_file['action_labels']
print('Dataset size =', len(screens))
action_sets = data_file['action_sets'][:]
episodes = data_file['episodes'][:]
input_shape = screens[0].shape
train_generator = data_generator(args, screens, variables, labels, episodes, args.skiprate)
np.save('action_set', action_sets)
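    # the model stacks frame_num consecutive frames, so its input has screen_channels * frame_num planes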
model = BaseModel(input_shape[0]*args.frame_num, len(action_sets), variables.shape[1], args.frame_num).to(device)
if args.load is not None and os.path.isfile(args.load):
print("loading model parameters {}".format(args.load))
source_model = torch.load(args.load)
model.load_state_dict(source_model.state_dict())
del source_model
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=5e-4)
optimizer.zero_grad()
running_loss = 0
running_accuracy = 0
batch_time = time.time()
for batch, (screens, variables, labels, terminals) in enumerate(train_generator):
labels = labels.to(device)
outputs, _ = model(*model.transform_input(screens, variables))
loss = criterion(outputs, labels)
model.set_terminal(terminals)
running_loss += loss.item()
_, pred = outputs.max(1)
accuracy = (pred == labels).float().mean()
running_accuracy += accuracy
loss.backward()
optimizer.step()
optimizer.zero_grad()
if batch % args.episode_length == args.episode_length - 1:
running_loss /= args.episode_length
running_accuracy /= args.episode_length
print(
'[{:d}] loss: {:.3f}, accuracy: {:.3f}, time: {:.6f}'.format(
batch + 1, running_loss, running_accuracy, time.time()-batch_time
)
)
running_loss = 0
running_accuracy = 0
batch_time = time.time()
if batch % args.checkpoint_rate == args.checkpoint_rate - 1:
torch.save(model, args.checkpoint_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Doom Recorder')
parser.add_argument('--episode_size', type=int, default=20, help='number of steps in an episode')
    parser.add_argument('--batch_size', type=int, default=64, help='training batch size (number of episode streams sampled in parallel)')
parser.add_argument('--load', default=None, help='path to model file')
parser.add_argument('--h5_path', default=os.path.expanduser('~') + '/test/datasets/vizdoom/cig_map01/flat.h5',
help='hd5 file path')
parser.add_argument('--skiprate', type=int, default=2, help='number of skipped frames')
parser.add_argument('--episode_length', type=int, default=30, help='episode length')
parser.add_argument('--frame_num', type=int, default=4, help='number of frames per input')
parser.add_argument('--checkpoint_file', default=None, help='check point file name')
    parser.add_argument('--checkpoint_rate', type=int, default=5000, help='number of batches per checkpoint')
args = parser.parse_args()
train(args)
|
[
"h5py.File",
"argparse.ArgumentParser",
"torch.load",
"torch.nn.CrossEntropyLoss",
"time.time",
"torch.save",
"os.path.isfile",
"os.path.expanduser",
"torch.from_numpy"
] |
[((1973, 2001), 'h5py.File', 'h5py.File', (['args.h5_path', '"""r"""'], {}), "(args.h5_path, 'r')\n", (1982, 2001), False, 'import h5py\n'), ((2802, 2823), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2821, 2823), True, 'import torch.nn as nn\n'), ((2970, 2981), 'time.time', 'time.time', ([], {}), '()\n', (2979, 2981), False, 'import time\n'), ((4140, 4192), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Doom Recorder"""'}), "(description='Doom Recorder')\n", (4163, 4192), False, 'import argparse\n'), ((2568, 2593), 'os.path.isfile', 'os.path.isfile', (['args.load'], {}), '(args.load)\n', (2582, 2593), False, 'import os\n'), ((2681, 2702), 'torch.load', 'torch.load', (['args.load'], {}), '(args.load)\n', (2691, 2702), False, 'import torch\n'), ((3964, 3975), 'time.time', 'time.time', ([], {}), '()\n', (3973, 3975), False, 'import time\n'), ((4058, 4097), 'torch.save', 'torch.save', (['model', 'args.checkpoint_file'], {}), '(model, args.checkpoint_file)\n', (4068, 4097), False, 'import torch\n'), ((1759, 1789), 'torch.from_numpy', 'torch.from_numpy', (['step_screens'], {}), '(step_screens)\n', (1775, 1789), False, 'import torch\n'), ((1807, 1839), 'torch.from_numpy', 'torch.from_numpy', (['step_variables'], {}), '(step_variables)\n', (1823, 1839), False, 'import torch\n'), ((1857, 1886), 'torch.from_numpy', 'torch.from_numpy', (['step_labels'], {}), '(step_labels)\n', (1873, 1886), False, 'import torch\n'), ((1904, 1936), 'torch.from_numpy', 'torch.from_numpy', (['step_terminals'], {}), '(step_terminals)\n', (1920, 1936), False, 'import torch\n'), ((4530, 4553), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (4548, 4553), False, 'import os\n'), ((3822, 3833), 'time.time', 'time.time', ([], {}), '()\n', (3831, 3833), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 The HERA Collaboration
# Licensed under the 2-clause BSD License
"""
Setup file for bda_utils.
"""
import os
import glob
from setuptools import setup, find_packages
setup_args = {
"name": "bda_utils",
"author": "The HERA Collaboration",
"url": "https://github.com/HERA-Team/bda_utils",
"license": "BSD",
"description": "a series of tools for supporting BDA",
"package_dir": {"": "src"},
"packages": ["bda_utils"],
"scripts": [fl for fl in glob.glob("scripts/*") if not os.path.isdir(fl)],
"use_scm_version": True,
"install_requires": [
"astropy",
"numpy",
"pyuvdata",
"redis",
],
}
if __name__ == "__main__":
setup(**setup_args)
|
[
"os.path.isdir",
"setuptools.setup",
"glob.glob"
] |
[((738, 757), 'setuptools.setup', 'setup', ([], {}), '(**setup_args)\n', (743, 757), False, 'from setuptools import setup, find_packages\n'), ((519, 541), 'glob.glob', 'glob.glob', (['"""scripts/*"""'], {}), "('scripts/*')\n", (528, 541), False, 'import glob\n'), ((549, 566), 'os.path.isdir', 'os.path.isdir', (['fl'], {}), '(fl)\n', (562, 566), False, 'import os\n')]
|
import pickle

import networkx as nx
import numpy as np
import matplotlib.pyplot as plt


def load_graph(graph_input):
    """Build an undirected graph from a two-column edge-list file."""
    G = nx.Graph()
    node1, node2 = np.loadtxt(graph_input, usecols=(0, 1), unpack=True)
    for i in range(len(node1)):
        # cast to int so node ids match the integer keys used for positions below
        G.add_edge(int(node1[i]), int(node2[i]))
    print(f"This graph contains {G.number_of_nodes()} nodes.")
    print(f"This graph contains {G.number_of_edges()} edges.")
    return G


def sorted_betweenness(G):
    """Compute betweenness centrality, cache it to disk, and return (node, value) pairs sorted by node id."""
    node_bet_central = nx.betweenness_centrality(G)
    with open("node_betweenness_centrality.pkl", 'wb') as fh:
        pickle.dump(node_bet_central, fh)
    res = np.array([(int(key), node_bet_central[key]) for key in node_bet_central])
    return res[res[:, 0].argsort()]


def plot_graph_3d(G, idx, x, y, z, angle=30):
    """Draw the graph in 3D; idx holds the node ids and x, y, z their coordinates."""
    # attach the coordinates to the nodes as a 'posi' attribute
    coords = dict(zip(idx.astype(int), np.column_stack((x, y, z))))
    position = {key: {'posi': tuple(value)} for key, value in coords.items()}
    nx.set_node_attributes(G, position)
    pos = nx.get_node_attributes(G, 'posi')
    # size and colour nodes by degree
    degrees = dict(G.degree())
    degree_max = max(degrees.values())
    with plt.style.context('ggplot'):
        fig = plt.figure(figsize=(10, 7))
        ax = fig.add_subplot(projection='3d')
        for key, value in pos.items():
            xi, yi, zi = value
            ax.scatter(xi, yi, zi, c=[plt.cm.plasma(degrees[key] / degree_max)],
                       s=20 + 20 * degrees[key], edgecolors='k', alpha=0.7)
        # draw one line segment per edge
        for j in G.edges():
            ex = np.array((pos[j[0]][0], pos[j[1]][0]))
            ey = np.array((pos[j[0]][1], pos[j[1]][1]))
            ez = np.array((pos[j[0]][2], pos[j[1]][2]))
            ax.plot(ex, ey, ez, c='black', alpha=0.5)
        ax.view_init(30, angle)
        ax.set_axis_off()
        plt.show()
|
[
"networkx.set_node_attributes",
"networkx.betweenness_centrality",
"numpy.column_stack",
"networkx.Graph",
"numpy.loadtxt",
"numpy.array",
"networkx.get_node_attributes"
] |
[((60, 70), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (68, 70), True, 'import networkx as nx\n'), ((86, 138), 'numpy.loadtxt', 'np.loadtxt', (['graph_input'], {'usecols': '(0, 1)', 'unpack': '(True)'}), '(graph_input, usecols=(0, 1), unpack=True)\n', (96, 138), True, 'import numpy as np\n'), ((408, 436), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['G'], {}), '(G)\n', (433, 436), True, 'import networkx as nx\n'), ((895, 930), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', 'poistion'], {}), '(G, poistion)\n', (917, 930), True, 'import networkx as nx\n'), ((938, 971), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""posi"""'], {}), "(G, 'posi')\n", (960, 971), True, 'import networkx as nx\n'), ((719, 745), 'numpy.column_stack', 'np.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (734, 745), True, 'import numpy as np\n'), ((1477, 1515), 'numpy.array', 'np.array', (['(pos[j[0]][0], pos[j[1]][0])'], {}), '((pos[j[0]][0], pos[j[1]][0]))\n', (1485, 1515), True, 'import numpy as np\n'), ((1528, 1566), 'numpy.array', 'np.array', (['(pos[j[0]][1], pos[j[1]][1])'], {}), '((pos[j[0]][1], pos[j[1]][1]))\n', (1536, 1566), True, 'import numpy as np\n'), ((1579, 1617), 'numpy.array', 'np.array', (['(pos[j[0]][2], pos[j[1]][2])'], {}), '((pos[j[0]][2], pos[j[1]][2]))\n', (1587, 1617), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
import asyncio
from pycquery_krb.protocol.asn1_structs import KerberosResponse
from pycquery_krb.common.constants import KerberosSocketType
from asysocks.client import SOCKSClient
from asysocks.common.comms import SocksQueueComms
class AIOKerberosClientSocksSocket:
def __init__(self, target):
self.target = target
self.out_queue = None
self.in_queue = None
		self.client = None
self.proxy_task = None
def get_addr_str(self):
return '%s:%d' % (self.target.ip, self.target.port)
async def sendrecv(self, data):
self.out_queue = asyncio.Queue()
self.in_queue = asyncio.Queue()
comms = SocksQueueComms(self.out_queue, self.in_queue)
self.client = SOCKSClient(comms, self.target.proxy.target, self.target.proxy.creds)
self.proxy_task = asyncio.create_task(self.client.run())
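		# KRB messages over TCP are framed with a 4-byte big-endian length prefix (RFC 4120, section 7.2.2)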
length = len(data).to_bytes(4, byteorder = 'big', signed = False)
await self.out_queue.put(length+data)
resp_data = b''
resp_data_len = -1
while True:
data, err = await self.in_queue.get()
if data is None:
break
if err is not None:
raise err
resp_data += data
if resp_data_len == -1:
if len(resp_data) > 4:
resp_data_len = int.from_bytes(resp_data[:4], byteorder = 'big', signed = False)
if resp_data_len == 0:
raise Exception('Returned data length is 0! This means the server did not understand our message')
if resp_data_len != -1:
if len(resp_data) == resp_data_len + 4:
resp_data = resp_data[4:]
break
elif len(resp_data) > resp_data_len + 4:
raise Exception('Got too much data somehow')
else:
continue
await self.out_queue.put(None)
if resp_data == b'':
raise Exception('Connection returned no data!')
krb_message = KerberosResponse.load(resp_data)
return krb_message
def __str__(self):
t = '===AIOKerberosClientProxySocket AIO===\r\n'
t += 'target: %s\r\n' % self.target
return t
|
[
"pycquery_krb.protocol.asn1_structs.KerberosResponse.load",
"asysocks.client.SOCKSClient",
"asyncio.Queue",
"asysocks.common.comms.SocksQueueComms"
] |
[((615, 630), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (628, 630), False, 'import asyncio\n'), ((649, 664), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (662, 664), False, 'import asyncio\n'), ((675, 721), 'asysocks.common.comms.SocksQueueComms', 'SocksQueueComms', (['self.out_queue', 'self.in_queue'], {}), '(self.out_queue, self.in_queue)\n', (690, 721), False, 'from asysocks.common.comms import SocksQueueComms\n'), ((739, 808), 'asysocks.client.SOCKSClient', 'SOCKSClient', (['comms', 'self.target.proxy.target', 'self.target.proxy.creds'], {}), '(comms, self.target.proxy.target, self.target.proxy.creds)\n', (750, 808), False, 'from asysocks.client import SOCKSClient\n'), ((1798, 1830), 'pycquery_krb.protocol.asn1_structs.KerberosResponse.load', 'KerberosResponse.load', (['resp_data'], {}), '(resp_data)\n', (1819, 1830), False, 'from pycquery_krb.protocol.asn1_structs import KerberosResponse\n')]
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from pickle import dumps, load, loads
from io import BytesIO
class PyMimeData(QMimeData):
"""
The PyMimeData wraps a Python instance as MIME data.
"""
# The MIME type for instances.
MIME_TYPE = 'application/x-ets-qt4-instance'
def __init__(self, data=None):
"""
Initialise the instance.
"""
QMimeData.__init__(self)
# Keep a local reference to be returned if possible.
self._local_instance = data
if data is not None:
# We may not be able to pickle the data.
#try:
pdata = dumps(data)
#except:
# return
# This format (as opposed to using a single sequence) allows the
# type to be extracted without unpickling the data itself.
self.setData(self.MIME_TYPE, dumps(data.__class__) + pdata)
@classmethod
def coerce(cls, md):
"""
Coerce a QMimeData instance to a PyMimeData instance if possible.
"""
# See if the data is already of the right type. If it is then we know
# we are in the same process.
if isinstance(md, cls):
return md
# See if the data type is supported.
if not md.hasFormat(cls.MIME_TYPE):
return None
nmd = cls()
nmd.setData(cls.MIME_TYPE, md.data())
return nmd
def instance(self):
"""
Return the instance.
"""
if self._local_instance is not None:
return self._local_instance
        # The payload is two concatenated pickles: first the type, then the instance.
        stream = BytesIO(bytes(self.data(self.MIME_TYPE)))
        try:
            # Skip the type.
            load(stream)
            # Recreate the instance.
            return load(stream)
        except Exception:
            pass
return None
def instanceType(self):
"""
Return the type of the instance.
"""
if self._local_instance is not None:
return self._local_instance.__class__
try:
            # loads() stops after the first pickle, which is the type.
            return loads(bytes(self.data(self.MIME_TYPE)))
        except Exception:
            pass
return None
|
[
"pickle.load",
"pickle.dumps"
] |
[((645, 656), 'pickle.dumps', 'dumps', (['data'], {}), '(data)\n', (650, 656), False, 'from pickle import dumps, load, loads\n'), ((1739, 1747), 'pickle.load', 'load', (['io'], {}), '(io)\n', (1743, 1747), False, 'from pickle import dumps, load, loads\n'), ((1807, 1815), 'pickle.load', 'load', (['io'], {}), '(io)\n', (1811, 1815), False, 'from pickle import dumps, load, loads\n'), ((897, 918), 'pickle.dumps', 'dumps', (['data.__class__'], {}), '(data.__class__)\n', (902, 918), False, 'from pickle import dumps, load, loads\n')]
|
# Generated by Django 2.0.5 on 2018-09-01 12:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('abyssal_modules', '0002_remove_ownershiprecord'),
]
operations = [
migrations.RemoveField(
model_name='moduletype',
name='attributes',
),
migrations.CreateModel(
name='TypeAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display', models.BooleanField(default=False)),
('attribute', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='abyssal_modules.ModuleDogmaAttribute')),
('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='abyssal_modules.ModuleType')),
],
),
migrations.AddField(
model_name='moduletype',
name='attributes',
field=models.ManyToManyField(related_name='_moduletype_attributes_+', through='abyssal_modules.TypeAttribute', to='abyssal_modules.ModuleDogmaAttribute'),
),
]
|
[
"django.db.models.ManyToManyField",
"django.db.migrations.RemoveField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] |
[((280, 346), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""moduletype"""', 'name': '"""attributes"""'}), "(model_name='moduletype', name='attributes')\n", (302, 346), False, 'from django.db import migrations, models\n'), ((1052, 1204), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""_moduletype_attributes_+"""', 'through': '"""abyssal_modules.TypeAttribute"""', 'to': '"""abyssal_modules.ModuleDogmaAttribute"""'}), "(related_name='_moduletype_attributes_+', through=\n 'abyssal_modules.TypeAttribute', to='abyssal_modules.ModuleDogmaAttribute')\n", (1074, 1204), False, 'from django.db import migrations, models\n'), ((493, 586), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (509, 586), False, 'from django.db import migrations, models\n'), ((613, 647), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (632, 647), False, 'from django.db import migrations, models\n'), ((680, 790), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""abyssal_modules.ModuleDogmaAttribute"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'abyssal_modules.ModuleDogmaAttribute')\n", (697, 790), False, 'from django.db import migrations, models\n'), ((813, 913), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""abyssal_modules.ModuleType"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'abyssal_modules.ModuleType')\n", (830, 913), False, 'from django.db import migrations, models\n')]
|
import komand
import dumbno
from .schema import ConnectionSchema
# Custom imports below
class Connection(komand.Connection):
def __init__(self):
super(self.__class__, self).__init__(input=ConnectionSchema())
def connect(self, params={}):
self.host = params.get("host")
self.port = params.get("port", 9000)
self.dumbno = dumbno.ACLClient(self.host, port=self.port)
|
[
"dumbno.ACLClient"
] |
[((364, 407), 'dumbno.ACLClient', 'dumbno.ACLClient', (['self.host'], {'port': 'self.port'}), '(self.host, port=self.port)\n', (380, 407), False, 'import dumbno\n')]
|
#
# Copyright (c) 2019 UCT Prague.
#
# propertyvalue_acls.py is part of Invenio Explicit ACLs
# (see https://github.com/oarepo/invenio-explicit-acls).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Simple ACL matching all records that have a metadata property equal to a given value."""
import enum
import logging
from invenio_accounts.models import User
from invenio_db import db
from sqlalchemy_utils import ChoiceType, Timestamp
from invenio_explicit_acls.models import ACL, gen_uuid_key
from .es_mixin import ESACLMixin
logger = logging.getLogger(__name__)
class MatchOperation(enum.Enum):
"""The operation for matching property to value might be either term or match - choose according to the schema."""
match = 'match'
term = 'term'
class BoolOperation(enum.Enum):
"""The ES Bool filter query type."""
must = 'must'
"""All properties of this type must match. The equivalent of AND."""
mustNot = 'must_not'
"""All properties of this type must not match. The equivalent of NOT."""
should = 'should'
"""At least one property must match. The equivalent of OR."""
filter = 'filter'
"""Properties that must match, but are run in non-scoring, filtering mode."""
class PropertyValue(db.Model, Timestamp):
"""Property and Value match to be used in Property based ACL queries."""
__tablename__ = 'explicit_acls_propertyvalue'
#
# Fields
#
id = db.Column(
db.String(36),
default=gen_uuid_key,
primary_key=True
)
"""Primary key."""
acl_id = db.Column(db.ForeignKey('explicit_acls_propertyvalueacl.id',
name='fk_explicit_acls_propertyvalue_acl_id'))
acl = db.relationship('PropertyValueACL', back_populates='property_values')
name = db.Column(db.String(64))
"""Name of the property in elasticsearch."""
value = db.Column(db.String(128))
"""Value of the property in elasticsearch."""
match_operation = db.Column(ChoiceType(MatchOperation, impl=db.String(length=10)),
default=MatchOperation.term)
"""Property value matching mode: can be either term or match."""
bool_operation = db.Column(ChoiceType(BoolOperation, impl=db.String(length=10)), default=BoolOperation.must)
"""Bool filter operation mode this property belongs to."""
originator_id = db.Column(db.ForeignKey(User.id, ondelete='CASCADE', ),
nullable=False, index=True)
originator = db.relationship(
User,
backref=db.backref("authored_properties"))
"""The originator (person that last time modified the Property)"""
def __str__(self):
"""Returns string representation of the class."""
return '%s: %s(%s=%s)' % (self.bool_operation, self.match_operation, self.name, self.value, )
class PropertyValueACL(ESACLMixin, ACL):
"""An ACL that matches all records that have a metadata property equal to a given constant value."""
__tablename__ = 'explicit_acls_propertyvalueacl'
__mapper_args__ = {
'polymorphic_identity': 'propertyvalue',
}
#
# Fields
#
id = db.Column(db.String(36), db.ForeignKey('explicit_acls_acl.id'), primary_key=True)
"""Id maps to base class' id"""
property_values = db.relationship("PropertyValue", back_populates="acl")
"""A set of actors for this ACL (who have rights to perform an operation this ACL references)"""
@property
def record_selector(self):
"""Returns an elasticsearch query matching resources that this ACL maps to."""
boolProps = {}
for prop in self.property_values: # type: PropertyValue
boolProps.setdefault(prop.bool_operation.value, []).append(
{
prop.match_operation.value: {
prop.name: prop.value
}
}
)
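        # e.g. two "must" terms yield {'bool': {'must': [{'term': {...}}, {'term': {...}}]}}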
return {
'bool': boolProps
}
def __repr__(self):
"""String representation for model."""
return '"{0.name}" ({0.id}) on schemas {0.schemas}'.format(self)
|
[
"logging.getLogger",
"invenio_db.db.String",
"invenio_db.db.relationship",
"invenio_db.db.backref",
"invenio_db.db.ForeignKey"
] |
[((1557, 1584), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1574, 1584), False, 'import logging\n'), ((2733, 2802), 'invenio_db.db.relationship', 'db.relationship', (['"""PropertyValueACL"""'], {'back_populates': '"""property_values"""'}), "('PropertyValueACL', back_populates='property_values')\n", (2748, 2802), False, 'from invenio_db import db\n'), ((4319, 4373), 'invenio_db.db.relationship', 'db.relationship', (['"""PropertyValue"""'], {'back_populates': '"""acl"""'}), "('PropertyValue', back_populates='acl')\n", (4334, 4373), False, 'from invenio_db import db\n'), ((2465, 2478), 'invenio_db.db.String', 'db.String', (['(36)'], {}), '(36)\n', (2474, 2478), False, 'from invenio_db import db\n'), ((2588, 2689), 'invenio_db.db.ForeignKey', 'db.ForeignKey', (['"""explicit_acls_propertyvalueacl.id"""'], {'name': '"""fk_explicit_acls_propertyvalue_acl_id"""'}), "('explicit_acls_propertyvalueacl.id', name=\n 'fk_explicit_acls_propertyvalue_acl_id')\n", (2601, 2689), False, 'from invenio_db import db\n'), ((2825, 2838), 'invenio_db.db.String', 'db.String', (['(64)'], {}), '(64)\n', (2834, 2838), False, 'from invenio_db import db\n'), ((2912, 2926), 'invenio_db.db.String', 'db.String', (['(128)'], {}), '(128)\n', (2921, 2926), False, 'from invenio_db import db\n'), ((3404, 3446), 'invenio_db.db.ForeignKey', 'db.ForeignKey', (['User.id'], {'ondelete': '"""CASCADE"""'}), "(User.id, ondelete='CASCADE')\n", (3417, 3446), False, 'from invenio_db import db\n'), ((4188, 4201), 'invenio_db.db.String', 'db.String', (['(36)'], {}), '(36)\n', (4197, 4201), False, 'from invenio_db import db\n'), ((4203, 4240), 'invenio_db.db.ForeignKey', 'db.ForeignKey', (['"""explicit_acls_acl.id"""'], {}), "('explicit_acls_acl.id')\n", (4216, 4240), False, 'from invenio_db import db\n'), ((3572, 3605), 'invenio_db.db.backref', 'db.backref', (['"""authored_properties"""'], {}), "('authored_properties')\n", (3582, 3605), False, 'from invenio_db import db\n'), ((3043, 3063), 'invenio_db.db.String', 'db.String', ([], {'length': '(10)'}), '(length=10)\n', (3052, 3063), False, 'from invenio_db import db\n'), ((3259, 3279), 'invenio_db.db.String', 'db.String', ([], {'length': '(10)'}), '(length=10)\n', (3268, 3279), False, 'from invenio_db import db\n')]
|
"""
Misc helper function
"""
from django.utils.translation import ugettext as _
def to_set(obj):
"""
Converts an object to a set if it isn't already
"""
if obj is None:
return set()
if isinstance(obj, set):
return obj
if not hasattr(obj, '__iter__') or isinstance(obj, str):
obj = [obj]
return set(obj)
def str_enum(it):
"""
Converts an iterable to string 'the smart way'
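
    e.g. str_enum(['a', 'b', 'c']) -> 'a, b and c'
         str_enum(['a', 'b', 'c', 'd', 'e']) -> 'a, b and 3 others'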
"""
it = [str(o) for o in it]
    length = len(it)
    if length < 4:
        # three items or fewer: print all of them
        return ', '.join(it[-3:-2] + [_(' and ').join(it[-2:])])
    else:
        # more than three items: print only the first two, plus a count of the rest
        return _('%s and %d others') % (', '.join(it[0:2]), length - 2)
|
[
"django.utils.translation.ugettext"
] |
[((822, 843), 'django.utils.translation.ugettext', '_', (['"""%s and %d others"""'], {}), "('%s and %d others')\n", (823, 843), True, 'from django.utils.translation import ugettext as _\n'), ((654, 664), 'django.utils.translation.ugettext', '_', (['""" and """'], {}), "(' and ')\n", (655, 664), True, 'from django.utils.translation import ugettext as _\n')]
|
# Copyright 2020-present <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import asyncio
import contextlib
import logging
import random
from datetime import datetime, timezone
from fractions import Fraction
from typing import Optional
import apsw
import discord
from discord.ext import commands
from ...bot import Salamander, SalamanderContext, UserFeedbackError
from ...checks import admin_or_perms
from ...utils.converters import Weekday
from ...utils.embed_generators import embed_from_member
log = logging.getLogger("salamander.contrib_exts.qotw")
GUILD_SETTINGS_TABLE_CREATION_STATEMENT = """
CREATE TABLE IF NOT EXISTS contrib_qotw_guild_settings (
guild_id INTEGER PRIMARY KEY NOT NULL,
channel_id INTEGER DEFAULT NULL,
last_qotw_at TEXT DEFAULT CURRENT_TIMESTAMP,
qotw_day INTEGER DEFAULT 5,
last_pinned_message_id INTEGER DEFAULT NULL
)
"""
CREATE_MEMBERS_TABLE_STATEMENT = """
CREATE TABLE IF NOT EXISTS contrib_qotw_members (
guild_id INTEGER NOT NULL REFERENCES contrib_qotw_guild_settings(guild_id)
ON UPDATE CASCADE ON DELETE CASCADE,
user_id INTEGER NOT NULL,
current_question TEXT DEFAULT NULL,
questions_since_select INTEGER DEFAULT 1,
PRIMARY KEY (user_id, guild_id)
)
"""
CREATE_HISTORICAL_ALL = """
CREATE TABLE IF NOT EXISTS contrib_qotw_all_history (
guild_id INTEGER NOT NULL,
user_id INTEGER NOT NULL,
question TEXT NOT NULL,
when_asked TEXT DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (guild_id, user_id) REFERENCES contrib_qotw_members (guild_id, user_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
"""
SELECTED_QUESTION_HISTORY = """
CREATE TABLE IF NOT EXISTS contrib_qotw_selected_history (
guild_id INTEGER NOT NULL,
user_id INTEGER NOT NULL,
question TEXT,
when_selected TEXT DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (guild_id, user_id) REFERENCES contrib_qotw_members (guild_id, user_id)
ON UPDATE CASCADE ON DELETE CASCADE
)
"""
# enumerate() starts at 1 rather than 0 so that random.randrange(n) == 0 keeps
# the n-th item with probability 1/n, the invariant reservoir sampling needs.
def reservoir_sample(iterable):
for n, x in enumerate(iterable, 1):
if random.randrange(n) == 0: # nosec
pick = x
return pick
class QOTW(commands.Cog):
""" Question of the week """
def __init__(self, bot: Salamander):
self.bot: Salamander = bot
self.conn = self.bot._conn
cursor = self.conn.cursor()
for statement in (
GUILD_SETTINGS_TABLE_CREATION_STATEMENT,
CREATE_MEMBERS_TABLE_STATEMENT,
CREATE_HISTORICAL_ALL,
SELECTED_QUESTION_HISTORY,
):
cursor.execute(statement)
cursor.close()
self._loop: Optional[asyncio.Task] = None
@staticmethod
def remove_tables_from_connection(conn: apsw.Connection):
cursor = conn.cursor()
with conn:
cursor.execute("""DROP TABLE contrib_qotw_all_history""")
cursor.execute("""DROP TABLE contrib_qotw_selected_history""")
cursor.execute("""DROP TABLE contrib_qotw_members""")
cursor.execute("""DROP TABLE contrib_qotw_guild_settings""")
def init(self):
self._loop = asyncio.create_task(self.bg_loop())
def cog_unload(self):
if self._loop:
self._loop.cancel()
self.conn.close()
async def bg_loop(self):
cursor = self.conn.cursor()
while True:
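            # poll every ten minutes; the query fires each guild at most once per day, on its configured weekday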
await asyncio.sleep(600)
now = datetime.utcnow().replace(tzinfo=timezone.utc)
current_weekday = now.weekday()
tsks = (
self.handle_qotw(*row)
for row in cursor.execute(
"""
SELECT guild_id, channel_id, last_pinned_message_id
FROM contrib_qotw_guild_settings
WHERE
channel_id IS NOT NULL
AND qotw_day=?
AND DATE(last_qotw_at) < DATE(CURRENT_TIMESTAMP)
""",
(current_weekday,),
)
)
results = await asyncio.gather(*tsks, return_exceptions=True)
for t in results:
if isinstance(t, Exception):
log.exception("Error in something: ", exc_info=t)
async def handle_qotw(
self, guild_id: int, channel_id: int, last_pinned_message_id: int
):
guild = self.bot.get_guild(guild_id)
if guild is None:
return
channel = guild.get_channel(channel_id)
if channel is None:
return
cursor = self.conn.cursor()
questions = cursor.execute(
"""
SELECT user_id, current_question, questions_since_select
FROM contrib_qotw_members
WHERE current_question IS NOT NULL AND guild_id=?
""",
(guild_id,),
).fetchall()
if not questions:
cursor.execute(
"""
INSERT INTO contrib_qotw_guild_settings (guild_id, last_qotw_at)
VALUES (?, CURRENT_TIMESTAMP)
ON CONFLICT (guild_id)
DO UPDATE SET
last_qotw_at=excluded.last_qotw_at
""",
(guild_id,),
)
return
def gen_(question_list):
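            # yield each (member, question) pair once per questions_since_select, so longer-waiting questions get proportionally better odds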
for (mid, cq, qss) in question_list:
if mem := guild.get_member(
mid
): # ensure we only ask questions for people still here.
for i in range(qss):
yield (mem, cq)
# could do something more clever here, but this isn't gonna be an issue
        selected_m, selected_question = reservoir_sample(gen_(questions))
with contextlib.suppress(discord.HTTPException):
old_m = await channel.fetch_message(last_pinned_message_id)
await old_m.unpin()
new_m = await channel.send(
f"**New Question of the Week** {selected_m.mention} asks:\n\n{selected_question}"
)
last_pin = None
with contextlib.suppress(discord.HTTPException):
await new_m.pin()
last_pin = new_m.id
with self.conn:
cursor.execute(
"""
UPDATE contrib_qotw_members
SET
questions_since_select = questions_since_select + 1
WHERE current_question IS NOT NULL AND guild_id=?
""",
(guild_id,),
)
cursor.execute(
"""
UPDATE contrib_qotw_members
SET questions_since_select = 1, current_question=NULL
WHERE guild_id = ? AND user_id = ?
""",
(guild_id, selected_m.id),
)
cursor.execute(
"""
INSERT INTO contrib_qotw_selected_history (guild_id, user_id, question)
VALUES (?,?,?)
""",
(guild_id, selected_m.id, selected_question),
)
cursor.execute(
"""
UPDATE contrib_qotw_guild_settings
SET last_qotw_at = CURRENT_TIMESTAMP, last_pinned_message_id = :pin
WHERE guild_id = :guild_id
""",
dict(pin=last_pin, guild_id=guild_id),
)
@admin_or_perms(manage_messages=True)
@commands.guild_only()
@commands.group(name="qotwset")
async def qotw_set(self, ctx: SalamanderContext):
""" Commands to manage QOTW """
if ctx.invoked_subcommand is None:
await ctx.send_help()
@admin_or_perms(manage_messages=True)
@commands.guild_only()
@qotw_set.command(name="channel")
async def qotw_set_channel(
self, ctx: SalamanderContext, *, channel: discord.TextChannel
):
""" Sets the channel for QOTW """
cursor = self.conn.cursor()
cursor.execute(
"""
INSERT INTO contrib_qotw_guild_settings (guild_id, channel_id)
VALUES (?, ?)
ON CONFLICT (guild_id)
DO UPDATE SET channel_id=excluded.channel_id
""",
(ctx.guild.id, channel.id),
)
await ctx.send("Channel set")
@qotw_set.command(name="clearchannel")
async def qotw_set_clearchan(self, ctx: SalamanderContext):
"""
Clears the qotw channel if set.
This will effectively disable QOTW in this server.
"""
cursor = self.conn.cursor()
cursor.execute(
"""
UPDATE contrib_qotw_guild_settings
SET channel_id = NULL
WHERE guild_id=?
""",
(ctx.guild.id,),
)
await ctx.send("Channel cleared.")
@qotw_set.command(name="day")
async def qotw_set_day(self, ctx: SalamanderContext, *, day: Weekday):
""" Sets the day of the week QOTW should be held on """
cursor = self.conn.cursor()
cursor.execute(
"""
INSERT INTO contrib_qotw_guild_settings (guild_id, qotw_day)
VALUES (?,?)
ON CONFLICT (guild_id)
DO UPDATE SET qotw_day=excluded.qotw_day
""",
(ctx.guild.id, day.number),
)
await ctx.send(f"QOTW will be selected on {day}")
@commands.guild_only()
@qotw_set.command(name="force")
async def force_qotw(self, ctx: SalamanderContext):
""" Force a new question to be asked """
cursor = self.conn.cursor()
row = cursor.execute(
"""
SELECT channel_id, last_pinned_message_id
FROM contrib_qotw_guild_settings
WHERE
channel_id IS NOT NULL
AND guild_id = ?
""",
(ctx.guild.id,),
).fetchone()
channel = None
if row:
channel_id, last_pinned_message_id = row
channel = ctx.guild.get_channel(channel_id)
if not channel:
raise UserFeedbackError(custom_message="No QOTW channel has been set")
await self.handle_qotw(ctx.guild.id, channel_id, last_pinned_message_id)
@admin_or_perms(manage_messages=True)
@commands.guild_only()
@qotw_set.command(name="view")
async def view_pending(self, ctx: SalamanderContext):
""" View the currently pending questions """
cursor = self.conn.cursor()
questions = cursor.execute(
"""
SELECT user_id, current_question, questions_since_select
FROM contrib_qotw_members
WHERE current_question IS NOT NULL AND guild_id=?
""",
(ctx.guild.id,),
).fetchall()
if not questions:
return await ctx.send("No current questions.")
total = 0
filtered_questions = []
for user_id, question, weight in questions:
if m := ctx.guild.get_member(user_id):
filtered_questions.append((m, question, weight))
total += weight
if not filtered_questions:
return await ctx.send("No current questions")
embeds = []
n = len(filtered_questions)
for idx, (member, question, weight) in enumerate(filtered_questions, 1):
em = embed_from_member(member)
em.add_field(name=f"Question {idx} of {n}", value=question, inline=False)
em.add_field(
name="Current odds of selection", value=f"{Fraction(weight, total)}"
)
embeds.append(em)
await ctx.list_menu(embeds)
@commands.guild_only()
@commands.command()
async def qotwodds(self, ctx: SalamanderContext):
"""
Get the current odds that your question is selected next.
"""
cursor = self.conn.cursor()
questions = cursor.execute(
"""
SELECT user_id, current_question, questions_since_select
FROM contrib_qotw_members
WHERE current_question IS NOT NULL AND guild_id=?
""",
(ctx.guild.id,),
).fetchall()
total = 0
user_has_question = False
user_question_weight = 0
filtered_questions = []
for user_id, question, weight in questions:
if m := ctx.guild.get_member(user_id):
filtered_questions.append((m, question, weight))
total += weight
if ctx.author.id == user_id:
user_has_question = True
user_question_weight = weight
if not filtered_questions:
return await ctx.send(
"There are no questions currently queued up, feel free to ask one."
)
elif user_has_question:
return await ctx.send(
f"There are currently {len(filtered_questions)} questions.\n"
f"Your question currently has a {Fraction(user_question_weight, total)} chance of being selected."
)
else:
return await ctx.send(
f"There are currently {len(filtered_questions)} questions.\n"
"You do not have a question submitted, but feel free to ask one."
)
@commands.guild_only()
@commands.command()
async def ask(self, ctx: SalamanderContext, *, question: str):
if len(question) > 1500:
return await ctx.send(
"Please ask a shorter question (max 1500 characters)."
)
cursor = self.conn.cursor()
with self.conn:
params = (ctx.guild.id, ctx.author.id, question)
cursor.execute(
"""
INSERT INTO contrib_qotw_members (guild_id, user_id, current_question)
VALUES (?,?,?)
ON CONFLICT (guild_id, user_id)
DO UPDATE SET current_question=excluded.current_question
""",
params,
)
cursor.execute(
"""
INSERT INTO contrib_qotw_all_history (guild_id, user_id, question)
VALUES(?,?,?)
""",
params,
)
await ctx.send(
"Your submitted question for the next QOTW has been set.", delete_after=15
)
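        # give the author a moment to read the acknowledgement, then remove their submission message to keep the channel tidy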
await asyncio.sleep(10)
try:
await ctx.message.delete()
except Exception as exc:
log.exception("Couldn't delete", exc_info=exc)
@ask.before_invoke
async def ask_before_invoke(self, ctx: SalamanderContext):
cursor = self.conn.cursor()
row = cursor.execute(
"""
SELECT channel_id FROM contrib_qotw_guild_settings WHERE guild_id = ?
""",
(ctx.guild.id,),
).fetchone()
if not row:
raise commands.CheckFailure()
(channel_id,) = row
if channel_id != ctx.channel.id:
raise commands.CheckFailure()
|
[
"asyncio.gather",
"discord.ext.commands.command",
"asyncio.sleep",
"contextlib.suppress",
"datetime.datetime.utcnow",
"random.randrange",
"discord.ext.commands.group",
"discord.ext.commands.guild_only",
"fractions.Fraction",
"logging.getLogger",
"discord.ext.commands.CheckFailure"
] |
[((1067, 1116), 'logging.getLogger', 'logging.getLogger', (['"""salamander.contrib_exts.qotw"""'], {}), "('salamander.contrib_exts.qotw')\n", (1084, 1116), False, 'import logging\n'), ((8070, 8091), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (8089, 8091), False, 'from discord.ext import commands\n'), ((8097, 8127), 'discord.ext.commands.group', 'commands.group', ([], {'name': '"""qotwset"""'}), "(name='qotwset')\n", (8111, 8127), False, 'from discord.ext import commands\n'), ((8347, 8368), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (8366, 8368), False, 'from discord.ext import commands\n'), ((10021, 10042), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (10040, 10042), False, 'from discord.ext import commands\n'), ((10912, 10933), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (10931, 10933), False, 'from discord.ext import commands\n'), ((12308, 12329), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (12327, 12329), False, 'from discord.ext import commands\n'), ((12335, 12353), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (12351, 12353), False, 'from discord.ext import commands\n'), ((13957, 13978), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (13976, 13978), False, 'from discord.ext import commands\n'), ((13984, 14002), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (14000, 14002), False, 'from discord.ext import commands\n'), ((2682, 2701), 'random.randrange', 'random.randrange', (['n'], {}), '(n)\n', (2698, 2701), False, 'import random\n'), ((6375, 6417), 'contextlib.suppress', 'contextlib.suppress', (['discord.HTTPException'], {}), '(discord.HTTPException)\n', (6394, 6417), False, 'import contextlib\n'), ((6702, 6744), 'contextlib.suppress', 'contextlib.suppress', (['discord.HTTPException'], {}), '(discord.HTTPException)\n', (6721, 6744), False, 'import contextlib\n'), ((15048, 15065), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (15061, 15065), False, 'import asyncio\n'), ((15568, 15591), 'discord.ext.commands.CheckFailure', 'commands.CheckFailure', ([], {}), '()\n', (15589, 15591), False, 'from discord.ext import commands\n'), ((15680, 15703), 'discord.ext.commands.CheckFailure', 'commands.CheckFailure', ([], {}), '()\n', (15701, 15703), False, 'from discord.ext import commands\n'), ((3992, 4010), 'asyncio.sleep', 'asyncio.sleep', (['(600)'], {}), '(600)\n', (4005, 4010), False, 'import asyncio\n'), ((4684, 4729), 'asyncio.gather', 'asyncio.gather', (['*tsks'], {'return_exceptions': '(True)'}), '(*tsks, return_exceptions=True)\n', (4698, 4729), False, 'import asyncio\n'), ((4029, 4046), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4044, 4046), False, 'from datetime import datetime, timezone\n'), ((12194, 12217), 'fractions.Fraction', 'Fraction', (['weight', 'total'], {}), '(weight, total)\n', (12202, 12217), False, 'from fractions import Fraction\n'), ((13648, 13685), 'fractions.Fraction', 'Fraction', (['user_question_weight', 'total'], {}), '(user_question_weight, total)\n', (13656, 13685), False, 'from fractions import Fraction\n')]
|