| code | apis | extract_api |
|---|---|---|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
u"""Downloads notes from the TEK web.
@author: <NAME>
"""
import requests
from bs4 import BeautifulSoup
import os.path
import wget
import ssl

ssl._create_default_https_context = ssl._create_unverified_context

url_root = 'https://teknet.tek.fi/arkisto.lehti/content/'
url = url_root+'ack-vad-gul.html'

r = requests.get(url, verify=False)
parsed_html = BeautifulSoup(r.text, 'lxml')
songs = parsed_html.body.find('ul', attrs={'class': 'blog-list'})
song_urls = songs.find_all('a')
urls = map(lambda x: x.get('href'), song_urls)
# names = map(lambda x: x.string, song_urls)

for song_url in urls:
    correct_url = song_url.replace("a%CC%88", "%C3%A4")
    correct_url = correct_url.replace("o%CC%88", "%C3%B6")
    r_song = requests.get(url_root+correct_url, verify=False)
    parsed_html = BeautifulSoup(r_song.text, 'lxml')
    content = parsed_html.body.find('div', attrs={'class': 'node'})
    if content:
        imgs = content.find_all('img')
        img_urls = map(lambda x: x.get('src'), imgs)
        for img_url in img_urls:
            filename = img_url[img_url.rfind("/")+1:]
            if not os.path.isfile(filename):
                wget.download(url_root+img_url)
|
[
"bs4.BeautifulSoup",
"requests.get",
"wget.download"
] |
[((355, 386), 'requests.get', 'requests.get', (['url'], {'verify': '(False)'}), '(url, verify=False)\n', (367, 386), False, 'import requests\n'), ((401, 430), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (414, 430), False, 'from bs4 import BeautifulSoup\n'), ((772, 822), 'requests.get', 'requests.get', (['(url_root + correct_url)'], {'verify': '(False)'}), '(url_root + correct_url, verify=False)\n', (784, 822), False, 'import requests\n'), ((839, 873), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r_song.text', '"""lxml"""'], {}), "(r_song.text, 'lxml')\n", (852, 873), False, 'from bs4 import BeautifulSoup\n'), ((1198, 1231), 'wget.download', 'wget.download', (['(url_root + img_url)'], {}), '(url_root + img_url)\n', (1211, 1231), False, 'import wget\n')]
|
import json
from django.shortcuts import render, HttpResponse
from django.views import View


# FBV (function-based view)
def users(request):
    # if request.method == "GET":
    #     pass
    user_list = ['smalle', 'aezocn']
    return HttpResponse(json.dumps(user_list))


class MyBaseView(object):
    # Works like a decorator (interceptor) around the view dispatch
    def dispatch(self, request, *args, **kwargs):
        print('before...')
        # MyBaseView itself defines no other dispatch, so super() continues the MRO
        # lookup through the remaining parent classes of self (StudentsView), i.e. View
        ret = super(MyBaseView, self).dispatch(request, *args, **kwargs)
        print('end...')
        return ret


# CBV (class-based view): the handler method is chosen automatically from the HTTP method
class StudentsView(MyBaseView, View):
    # Multiple inheritance (priority left to right): look up the attribute/method on the
    # instance itself -> then on the leftmost parent class -> then on the next parent -> ...
    # def dispatch(self, request, *args, **kwargs):
    #     # Use reflection to pick the matching handler (the parent View.dispatch works the same way)
    #     fun = getattr(self, request.method.lower())
    #     return fun(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        print('get...')
        return HttpResponse('GET...')

    def post(self, request, *args, **kwargs):
        return HttpResponse('POST...')

    def put(self, request, *args, **kwargs):
        return HttpResponse('PUT...')

    def delete(self, request, *args, **kwargs):
        return HttpResponse('DELETE...')


# ################# contenttypes ####################
from . import models


def test_contenttypes_create(request):
    '''Create data'''
    banner = models.BannerImage.objects.filter(name='home').first()
    models.Image.objects.create(path='home1.jpg', content_object=banner)
    models.Image.objects.create(path='home2.jpg', content_object=banner)
    models.Image.objects.create(path='home3.jpg', content_object=banner)
    return HttpResponse('test_contenttypes_create...')


def test_contenttypes_list(request):
    '''Query data'''
    banner = models.BannerImage.objects.filter(id=1).first()
    image_list = banner.image_list.all()
    # <QuerySet [<Image: Image object (1)>, <Image: Image object (2)>, <Image: Image object (3)>]>
    print(image_list)
    return HttpResponse('test_contenttypes_list...')
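
# A minimal sketch (not from the original project) of the models the two contenttypes
# views above assume: `Image` carries a GenericForeignKey and `BannerImage` exposes the
# reverse relation as `image_list`. Any field not referenced above is hypothetical.
#
# from django.db import models
# from django.contrib.contenttypes.models import ContentType
# from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
#
# class BannerImage(models.Model):
#     name = models.CharField(max_length=64)
#     image_list = GenericRelation('Image')  # used above as banner.image_list.all()
#
# class Image(models.Model):
#     path = models.CharField(max_length=255)
#     content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#     object_id = models.PositiveIntegerField()
#     content_object = GenericForeignKey('content_type', 'object_id')  # passed as content_object=banner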
|
[
"django.shortcuts.HttpResponse",
"json.dumps"
] |
[((1675, 1718), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""test_contenttypes_create..."""'], {}), "('test_contenttypes_create...')\n", (1687, 1718), False, 'from django.shortcuts import render, HttpResponse\n'), ((2008, 2049), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""test_contenttypes_list..."""'], {}), "('test_contenttypes_list...')\n", (2020, 2049), False, 'from django.shortcuts import render, HttpResponse\n'), ((230, 251), 'json.dumps', 'json.dumps', (['user_list'], {}), '(user_list)\n', (240, 251), False, 'import json\n'), ((968, 990), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""GET..."""'], {}), "('GET...')\n", (980, 990), False, 'from django.shortcuts import render, HttpResponse\n'), ((1050, 1073), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""POST..."""'], {}), "('POST...')\n", (1062, 1073), False, 'from django.shortcuts import render, HttpResponse\n'), ((1132, 1154), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""PUT..."""'], {}), "('PUT...')\n", (1144, 1154), False, 'from django.shortcuts import render, HttpResponse\n'), ((1216, 1241), 'django.shortcuts.HttpResponse', 'HttpResponse', (['"""DELETE..."""'], {}), "('DELETE...')\n", (1228, 1241), False, 'from django.shortcuts import render, HttpResponse\n')]
|
from pcf.core import State
from pcf.particle.aws.dynamodb.dynamodb_table import DynamoDB

# example dynamodb
dynamodb_example_json = {
    "pcf_name": "pcf_dynamodb",  # Required
    "flavor": "dynamodb_table",  # Required
    "aws_resource": {
        # Refer to https://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Client.create_table for a full list of parameters
        "AttributeDefinitions": [
            {
                "AttributeName": "Post",
                "AttributeType": "S"
            },
            {
                "AttributeName": "PostDateTime",
                "AttributeType": "S"
            },
        ],
        "TableName": "pcf_test_table",
        "KeySchema": [
            {
                "AttributeName": "Post",
                "KeyType": "HASH"
            },
            {
                "AttributeName": "PostDateTime",
                "KeyType": "RANGE"
            }
        ],
        "LocalSecondaryIndexes": [
            {
                "IndexName": "LastPostIndex",
                "KeySchema": [
                    {
                        "AttributeName": "Post",
                        "KeyType": "HASH"
                    },
                    {
                        "AttributeName": "PostDateTime",
                        "KeyType": "RANGE"
                    }
                ],
                "Projection": {
                    "ProjectionType": "KEYS_ONLY"
                }
            }
        ],
        "ProvisionedThroughput": {
            "ReadCapacityUnits": 10,
            "WriteCapacityUnits": 10
        },
        "Tags": [
            {
                "Key": "Name",
                "Value": "pcf-dynamodb-test"
            }
        ]
    }
}

# create dynamodb particle using json
dynamodb_particle = DynamoDB(dynamodb_example_json)

# example start
dynamodb_particle.set_desired_state(State.running)
dynamodb_particle.apply()
print(dynamodb_particle.get_state())
print(dynamodb_particle.get_current_state_definition())

# example update
dynamodb_example_json["aws_resource"]["ProvisionedThroughput"] = {"ReadCapacityUnits": 25, "WriteCapacityUnits": 30}
dynamodb_particle = DynamoDB(dynamodb_example_json)
dynamodb_particle.set_desired_state(State.running)
dynamodb_particle.apply()

# example item
key_value = {
    "Post": {
        "S": "adding post to table"
    },
    "PostDateTime": {
        "S": "201807031301"
    }
}

# example put item
dynamodb_particle.put_item(key_value)
# example get item
print(dynamodb_particle.get_item(key_value))
# example delete item
print(dynamodb_particle.delete_item(key_value))
print(dynamodb_particle.get_state())
print(dynamodb_particle.get_current_state_definition())

# example terminate
dynamodb_particle.set_desired_state(State.terminated)
dynamodb_particle.apply()
print(dynamodb_particle.get_state())
|
[
"pcf.particle.aws.dynamodb.dynamodb_table.DynamoDB"
] |
[((1820, 1851), 'pcf.particle.aws.dynamodb.dynamodb_table.DynamoDB', 'DynamoDB', (['dynamodb_example_json'], {}), '(dynamodb_example_json)\n', (1828, 1851), False, 'from pcf.particle.aws.dynamodb.dynamodb_table import DynamoDB\n'), ((2196, 2227), 'pcf.particle.aws.dynamodb.dynamodb_table.DynamoDB', 'DynamoDB', (['dynamodb_example_json'], {}), '(dynamodb_example_json)\n', (2204, 2227), False, 'from pcf.particle.aws.dynamodb.dynamodb_table import DynamoDB\n')]
|
# ----------------------------------------------------------------------
# noc.core.script.metrics tests
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------

# Third-party modules
import pytest

# NOC modules
from noc.core.script.metrics import (
    percent,
    percent_invert,
    percent_usage,
    convert_percent_str,
    sum,
    subtract,
    is1,
    invert0,
    scale,
)


@pytest.mark.parametrize(
    "value,total,expected",
    [
        (10.0, 0, 100.0),
        (10.0, None, 100.0),
        (1.0, 10.0, 10.0),
        (5.0, 10.0, 50.0),
        (9.0, 10.0, 90.0),
        (10.0, 10.0, 100.0),
    ],
)
def test_percent(value, total, expected):
    assert percent(value, total) == expected


@pytest.mark.parametrize(
    "value,total,expected",
    [
        (10.0, 0, 100.0),
        (10.0, None, 100.0),
        (1.0, 9.0, 10.0),
        (5.0, 5.0, 50.0),
        (9.0, 0.0, 100.0),
        (10.0, 10.0, 50.0),
    ],
)
def test_percent_usage(value, total, expected):
    assert percent_usage(value, total) == expected


@pytest.mark.parametrize(
    "value,total,expected",
    [
        (10.0, 0, 100.0),
        (10.0, None, 100.0),
        (1.0, 10.0, 90.0),
        (5.0, 10.0, 50.0),
        (9.0, 10.0, 10.0),
        (10.0, 10.0, 0.0),
    ],
)
def test_percent_invert(value, total, expected):
    assert percent_invert(value, total) == expected


@pytest.mark.parametrize(
    "value,expected", [("09%", 9.0), ("09% ", 9.0), ("09", 9.0), ("10%", 10.0), (None, 0)]
)
def test_convert_percent_str(value, expected):
    assert convert_percent_str(value) == expected


@pytest.mark.parametrize(
    "values,expected", [((1.0,), 1.0), ((1.0, 2.0), 3.0), ((1.0, 2.0, 3.0), 6.0)]
)
def test_sum(values, expected):
    assert sum(*values) == expected


@pytest.mark.parametrize(
    "values,expected", [((10.0, 1.0), 9.0), ((10.0, 1.0, 2.0), 7.0), ((10.0, 1.0, 2.0, 3.0), 4.0)]
)
def test_subtract(values, expected):
    assert subtract(*values) == expected


@pytest.mark.parametrize("value,expected", [(0, 0), (1, 1), (2, 0)])
def test_is1(value, expected):
    assert is1(value) == expected


@pytest.mark.parametrize("value,expected", [(-1, 1), (0, 1), (1, 0)])
def test_invert0(value, expected):
    assert invert0(value) == expected


@pytest.mark.parametrize("sf,value,expected", [(1, 1, 1), (0, 1, 0), (10, 5, 50), (8, 0.25, 2.0)])
def test_scale(sf, value, expected):
    f = scale(sf)
    assert f(value) == expected
|
[
"noc.core.script.metrics.scale",
"noc.core.script.metrics.invert0",
"noc.core.script.metrics.percent_invert",
"noc.core.script.metrics.is1",
"noc.core.script.metrics.percent_usage",
"noc.core.script.metrics.convert_percent_str",
"noc.core.script.metrics.subtract",
"pytest.mark.parametrize",
"noc.core.script.metrics.percent",
"noc.core.script.metrics.sum"
] |
[((547, 721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,total,expected"""', '[(10.0, 0, 100.0), (10.0, None, 100.0), (1.0, 10.0, 10.0), (5.0, 10.0, 50.0\n ), (9.0, 10.0, 90.0), (10.0, 10.0, 100.0)]'], {}), "('value,total,expected', [(10.0, 0, 100.0), (10.0,\n None, 100.0), (1.0, 10.0, 10.0), (5.0, 10.0, 50.0), (9.0, 10.0, 90.0),\n (10.0, 10.0, 100.0)])\n", (570, 721), False, 'import pytest\n'), ((870, 1042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,total,expected"""', '[(10.0, 0, 100.0), (10.0, None, 100.0), (1.0, 9.0, 10.0), (5.0, 5.0, 50.0),\n (9.0, 0.0, 100.0), (10.0, 10.0, 50.0)]'], {}), "('value,total,expected', [(10.0, 0, 100.0), (10.0,\n None, 100.0), (1.0, 9.0, 10.0), (5.0, 5.0, 50.0), (9.0, 0.0, 100.0), (\n 10.0, 10.0, 50.0)])\n", (893, 1042), False, 'import pytest\n'), ((1202, 1374), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,total,expected"""', '[(10.0, 0, 100.0), (10.0, None, 100.0), (1.0, 10.0, 90.0), (5.0, 10.0, 50.0\n ), (9.0, 10.0, 10.0), (10.0, 10.0, 0.0)]'], {}), "('value,total,expected', [(10.0, 0, 100.0), (10.0,\n None, 100.0), (1.0, 10.0, 90.0), (5.0, 10.0, 50.0), (9.0, 10.0, 10.0),\n (10.0, 10.0, 0.0)])\n", (1225, 1374), False, 'import pytest\n'), ((1537, 1653), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,expected"""', "[('09%', 9.0), ('09% ', 9.0), ('09', 9.0), ('10%', 10.0), (None, 0)]"], {}), "('value,expected', [('09%', 9.0), ('09% ', 9.0), (\n '09', 9.0), ('10%', 10.0), (None, 0)])\n", (1560, 1653), False, 'import pytest\n'), ((1755, 1862), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""values,expected"""', '[((1.0,), 1.0), ((1.0, 2.0), 3.0), ((1.0, 2.0, 3.0), 6.0)]'], {}), "('values,expected', [((1.0,), 1.0), ((1.0, 2.0), 3.0\n ), ((1.0, 2.0, 3.0), 6.0)])\n", (1778, 1862), False, 'import pytest\n'), ((1935, 2058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""values,expected"""', '[((10.0, 1.0), 9.0), ((10.0, 1.0, 2.0), 7.0), ((10.0, 1.0, 2.0, 3.0), 4.0)]'], {}), "('values,expected', [((10.0, 1.0), 9.0), ((10.0, 1.0,\n 2.0), 7.0), ((10.0, 1.0, 2.0, 3.0), 4.0)])\n", (1958, 2058), False, 'import pytest\n'), ((2142, 2209), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,expected"""', '[(0, 0), (1, 1), (2, 0)]'], {}), "('value,expected', [(0, 0), (1, 1), (2, 0)])\n", (2165, 2209), False, 'import pytest\n'), ((2278, 2346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,expected"""', '[(-1, 1), (0, 1), (1, 0)]'], {}), "('value,expected', [(-1, 1), (0, 1), (1, 0)])\n", (2301, 2346), False, 'import pytest\n'), ((2423, 2524), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sf,value,expected"""', '[(1, 1, 1), (0, 1, 0), (10, 5, 50), (8, 0.25, 2.0)]'], {}), "('sf,value,expected', [(1, 1, 1), (0, 1, 0), (10, 5,\n 50), (8, 0.25, 2.0)])\n", (2446, 2524), False, 'import pytest\n'), ((2566, 2575), 'noc.core.script.metrics.scale', 'scale', (['sf'], {}), '(sf)\n', (2571, 2575), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((833, 854), 'noc.core.script.metrics.percent', 'percent', (['value', 'total'], {}), '(value, total)\n', (840, 854), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((1159, 1186), 'noc.core.script.metrics.percent_usage', 'percent_usage', (['value', 'total'], {}), '(value, total)\n', (1172, 1186), 
False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((1493, 1521), 'noc.core.script.metrics.percent_invert', 'percent_invert', (['value', 'total'], {}), '(value, total)\n', (1507, 1521), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((1713, 1739), 'noc.core.script.metrics.convert_percent_str', 'convert_percent_str', (['value'], {}), '(value)\n', (1732, 1739), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((1907, 1919), 'noc.core.script.metrics.sum', 'sum', (['*values'], {}), '(*values)\n', (1910, 1919), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((2109, 2126), 'noc.core.script.metrics.subtract', 'subtract', (['*values'], {}), '(*values)\n', (2117, 2126), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((2252, 2262), 'noc.core.script.metrics.is1', 'is1', (['value'], {}), '(value)\n', (2255, 2262), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n'), ((2393, 2407), 'noc.core.script.metrics.invert0', 'invert0', (['value'], {}), '(value)\n', (2400, 2407), False, 'from noc.core.script.metrics import percent, percent_invert, percent_usage, convert_percent_str, sum, subtract, is1, invert0, scale\n')]
|
import urllib.parse
import base64
from urllib.parse import unquote
from urllib.parse import quote
tempEmail = input('Registered Email: ').encode('UTF-8')
suffix = input('Suffix Added: ').encode('UTF-8')
adminEmail = input('Admin Email: ').encode('UTF-8')
saml_dec = base64.b64decode(unquote(input('SAMLResponse: ')))
saml_dec = saml_dec.replace(tempEmail, (adminEmail + b'<!--hoho-->' + suffix))
final = urllib.parse.quote(base64.b64encode(saml_dec).decode())
print ('\n\n\n\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n\n\n')
print (final)
|
[
"base64.b64encode"
] |
[((446, 472), 'base64.b64encode', 'base64.b64encode', (['saml_dec'], {}), '(saml_dec)\n', (462, 472), False, 'import base64\n')]
|
from operator import attrgetter
from typing import List

from meadow.models import Book


def search_by_title(title: str) -> List[Book]:
    if not title:
        return list(filter(attrgetter("is_approved"), Book.objects.all()))
    title = title.lower()
    books = []
    for book in Book.objects.all():
        if book.title.lower().count(title) > 0 and book.is_approved:
            books.append(book)
    return books


def book_preview(book_id: int) -> dict:
    book = Book.objects.get(id=book_id)
    if not book.is_approved:
        raise ValueError("Book is not approved!")
    return {
        "title": book.title,
        "author": {"first_name": book.author.first_name, "last_name": book.author.last_name},
        "description": book.description,
        "isbn_10": book.isbn_10,
        "isbn_13": book.isbn_13,
        "download_link": book.download_link,
    }
|
[
"meadow.models.Book.objects.all",
"operator.attrgetter",
"meadow.models.Book.objects.get"
] |
[((289, 307), 'meadow.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (305, 307), False, 'from meadow.models import Book\n'), ((479, 507), 'meadow.models.Book.objects.get', 'Book.objects.get', ([], {'id': 'book_id'}), '(id=book_id)\n', (495, 507), False, 'from meadow.models import Book\n'), ((182, 207), 'operator.attrgetter', 'attrgetter', (['"""is_approved"""'], {}), "('is_approved')\n", (192, 207), False, 'from operator import attrgetter\n'), ((209, 227), 'meadow.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (225, 227), False, 'from meadow.models import Book\n')]
|
import random
from functools import lru_cache

from hypothesis import core


class Settings:
    def __init__(self) -> None:
        self.seed = random.getrandbits(128)  # type: int
        self.unicode_enabled = True  # type: bool
        self.enable_color = True  # type: bool

    @property
    def seed(self) -> int:
        return self._seed

    @seed.setter
    def seed(self, value: int) -> None:
        self._seed = value
        core.global_force_seed = value  # type: ignore
        random.seed(value)


@lru_cache(maxsize=1)
def get_settings() -> Settings:
    return Settings()
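
# Brief usage sketch (not part of the original module): because get_settings() is cached
# with lru_cache(maxsize=1), every caller shares a single Settings instance, and assigning
# to .seed re-seeds both hypothesis (core.global_force_seed) and the random module.
#
# settings = get_settings()
# assert settings is get_settings()   # same cached instance
# settings.seed = 1234                # propagates to hypothesis and random.seed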
|
[
"functools.lru_cache",
"random.seed",
"random.getrandbits"
] |
[((546, 566), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (555, 566), False, 'from functools import lru_cache\n'), ((145, 168), 'random.getrandbits', 'random.getrandbits', (['(128)'], {}), '(128)\n', (163, 168), False, 'import random\n'), ((524, 542), 'random.seed', 'random.seed', (['value'], {}), '(value)\n', (535, 542), False, 'import random\n')]
|
from hookup import db
from hookup.models import Page, User
import getpass

DEFAULT_SITES = ["facebook", "twitter", "netflix", "github"]


def create_superuser():
    username = input("Username: ")
    password = getpass.getpass("Password ")
    user = User(username=username, password=password)
    user.save()


def register_sites(sites=DEFAULT_SITES):
    user = User.query.first()
    for site in sites:
        page = Page(name=site, source=f"{site}.html", stock=True)
        user.pages.append(page)
    user.save()


def main():
    db.create_all()
    exists = User.query.first()
    if not exists:
        create_superuser()
        register_sites()
    print("[+] Done")


if __name__ == '__main__':
    main()
|
[
"hookup.db.create_all",
"getpass.getpass",
"hookup.models.Page",
"hookup.models.User",
"hookup.models.User.query.first"
] |
[((211, 239), 'getpass.getpass', 'getpass.getpass', (['"""Password """'], {}), "('Password ')\n", (226, 239), False, 'import getpass\n'), ((251, 293), 'hookup.models.User', 'User', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (255, 293), False, 'from hookup.models import Page, User\n'), ((363, 381), 'hookup.models.User.query.first', 'User.query.first', ([], {}), '()\n', (379, 381), False, 'from hookup.models import Page, User\n'), ((540, 555), 'hookup.db.create_all', 'db.create_all', ([], {}), '()\n', (553, 555), False, 'from hookup import db\n'), ((569, 587), 'hookup.models.User.query.first', 'User.query.first', ([], {}), '()\n', (585, 587), False, 'from hookup.models import Page, User\n'), ((420, 470), 'hookup.models.Page', 'Page', ([], {'name': 'site', 'source': 'f"""{site}.html"""', 'stock': '(True)'}), "(name=site, source=f'{site}.html', stock=True)\n", (424, 470), False, 'from hookup.models import Page, User\n')]
|
from __future__ import annotations
import re
from typing import Callable, TypeVar, Any, Sequence, Mapping, MutableMapping, Iterable, Optional
from datetime import datetime, date as dt_date, time as dt_time
from time import time as t_time, sleep
from unicodedata import normalize
from string import Formatter
from dateutil.parser import parserinfo, parse
from seleniumbase.config.settings import LARGE_TIMEOUT
from . import types as pb_types
T = TypeVar('T')
def wait_until(f: Callable[..., T],
args: list = None,
kwargs: dict = None,
timeout: Optional[pb_types.NumberType] = None,
step: pb_types.NumberType = 0.5,
expected: Any = True,
equals: bool = True,
raise_error: str = None, ) -> (bool, T):
"""
Waits until Callable `f` returns the `expected` value
(or something different from the expected value if `equals` is False).
If you want to check an object property instead of a method, you can use a `lambda` function.
:param f: The Callable object (usually function or method)
:param args: List of positional arguments passed to f. Default: []
:param kwargs: Dictionary of keyword arguments passed to f. Default: {}
:param timeout: Timeout in seconds
:param step: Wait time between each check
:param expected: Expected value
:param equals: If True, wait until f(*args, **kwargs) == expected.
If False, wait until f(*args, **kwargs) != expected.
:param raise_error: If not None, raises an Error if timeout is reached
:return: Tuple(success, value). success is True if the waiting succeeded,
and value is the last value returned by f(*args, **kwargs)
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
if timeout is None:
timeout = LARGE_TIMEOUT
if timeout < 0:
raise RuntimeError(f"timeout should be >= 0. timeout = {timeout}")
if step <= 0:
raise RuntimeError(f"step should be > 0. step = {step}")
if equals is True:
default_value = None if expected is not None else False
else:
default_value = expected
current = t_time()
start = current
stop = start + timeout
value = default_value
# noinspection PyBroadException,TryExceptPass
try:
value = f(*args, **kwargs)
except Exception:
pass
keep_looping = True
while keep_looping:
if (value == expected) is equals:
return True, value
after = t_time()
if after < current + step:
sleep(current + step - after)
current = t_time()
if current <= stop:
# noinspection PyBroadException,TryExceptPass
try:
value = f(*args, **kwargs)
except Exception:
pass
else:
keep_looping = False
else:
if raise_error is not None:
raise TimeoutError(
f"{raise_error}. f='{f}', args='{args}', kwargs='{kwargs}', timeout='{timeout}', step='{step}', "
f"expected='{expected}', equals='{equals}', last value={value}",
)
else:
return False, value
class ParserInfoEs(parserinfo):
HMS = [('h', 'hour', 'hours', 'hora', 'horas'),
('m', 'minute', 'minutes', 'minuto', 'minutos'),
('s', 'second', 'seconds', 'segundo', 'segundos')]
JUMP = [' ', '.', ',', ';', '-', '/', "'", 'at', 'on', 'and', 'ad', 'm', 't', 'of', 'st', 'nd', 'rd', 'th',
'a', 'en', 'y', 'de']
MONTHS = [('Jan', 'January', 'Ene', 'Enero'),
('Feb', 'February', 'Febrero'),
('Mar', 'March', 'Marzo'),
('Apr', 'April', 'Abr', 'Abril'),
('May', 'May', 'Mayo'),
('Jun', 'June', 'Junio'),
('Jul', 'July', 'Julio'),
('Aug', 'August', 'Ago', 'Agosto'),
('Sep', 'Sept', 'September', 'Septiembre'),
('Oct', 'October', 'Octubre'),
('Nov', 'November', 'Noviembre'),
('Dec', 'December', 'Dic', 'Diciembre')]
PERTAIN = ['of', 'de']
WEEKDAYS = [('Mon', 'Monday', 'L', 'Lun', 'Lunes'),
('Tue', 'Tuesday', 'M', 'Mar', 'Martes'),
('Wed', 'Wednesday', 'X', 'Mie', 'Mié', 'Mier', 'Miér', 'Miercoles', 'Miércoles'),
('Thu', 'Thursday', 'J', 'Jue', 'Jueves'),
('Fri', 'Friday', 'V', 'Vie', 'Viernes'),
('Sat', 'Saturday', 'S', 'Sab', 'Sáb', 'Sabado', 'Sábado'),
('Sun', 'Sunday', 'D', 'Dom', 'Domingo')]
def __init__(self, dayfirst=True, yearfirst=False):
super().__init__(dayfirst=dayfirst, yearfirst=yearfirst)
class DateUtil:
@staticmethod
def parse_datetime_es(date_str: str) -> datetime:
parser_info_es = ParserInfoEs()
return parse(date_str, parser_info_es)
@staticmethod
def parse_date_es(date_str: str) -> dt_date:
return DateUtil.parse_datetime_es(date_str).date()
@staticmethod
def parse_time_es(date_str: str) -> dt_time:
return DateUtil.parse_datetime_es(date_str).time()
@staticmethod
def python_format_date(the_date: datetime, python_format_str: str = "{date.day}/{date:%m}/{date.year}") -> str:
class CustomFormatter(Formatter):
def get_field(self,
field_name: str,
args: Sequence[Any],
kwargs: Mapping[str, Any]) -> Any:
if field_name.startswith("date") is False:
raise RuntimeError(f"Incorrect python_format_str: {python_format_str}")
return super().get_field(field_name, args, kwargs)
formatter = CustomFormatter()
# noinspection StrFormat
return formatter.format(python_format_str, date=the_date)
class CaseInsensitiveDict(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return ((lowerkey, keyval[1]) for lowerkey, keyval in self._store.items())
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
def clean(s: str) -> str:
s = s.lower()
s = re.sub('á', 'a', s)
s = re.sub('é', 'e', s)
s = re.sub('í', 'i', s)
s = re.sub('ó', 'o', s)
s = re.sub('ú', 'u', s)
s = re.sub('ñ', 'n', s)
# Invalid characters
s = re.sub('[^0-9a-zA-Z_]', '_', s)
# Remove leading characters until we find a letter or underscore
s = re.sub('^[^a-zA-Z_]+', '', s)
return s
def normalize_caseless(text: str) -> str:
return normalize("NFKD", text.casefold())
def caseless_equal(left, right) -> bool:
return normalize_caseless(left) == normalize_caseless(right)
def caseless_text_in_texts(text: str, texts: Iterable[str]) -> bool:
normalized_set = {normalize_caseless(t) for t in texts}
normalized_text = normalize_caseless(text)
return normalized_text in normalized_set
def expand_replacing_spaces_and_underscores(texts: Iterable[str]) -> set[str]:
expanded = set(texts)
expanded = expanded.union({t.replace("_", " ") for t in texts})
expanded = expanded.union({t.replace(" ", "_") for t in texts})
return expanded
def first_not_none(*args: T) -> Optional[T]:
for i in args:
if i is not None:
return i
else:
return None
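
# Usage sketch for wait_until above (not from the original module; the callable and
# locator names are hypothetical). It polls a callable until the expected value is
# observed, or the timeout expires, and returns (success, last_value):
#
# def element_count(driver, selector):
#     return len(driver.find_elements("css selector", selector))
#
# ok, count = wait_until(element_count,
#                        args=[driver, "div.result"],
#                        expected=0,
#                        equals=False,   # wait until the count is *not* 0
#                        timeout=10,
#                        step=0.5)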
|
[
"dateutil.parser.parse",
"time.sleep",
"time.time",
"typing.TypeVar",
"re.sub"
] |
[((447, 459), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (454, 459), False, 'from typing import Callable, TypeVar, Any, Sequence, Mapping, MutableMapping, Iterable, Optional\n'), ((2218, 2226), 'time.time', 't_time', ([], {}), '()\n', (2224, 2226), True, 'from time import time as t_time, sleep\n'), ((8275, 8294), 're.sub', 're.sub', (['"""á"""', '"""a"""', 's'], {}), "('á', 'a', s)\n", (8281, 8294), False, 'import re\n'), ((8303, 8322), 're.sub', 're.sub', (['"""é"""', '"""e"""', 's'], {}), "('é', 'e', s)\n", (8309, 8322), False, 'import re\n'), ((8331, 8350), 're.sub', 're.sub', (['"""í"""', '"""i"""', 's'], {}), "('í', 'i', s)\n", (8337, 8350), False, 'import re\n'), ((8359, 8378), 're.sub', 're.sub', (['"""ó"""', '"""o"""', 's'], {}), "('ó', 'o', s)\n", (8365, 8378), False, 'import re\n'), ((8387, 8406), 're.sub', 're.sub', (['"""ú"""', '"""u"""', 's'], {}), "('ú', 'u', s)\n", (8393, 8406), False, 'import re\n'), ((8415, 8434), 're.sub', 're.sub', (['"""ñ"""', '"""n"""', 's'], {}), "('ñ', 'n', s)\n", (8421, 8434), False, 'import re\n'), ((8469, 8500), 're.sub', 're.sub', (['"""[^0-9a-zA-Z_]"""', '"""_"""', 's'], {}), "('[^0-9a-zA-Z_]', '_', s)\n", (8475, 8500), False, 'import re\n'), ((8579, 8608), 're.sub', 're.sub', (['"""^[^a-zA-Z_]+"""', '""""""', 's'], {}), "('^[^a-zA-Z_]+', '', s)\n", (8585, 8608), False, 'import re\n'), ((2568, 2576), 'time.time', 't_time', ([], {}), '()\n', (2574, 2576), True, 'from time import time as t_time, sleep\n'), ((2672, 2680), 'time.time', 't_time', ([], {}), '()\n', (2678, 2680), True, 'from time import time as t_time, sleep\n'), ((4933, 4964), 'dateutil.parser.parse', 'parse', (['date_str', 'parser_info_es'], {}), '(date_str, parser_info_es)\n', (4938, 4964), False, 'from dateutil.parser import parserinfo, parse\n'), ((2624, 2653), 'time.sleep', 'sleep', (['(current + step - after)'], {}), '(current + step - after)\n', (2629, 2653), False, 'from time import time as t_time, sleep\n')]
|
import csv
import glob
from biothings.utils.dataload import list_split, dict_sweep, unlist, value_convert_to_number
VALID_COLUMN_NO = 245
'''this parser is for dbNSFP v3.5a beta2 downloaded from
https://sites.google.com/site/jpopgen/dbNSFP'''
# convert one snp to json
def _map_line_to_json(df, version, include_gnomad, index=0):
# specific variable treatment
chrom = df["#chr"]
if chrom == 'M':
chrom = 'MT'
# fields[7] in version 2, represent hg18_pos
hg18_end = df["hg18_pos(1-based)"]
if hg18_end == ".":
hg18_end = "."
else:
hg18_end = int(hg18_end)
# in case of no hg19 position provided, remove the item
if df["hg19_pos(1-based)"] == '.':
return None
else:
chromStart = int(df["hg19_pos(1-based)"])
chromEnd = chromStart
chromStart_38 = int(df["pos(1-based)"])
ref = df["ref"].upper()
alt = df["alt"].upper()
HGVS_19 = "chr%s:g.%d%s>%s" % (chrom, chromStart, ref, alt)
HGVS_38 = "chr%s:g.%d%s>%s" % (chrom, chromStart_38, ref, alt)
if version == 'hg19':
HGVS = HGVS_19
elif version == 'hg38':
HGVS = HGVS_38
siphy_29way_pi = df["SiPhy_29way_pi"]
if siphy_29way_pi == ".":
siphy = "."
else:
freq = siphy_29way_pi.split(":")
siphy = {'a': freq[0], 'c': freq[1], 'g': freq[2], 't': freq[3]}
gtex_gene = df["GTEx_V6p_gene"].split('|')
gtex_tissue = df["GTEx_V6p_tissue"].split('|')
gtex = map(dict, map(lambda t: zip(('gene', 'tissue'), t), zip(gtex_gene, gtex_tissue)))
acc = df["Uniprot_acc_Polyphen2"].rstrip().rstrip(';').split(";")
pos = df["Uniprot_aapos_Polyphen2"].rstrip().rstrip(';').split(";")
uniprot = map(dict, map(lambda t: zip(('acc', 'pos'), t), zip(acc, pos)))
provean_score = df["PROVEAN_score"].split(';')
sift_score = df["SIFT_score"].split(';')
hdiv_score = df["Polyphen2_HDIV_score"].split(';')
hvar_score = df["Polyphen2_HVAR_score"].split(';')
lrt_score = df["LRT_score"].split(';')
m_cap_score = df["M-CAP_score"].split(';')
mutationtaster_score = df["MutationTaster_score"].split(';')
mutationassessor_score = df["MutationAssessor_score"].split(';')
vest3_score = df["VEST3_score"].split(';')
metasvm_score = df["MetaSVM_score"].split(';')
fathmm_score = df["FATHMM_score"].split(';')
metalr_score = df["MetaLR_score"].split(';')
revel_score = df["REVEL_score"].split(';')
'''
parse mutpred top 5 features
'''
def modify_pvalue(pvalue):
return float(pvalue.strip('P = '))
mutpred_mechanisms = df["MutPred_Top5features"]
if mutpred_mechanisms not in ['.', ',', '-']:
mutpred_mechanisms = mutpred_mechanisms.split(" (") and mutpred_mechanisms.split(";")
mutpred_mechanisms = [m.rstrip(")") for m in mutpred_mechanisms]
mutpred_mechanisms = [i.split(" (") for i in mutpred_mechanisms]
mutpred_mechanisms = sum(mutpred_mechanisms, [])
mechanisms = [
{"mechanism": mutpred_mechanisms[0],
"p_val": modify_pvalue(mutpred_mechanisms[1])},
{"mechanism": mutpred_mechanisms[2],
"p_val": modify_pvalue(mutpred_mechanisms[3])},
{"mechanism": mutpred_mechanisms[4],
"p_val": modify_pvalue(mutpred_mechanisms[5])},
{"mechanism": mutpred_mechanisms[6],
"p_val": modify_pvalue(mutpred_mechanisms[7])},
{"mechanism": mutpred_mechanisms[8],
"p_val": modify_pvalue(mutpred_mechanisms[9])}
]
else:
mechanisms = '.'
# normalize scores
def norm(arr):
return [None if item == '.' else item for item in arr]
provean_score = norm(provean_score)
sift_score = norm(sift_score)
hdiv_score = norm(hdiv_score)
hvar_score = norm(hvar_score)
lrt_score = norm(lrt_score)
m_cap_score = norm(m_cap_score)
mutationtaster_score = norm(mutationtaster_score)
mutationassessor_score = norm(mutationassessor_score)
vest3_score = norm(vest3_score)
metasvm_score = norm(metasvm_score)
fathmm_score = norm(fathmm_score)
metalr_score = norm(metalr_score)
revel_score = norm(revel_score)
gnomad = {"gnomad_exomes": {
"ac": df["gnomAD_exomes_AC"],
"an": df["gnomAD_exomes_AN"],
"af": df["gnomAD_exomes_AF"],
"afr_ac": df["gnomAD_exomes_AFR_AC"],
"afr_af": df["gnomAD_exomes_AFR_AF"],
"afr_an": df["gnomAD_exomes_AFR_AN"],
"amr_ac": df["gnomAD_exomes_AMR_AC"],
"amr_an": df["gnomAD_exomes_AMR_AN"],
"amr_af": df["gnomAD_exomes_AMR_AF"],
"asj_ac": df["gnomAD_exomes_ASJ_AC"],
"asj_an": df["gnomAD_exomes_ASJ_AN"],
"asj_af": df["gnomAD_exomes_ASJ_AF"],
"eas_ac": df["gnomAD_exomes_EAS_AC"],
"eas_af": df["gnomAD_exomes_EAS_AF"],
"eas_an": df["gnomAD_exomes_EAS_AN"],
"fin_ac": df["gnomAD_exomes_FIN_AC"],
"fin_af": df["gnomAD_exomes_FIN_AF"],
"fin_an": df["gnomAD_exomes_FIN_AN"],
"nfe_ac": df["gnomAD_exomes_NFE_AC"],
"nfe_af": df["gnomAD_exomes_NFE_AF"],
"nfe_an": df["gnomAD_exomes_NFE_AN"],
"sas_ac": df["gnomAD_exomes_SAS_AC"],
"sas_af": df["gnomAD_exomes_SAS_AF"],
"sas_an": df["gnomAD_exomes_SAS_AN"],
"oth_ac": df["gnomAD_exomes_OTH_AC"],
"oth_af": df["gnomAD_exomes_OTH_AF"],
"oth_an": df["gnomAD_exomes_OTH_AN"]
},
"gnomad_genomes": {
"ac": df["gnomAD_genomes_AC"],
"an": df["gnomAD_genomes_AN"],
"af": df["gnomAD_genomes_AF"],
"afr_ac": df["gnomAD_genomes_AFR_AC"],
"afr_af": df["gnomAD_genomes_AFR_AF"],
"afr_an": df["gnomAD_genomes_AFR_AN"],
"amr_ac": df["gnomAD_genomes_AMR_AC"],
"amr_an": df["gnomAD_genomes_AMR_AN"],
"amr_af": df["gnomAD_genomes_AMR_AF"],
"asj_ac": df["gnomAD_genomes_ASJ_AC"],
"asj_an": df["gnomAD_genomes_ASJ_AN"],
"asj_af": df["gnomAD_genomes_ASJ_AF"],
"eas_ac": df["gnomAD_genomes_EAS_AC"],
"eas_af": df["gnomAD_genomes_EAS_AF"],
"eas_an": df["gnomAD_genomes_EAS_AN"],
"fin_ac": df["gnomAD_genomes_FIN_AC"],
"fin_af": df["gnomAD_genomes_FIN_AF"],
"fin_an": df["gnomAD_genomes_FIN_AN"],
"nfe_ac": df["gnomAD_genomes_NFE_AC"],
"nfe_af": df["gnomAD_genomes_NFE_AF"],
"nfe_an": df["gnomAD_genomes_NFE_AN"],
"oth_ac": df["gnomAD_genomes_OTH_AC"],
"oth_af": df["gnomAD_genomes_OTH_AF"],
"oth_an": df["gnomAD_genomes_OTH_AN"]
}
}
# load as json data
one_snp_json = {
"_id": HGVS,
"dbnsfp": {
"rsid": df["rs_dbSNP150"],
#"rsid_dbSNP144": fields[6],
"chrom": chrom,
"hg19": {
"start": chromStart,
"end": chromEnd
},
"hg18": {
"start": df["hg18_pos(1-based)"],
"end": hg18_end
},
"hg38": {
"start": df["pos(1-based)"],
"end": df["pos(1-based)"]
},
"ref": ref,
"alt": alt,
"aa": {
"ref": df["aaref"],
"alt": df["aaalt"],
"pos": df["aapos"],
"refcodon": df["refcodon"],
"codonpos": df["codonpos"],
"codon_degeneracy": df["codon_degeneracy"],
},
"genename": df["genename"],
"uniprot": list(uniprot),
"interpro_domain": df["Interpro_domain"],
"cds_strand": df["cds_strand"],
"ancestral_allele": df["Ancestral_allele"],
#"altaineandertal": fields[17],
#"denisova": fields[18]
"ensembl": {
"geneid": df["Ensembl_geneid"],
"transcriptid": df["Ensembl_transcriptid"],
"proteinid": df["Ensembl_proteinid"]
},
"sift": {
"score": sift_score,
"converted_rankscore": df["SIFT_converted_rankscore"],
"pred": df["SIFT_pred"]
},
"polyphen2": {
"hdiv": {
"score": hdiv_score,
"rankscore": df["Polyphen2_HDIV_rankscore"],
"pred": df["Polyphen2_HDIV_pred"]
},
"hvar": {
"score": hvar_score,
"rankscore": df["Polyphen2_HVAR_rankscore"],
"pred": df["Polyphen2_HVAR_pred"]
}
},
"lrt": {
"score": lrt_score,
"converted_rankscore": df["LRT_converted_rankscore"],
"pred": df["LRT_pred"],
"omega": df["LRT_Omega"]
},
"mutationtaster": {
"score": mutationtaster_score,
"converted_rankscore": df["MutationTaster_converted_rankscore"],
"pred": df["MutationTaster_pred"],
"model": df["MutationTaster_model"],
"AAE": df["MutationTaster_AAE"]
},
"mutationassessor": {
"score": mutationassessor_score,
"rankscore": df["MutationAssessor_score_rankscore"],
"pred": df["MutationAssessor_pred"]
},
"fathmm": {
"score": fathmm_score,
"rankscore": df["FATHMM_converted_rankscore"],
"pred": df["FATHMM_pred"]
},
"provean": {
"score": provean_score,
"rankscore": df["PROVEAN_converted_rankscore"],
"pred": df["PROVEAN_pred"]
},
"vest3": {
"score": vest3_score,
"rankscore": df["VEST3_rankscore"],
"transcriptid": df["Transcript_id_VEST3"],
"transcriptvar": df["Transcript_var_VEST3"]
},
"fathmm-mkl": {
"coding_score": df["fathmm-MKL_coding_score"],
"coding_rankscore": df["fathmm-MKL_coding_rankscore"],
"coding_pred": df["fathmm-MKL_coding_pred"],
"coding_group": df["fathmm-MKL_coding_group"]
},
"eigen": {
"coding_or_noncoding": df["Eigen_coding_or_noncoding"],
"raw": df["Eigen-raw"],
"phred": df["Eigen-phred"]
},
"eigen-pc": {
"raw": df["Eigen-PC-raw"],
"phred": df["Eigen-PC-phred"],
"raw_rankscore": df["Eigen-PC-raw_rankscore"]
},
"genocanyon": {
"score": df["GenoCanyon_score"],
"rankscore": df["GenoCanyon_score_rankscore"]
},
"metasvm": {
"score": metasvm_score,
"rankscore": df["MetaSVM_rankscore"],
"pred": df["MetaSVM_pred"]
},
"metalr": {
"score": metalr_score,
"rankscore": df["MetaLR_rankscore"],
"pred": df["MetaLR_pred"]
},
"reliability_index": df["Reliability_index"],
"m_cap_score": {
"score": m_cap_score,
"rankscore": df["M-CAP_rankscore"],
"pred": df["M-CAP_pred"]
},
"revel": {
"score": revel_score,
"rankscore": df["REVEL_rankscore"]
},
"mutpred": {
"score": df["MutPred_score"],
"rankscore": df["MutPred_rankscore"],
"accession": df["MutPred_protID"],
"aa_change": df["MutPred_AAchange"],
"pred": mechanisms
},
"dann": {
"score": df["DANN_score"],
"rankscore": df["DANN_rankscore"]
},
"gerp++": {
"nr": df["GERP++_NR"],
"rs": df["GERP++_RS"],
"rs_rankscore": df["GERP++_RS_rankscore"]
},
"integrated": {
"fitcons_score": df["integrated_fitCons_score"],
"fitcons_rankscore": df["integrated_fitCons_score_rankscore"],
"confidence_value": df["integrated_confidence_value"]
},
"gm12878": {
"fitcons_score": df["GM12878_fitCons_score"],
"fitcons_rankscore": df["GM12878_fitCons_score_rankscore"],
"confidence_value": df["GM12878_confidence_value"]
},
"h1-hesc": {
"fitcons_score": df["H1-hESC_fitCons_score"],
"fitcons_rankscore": df["H1-hESC_fitCons_score_rankscore"],
"confidence_value": df["H1-hESC_confidence_value"]
},
"huvec": {
"fitcons_score": df["HUVEC_fitCons_score"],
"fitcons_rankscore": df["HUVEC_fitCons_score_rankscore"],
"confidence_value": df["HUVEC_confidence_value"]
},
"phylo": {
"p100way": {
"vertebrate": df["phyloP100way_vertebrate"],
"vertebrate_rankscore": df["phyloP100way_vertebrate_rankscore"]
},
"p20way": {
"mammalian": df["phyloP20way_mammalian"],
"mammalian_rankscore": df["phyloP20way_mammalian_rankscore"]
}
},
"phastcons": {
"100way": {
"vertebrate": df["phastCons100way_vertebrate"],
"vertebrate_rankscore": df["phastCons100way_vertebrate_rankscore"]
},
"20way": {
"mammalian": df["phastCons20way_mammalian"],
"mammalian_rankscore": df["phastCons20way_mammalian_rankscore"]
}
},
"siphy_29way": {
"pi": siphy,
"logodds": df["SiPhy_29way_logOdds"],
"logodds_rankscore": df["SiPhy_29way_logOdds_rankscore"]
},
"1000gp3": {
"ac": df["1000Gp3_AC"],
"af": df["1000Gp3_AF"],
"afr_ac": df["1000Gp3_AFR_AC"],
"afr_af": df["1000Gp3_AFR_AF"],
"eur_ac": df["1000Gp3_EUR_AC"],
"eur_af": df["1000Gp3_EUR_AF"],
"amr_ac": df["1000Gp3_AMR_AC"],
"amr_af": df["1000Gp3_AMR_AF"],
"eas_ac": df["1000Gp3_EAS_AC"],
"eas_af": df["1000Gp3_EAS_AF"],
"sas_ac": df["1000Gp3_SAS_AC"],
"sas_af": df["1000Gp3_SAS_AF"]
},
"twinsuk": {
"ac": df["TWINSUK_AC"],
"af": df["TWINSUK_AF"]
},
"alspac": {
"ac": df["ALSPAC_AC"],
"af": df["ALSPAC_AF"]
},
"esp6500": {
"aa_ac": df["ESP6500_AA_AC"],
"aa_af": df["ESP6500_AA_AF"],
"ea_ac": df["ESP6500_EA_AC"],
"ea_af": df["ESP6500_EA_AF"]
},
"exac": {
"ac": df["ExAC_AC"],
"af": df["ExAC_AF"],
"adj_ac": df["ExAC_Adj_AC"],
"adj_af": df["ExAC_Adj_AF"],
"afr_ac": df["ExAC_AFR_AC"],
"afr_af": df["ExAC_AFR_AF"],
"amr_ac": df["ExAC_AMR_AC"],
"amr_af": df["ExAC_AMR_AF"],
"eas_ac": df["ExAC_EAS_AC"],
"eas_af": df["ExAC_EAS_AF"],
"fin_ac": df["ExAC_FIN_AC"],
"fin_af": df["ExAC_FIN_AF"],
"nfe_ac": df["ExAC_NFE_AC"],
"nfe_af": df["ExAC_NFE_AF"],
"sas_ac": df["ExAC_SAS_AC"],
"sas_af": df["ExAC_SAS_AF"]
},
"exac_nontcga": {
"ac": df["ExAC_nonTCGA_AC"],
"af": df["ExAC_nonTCGA_AF"],
"adj_ac": df["ExAC_nonTCGA_Adj_AC"],
"adj_af": df["ExAC_nonTCGA_Adj_AF"],
"afr_ac": df["ExAC_nonTCGA_AFR_AC"],
"afr_af": df["ExAC_nonTCGA_AFR_AF"],
"amr_ac": df["ExAC_nonTCGA_AMR_AC"],
"amr_af": df["ExAC_nonTCGA_AMR_AF"],
"eas_ac": df["ExAC_nonTCGA_EAS_AC"],
"eas_af": df["ExAC_nonTCGA_EAS_AF"],
"fin_ac": df["ExAC_nonTCGA_FIN_AC"],
"fin_af": df["ExAC_nonTCGA_FIN_AF"],
"nfe_ac": df["ExAC_nonTCGA_NFE_AC"],
"nfe_af": df["ExAC_nonTCGA_NFE_AF"],
"sas_ac": df["ExAC_nonTCGA_SAS_AC"],
"sas_af": df["ExAC_nonTCGA_SAS_AF"]
},
"exac_nonpsych": {
"ac": df["ExAC_nonpsych_AC"],
"af": df["ExAC_nonpsych_AF"],
"adj_ac": df["ExAC_nonpsych_Adj_AC"],
"adj_af": df["ExAC_nonpsych_Adj_AF"],
"afr_ac": df["ExAC_nonpsych_AFR_AC"],
"afr_af": df["ExAC_nonpsych_AFR_AF"],
"amr_ac": df["ExAC_nonpsych_AMR_AC"],
"amr_af": df["ExAC_nonpsych_AMR_AF"],
"eas_ac": df["ExAC_nonpsych_EAS_AC"],
"eas_af": df["ExAC_nonpsych_EAS_AF"],
"fin_ac": df["ExAC_nonpsych_FIN_AC"],
"fin_af": df["ExAC_nonpsych_FIN_AF"],
"nfe_ac": df["ExAC_nonpsych_NFE_AC"],
"nfe_af": df["ExAC_nonpsych_NFE_AF"],
"sas_ac": df["ExAC_nonpsych_SAS_AC"],
"sas_af": df["ExAC_nonpsych_SAS_AF"]
},
"clinvar": {
"rs": df["clinvar_rs"],
"clinsig": list(map(int,[i for i in df["clinvar_clnsig"].split("|") if i != "."])),
"trait": [i for i in df["clinvar_trait"].split("|") if i != "."],
"golden_stars": list(map(int,[i for i in df["clinvar_golden_stars"].split("|") if i != "."]))
},
"gtex": list(gtex)
}
}
if include_gnomad:
one_snp_json['dbnsfp'].update(gnomad)
one_snp_json = list_split(dict_sweep(unlist(value_convert_to_number(one_snp_json)), vals=[".", '-', None]), ";")
one_snp_json["dbnsfp"]["chrom"] = str(one_snp_json["dbnsfp"]["chrom"])
return one_snp_json
# open file, parse, pass to json mapper
def data_generator(input_file, version, include_gnomad):
open_file = open(input_file)
db_nsfp = csv.reader(open_file, delimiter="\t")
index = next(db_nsfp)
assert len(index) == VALID_COLUMN_NO, "Expecting %s columns, but got %s" % (VALID_COLUMN_NO, len(index))
previous_row = None
for row in db_nsfp:
df = dict(zip(index, row))
# use transpose matrix to have 1 row with N 187 columns
current_row = _map_line_to_json(df, version=version, include_gnomad=include_gnomad)
if previous_row and current_row:
if current_row["_id"] == previous_row["_id"]:
aa = previous_row["dbnsfp"]["aa"]
if not isinstance(aa, list):
aa = [aa]
aa.append(current_row["dbnsfp"]["aa"])
previous_row["dbnsfp"]["aa"] = aa
if len(previous_row["dbnsfp"]["aa"]) > 1:
continue
else:
yield previous_row
previous_row = current_row
if previous_row:
yield previous_row
def load_data_file(input_file, version, include_gnomad=False):
data = data_generator(input_file, version=version, include_gnomad=include_gnomad)
for one_snp_json in data:
yield one_snp_json
# load path and find files, pass to data_generator
def load_data(path_glob, version='hg19', include_gnomad=False):
for input_file in sorted(glob.glob(path_glob)):
for d in load_data_file(input_file, version, include_gnomad):
yield d
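
# Usage sketch for load_data above (the glob pattern is hypothetical; point it at the
# downloaded per-chromosome dbNSFP files). Each yielded document is one JSON-ready
# variant record keyed by its HGVS id:
#
# for doc in load_data('dbNSFP3.5a_variant.chr*', version='hg19', include_gnomad=True):
#     print(doc['_id'])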
|
[
"biothings.utils.dataload.value_convert_to_number",
"csv.reader",
"glob.glob"
] |
[((19083, 19120), 'csv.reader', 'csv.reader', (['open_file'], {'delimiter': '"""\t"""'}), "(open_file, delimiter='\\t')\n", (19093, 19120), False, 'import csv\n'), ((20401, 20421), 'glob.glob', 'glob.glob', (['path_glob'], {}), '(path_glob)\n', (20410, 20421), False, 'import glob\n'), ((18769, 18806), 'biothings.utils.dataload.value_convert_to_number', 'value_convert_to_number', (['one_snp_json'], {}), '(one_snp_json)\n', (18792, 18806), False, 'from biothings.utils.dataload import list_split, dict_sweep, unlist, value_convert_to_number\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import json
import codecs
import fnmatch
import logging
import itertools
import bs4
from tqdm import tqdm
def recursive_iglob(rootdir='.', pattern='*'):
"""Recursive version of iglob.
Taken from https://gist.github.com/whophil/2a999bcaf0ebfbd6e5c0d213fb38f489
"""
for root, dirnames, filenames in os.walk(rootdir):
for filename in fnmatch.filter(filenames, pattern):
yield os.path.join(root, filename)
def fix_unclosed(tag_name, html):
return re.sub(r'(<{}.*[^/-])>'.format(tag_name), r'\1 />', html)
def convert_jptimes(content):
content = fix_unclosed('meta', content)
content = fix_unclosed('link', content)
doc = bs4.BeautifulSoup(content, 'html.parser')
file_name_components = input_file.split('/')
date = '/'.join(file_name_components[2:5])
categories = file_name_components[5:-1]
file_name = file_name_components[-1]
url = 'http://' + input_file
author = doc.find('meta', attrs={'name': 'author'})['content']
# Extracting title
title = doc.find('meta', property='og:title')
if not title:
logging.error('no title for {}'.format(input_file))
print(doc.find_all('meta'))
input()
return
title = re.sub(r'\s+', ' ', title['content']).strip()
title = re.sub(r'\| The Japan Times', '', title)
if not len(title):
logging.error('no title for {}'.format(input_file))
return
# Extracting headline
headline = doc.find('meta', property='og:description')
if not headline:
logging.error('no headline for {}'.format(input_file))
return
headline = re.sub(r'\s+', ' ', headline['content']).strip()
if not len(headline):
logging.error('no headline for {}'.format(input_file))
return
# Extracting article content
body = doc.find('div', attrs={'id': 'jtarticle'})
if not body:
logging.error('no body for {}'.format(input_file))
return
body = re.sub(r'\s+', ' ', body.get_text(separator=' ')).strip()
if not len(body):
logging.error('no body for {}'.format(input_file))
return
# Extracting keywords
keywords = doc.find('meta', attrs={'name': 'keywords'})
if keywords is None:
logging.error('no keywords for {}'.format(input_file))
return
keywords = re.sub(r'\s+', ' ', keywords['content']).strip()
keywords = keywords.split(', ')
# remove empty keywords
keywords = [k.split(';') for k in keywords if k]
if not keywords:
logging.error('no keywords for {}'.format(input_file))
return
return {
'title': title, 'headline': '', 'abstract': body,
'keyword': keywords, 'file_name': file_name,
'date': date, 'categories': categories, 'url': url,
'author': author
}
def convert_nytimes(content):
doc = bs4.BeautifulSoup(content, 'html.parser')
file_name_components = input_file.split('/')
date = '/'.join(file_name_components[1:4])
categories = file_name_components[4:-1]
file_name = '.'.join(file_name_components[-1].split('.')[:-1])
url = 'http://' + input_file
# Removing script and style tags
for script in doc(['script', 'style', 'link', 'button']):
script.decompose() # rip it out
try:
# Before 2013
author = doc.find('meta', attrs={'name': 'author'})['content']
except TypeError:
# After 2013
author = doc.find('meta', attrs={'name': 'byl'})['content']
author = author.replace('By ', '')
# Extracting title
title = doc.find('meta', property='og:title')
if not title:
logging.error('no title for {}'.format(input_file))
return
title = re.sub(r'\s+', ' ', title['content']).strip()
if not len(title):
logging.error('no title for {}'.format(input_file))
return
# Extracting headline
headline = doc.find('meta', property='og:description')
if not headline:
logging.error('no headline for {}'.format(input_file))
return
headline = re.sub(r'\s+', ' ', headline['content']).strip()
if not len(headline):
logging.error('no headline for {}'.format(input_file))
return
# Extracting article content
body = doc.find('section', attrs={'name': 'articleBody'})
if not body:
body = doc.find_all('p', attrs={'class': 'story-body-text story-content'})
if not body:
logging.error('no body for {}'.format(input_file))
return
else:
body = ' '.join([re.sub(r'\s+', ' ', p.get_text(separator=' ')).strip() for p in body])
else:
body = re.sub(r'\s+', ' ', body.get_text(separator=' ')).strip()
if not len(body):
logging.error('no body for {}'.format(input_file))
return
# Extracting keywords
keywords = doc.find('meta', attrs={'name': 'news_keywords'})
if keywords is None:
keywords = doc.find('meta', attrs={'name': 'keywords'})
if not keywords:
logging.error('no keywords for {}'.format(input_file))
return
keywords = re.sub(r'\s+', ' ', keywords['content']).strip()
keywords = keywords.split(',')
# remove empty keywords
keywords = [k.split(';') for k in keywords if k]
if not keywords:
logging.error('no keywords for {}'.format(input_file))
return
return {
'title': title, 'headline': headline, 'abstract': body,
'keyword': keywords, 'file_name': file_name,
'date': date, 'categories': categories, 'url': url,
'author': author
}
if __name__ == '__main__':
import argparse
def arguments():
parser = argparse.ArgumentParser(description='Converts html files to jsonl using a filelist')
parser.add_argument(
'-f', '--filelist', type=argparse.FileType('r'),
help='Filelist file. If not given convert every found '
'file into `dataset.jsonl` without id')
args = parser.parse_args()
return args
args = arguments()
logging.basicConfig(level=logging.INFO)
logging.info('start converting...')
articles_processed = 0
output_file = '..' + os.sep + 'dataset.jsonl'
jptimes_dir = 'www.japantimes.co.jp/'
nytimes_dir = 'www.nytimes.co.jp/'
if args.filelist:
files = [l.strip().split('\t') for l in args.filelist]
args.filelist.close()
output_file = '..' + os.sep + args.filelist.name.replace('url.filelist', 'jsonl')
else:
files = itertools.chain(
recursive_iglob(rootdir=jptimes_dir, pattern='[!.]*'),
recursive_iglob(rootdir=nytimes_dir, pattern='*.html')
)
with codecs.open(output_file, 'w', 'utf-8') as f:
for input_file in tqdm(files):
if args.filelist:
id_, input_file = input_file
input_file = input_file.replace('http://', '')
if not os.path.isfile(input_file):
continue
# Loading soup
with open(input_file) as g:
content = g.read()
if 'nytimes' in input_file:
res = convert_nytimes(content)
elif 'japantimes' in input_file:
res = convert_jptimes(content)
else:
logging.error('Unrecognised file type : {}'.format(
input_file))
if not res:
continue
if args.filelist:
res['id'] = id_
f.write(json.dumps(res) + '\n')
articles_processed += 1
logging.info('Converted {} articles'.format(articles_processed))
if args.filelist and articles_processed != len(files):
logging.info(
'There are {} missing articles. Please (re)try downloading '
'articles using download script'.format(len(files) - articles_processed)
)
|
[
"fnmatch.filter",
"tqdm.tqdm",
"argparse.ArgumentParser",
"logging.basicConfig",
"codecs.open",
"os.path.join",
"os.walk",
"json.dumps",
"logging.info",
"os.path.isfile",
"bs4.BeautifulSoup",
"re.sub",
"argparse.FileType"
] |
[((383, 399), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (390, 399), False, 'import os\n'), ((743, 784), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (760, 784), False, 'import bs4\n'), ((1357, 1397), 're.sub', 're.sub', (['"""\\\\| The Japan Times"""', '""""""', 'title'], {}), "('\\\\| The Japan Times', '', title)\n", (1363, 1397), False, 'import re\n'), ((2928, 2969), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (2945, 2969), False, 'import bs4\n'), ((6149, 6188), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (6168, 6188), False, 'import logging\n'), ((6194, 6229), 'logging.info', 'logging.info', (['"""start converting..."""'], {}), "('start converting...')\n", (6206, 6229), False, 'import logging\n'), ((425, 459), 'fnmatch.filter', 'fnmatch.filter', (['filenames', 'pattern'], {}), '(filenames, pattern)\n', (439, 459), False, 'import fnmatch\n'), ((5765, 5854), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts html files to jsonl using a filelist"""'}), "(description=\n 'Converts html files to jsonl using a filelist')\n", (5788, 5854), False, 'import argparse\n'), ((6792, 6830), 'codecs.open', 'codecs.open', (['output_file', '"""w"""', '"""utf-8"""'], {}), "(output_file, 'w', 'utf-8')\n", (6803, 6830), False, 'import codecs\n'), ((6864, 6875), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (6868, 6875), False, 'from tqdm import tqdm\n'), ((1299, 1336), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', "title['content']"], {}), "('\\\\s+', ' ', title['content'])\n", (1305, 1336), False, 'import re\n'), ((1697, 1737), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', "headline['content']"], {}), "('\\\\s+', ' ', headline['content'])\n", (1703, 1737), False, 'import re\n'), ((2404, 2444), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', "keywords['content']"], {}), "('\\\\s+', ' ', keywords['content'])\n", (2410, 2444), False, 'import re\n'), ((3788, 3825), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', "title['content']"], {}), "('\\\\s+', ' ', title['content'])\n", (3794, 3825), False, 'import re\n'), ((4133, 4173), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', "headline['content']"], {}), "('\\\\s+', ' ', headline['content'])\n", (4139, 4173), False, 'import re\n'), ((5190, 5230), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', "keywords['content']"], {}), "('\\\\s+', ' ', keywords['content'])\n", (5196, 5230), False, 'import re\n'), ((479, 507), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (491, 507), False, 'import os\n'), ((5916, 5938), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (5933, 5938), False, 'import argparse\n'), ((7038, 7064), 'os.path.isfile', 'os.path.isfile', (['input_file'], {}), '(input_file)\n', (7052, 7064), False, 'import os\n'), ((7630, 7645), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (7640, 7645), False, 'import json\n')]
|
#!/usr/bin/env python3
# Copyright (C) 2019 <NAME> <<EMAIL>>
# Released under the MIT license (see COPYING.MIT for the terms)
import argparse
import subprocess
import sys
CONTAINER_MODEL = 'model'
CONTAINER_DEMO = 'demo'
IMAGE_MODEL = 'phytecorg/aidemo-customvision-model:0.4.1'
IMAGE_DEMO = 'phytecorg/aidemo-customvision-demo:0.5.0'
NETWORK = 'aikit'
def stop_containers():
process_ps = subprocess.run(['docker', 'ps', '--format={{.Names}}'],
check=True, stdout=subprocess.PIPE)
containers = process_ps.stdout.decode('utf-8').split('\n')
if CONTAINER_MODEL in containers:
subprocess.run(['docker', 'stop', CONTAINER_MODEL], check=True)
if CONTAINER_DEMO in containers:
subprocess.run(['docker', 'stop', CONTAINER_DEMO], check=True)
def list_networks():
process_list = subprocess.run(['docker', 'network', 'ls',
'--format={{.Name}}'], check=True, stdout=subprocess.PIPE)
return process_list.stdout.decode('utf-8').split('\n')
def create_network():
if NETWORK not in list_networks():
subprocess.run(['docker', 'network', 'create', NETWORK], check=True)
def remove_network():
if NETWORK in list_networks():
subprocess.run(['docker', 'network', 'rm', NETWORK], check=True)
def run_containers():
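    """Start the model container (serving on port 8877) and the Weston/Wayland demo container."""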
subprocess.run(['docker', 'run',
'--rm',
'--name', CONTAINER_MODEL,
'--network', NETWORK,
'-p', '8877:8877',
'-d', IMAGE_MODEL,
'--port', '8877', 'hands'], check=True)
subprocess.run(['docker', 'run',
'--rm',
'--privileged',
'--name', CONTAINER_DEMO,
'--network', NETWORK,
'--device', '/dev/video0',
'-e', 'QT_QPA_PLATFORM=wayland',
'-e', 'QT_WAYLAND_FORCE_DPI=192',
'-e', 'QT_WAYLAND_DISABLE_WINDOWDECORATION=1',
'-e', 'XDG_RUNTIME_DIR=/run/user/0',
'-v', '/run/user/0:/run/user/0',
'-d', IMAGE_DEMO, '/bin/bash', '-c',
'weston-start && sleep 1 && aidemo-customvision-demo -x'], check=True)
def start(args):
stop_containers()
create_network()
run_containers()
return 0
def stop(args):
stop_containers()
remove_network()
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convenience runner for '
'starting and stopping Docker images for the AI kit')
subparsers = parser.add_subparsers()
subparser_start = subparsers.add_parser('start')
subparser_start.set_defaults(function=start)
subparser_stop = subparsers.add_parser('stop')
subparser_stop.set_defaults(function=stop)
args = parser.parse_args()
sys.exit(args.function(args))
|
[
"subprocess.run",
"argparse.ArgumentParser"
] |
[((396, 492), 'subprocess.run', 'subprocess.run', (["['docker', 'ps', '--format={{.Names}}']"], {'check': '(True)', 'stdout': 'subprocess.PIPE'}), "(['docker', 'ps', '--format={{.Names}}'], check=True, stdout=\n subprocess.PIPE)\n", (410, 492), False, 'import subprocess\n'), ((822, 928), 'subprocess.run', 'subprocess.run', (["['docker', 'network', 'ls', '--format={{.Name}}']"], {'check': '(True)', 'stdout': 'subprocess.PIPE'}), "(['docker', 'network', 'ls', '--format={{.Name}}'], check=\n True, stdout=subprocess.PIPE)\n", (836, 928), False, 'import subprocess\n'), ((1288, 1463), 'subprocess.run', 'subprocess.run', (["['docker', 'run', '--rm', '--name', CONTAINER_MODEL, '--network', NETWORK,\n '-p', '8877:8877', '-d', IMAGE_MODEL, '--port', '8877', 'hands']"], {'check': '(True)'}), "(['docker', 'run', '--rm', '--name', CONTAINER_MODEL,\n '--network', NETWORK, '-p', '8877:8877', '-d', IMAGE_MODEL, '--port',\n '8877', 'hands'], check=True)\n", (1302, 1463), False, 'import subprocess\n'), ((1508, 1955), 'subprocess.run', 'subprocess.run', (["['docker', 'run', '--rm', '--privileged', '--name', CONTAINER_DEMO,\n '--network', NETWORK, '--device', '/dev/video0', '-e',\n 'QT_QPA_PLATFORM=wayland', '-e', 'QT_WAYLAND_FORCE_DPI=192', '-e',\n 'QT_WAYLAND_DISABLE_WINDOWDECORATION=1', '-e',\n 'XDG_RUNTIME_DIR=/run/user/0', '-v', '/run/user/0:/run/user/0', '-d',\n IMAGE_DEMO, '/bin/bash', '-c',\n 'weston-start && sleep 1 && aidemo-customvision-demo -x']"], {'check': '(True)'}), "(['docker', 'run', '--rm', '--privileged', '--name',\n CONTAINER_DEMO, '--network', NETWORK, '--device', '/dev/video0', '-e',\n 'QT_QPA_PLATFORM=wayland', '-e', 'QT_WAYLAND_FORCE_DPI=192', '-e',\n 'QT_WAYLAND_DISABLE_WINDOWDECORATION=1', '-e',\n 'XDG_RUNTIME_DIR=/run/user/0', '-v', '/run/user/0:/run/user/0', '-d',\n IMAGE_DEMO, '/bin/bash', '-c',\n 'weston-start && sleep 1 && aidemo-customvision-demo -x'], check=True)\n", (1522, 1955), False, 'import subprocess\n'), ((2237, 2359), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convenience runner for starting and stopping Docker images for the AI kit"""'}), "(description=\n 'Convenience runner for starting and stopping Docker images for the AI kit'\n )\n", (2260, 2359), False, 'import argparse\n'), ((609, 672), 'subprocess.run', 'subprocess.run', (["['docker', 'stop', CONTAINER_MODEL]"], {'check': '(True)'}), "(['docker', 'stop', CONTAINER_MODEL], check=True)\n", (623, 672), False, 'import subprocess\n'), ((718, 780), 'subprocess.run', 'subprocess.run', (["['docker', 'stop', CONTAINER_DEMO]"], {'check': '(True)'}), "(['docker', 'stop', CONTAINER_DEMO], check=True)\n", (732, 780), False, 'import subprocess\n'), ((1061, 1129), 'subprocess.run', 'subprocess.run', (["['docker', 'network', 'create', NETWORK]"], {'check': '(True)'}), "(['docker', 'network', 'create', NETWORK], check=True)\n", (1075, 1129), False, 'import subprocess\n'), ((1196, 1260), 'subprocess.run', 'subprocess.run', (["['docker', 'network', 'rm', NETWORK]"], {'check': '(True)'}), "(['docker', 'network', 'rm', NETWORK], check=True)\n", (1210, 1260), False, 'import subprocess\n')]
|
# Generated by Django 3.2.4 on 2021-06-13 15:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.TextField(default=1623599319.0517447)),
('updated_at', models.TextField(blank=True, null=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=64)),
('location', models.CharField(max_length=64)),
('memo', models.TextField()),
('best_menu', models.CharField(max_length=64)),
('additional_info', models.CharField(max_length=200)),
('stars', models.DecimalField(blank=True, decimal_places=1, max_digits=2, null=True)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='place', to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='place', to='place.category')),
('like_comments', models.ManyToManyField(blank=True, related_name='like_place', to=settings.AUTH_USER_MODEL)),
('tags', models.ManyToManyField(blank=True, related_name='place', to='place.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, default='default.png', null=True, upload_to='images/')),
('place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='photo', to='place.place')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.TextField(default=1623599319.0517447)),
('updated_at', models.TextField(blank=True, null=True)),
('is_deleted', models.BooleanField(default=False)),
('content', models.TextField()),
('commenter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='comment', to=settings.AUTH_USER_MODEL)),
('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='place.place')),
],
options={
'abstract': False,
},
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.DecimalField"
] |
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((437, 533), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (456, 533), False, 'from django.db import migrations, models\n'), ((557, 588), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (573, 588), False, 'from django.db import migrations, models\n'), ((717, 813), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (736, 813), False, 'from django.db import migrations, models\n'), ((837, 881), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)'}), '(max_length=64, unique=True)\n', (853, 881), False, 'from django.db import migrations, models\n'), ((1012, 1108), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1031, 1108), False, 'from django.db import migrations, models\n'), ((1138, 1182), 'django.db.models.TextField', 'models.TextField', ([], {'default': '(1623599319.0517447)'}), '(default=1623599319.0517447)\n', (1154, 1182), False, 'from django.db import migrations, models\n'), ((1216, 1255), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1232, 1255), False, 'from django.db import migrations, models\n'), ((1289, 1323), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1308, 1323), False, 'from django.db import migrations, models\n'), ((1351, 1382), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1367, 1382), False, 'from django.db import migrations, models\n'), ((1414, 1445), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1430, 1445), False, 'from django.db import migrations, models\n'), ((1473, 1491), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1489, 1491), False, 'from django.db import migrations, models\n'), ((1524, 1555), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1540, 1555), False, 'from django.db import migrations, models\n'), ((1594, 1626), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1610, 1626), False, 'from django.db import migrations, models\n'), ((1655, 1729), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(1)', 'max_digits': '(2)', 'null': '(True)'}), '(blank=True, decimal_places=1, max_digits=2, null=True)\n', (1674, 1729), False, 'from django.db import migrations, models\n'), ((1759, 1901), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 
'related_name': '"""place"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='place', to=settings.AUTH_USER_MODEL)\n", (1776, 1901), False, 'from django.db import migrations, models\n'), ((1928, 2038), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""place"""', 'to': '"""place.category"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='place', to='place.category')\n", (1945, 2038), False, 'from django.db import migrations, models\n'), ((2070, 2165), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""like_place"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='like_place', to=settings.\n AUTH_USER_MODEL)\n", (2092, 2165), False, 'from django.db import migrations, models\n'), ((2188, 2260), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""place"""', 'to': '"""place.Tag"""'}), "(blank=True, related_name='place', to='place.Tag')\n", (2210, 2260), False, 'from django.db import migrations, models\n'), ((2463, 2559), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2482, 2559), False, 'from django.db import migrations, models\n'), ((2584, 2673), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'default': '"""default.png"""', 'null': '(True)', 'upload_to': '"""images/"""'}), "(blank=True, default='default.png', null=True, upload_to=\n 'images/')\n", (2601, 2673), False, 'from django.db import migrations, models\n'), ((2697, 2828), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""photo"""', 'to': '"""place.place"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='photo', to='place.place')\n", (2714, 2828), False, 'from django.db import migrations, models\n'), ((2956, 3052), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2975, 3052), False, 'from django.db import migrations, models\n'), ((3082, 3126), 'django.db.models.TextField', 'models.TextField', ([], {'default': '(1623599319.0517447)'}), '(default=1623599319.0517447)\n', (3098, 3126), False, 'from django.db import migrations, models\n'), ((3160, 3199), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3176, 3199), False, 'from django.db import migrations, models\n'), ((3233, 3267), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3252, 3267), False, 'from django.db import migrations, models\n'), ((3298, 3316), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3314, 3316), False, 'from django.db import migrations, models\n'), ((3349, 3493), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': 
'"""comment"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='comment', to=settings.AUTH_USER_MODEL)\n", (3366, 3493), False, 'from django.db import migrations, models\n'), ((3517, 3626), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comment"""', 'to': '"""place.place"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comment', to='place.place')\n", (3534, 3626), False, 'from django.db import migrations, models\n')]
|
from __future__ import absolute_import
from __future__ import print_function
import os,time,cv2,sys,math
import tensorflow as tf
import numpy as np
import time, datetime
import argparse
import random
import os, sys
import subprocess
from utils import utils, helpers
from builders import fusion_model_builder
import datetime
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def radiance_writer(out_path, image):
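    """Write a float RGB image to a Radiance (.hdr) file using RGBE encoding."""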
with open(out_path, "wb") as f:
f.write(bytes("#?RADIANCE\n# Made with Python & Numpy\nFORMAT=32-bit_rle_rgbe\n\n",'UTF-8'))
f.write(bytes("-Y %d +X %d\n" %(image.shape[0], image.shape[1]),'UTF-8'))
brightest = np.max(image,axis=2)
mantissa = np.zeros_like(brightest)
exponent = np.zeros_like(brightest)
np.frexp(brightest, mantissa, exponent)
scaled_mantissa = mantissa * 255.0 / brightest
rgbe = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
rgbe[...,0:3] = np.around(image[...,0:3] * scaled_mantissa[...,None])
rgbe[...,3] = np.around(exponent + 128)
rgbe.flatten().tofile(f)
def compute_psnr(img1, img2):
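    """Peak signal-to-noise ratio (dB) between two images, assuming a peak value of 1.0."""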
mse = np.mean((img1-img2)**2)
if mse == 0:
return 100
    PIXEL_MAX = 1.0  # assumes inputs normalized so that the peak value is 1.0
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def log_tonemap(im):
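    # mu-law style log tonemapping with mu = 5000, applied to predictions and targets before the loss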
return tf.log(1+5000*im)/tf.log(1+5000.0)
def log_tonemap_output(im):
return np.log(1+5000*im)/np.log(1+5000.0)
parser = argparse.ArgumentParser()
parser.add_argument('--nTry', type=int, default=None, help='Current try number')
parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs to train for')
parser.add_argument('--id_str', type=str, default="", help='Unique ID string to identify current try')
parser.add_argument('--status_id', type=int, default=1, help='Status ID to write to status.txt. Can be 1, 2 or 3')
parser.add_argument('--epoch_start_i', type=int, default=0, help='Start counting epochs from this number')
parser.add_argument('--checkpoint_step', type=int, default=1, help='How often to save checkpoints (epochs)')
parser.add_argument('--validation_step', type=int, default=1, help='How often to perform validation (epochs)')
parser.add_argument('--image', type=str, default=None, help='The image you want to predict on. Only valid in "predict" mode.')
parser.add_argument('--continue_training', type=str2bool, default=False, help='Whether to continue training from a checkpoint')
parser.add_argument('--dataset', type=str, default="hdr_ddg_dataset_ulti_13thJuly", help='Dataset you are using.')
parser.add_argument('--crop_height', type=int, default=256, help='Height of cropped input image to network')
parser.add_argument('--crop_width', type=int, default=256, help='Width of cropped input image to network')
parser.add_argument('--batch_size', type=int, default=16, help='Number of images in each batch')
parser.add_argument('--num_val_images', type=int, default=100000, help='The number of images to used for validations')
parser.add_argument('--model', type=str, default="DRIB_4_four_conv", help='The model you are using. See model_builder.py for supported models')
parser.add_argument('--frontend', type=str, default="ResNet101", help='The frontend you are using. See frontend_builder.py for supported models')
parser.add_argument('--save_logs', type=str2bool, default=True, help='Whether to save training info to the corresponding logs txt file')
parser.add_argument('--log_interval', type=int, default=100, help='Log Interval')
parser.add_argument('--init_lr', type=float, default=0.0002, help='Initial learning rate')
parser.add_argument('--lr_decay', type=float, default=0.94, help='Multiplicative learning rate decay applied per epoch')
parser.add_argument('--loss', type=str, default='l2', help='Choose between l2 or l1 norm as a loss function')
parser.add_argument('--logdir', type=str, default='/workspace/logs', help='Directory where TensorBoard summaries are written')
parser.add_argument('--crop_pixels_height',type=int,default=10,help='Number of rows to crop from the top and bottom before computing validation PSNR')
args = parser.parse_args()
try_name = "Try%d_%s_%s"%(args.nTry,args.model,args.id_str)
if not os.path.isdir(try_name):
os.makedirs(try_name)
if args.save_logs:
if args.continue_training:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
status = open("status%d.txt"%(args.status_id),'a')
else:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'w')
status = open("status%d.txt"%(args.status_id),'w')
config = tf.ConfigProto()
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
if not os.path.exists(os.path.join(args.logdir,try_name,'train')):
os.makedirs(os.path.join(args.logdir,try_name,'train'),exist_ok=True)
if not os.path.exists(os.path.join(args.logdir,try_name,'test')):
os.makedirs(os.path.join(args.logdir,try_name,'test'),exist_ok=True)
train_writer = tf.summary.FileWriter('{}/{}/train'.format(args.logdir,try_name))
test_writer = tf.summary.FileWriter('{}/{}/test'.format(args.logdir,try_name))
train_loss_pl = tf.placeholder(tf.float32,shape=None)
train_loss_summary =tf.summary.scalar('train_loss',train_loss_pl)
test_loss_pl = tf.placeholder(tf.float32,shape=None)
test_loss_summary =tf.summary.scalar('test_loss',test_loss_pl)
test_psnr_pl = tf.placeholder(tf.float32,shape=None)
test_psnr_summary =tf.summary.scalar('val_psnr',test_psnr_pl)
le_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
me_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
he_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
gt_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
le_image_summ = tf.summary.image('le images',le_image_pl,max_outputs=args.batch_size)
me_image_summ = tf.summary.image('me images',me_image_pl,max_outputs=args.batch_size)
he_image_summ = tf.summary.image('he images',he_image_pl,max_outputs=args.batch_size)
gt_image_summ = tf.summary.image('gt images',gt_image_pl,max_outputs=args.batch_size)
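# Three network inputs (LE/ME/HE); each stacks the LDR image with its ldr_to_hdr()-mapped version along channels (3 + 3 = 6)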
input_exposure_stacks = [tf.placeholder(tf.float32,shape=[None,None,None,6]) for x in range(3)]
gt_exposure_stack = tf.placeholder(tf.float32,shape=[None,None,None,3])
lr = tf.placeholder("float", shape=[])
network, init_fn = fusion_model_builder.build_model(model_name=args.model, frontend=args.frontend, input_exposure_stack=input_exposure_stacks, crop_width=args.crop_width, crop_height=args.crop_height, is_training=True)
str_params = utils.count_params()
print(str_params)
if args.save_logs:
log_file.write(str_params + "\n")
if args.loss == 'l2':
loss = tf.losses.mean_squared_error(log_tonemap(gt_exposure_stack), log_tonemap(network))
elif args.loss == 'l1':
loss = tf.losses.absolute_difference(log_tonemap(gt_exposure_stack), log_tonemap(network))
opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=[var for var in tf.trainable_variables()])
saver=tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
# Load a previous checkpoint if desired
model_checkpoint_name = "%s/ckpts/latest_model_"%(try_name) + args.model + "_" + args.dataset + ".ckpt"
if args.continue_training:
print('Loading latest model checkpoint')
saver.restore(sess, model_checkpoint_name)
print('Loaded latest model checkpoint')
print("\n***** Begin training *****")
print("Try -->", args.nTry)
print("Dataset -->", args.dataset)
print("Model -->", args.model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Batch Size -->", args.batch_size)
print("Save Logs -->", args.save_logs)
avg_loss_per_epoch = []
avg_val_loss_per_epoch = []
avg_psnr_per_epoch = []
if args.save_logs:
log_file.write("\nDataset --> " + args.dataset)
log_file.write("\nModel --> " + args.model)
log_file.write("\nCrop Height -->" + str(args.crop_height))
log_file.write("\nCrop Width -->" + str(args.crop_width))
log_file.write("\nNum Epochs -->" + str(args.num_epochs))
log_file.write("\nBatch Size -->" + str(args.batch_size))
log_file.close()
status.write("\nDataset --> " + args.dataset)
status.write("\nModel --> " + args.model)
status.write("\nCrop Height -->" + str(args.crop_height))
status.write("\nCrop Width -->" + str(args.crop_width))
status.write("\nNum Epochs -->" + str(args.num_epochs))
status.write("\nBatch Size -->" + str(args.batch_size))
status.close()
# Load the data
print("Loading the data ...")
# ["he_at_me", "le_at_me", "me_at_he", "me_at_le", "he", "le", "me"]
exposure_keys_train = ["he", "le", "me"]
exposure_keys_train_labels = ["hdr"]
exposure_keys_val = ["he", "le", "me"]
exposure_keys_val_labels = ["hdr"]
multiexposure_train_names = utils.prepare_data_multiexposure("%s/train_256"%(args.dataset), exposure_keys_train)
multiexposure_train_label_names = utils.prepare_data_multiexposure("%s/train_labels_256"%(args.dataset), exposure_keys_train_labels)
multiexposure_val_names = utils.prepare_data_multiexposure("%s/val"%(args.dataset), exposure_keys_val)
multiexposure_val_label_names = utils.prepare_data_multiexposure("%s/val_labels"%(args.dataset), exposure_keys_val_labels)
train_input_names_he, train_input_names_le, train_input_names_me = multiexposure_train_names[0], multiexposure_train_names[1], multiexposure_train_names[2]
train_output_names_hdr = multiexposure_train_label_names[0]
val_input_names_he, val_input_names_le, val_input_names_me = multiexposure_val_names[0], multiexposure_val_names[1], multiexposure_val_names[2]
val_output_names_hdr = multiexposure_val_label_names[0]
# Which validation images do we want
val_indices = []
num_vals = min(args.num_val_images, len(val_input_names_he))
# Set random seed to make sure models are validated on the same validation images.
# So you can compare the results of different models more intuitively.
random.seed(16)
val_indices=random.sample(range(0,len(val_input_names_he)),num_vals)
learning_rates = []
lr_decay_step = 1
small_loss_bin = []
train_step =0
val_step = 0
# Do the training here
for epoch in range(args.epoch_start_i, args.num_epochs):
learning_rate = args.init_lr*(float)(args.lr_decay)**(float)(epoch)
learning_rates.append(learning_rate)
print("\nLearning rate for epoch # %04d = %f\n"%(epoch, learning_rate))
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nLearning rate for epoch " + str(epoch) + " = " + str(learning_rate) + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nLearning rate for epoch " + str(epoch) + " = " + str(learning_rate) + "\n")
status.close()
current_losses = []
current_losses_val = []
cnt=0
# Equivalent to shuffling
id_list = np.random.permutation(len(train_input_names_he))
num_iters = int(np.floor(len(id_list) / args.batch_size))
st = time.time()
epoch_st=time.time()
for i in range(num_iters):
input_image_le_batch = []
input_image_me_batch = []
input_image_he_batch = []
output_image_batch = []
# Collect a batch of images
for j in range(args.batch_size):
index = i*args.batch_size + j
id = id_list[index]
cv2_image_train_he = cv2.imread(train_input_names_he[id],-1)
input_image_he = np.float32(cv2.cvtColor(cv2_image_train_he,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_he_gamma,_,_ = utils.ldr_to_hdr_train(input_image_he,train_input_names_he[id])
input_image_he_c = np.concatenate([input_image_he,input_image_he_gamma],axis=2)
cv2_image_train_me = cv2.imread(train_input_names_me[id],-1)
input_image_me = np.float32(cv2.cvtColor(cv2_image_train_me,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_me_gamma,_,_ = utils.ldr_to_hdr_train(input_image_me,train_input_names_me[id])
input_image_me_c = np.concatenate([input_image_me,input_image_me_gamma],axis=2)
cv2_image_train_le = cv2.imread(train_input_names_le[id],-1)
input_image_le = np.float32(cv2.cvtColor(cv2_image_train_le,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_le_gamma,_,_ = utils.ldr_to_hdr_train(input_image_le,train_input_names_le[id])
input_image_le_c = np.concatenate([input_image_le,input_image_le_gamma],axis=2)
output_image = cv2.cvtColor(cv2.imread(train_output_names_hdr[id],-1),cv2.COLOR_BGR2RGB)
input_image_le_batch.append(np.expand_dims(input_image_le_c, axis=0))
input_image_me_batch.append(np.expand_dims(input_image_me_c, axis=0))
input_image_he_batch.append(np.expand_dims(input_image_he_c, axis=0))
output_image_batch.append(np.expand_dims(output_image, axis=0))
input_image_le_batch = np.squeeze(np.stack(input_image_le_batch, axis=1))
input_image_me_batch = np.squeeze(np.stack(input_image_me_batch, axis=1))
input_image_he_batch = np.squeeze(np.stack(input_image_he_batch, axis=1))
output_image_batch = np.squeeze(np.stack(output_image_batch, axis=1))
train_writer.add_summary(sess.run(le_image_summ,feed_dict={le_image_pl:input_image_le_batch[...,:3]}),i)
train_writer.add_summary(sess.run(me_image_summ,feed_dict={me_image_pl:input_image_me_batch[...,:3]}),i)
train_writer.add_summary(sess.run(he_image_summ,feed_dict={he_image_pl:input_image_he_batch[...,:3]}),i)
train_writer.add_summary(sess.run(gt_image_summ,feed_dict={gt_image_pl:output_image_batch[...,:3]}),i)
# Do the training here
_,current_loss=sess.run([opt,loss],feed_dict={input_exposure_stacks[0]:input_image_le_batch,input_exposure_stacks[1]:input_image_me_batch,input_exposure_stacks[2]:input_image_he_batch, gt_exposure_stack:output_image_batch, lr:learning_rate})
current_losses.append(current_loss)
small_loss_bin.append(current_loss)
cnt = cnt + args.batch_size
if cnt % args.log_interval == 0:
small_loss_bin_mean = np.mean(small_loss_bin)
string_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f "%(epoch, cnt, small_loss_bin_mean, time.time()-st)
small_loss_bin = []
train_str = utils.LOG(string_print)
print(train_str)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write(train_str + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write(train_str + "\n")
status.close()
st = time.time()
summ = sess.run(train_loss_summary, feed_dict={train_loss_pl:np.mean(current_losses)})
train_writer.add_summary(summ,train_step)
train_step +=1
mean_loss = np.mean(current_losses)
avg_loss_per_epoch.append(mean_loss)
# Create directories if needed
if not os.path.isdir("%s/%s/%04d"%(try_name, "ckpts", epoch)):
os.makedirs("%s/%s/%04d"%(try_name, "ckpts", epoch))
# Save latest checkpoint to same file name
print("Saving latest checkpoint")
saver.save(sess, model_checkpoint_name)
    if val_indices and epoch % args.checkpoint_step == 0:
print("Saving checkpoint for this epoch")
saver.save(sess, "%s/%s/%04d/model.ckpt"%(try_name, "ckpts", epoch))
print("Average Training loss = ", mean_loss)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nAverage Training loss = " + str(mean_loss))
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nAverage Training loss = " + str(mean_loss))
status.close()
if epoch % args.validation_step == 0:
print("Performing validation")
if not os.path.isdir("%s/%s/%04d"%(try_name, "val_Imgs", epoch)):
os.makedirs("%s/%s/%04d"%(try_name, "val_Imgs", epoch))
psnr_pre_list = []
psnr_post_list = []
val_idx_count = 0
pred_time_list = []
# Do the validation on a small set of validation images
for ind in val_indices:
print("\rRunning test image %d / %d"%(val_idx_count+1, len(val_input_names_he)))
input_images = []
cv2_img_he = cv2.imread(val_input_names_he[ind],-1)
h,w = cv2_img_he.shape[:2]
input_image_he = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_he,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_he_gamma,_,_ = utils.ldr_to_hdr_test(input_image_he,val_input_names_he[ind])
input_image_he_c = np.concatenate([input_image_he,input_image_he_gamma],axis=3)
cv2_img_me = cv2.imread(val_input_names_me[ind],-1)
h,w = cv2_img_me.shape[:2]
input_image_me = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_me,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_me_gamma,_,_ = utils.ldr_to_hdr_test(input_image_me,val_input_names_me[ind])
input_image_me_c = np.concatenate([input_image_me,input_image_me_gamma],axis=3)
cv2_img_le = cv2.imread(val_input_names_le[ind],-1)
h,w = cv2_img_le.shape[:2]
input_image_le = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_le,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_le_gamma,_,_ = utils.ldr_to_hdr_test(input_image_le,val_input_names_le[ind])
input_image_le_c = np.concatenate([input_image_le,input_image_le_gamma],axis=3)
cv2_img_hdr = cv2.imread(val_output_names_hdr[ind],-1)
h,w = cv2_img_hdr.shape[:2]
gt_hdr = cv2.cvtColor(cv2_img_hdr,cv2.COLOR_BGR2RGB)
gt_hdr = np.expand_dims(np.float32(gt_hdr), axis=0)
pred_st = time.time()
output_image_pred, curr_val_loss = sess.run([network,loss],feed_dict={input_exposure_stacks[0]:input_image_le_c,input_exposure_stacks[1]:input_image_me_c,input_exposure_stacks[2]:input_image_he_c,gt_exposure_stack:gt_hdr})
pred_et = time.time()
pred_time_list.append(pred_et-pred_st)
output_image = np.squeeze(output_image_pred)
gt_hdr = np.squeeze(gt_hdr)
h,w = output_image.shape[:2]
output_image_cropped = output_image[args.crop_pixels_height:h-args.crop_pixels_height,:,:]
gt_hdr_cropped = gt_hdr[args.crop_pixels_height:h-args.crop_pixels_height,:,:]
current_pre_psnr = compute_psnr(output_image_cropped, gt_hdr_cropped)
current_post_psnr = compute_psnr(log_tonemap_output(output_image_cropped), log_tonemap_output(gt_hdr_cropped))
current_losses_val.append(curr_val_loss)
psnr_pre_list.append(current_pre_psnr)
psnr_post_list.append(current_post_psnr)
file_name = utils.filepath_to_name(val_input_names_he[ind])
radiance_writer("%s/%s/%04d/%s_pred.hdr"%(try_name, "val_Imgs", epoch, file_name),output_image)
radiance_writer("%s/%s/%04d/%s_gt.hdr"%(try_name, "val_Imgs", epoch, file_name),gt_hdr)
val_idx_count = val_idx_count+1
mean_val_loss = np.mean(current_losses_val)
merge_summ = tf.summary.merge([test_loss_summary,test_psnr_summary])
merge_summ = sess.run(merge_summ, feed_dict={test_loss_pl:mean_val_loss,test_psnr_pl:np.mean(psnr_post_list)})
test_writer.add_summary(merge_summ,val_step)
val_step+=1
mean_pre_psnr = np.mean(psnr_pre_list)
mean_post_psnr = np.mean(psnr_post_list)
mean_proc_time = np.mean(pred_time_list)
print('val psnr pre list {}\n'.format(psnr_pre_list))
print('val psnr post list {}\n'.format(psnr_post_list))
print("Average Validation loss = %f"%(mean_val_loss))
print("Average PRE-PSNR = %f"%(mean_pre_psnr))
print("Average POST -PSNR = %f"%(mean_post_psnr))
print('Average processing time = %f'%(mean_proc_time))
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nAverage Validation loss = " + str(mean_val_loss)+"\n")
log_file.write("Average PRE-PSNR = %f\n"%(mean_pre_psnr))
log_file.write("Average POST-PSNR = %f\n"%(mean_post_psnr))
            log_file.write('Average processing time = %f\n'%(mean_proc_time))
            log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nAverage Validation loss = " + str(mean_val_loss)+"\n")
status.write("Average PRE-PSNR = %f\n"%(mean_pre_psnr))
status.write("Average POST -PSNR = %f\n"%(mean_post_psnr))
status.write('Average processing time = %f\n'%(mean_proc_time))
status.close()
epoch_time=time.time()-epoch_st
remain_time=epoch_time*(args.num_epochs-1-epoch)
m, s = divmod(remain_time, 60)
h, m = divmod(m, 60)
if s!=0:
train_time="Remaining training time = %d hours %d minutes %d seconds\n"%(h,m,s)
else:
train_time="Remaining training time : Training completed.\n"
str_time = utils.LOG(train_time)
print(str_time)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write(str_time + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write(str_time + "\n")
status.close()
sess.close()
|
[
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"tensorflow.ConfigProto",
"numpy.around",
"utils.utils.prepare_data_multiexposure",
"numpy.mean",
"tensorflow.summary.merge",
"utils.utils.count_params",
"os.path.join",
"argparse.ArgumentTypeError",
"numpy.zeros_like",
"cv2.cvtColor",
"tensorflow.placeholder",
"numpy.max",
"utils.utils.ldr_to_hdr_train",
"random.seed",
"utils.utils.ldr_to_hdr_test",
"numpy.frexp",
"numpy.stack",
"tensorflow.summary.image",
"tensorflow.summary.scalar",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"math.sqrt",
"tensorflow.Session",
"utils.utils.LOG",
"tensorflow.log",
"numpy.squeeze",
"numpy.concatenate",
"os.makedirs",
"numpy.log",
"os.path.isdir",
"numpy.float32",
"numpy.zeros",
"numpy.expand_dims",
"utils.utils.filepath_to_name",
"time.time",
"cv2.imread",
"builders.fusion_model_builder.build_model"
] |
[((1599, 1624), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1622, 1624), False, 'import argparse\n'), ((4663, 4679), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4677, 4679), True, 'import tensorflow as tf\n'), ((4770, 4795), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4780, 4795), True, 'import tensorflow as tf\n'), ((5269, 5307), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (5283, 5307), True, 'import tensorflow as tf\n'), ((5328, 5374), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train_loss"""', 'train_loss_pl'], {}), "('train_loss', train_loss_pl)\n", (5345, 5374), True, 'import tensorflow as tf\n'), ((5392, 5430), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (5406, 5430), True, 'import tensorflow as tf\n'), ((5450, 5494), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test_loss"""', 'test_loss_pl'], {}), "('test_loss', test_loss_pl)\n", (5467, 5494), True, 'import tensorflow as tf\n'), ((5512, 5550), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (5526, 5550), True, 'import tensorflow as tf\n'), ((5570, 5613), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""val_psnr"""', 'test_psnr_pl'], {}), "('val_psnr', test_psnr_pl)\n", (5587, 5613), True, 'import tensorflow as tf\n'), ((5632, 5726), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5646, 5726), True, 'import tensorflow as tf\n'), ((5733, 5827), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5747, 5827), True, 'import tensorflow as tf\n'), ((5834, 5928), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5848, 5928), True, 'import tensorflow as tf\n'), ((5935, 6029), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5949, 6029), True, 'import tensorflow as tf\n'), ((6042, 6113), 'tensorflow.summary.image', 'tf.summary.image', (['"""le images"""', 'le_image_pl'], {'max_outputs': 'args.batch_size'}), "('le images', le_image_pl, max_outputs=args.batch_size)\n", (6058, 6113), True, 'import tensorflow as tf\n'), ((6129, 6200), 'tensorflow.summary.image', 'tf.summary.image', (['"""me images"""', 'me_image_pl'], {'max_outputs': 'args.batch_size'}), "('me images', me_image_pl, max_outputs=args.batch_size)\n", (6145, 6200), True, 'import tensorflow as tf\n'), ((6216, 6287), 'tensorflow.summary.image', 'tf.summary.image', (['"""he images"""', 'he_image_pl'], {'max_outputs': 'args.batch_size'}), "('he images', he_image_pl, max_outputs=args.batch_size)\n", (6232, 6287), True, 'import tensorflow as tf\n'), ((6303, 6374), 'tensorflow.summary.image', 'tf.summary.image', (['"""gt images"""', 'gt_image_pl'], {'max_outputs': 'args.batch_size'}), "('gt images', gt_image_pl, 
max_outputs=args.batch_size)\n", (6319, 6374), True, 'import tensorflow as tf\n'), ((6493, 6548), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, None, 3]'}), '(tf.float32, shape=[None, None, None, 3])\n', (6507, 6548), True, 'import tensorflow as tf\n'), ((6555, 6588), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'shape': '[]'}), "('float', shape=[])\n", (6569, 6588), True, 'import tensorflow as tf\n'), ((6609, 6818), 'builders.fusion_model_builder.build_model', 'fusion_model_builder.build_model', ([], {'model_name': 'args.model', 'frontend': 'args.frontend', 'input_exposure_stack': 'input_exposure_stacks', 'crop_width': 'args.crop_width', 'crop_height': 'args.crop_height', 'is_training': '(True)'}), '(model_name=args.model, frontend=args.\n frontend, input_exposure_stack=input_exposure_stacks, crop_width=args.\n crop_width, crop_height=args.crop_height, is_training=True)\n', (6641, 6818), False, 'from builders import fusion_model_builder\n'), ((6829, 6849), 'utils.utils.count_params', 'utils.count_params', ([], {}), '()\n', (6847, 6849), False, 'from utils import utils, helpers\n'), ((7288, 7320), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1000)'}), '(max_to_keep=1000)\n', (7302, 7320), True, 'import tensorflow as tf\n'), ((9176, 9264), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/train_256' % args.dataset)", 'exposure_keys_train'], {}), "('%s/train_256' % args.dataset,\n exposure_keys_train)\n", (9208, 9264), False, 'from utils import utils, helpers\n'), ((9296, 9398), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/train_labels_256' % args.dataset)", 'exposure_keys_train_labels'], {}), "('%s/train_labels_256' % args.dataset,\n exposure_keys_train_labels)\n", (9328, 9398), False, 'from utils import utils, helpers\n'), ((9422, 9498), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/val' % args.dataset)", 'exposure_keys_val'], {}), "('%s/val' % args.dataset, exposure_keys_val)\n", (9454, 9498), False, 'from utils import utils, helpers\n'), ((9532, 9626), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/val_labels' % args.dataset)", 'exposure_keys_val_labels'], {}), "('%s/val_labels' % args.dataset,\n exposure_keys_val_labels)\n", (9564, 9626), False, 'from utils import utils, helpers\n'), ((10326, 10341), 'random.seed', 'random.seed', (['(16)'], {}), '(16)\n', (10337, 10341), False, 'import random\n'), ((1286, 1313), 'numpy.mean', 'np.mean', (['((img1 - img2) ** 2)'], {}), '((img1 - img2) ** 2)\n', (1293, 1313), True, 'import numpy as np\n'), ((4300, 4323), 'os.path.isdir', 'os.path.isdir', (['try_name'], {}), '(try_name)\n', (4313, 4323), False, 'import os, sys\n'), ((4327, 4348), 'os.makedirs', 'os.makedirs', (['try_name'], {}), '(try_name)\n', (4338, 4348), False, 'import os, sys\n'), ((6401, 6456), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, None, 6]'}), '(tf.float32, shape=[None, None, None, 6])\n', (6415, 6456), True, 'import tensorflow as tf\n'), ((7331, 7364), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7362, 7364), True, 'import tensorflow as tf\n'), ((11414, 11425), 'time.time', 'time.time', ([], {}), '()\n', (11423, 11425), False, 'import time, datetime\n'), ((11437, 11448), 'time.time', 'time.time', ([], {}), '()\n', (11446, 11448), False, 'import time, 
datetime\n'), ((15056, 15079), 'numpy.mean', 'np.mean', (['current_losses'], {}), '(current_losses)\n', (15063, 15079), True, 'import numpy as np\n'), ((20866, 20887), 'utils.utils.LOG', 'utils.LOG', (['train_time'], {}), '(train_time)\n', (20875, 20887), False, 'from utils import utils, helpers\n'), ((830, 851), 'numpy.max', 'np.max', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (836, 851), True, 'import numpy as np\n'), ((867, 891), 'numpy.zeros_like', 'np.zeros_like', (['brightest'], {}), '(brightest)\n', (880, 891), True, 'import numpy as np\n'), ((906, 930), 'numpy.zeros_like', 'np.zeros_like', (['brightest'], {}), '(brightest)\n', (919, 930), True, 'import numpy as np\n'), ((937, 976), 'numpy.frexp', 'np.frexp', (['brightest', 'mantissa', 'exponent'], {}), '(brightest, mantissa, exponent)\n', (945, 976), True, 'import numpy as np\n'), ((1037, 1098), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1], 4)'], {'dtype': 'np.uint8'}), '((image.shape[0], image.shape[1], 4), dtype=np.uint8)\n', (1045, 1098), True, 'import numpy as np\n'), ((1118, 1173), 'numpy.around', 'np.around', (['(image[..., 0:3] * scaled_mantissa[..., None])'], {}), '(image[..., 0:3] * scaled_mantissa[..., None])\n', (1127, 1173), True, 'import numpy as np\n'), ((1189, 1214), 'numpy.around', 'np.around', (['(exponent + 128)'], {}), '(exponent + 128)\n', (1198, 1214), True, 'import numpy as np\n'), ((1469, 1490), 'tensorflow.log', 'tf.log', (['(1 + 5000 * im)'], {}), '(1 + 5000 * im)\n', (1475, 1490), True, 'import tensorflow as tf\n'), ((1487, 1505), 'tensorflow.log', 'tf.log', (['(1 + 5000.0)'], {}), '(1 + 5000.0)\n', (1493, 1505), True, 'import tensorflow as tf\n'), ((1548, 1569), 'numpy.log', 'np.log', (['(1 + 5000 * im)'], {}), '(1 + 5000 * im)\n', (1554, 1569), True, 'import numpy as np\n'), ((1566, 1584), 'numpy.log', 'np.log', (['(1 + 5000.0)'], {}), '(1 + 5000.0)\n', (1572, 1584), True, 'import numpy as np\n'), ((4821, 4865), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""train"""'], {}), "(args.logdir, try_name, 'train')\n", (4833, 4865), False, 'import os, sys\n'), ((4883, 4927), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""train"""'], {}), "(args.logdir, try_name, 'train')\n", (4895, 4927), False, 'import os, sys\n'), ((4966, 5009), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""test"""'], {}), "(args.logdir, try_name, 'test')\n", (4978, 5009), False, 'import os, sys\n'), ((5027, 5070), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""test"""'], {}), "(args.logdir, try_name, 'test')\n", (5039, 5070), False, 'import os, sys\n'), ((7171, 7211), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (7193, 7211), True, 'import tensorflow as tf\n'), ((15171, 15227), 'os.path.isdir', 'os.path.isdir', (["('%s/%s/%04d' % (try_name, 'ckpts', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'ckpts', epoch))\n", (15184, 15227), False, 'import os, sys\n'), ((15230, 15284), 'os.makedirs', 'os.makedirs', (["('%s/%s/%04d' % (try_name, 'ckpts', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'ckpts', epoch))\n", (15241, 15284), False, 'import os, sys\n'), ((19369, 19391), 'numpy.mean', 'np.mean', (['psnr_pre_list'], {}), '(psnr_pre_list)\n', (19376, 19391), True, 'import numpy as np\n'), ((19412, 19435), 'numpy.mean', 'np.mean', (['psnr_post_list'], {}), '(psnr_post_list)\n', (19419, 19435), True, 'import numpy as np\n'), ((19456, 19479), 'numpy.mean', 'np.mean', (['pred_time_list'], 
{}), '(pred_time_list)\n', (19463, 19479), True, 'import numpy as np\n'), ((20559, 20570), 'time.time', 'time.time', ([], {}), '()\n', (20568, 20570), False, 'import time, datetime\n'), ((513, 566), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (539, 566), False, 'import argparse\n'), ((11755, 11795), 'cv2.imread', 'cv2.imread', (['train_input_names_he[id]', '(-1)'], {}), '(train_input_names_he[id], -1)\n', (11765, 11795), False, 'import os, time, cv2, sys, math\n'), ((11920, 11984), 'utils.utils.ldr_to_hdr_train', 'utils.ldr_to_hdr_train', (['input_image_he', 'train_input_names_he[id]'], {}), '(input_image_he, train_input_names_he[id])\n', (11942, 11984), False, 'from utils import utils, helpers\n'), ((12007, 12069), 'numpy.concatenate', 'np.concatenate', (['[input_image_he, input_image_he_gamma]'], {'axis': '(2)'}), '([input_image_he, input_image_he_gamma], axis=2)\n', (12021, 12069), True, 'import numpy as np\n'), ((12097, 12137), 'cv2.imread', 'cv2.imread', (['train_input_names_me[id]', '(-1)'], {}), '(train_input_names_me[id], -1)\n', (12107, 12137), False, 'import os, time, cv2, sys, math\n'), ((12262, 12326), 'utils.utils.ldr_to_hdr_train', 'utils.ldr_to_hdr_train', (['input_image_me', 'train_input_names_me[id]'], {}), '(input_image_me, train_input_names_me[id])\n', (12284, 12326), False, 'from utils import utils, helpers\n'), ((12349, 12411), 'numpy.concatenate', 'np.concatenate', (['[input_image_me, input_image_me_gamma]'], {'axis': '(2)'}), '([input_image_me, input_image_me_gamma], axis=2)\n', (12363, 12411), True, 'import numpy as np\n'), ((12439, 12479), 'cv2.imread', 'cv2.imread', (['train_input_names_le[id]', '(-1)'], {}), '(train_input_names_le[id], -1)\n', (12449, 12479), False, 'import os, time, cv2, sys, math\n'), ((12604, 12668), 'utils.utils.ldr_to_hdr_train', 'utils.ldr_to_hdr_train', (['input_image_le', 'train_input_names_le[id]'], {}), '(input_image_le, train_input_names_le[id])\n', (12626, 12668), False, 'from utils import utils, helpers\n'), ((12691, 12753), 'numpy.concatenate', 'np.concatenate', (['[input_image_le, input_image_le_gamma]'], {'axis': '(2)'}), '([input_image_le, input_image_le_gamma], axis=2)\n', (12705, 12753), True, 'import numpy as np\n'), ((13190, 13228), 'numpy.stack', 'np.stack', (['input_image_le_batch'], {'axis': '(1)'}), '(input_image_le_batch, axis=1)\n', (13198, 13228), True, 'import numpy as np\n'), ((13267, 13305), 'numpy.stack', 'np.stack', (['input_image_me_batch'], {'axis': '(1)'}), '(input_image_me_batch, axis=1)\n', (13275, 13305), True, 'import numpy as np\n'), ((13344, 13382), 'numpy.stack', 'np.stack', (['input_image_he_batch'], {'axis': '(1)'}), '(input_image_he_batch, axis=1)\n', (13352, 13382), True, 'import numpy as np\n'), ((13421, 13457), 'numpy.stack', 'np.stack', (['output_image_batch'], {'axis': '(1)'}), '(output_image_batch, axis=1)\n', (13429, 13457), True, 'import numpy as np\n'), ((14362, 14385), 'numpy.mean', 'np.mean', (['small_loss_bin'], {}), '(small_loss_bin)\n', (14369, 14385), True, 'import numpy as np\n'), ((14552, 14575), 'utils.utils.LOG', 'utils.LOG', (['string_print'], {}), '(string_print)\n', (14561, 14575), False, 'from utils import utils, helpers\n'), ((14870, 14881), 'time.time', 'time.time', ([], {}), '()\n', (14879, 14881), False, 'import time, datetime\n'), ((16044, 16103), 'os.path.isdir', 'os.path.isdir', (["('%s/%s/%04d' % (try_name, 'val_Imgs', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'val_Imgs', 
epoch))\n", (16057, 16103), False, 'import os, sys\n'), ((16107, 16164), 'os.makedirs', 'os.makedirs', (["('%s/%s/%04d' % (try_name, 'val_Imgs', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'val_Imgs', epoch))\n", (16118, 16164), False, 'import os, sys\n'), ((16469, 16508), 'cv2.imread', 'cv2.imread', (['val_input_names_he[ind]', '(-1)'], {}), '(val_input_names_he[ind], -1)\n', (16479, 16508), False, 'import os, time, cv2, sys, math\n'), ((16680, 16742), 'utils.utils.ldr_to_hdr_test', 'utils.ldr_to_hdr_test', (['input_image_he', 'val_input_names_he[ind]'], {}), '(input_image_he, val_input_names_he[ind])\n', (16701, 16742), False, 'from utils import utils, helpers\n'), ((16765, 16827), 'numpy.concatenate', 'np.concatenate', (['[input_image_he, input_image_he_gamma]'], {'axis': '(3)'}), '([input_image_he, input_image_he_gamma], axis=3)\n', (16779, 16827), True, 'import numpy as np\n'), ((16847, 16886), 'cv2.imread', 'cv2.imread', (['val_input_names_me[ind]', '(-1)'], {}), '(val_input_names_me[ind], -1)\n', (16857, 16886), False, 'import os, time, cv2, sys, math\n'), ((17055, 17117), 'utils.utils.ldr_to_hdr_test', 'utils.ldr_to_hdr_test', (['input_image_me', 'val_input_names_me[ind]'], {}), '(input_image_me, val_input_names_me[ind])\n', (17076, 17117), False, 'from utils import utils, helpers\n'), ((17140, 17202), 'numpy.concatenate', 'np.concatenate', (['[input_image_me, input_image_me_gamma]'], {'axis': '(3)'}), '([input_image_me, input_image_me_gamma], axis=3)\n', (17154, 17202), True, 'import numpy as np\n'), ((17225, 17264), 'cv2.imread', 'cv2.imread', (['val_input_names_le[ind]', '(-1)'], {}), '(val_input_names_le[ind], -1)\n', (17235, 17264), False, 'import os, time, cv2, sys, math\n'), ((17433, 17495), 'utils.utils.ldr_to_hdr_test', 'utils.ldr_to_hdr_test', (['input_image_le', 'val_input_names_le[ind]'], {}), '(input_image_le, val_input_names_le[ind])\n', (17454, 17495), False, 'from utils import utils, helpers\n'), ((17518, 17580), 'numpy.concatenate', 'np.concatenate', (['[input_image_le, input_image_le_gamma]'], {'axis': '(3)'}), '([input_image_le, input_image_le_gamma], axis=3)\n', (17532, 17580), True, 'import numpy as np\n'), ((17604, 17645), 'cv2.imread', 'cv2.imread', (['val_output_names_hdr[ind]', '(-1)'], {}), '(val_output_names_hdr[ind], -1)\n', (17614, 17645), False, 'import os, time, cv2, sys, math\n'), ((17690, 17734), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_hdr', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_hdr, cv2.COLOR_BGR2RGB)\n', (17702, 17734), False, 'import os, time, cv2, sys, math\n'), ((17806, 17817), 'time.time', 'time.time', ([], {}), '()\n', (17815, 17817), False, 'import time, datetime\n'), ((18059, 18070), 'time.time', 'time.time', ([], {}), '()\n', (18068, 18070), False, 'import time, datetime\n'), ((18137, 18166), 'numpy.squeeze', 'np.squeeze', (['output_image_pred'], {}), '(output_image_pred)\n', (18147, 18166), True, 'import numpy as np\n'), ((18180, 18198), 'numpy.squeeze', 'np.squeeze', (['gt_hdr'], {}), '(gt_hdr)\n', (18190, 18198), True, 'import numpy as np\n'), ((18761, 18808), 'utils.utils.filepath_to_name', 'utils.filepath_to_name', (['val_input_names_he[ind]'], {}), '(val_input_names_he[ind])\n', (18783, 18808), False, 'from utils import utils, helpers\n'), ((19059, 19086), 'numpy.mean', 'np.mean', (['current_losses_val'], {}), '(current_losses_val)\n', (19066, 19086), True, 'import numpy as np\n'), ((19106, 19162), 'tensorflow.summary.merge', 'tf.summary.merge', (['[test_loss_summary, test_psnr_summary]'], {}), '([test_loss_summary, 
test_psnr_summary])\n', (19122, 19162), True, 'import tensorflow as tf\n'), ((1418, 1432), 'math.sqrt', 'math.sqrt', (['mse'], {}), '(mse)\n', (1427, 1432), False, 'import os, time, cv2, sys, math\n'), ((7252, 7276), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (7274, 7276), True, 'import tensorflow as tf\n'), ((12788, 12830), 'cv2.imread', 'cv2.imread', (['train_output_names_hdr[id]', '(-1)'], {}), '(train_output_names_hdr[id], -1)\n', (12798, 12830), False, 'import os, time, cv2, sys, math\n'), ((12885, 12925), 'numpy.expand_dims', 'np.expand_dims', (['input_image_le_c'], {'axis': '(0)'}), '(input_image_le_c, axis=0)\n', (12899, 12925), True, 'import numpy as np\n'), ((12959, 12999), 'numpy.expand_dims', 'np.expand_dims', (['input_image_me_c'], {'axis': '(0)'}), '(input_image_me_c, axis=0)\n', (12973, 12999), True, 'import numpy as np\n'), ((13033, 13073), 'numpy.expand_dims', 'np.expand_dims', (['input_image_he_c'], {'axis': '(0)'}), '(input_image_he_c, axis=0)\n', (13047, 13073), True, 'import numpy as np\n'), ((13105, 13141), 'numpy.expand_dims', 'np.expand_dims', (['output_image'], {'axis': '(0)'}), '(output_image, axis=0)\n', (13119, 13141), True, 'import numpy as np\n'), ((17762, 17780), 'numpy.float32', 'np.float32', (['gt_hdr'], {}), '(gt_hdr)\n', (17772, 17780), True, 'import numpy as np\n'), ((11827, 11878), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_image_train_he', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_image_train_he, cv2.COLOR_BGR2RGB)\n', (11839, 11878), False, 'import os, time, cv2, sys, math\n'), ((12169, 12220), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_image_train_me', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_image_train_me, cv2.COLOR_BGR2RGB)\n', (12181, 12220), False, 'import os, time, cv2, sys, math\n'), ((12511, 12562), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_image_train_le', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_image_train_le, cv2.COLOR_BGR2RGB)\n', (12523, 12562), False, 'import os, time, cv2, sys, math\n'), ((14496, 14507), 'time.time', 'time.time', ([], {}), '()\n', (14505, 14507), False, 'import time, datetime\n'), ((14949, 14972), 'numpy.mean', 'np.mean', (['current_losses'], {}), '(current_losses)\n', (14956, 14972), True, 'import numpy as np\n'), ((16589, 16632), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_he', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_he, cv2.COLOR_BGR2RGB)\n', (16601, 16632), False, 'import os, time, cv2, sys, math\n'), ((16964, 17007), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_me', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_me, cv2.COLOR_BGR2RGB)\n', (16976, 17007), False, 'import os, time, cv2, sys, math\n'), ((17342, 17385), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_le', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_le, cv2.COLOR_BGR2RGB)\n', (17354, 17385), False, 'import os, time, cv2, sys, math\n'), ((19251, 19274), 'numpy.mean', 'np.mean', (['psnr_post_list'], {}), '(psnr_post_list)\n', (19258, 19274), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 14:43:38 2020
@author: dukel
"""
#%%
import numpy as np
import pandas as pd
import socket
ls = ['192.168.3.11',
'172.16.58.3',
'192.168.127.12',
'172.16.17.32',
'172.16.58.3',
'192.168.127.12',
'172.16.31.10',
'192.168.3.11',
'172.16.58.3',
'172.16.58.3',
'172.16.17.32',
'172.16.58.3',
'172.16.17.32',
'192.168.127.12',
'172.16.17.32',
'192.168.3.11',
'172.16.31.10',
'172.16.58.3',
'172.16.31.10',
'172.16.31.10']
def ip2host(ls_input):
"""
    Parameters : list of IP addresses
----------
Returns : list of tuples, n=2, consisting of the ip and hostname
"""
ls_output = []
for ip in ls_input:
try:
x = socket.gethostbyaddr(ip)
ls_output.append((ip, x[0]))
except Exception as e:
print('Error: ', e)
ls_output.append((ip, None))
return ls_output
def host2ip(ls_input):
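    """
    Parameters : list of hostnames
    ----------
    Returns : list of tuples, n=2, consisting of the hostname and ip
    """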
ls_output = []
for hostname in ls_input:
try:
x = socket.gethostbyname(hostname)
            # gethostbyname returns the IP address as a string, so append it directly
            ls_output.append((hostname, x))
except Exception as e:
print('Error: ', e)
ls_output.append((hostname, None))
return ls_output
ls2 = ip2host(ls)
#%%
# clean
df = pd.DataFrame(data=ls2, columns=['ip', 'hostname'])
# regex=False so the leading '.' in the suffix is matched literally
df['hostname'] = df['hostname'].str.replace('.ucdmc.ucdavis.edu', '', regex=False).str.upper()
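#%%
# Minimal follow-up sketch (assumes the cells above were run): resolve the cleaned
# hostnames back to IP addresses with host2ip. Resolution may fail for the shortened,
# uppercased names, in which case host2ip records None for that entry.
roundtrip = host2ip(df['hostname'].dropna().unique().tolist())
print(roundtrip[:5])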
|
[
"pandas.DataFrame",
"socket.gethostbyaddr",
"socket.gethostbyname"
] |
[((1378, 1428), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ls2', 'columns': "['ip', 'hostname']"}), "(data=ls2, columns=['ip', 'hostname'])\n", (1390, 1428), True, 'import pandas as pd\n'), ((824, 848), 'socket.gethostbyaddr', 'socket.gethostbyaddr', (['ip'], {}), '(ip)\n', (844, 848), False, 'import socket\n'), ((1118, 1148), 'socket.gethostbyname', 'socket.gethostbyname', (['hostname'], {}), '(hostname)\n', (1138, 1148), False, 'import socket\n')]
|
"""
The command worker for the Signed Tag, designed to process incoming signed messages from UTIM.
It checks the input data structure (it should contain two TLV elements: the message and the
signature) and verifies the elements' lengths. If everything is correct it calls uHost's decrypt()
method, passing it the dev-id of the UTIM which sent the message, the message itself and the
signature.
If the signature verification was successful, the Worker builds a package addressed to_Client
and puts it into the outbound queue.
If the input data structure is corrupted or the signature verification failed, the Worker
reports the issue (in debug mode) and discards the message.
"""
import logging
from ..utilities.tag import Tag
from ..utilities.length import Length
class CommandWorkerSignedException(Exception):
"""
    Generic exception raised by the CommandWorkerSigned class
"""
pass
class CommandWorkerSignedMethodException(Exception):
"""
    Raised when the uHost instance lacks a method required by CommandWorkerSigned
"""
pass
class CommandWorkerSigned(object):
"""
Signed command worker class
"""
def __init__(self, uhost):
"""
Initialization
"""
# Check all necessary methods
methods = [
'decrypt'
]
for method in methods:
if not (hasattr(uhost, method) and callable(getattr(uhost, method))):
raise CommandWorkerSignedMethodException
self.__uhost = uhost # Uhost instance
def process(self, devid, data, outbound_queue):
"""
Run process
"""
logging.debug("Command Worker Signed. Trying to verify signature of %s : %s",
[hex(x) for x in data], [hex(x) for x in data])
tag1 = data[0:1]
length_bytes1 = data[1:3]
length1 = int.from_bytes(length_bytes1, byteorder='big', signed=False)
value1 = data[3:3 + length1]
tag2 = data[3 + length1:4 + length1]
length_bytes2 = data[4 + length1: 6 + length1]
length2 = int.from_bytes(length_bytes2, byteorder='big', signed=False)
value2 = data[6 + length1:6 + length1 + length2]
# Logging
logging.debug('Tag1: %s', str(tag1))
logging.debug('Length1: %d', length1)
logging.debug('Value1: %s', [x for x in value1])
logging.debug('Tag2: %s', str(tag2))
logging.debug('Length2: %d', length2)
logging.debug('Value2: %s', [x for x in value2])
# Check real data length
if (length1 == len(value1) and tag1 == Tag.UCOMMAND.SIGNED and
length2 == len(value2) and tag2 == Tag.UCOMMAND.SIGNATURE and
length2 == Length.UCOMMAND.SIGNATURE):
unsigned_message = self.__uhost.decrypt(devid, value1, value2)
if unsigned_message is not None:
logging.debug("Unsigned message: %s", [x for x in unsigned_message])
packet = ['to_Client/' + devid, unsigned_message]
outbound_queue.put(packet)
else:
logging.debug('Command_worker_signed: failed to decrypt message')
else:
logging.debug('Command_worker_signed: Invalid input data')
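# A minimal layout sketch for the packets process() expects: two TLV elements back to
# back, each encoded as tag (1 byte) + length (2 bytes, big-endian) + value. The tag
# byte values below are assumptions; the real ones come from Tag.UCOMMAND.SIGNED and
# Tag.UCOMMAND.SIGNATURE, and the signature length must equal Length.UCOMMAND.SIGNATURE.
def build_signed_packet(message, signature, tag_signed=b'\x10', tag_signature=b'\x11'):
    packet = tag_signed + len(message).to_bytes(2, byteorder='big') + message
    packet += tag_signature + len(signature).to_bytes(2, byteorder='big') + signature
    return packet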
|
[
"logging.debug"
] |
[((2244, 2281), 'logging.debug', 'logging.debug', (['"""Length1: %d"""', 'length1'], {}), "('Length1: %d', length1)\n", (2257, 2281), False, 'import logging\n'), ((2290, 2338), 'logging.debug', 'logging.debug', (['"""Value1: %s"""', '[x for x in value1]'], {}), "('Value1: %s', [x for x in value1])\n", (2303, 2338), False, 'import logging\n'), ((2392, 2429), 'logging.debug', 'logging.debug', (['"""Length2: %d"""', 'length2'], {}), "('Length2: %d', length2)\n", (2405, 2429), False, 'import logging\n'), ((2438, 2486), 'logging.debug', 'logging.debug', (['"""Value2: %s"""', '[x for x in value2]'], {}), "('Value2: %s', [x for x in value2])\n", (2451, 2486), False, 'import logging\n'), ((3169, 3227), 'logging.debug', 'logging.debug', (['"""Command_worker_signed: Invalid input data"""'], {}), "('Command_worker_signed: Invalid input data')\n", (3182, 3227), False, 'import logging\n'), ((2863, 2931), 'logging.debug', 'logging.debug', (['"""Unsigned message: %s"""', '[x for x in unsigned_message]'], {}), "('Unsigned message: %s', [x for x in unsigned_message])\n", (2876, 2931), False, 'import logging\n'), ((3076, 3141), 'logging.debug', 'logging.debug', (['"""Command_worker_signed: failed to decrypt message"""'], {}), "('Command_worker_signed: failed to decrypt message')\n", (3089, 3141), False, 'import logging\n')]
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Contrastive(function.Function):
"""Contrastive loss function."""
def __init__(self, margin, use_cudnn=True):
self.margin = float(margin)
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x0_type, x1_type, y_type = in_types
type_check.expect(
x0_type.dtype == numpy.float32,
x1_type.dtype == numpy.float32,
x0_type.shape == x1_type.shape,
x0_type.shape[0] == x1_type.shape[0],
x1_type.shape[0] == y_type.shape[0],
x0_type.ndim == 2,
x1_type.ndim == 2,
y_type.ndim == 1
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x0, x1, y = inputs
self.diff = x0 - x1 # N x 2
self.dist_sq = xp.sum(self.diff ** 2, axis=1) # N
self.dist = xp.sqrt(self.dist_sq)
self.mdist = self.margin - self.dist
dist = xp.maximum(self.mdist, 0)
loss = y * self.dist_sq + (1 - y) * dist * dist
loss = xp.sum(loss) / 2.0 / x0.shape[0]
return xp.array(loss, dtype=xp.float32),
def backward(self, inputs, gy):
xp = cuda.get_array_module(*inputs)
x0, x1, y = inputs
y = xp.vstack((y, y)).T
alpha = gy[0] / y.shape[0]
dist = xp.vstack((self.dist, self.dist)).T
# similar pair
gx0 = alpha * y * self.diff
# dissimilar pair
mdist = xp.vstack((self.mdist, self.mdist)).T
mdist_p = xp.array(self.mdist > 0, dtype=xp.int32)
mdist_p = xp.vstack((mdist_p, mdist_p)).T
gx0 += alpha * (1 - y) * mdist_p * mdist * -(self.diff / dist)
gx0 = gx0.astype(xp.float32)
return gx0, -gx0, None
def contrastive(x0, x1, y, margin=1, use_cudnn=True):
"""Contrastive loss.
"""
return Contrastive(margin, use_cudnn)(x0, x1, y)
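if __name__ == '__main__':
    # Minimal usage sketch (assumes chainer is installed). Shapes follow the type
    # checks above: x0 and x1 are (N, D) float32 arrays, y holds one label per pair.
    x0 = numpy.random.randn(4, 2).astype(numpy.float32)
    x1 = numpy.random.randn(4, 2).astype(numpy.float32)
    y = numpy.array([1, 0, 1, 0], dtype=numpy.int32)
    loss = contrastive(x0, x1, y, margin=1.0)
    print(loss.data)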
|
[
"chainer.utils.type_check.expect",
"chainer.cuda.get_array_module"
] |
[((449, 708), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x0_type.dtype == numpy.float32)', '(x1_type.dtype == numpy.float32)', '(x0_type.shape == x1_type.shape)', '(x0_type.shape[0] == x1_type.shape[0])', '(x1_type.shape[0] == y_type.shape[0])', '(x0_type.ndim == 2)', '(x1_type.ndim == 2)', '(y_type.ndim == 1)'], {}), '(x0_type.dtype == numpy.float32, x1_type.dtype == numpy.\n float32, x0_type.shape == x1_type.shape, x0_type.shape[0] == x1_type.\n shape[0], x1_type.shape[0] == y_type.shape[0], x0_type.ndim == 2, \n x1_type.ndim == 2, y_type.ndim == 1)\n', (466, 708), False, 'from chainer.utils import type_check\n'), ((845, 875), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['*inputs'], {}), '(*inputs)\n', (866, 875), False, 'from chainer import cuda\n'), ((1332, 1362), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['*inputs'], {}), '(*inputs)\n', (1353, 1362), False, 'from chainer import cuda\n')]
|
#!/usr/bin/env python3
import os
from os.path import splitext
import fiona
import rasterio
import numpy as np
from rasterio.warp import calculate_default_transform, reproject, Resampling
from pyproj.crs import CRS
def getDriver(fileName):
driverDictionary = {'.gpkg' : 'GPKG','.geojson' : 'GeoJSON','.shp' : 'ESRI Shapefile'}
driver = driverDictionary[splitext(fileName)[1]]
return(driver)
def pull_file(url, full_pulled_filepath):
"""
This helper function pulls a file and saves it to a specified path.
Args:
url (str): The full URL to the file to download.
full_pulled_filepath (str): The full system path where the downloaded file will be saved.
"""
import urllib.request
print("Pulling " + url)
urllib.request.urlretrieve(url, full_pulled_filepath)
def delete_file(file_path):
"""
This helper function deletes a file.
Args:
file_path (str): System path to a file to be deleted.
"""
try:
os.remove(file_path)
except FileNotFoundError:
pass
def run_system_command(args):
"""
This helper function takes a system command and runs it. This function is designed for use
in multiprocessing.
Args:
args (list): A single-item list, the first and only item being a system command string.
"""
# Parse system command.
command = args[0]
# Run system command.
os.system(command)
def subset_wbd_gpkg(wbd_gpkg, multilayer_wbd_geopackage):
import geopandas as gp
from utils.shared_variables import CONUS_STATE_LIST, PREP_PROJECTION
print("Subsetting " + wbd_gpkg + "...")
# Read geopackage into dataframe.
wbd = gp.read_file(wbd_gpkg)
gdf = gp.GeoDataFrame(wbd)
for index, row in gdf.iterrows():
state = row["STATES"]
if state != None: # Some polygons are empty in the STATES field.
            keep_flag = False # Default to False, i.e. to delete the polygon.
if state in CONUS_STATE_LIST:
keep_flag = True
# Only split if multiple states present. More efficient this way.
elif len(state) > 2:
for wbd_state in state.split(","): # Some polygons have multiple states, separated by a comma.
if wbd_state in CONUS_STATE_LIST: # Check each polygon to make sure it's state abbrev name is allowed.
keep_flag = True
break
if not keep_flag:
gdf.drop(index, inplace=True) # Delete from dataframe.
# Overwrite geopackage.
layer_name = os.path.split(wbd_gpkg)[1].strip('.gpkg')
gdf.crs = PREP_PROJECTION
gdf.to_file(multilayer_wbd_geopackage, layer=layer_name,driver='GPKG',index=False)
def get_fossid_from_huc8(huc8_id,foss_id_attribute='fossid',
hucs=os.path.join(os.environ['inputDataDir'],'wbd','WBD_National.gpkg'),
hucs_layerName=None):
hucs = fiona.open(hucs,'r',layer=hucs_layerName)
for huc in hucs:
if huc['properties']['HUC8'] == huc8_id:
return(huc['properties'][foss_id_attribute])
def update_raster_profile(args):
elev_cm_filename = args[0]
elev_m_filename = args[1]
projection = args[2]
nodata_val = args[3]
blocksize = args[4]
keep_intermediate = args[5]
overwrite = args[6]
if os.path.exists(elev_m_filename) & overwrite:
os.remove(elev_m_filename)
elif not os.path.exists(elev_m_filename):
pass
else:
print(f"Skipping {elev_m_filename}. Use overwrite option.")
return
if isinstance(blocksize, int):
pass
elif isinstance(blocksize,str):
blocksize = int(blocksize)
elif isinstance(blocksize,float):
blocksize = int(blocksize)
else:
raise TypeError("Pass integer for blocksize")
assert elev_cm_filename.endswith('.tif'), "input raster needs to be a tif"
# Update nodata value and convert from cm to meters
dem_cm = rasterio.open(elev_cm_filename)
no_data = dem_cm.nodata
dem_m_profile = dem_cm.profile.copy()
dem_m_profile.update(driver='GTiff',tiled=True,nodata=nodata_val,
blockxsize=blocksize, blockysize=blocksize,
dtype='float32',crs=projection,compress='lzw',interleave='band')
dest = rasterio.open(elev_m_filename, "w", **dem_m_profile, BIGTIFF='YES')
for idx,window in dem_cm.block_windows(1):
data = dem_cm.read(1,window=window)
        # the output of this line is written back out to the same variable.
        data = np.where(data == int(no_data), nodata_val, (data/100).astype(rasterio.float32))
        # removed this line to avoid having two array copies of data, which kills memory usage
        #del data
dest.write(data, indexes = 1, window=window)
# not necessary
#del dem_m
dem_cm.close()
dest.close()
if keep_intermediate == False:
os.remove(elev_cm_filename)
return(elev_m_filename)
'''
This function isn't currently used but is the preferred method for
reprojecting elevation grids.
Several USGS elev_cm.tifs have the crs value in their profile stored as the string "CRS.from_epsg(26904)"
instead of the actual output of that command.
Rasterio fails to properly read the crs but using gdal retrieves the correct projection.
Until this issue is resolved use the reproject_dem function in reproject_dem.py instead.
reproject_dem is not stored in the shared_functions.py because rasterio and
gdal bindings are not entirely compatible: https://rasterio.readthedocs.io/en/latest/topics/switch.html
'''
def reproject_raster(input_raster_name,reprojection,blocksize=None,reprojected_raster_name=None):
if blocksize is not None:
if isinstance(blocksize, int):
pass
elif isinstance(blocksize,str):
blocksize = int(blocksize)
elif isinstance(blocksize,float):
blocksize = int(blocksize)
else:
raise TypeError("Pass integer for blocksize")
else:
blocksize = 256
assert input_raster_name.endswith('.tif'), "input raster needs to be a tif"
reprojection = rasterio.crs.CRS.from_string(reprojection)
with rasterio.open(input_raster_name) as src:
# Check projection
if src.crs.to_string() != reprojection:
if src.crs.to_string().startswith('EPSG'):
epsg = src.crs.to_epsg()
proj_crs = CRS.from_epsg(epsg)
rio_crs = rasterio.crs.CRS.from_user_input(proj_crs).to_string()
else:
rio_crs = src.crs.to_string()
print(f"{input_raster_name} not projected")
print(f"Reprojecting from {rio_crs} to {reprojection}")
transform, width, height = calculate_default_transform(
src.crs, reprojection, src.width, src.height, *src.bounds)
kwargs = src.meta.copy()
kwargs.update({
'crs': reprojection,
'transform': transform,
'width': width,
'height': height,
'compress': 'lzw'
})
if reprojected_raster_name is None:
reprojected_raster_name = input_raster_name
assert reprojected_raster_name.endswith('.tif'), "output raster needs to be a tif"
with rasterio.open(reprojected_raster_name, 'w', **kwargs, tiled=True, blockxsize=blocksize, blockysize=blocksize, BIGTIFF='YES') as dst:
reproject(
source=rasterio.band(src, 1),
destination=rasterio.band(dst, 1),
src_transform=src.transform,
src_crs=rio_crs,
dst_transform=transform,
dst_crs=reprojection.to_string(),
resampling=Resampling.nearest)
del dst
del src
def mem_profile(func):
def wrapper(*args, **kwargs):
if (os.environ.get('mem') == "1"):
            from memory_profiler import profile  # assumed source of `profile`, which is otherwise undefined here
            profile(func)(*args, **kwargs)
else:
func(*args, **kwargs)
return wrapper
def append_id_to_file_name(file_name, identifier):
'''
Processing:
Takes an incoming file name and inserts an identifier into the name
just ahead of the extension, with an underscore added.
ie) filename = "/output/myfolder/a_raster.tif"
            identifier = "13090001"
Becomes: "/output/myfolder/a_raster_13090001.tif"
Note:
- Can handle a single identifier or a list of identifier
ie) identifier = ["13090001", "123000001"]
Becomes: "/output/myfolder/a_raster_13090001_123000001.tif"
        - This allows the file name to be omitted, in which case None is returned
Inputs:
file_name: a single file name
identifier: a value or list of values to be inserted with an underscore
            added ahead of the extension
Output:
        out_file_name: A single name with each identifier added at the end before
the extension, each with an underscore in front of the identifier.
'''
if file_name is not None:
root,extension = os.path.splitext(file_name)
if isinstance(identifier, list):
out_file_name = root
for i in identifier:
out_file_name += "_{}".format(i)
out_file_name += extension
else:
out_file_name = root + "_{}".format(identifier) + extension
else:
out_file_name = None
return(out_file_name)
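if __name__ == '__main__':
    # Small self-check of append_id_to_file_name (a pure function, so it is safe to
    # run without any raster inputs); it mirrors the examples in the docstring above.
    print(append_id_to_file_name('/output/myfolder/a_raster.tif', '13090001'))
    print(append_id_to_file_name('/output/myfolder/a_raster.tif', ['13090001', '123000001']))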
|
[
"rasterio.open",
"os.remove",
"fiona.open",
"rasterio.band",
"os.path.exists",
"os.system",
"rasterio.warp.calculate_default_transform",
"pyproj.crs.CRS.from_epsg",
"os.environ.get",
"geopandas.GeoDataFrame",
"os.path.splitext",
"rasterio.crs.CRS.from_user_input",
"rasterio.crs.CRS.from_string",
"os.path.split",
"os.path.join",
"geopandas.read_file"
] |
[((1414, 1432), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1423, 1432), False, 'import os\n'), ((1687, 1709), 'geopandas.read_file', 'gp.read_file', (['wbd_gpkg'], {}), '(wbd_gpkg)\n', (1699, 1709), True, 'import geopandas as gp\n'), ((1720, 1740), 'geopandas.GeoDataFrame', 'gp.GeoDataFrame', (['wbd'], {}), '(wbd)\n', (1735, 1740), True, 'import geopandas as gp\n'), ((2856, 2924), 'os.path.join', 'os.path.join', (["os.environ['inputDataDir']", '"""wbd"""', '"""WBD_National.gpkg"""'], {}), "(os.environ['inputDataDir'], 'wbd', 'WBD_National.gpkg')\n", (2868, 2924), False, 'import os\n'), ((2983, 3026), 'fiona.open', 'fiona.open', (['hucs', '"""r"""'], {'layer': 'hucs_layerName'}), "(hucs, 'r', layer=hucs_layerName)\n", (2993, 3026), False, 'import fiona\n'), ((4059, 4090), 'rasterio.open', 'rasterio.open', (['elev_cm_filename'], {}), '(elev_cm_filename)\n', (4072, 4090), False, 'import rasterio\n'), ((4412, 4479), 'rasterio.open', 'rasterio.open', (['elev_m_filename', '"""w"""'], {'BIGTIFF': '"""YES"""'}), "(elev_m_filename, 'w', **dem_m_profile, BIGTIFF='YES')\n", (4425, 4479), False, 'import rasterio\n'), ((6239, 6281), 'rasterio.crs.CRS.from_string', 'rasterio.crs.CRS.from_string', (['reprojection'], {}), '(reprojection)\n', (6267, 6281), False, 'import rasterio\n'), ((994, 1014), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (1003, 1014), False, 'import os\n'), ((3420, 3451), 'os.path.exists', 'os.path.exists', (['elev_m_filename'], {}), '(elev_m_filename)\n', (3434, 3451), False, 'import os\n'), ((3473, 3499), 'os.remove', 'os.remove', (['elev_m_filename'], {}), '(elev_m_filename)\n', (3482, 3499), False, 'import os\n'), ((5006, 5033), 'os.remove', 'os.remove', (['elev_cm_filename'], {}), '(elev_cm_filename)\n', (5015, 5033), False, 'import os\n'), ((6292, 6324), 'rasterio.open', 'rasterio.open', (['input_raster_name'], {}), '(input_raster_name)\n', (6305, 6324), False, 'import rasterio\n'), ((9271, 9298), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (9287, 9298), False, 'import os\n'), ((363, 381), 'os.path.splitext', 'splitext', (['fileName'], {}), '(fileName)\n', (371, 381), False, 'from os.path import splitext\n'), ((3513, 3544), 'os.path.exists', 'os.path.exists', (['elev_m_filename'], {}), '(elev_m_filename)\n', (3527, 3544), False, 'import os\n'), ((6862, 6953), 'rasterio.warp.calculate_default_transform', 'calculate_default_transform', (['src.crs', 'reprojection', 'src.width', 'src.height', '*src.bounds'], {}), '(src.crs, reprojection, src.width, src.height, *\n src.bounds)\n', (6889, 6953), False, 'from rasterio.warp import calculate_default_transform, reproject, Resampling\n'), ((8057, 8078), 'os.environ.get', 'os.environ.get', (['"""mem"""'], {}), "('mem')\n", (8071, 8078), False, 'import os\n'), ((2604, 2627), 'os.path.split', 'os.path.split', (['wbd_gpkg'], {}), '(wbd_gpkg)\n', (2617, 2627), False, 'import os\n'), ((6532, 6551), 'pyproj.crs.CRS.from_epsg', 'CRS.from_epsg', (['epsg'], {}), '(epsg)\n', (6545, 6551), False, 'from pyproj.crs import CRS\n'), ((7446, 7574), 'rasterio.open', 'rasterio.open', (['reprojected_raster_name', '"""w"""'], {'tiled': '(True)', 'blockxsize': 'blocksize', 'blockysize': 'blocksize', 'BIGTIFF': '"""YES"""'}), "(reprojected_raster_name, 'w', **kwargs, tiled=True,\n blockxsize=blocksize, blockysize=blocksize, BIGTIFF='YES')\n", (7459, 7574), False, 'import rasterio\n'), ((6578, 6620), 'rasterio.crs.CRS.from_user_input', 'rasterio.crs.CRS.from_user_input', (['proj_crs'], {}), 
'(proj_crs)\n', (6610, 6620), False, 'import rasterio\n'), ((7633, 7654), 'rasterio.band', 'rasterio.band', (['src', '(1)'], {}), '(src, 1)\n', (7646, 7654), False, 'import rasterio\n'), ((7688, 7709), 'rasterio.band', 'rasterio.band', (['dst', '(1)'], {}), '(dst, 1)\n', (7701, 7709), False, 'import rasterio\n')]
|
"""
Template to run ase to do a contrained optimization
using Gaussian
KinBot needs to pass to the template:
1. A label for the calculation
2. The number of cores
3. The kwargs for Gaussian
4. The atom vector
5. The geometry
6. The Gaussian command
"""
import os, sys, re
import numpy as np
import ase
from ase import Atoms
from ase.calculators.gaussian import Gaussian
from ase.optimize.pcobfgs import PCOBFGS
from ase.db import connect
label = '{label}'
kwargs = {kwargs}
Gaussian.command = '{qc_command} < PREFIX.com > PREFIX.log'
calc = Gaussian(**kwargs)
atom = {atom}
geom = {geom}
mol = Atoms(symbols = atom, positions = geom)
mol.set_calculator(calc)
fix = {fix}
change = {change}
bonds = []
angles = []
dihedrals = []
for fi in fix:
if len(fi) == 2:
#careful: atom indices in the fix lists start at 1
bondlength = mol.get_distance(fi[0] - 1, fi[1] - 1)
bonds.append([bondlength,[fi[0] - 1, fi[1] - 1]])
if len(fi) == 3:
#careful: atom indices in the fix lists start at 1
angle = mol.get_angle(fi[0]-1,fi[1]-1,fi[2]-1) * np.pi / 180
angles.append([angle,[fi[0]-1,fi[1]-1,fi[2]-1]])
if len(fi) == 4:
#careful: atom indices in the fix lists start at 1
dihed = mol.get_dihedral(fi[0]-1,fi[1]-1,fi[2]-1,fi[3]-1) * np.pi / 180
dihedrals.append([dihed,[fi[0]-1,fi[1]-1,fi[2]-1,fi[3]-1]])
for ci in change:
if len(ci) == 3:
#careful: atom indices in the fix lists start at 1
bondlength = ci[2]
bonds.append([bondlength,[ci[0] - 1, ci[1] - 1]])
if len(ci) == 4:
#careful: atom indices in the fix lists start at 1
angle = ci[3] * np.pi / 180
angles.append([angle,[ci[0]-1,ci[1]-1,ci[2]-1]])
if len(ci) == 5:
#careful: atom indices in the fix lists start at 1
dihed = ci[4] * np.pi / 180
dihedrals.append([dihed,[ci[0]-1,ci[1]-1,ci[2]-1,ci[3]-1]])
dyn = PCOBFGS(mol,
trajectory=label + '.traj',
bonds=bonds,
angles=angles,
dihedrals=dihedrals,
force_consistent=False)
try:
dyn.run(fmax = 0.01, steps = 400)
e = mol.get_potential_energy()
data = {{'energy': e, 'status' : 'normal'}}
except RuntimeError:
data = {{'status' : 'error'}}
db = connect('{working_dir}/kinbot.db')
db.write(mol, name=label, data=data)
# add the finished stamp
f = open(label + '.log','a')
f.write('done\n')
f.close()
|
[
"ase.calculators.gaussian.Gaussian",
"ase.db.connect",
"ase.optimize.pcobfgs.PCOBFGS",
"ase.Atoms"
] |
[((549, 567), 'ase.calculators.gaussian.Gaussian', 'Gaussian', ([], {}), '(**kwargs)\n', (557, 567), False, 'from ase.calculators.gaussian import Gaussian\n'), ((604, 639), 'ase.Atoms', 'Atoms', ([], {'symbols': 'atom', 'positions': 'geom'}), '(symbols=atom, positions=geom)\n', (609, 639), False, 'from ase import Atoms\n'), ((1933, 2050), 'ase.optimize.pcobfgs.PCOBFGS', 'PCOBFGS', (['mol'], {'trajectory': "(label + '.traj')", 'bonds': 'bonds', 'angles': 'angles', 'dihedrals': 'dihedrals', 'force_consistent': '(False)'}), "(mol, trajectory=label + '.traj', bonds=bonds, angles=angles,\n dihedrals=dihedrals, force_consistent=False)\n", (1940, 2050), False, 'from ase.optimize.pcobfgs import PCOBFGS\n'), ((2306, 2340), 'ase.db.connect', 'connect', (['"""{working_dir}/kinbot.db"""'], {}), "('{working_dir}/kinbot.db')\n", (2313, 2340), False, 'from ase.db import connect\n')]
|
from django.contrib import admin
from .models import Video
# Register your models here.
admin.site.register(Video)
class VideoAdmin(admin.ModelAdmin):
list_display = ('aid','name', 'tags', 'url', 'cover', 'desc', 'add_time')
|
[
"django.contrib.admin.site.register"
] |
[((89, 115), 'django.contrib.admin.site.register', 'admin.site.register', (['Video'], {}), '(Video)\n', (108, 115), False, 'from django.contrib import admin\n')]
|
"""Added additional file attributes
Revision ID: <KEY>
Revises: 826d7777c67c
Create Date: 2022-01-14 16:32:28.259435
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "826d7777c67c"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("fileinfo", sa.Column("creation_date", sa.DateTime(), nullable=False))
op.add_column("fileinfo", sa.Column("update_date", sa.DateTime(), nullable=False))
op.add_column("fileinfo", sa.Column("format", sa.String(), nullable=False))
op.add_column("fileinfo", sa.Column("size", sa.Integer(), nullable=False))
op.drop_column("fileinfo", "registration_date")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"fileinfo",
sa.Column(
"registration_date",
postgresql.TIMESTAMP(),
autoincrement=False,
nullable=False,
),
)
op.drop_column("fileinfo", "size")
op.drop_column("fileinfo", "format")
op.drop_column("fileinfo", "update_date")
op.drop_column("fileinfo", "creation_date")
# ### end Alembic commands ###
|
[
"sqlalchemy.DateTime",
"sqlalchemy.dialects.postgresql.TIMESTAMP",
"alembic.op.drop_column",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((766, 813), 'alembic.op.drop_column', 'op.drop_column', (['"""fileinfo"""', '"""registration_date"""'], {}), "('fileinfo', 'registration_date')\n", (780, 813), False, 'from alembic import op\n'), ((1143, 1177), 'alembic.op.drop_column', 'op.drop_column', (['"""fileinfo"""', '"""size"""'], {}), "('fileinfo', 'size')\n", (1157, 1177), False, 'from alembic import op\n'), ((1182, 1218), 'alembic.op.drop_column', 'op.drop_column', (['"""fileinfo"""', '"""format"""'], {}), "('fileinfo', 'format')\n", (1196, 1218), False, 'from alembic import op\n'), ((1223, 1264), 'alembic.op.drop_column', 'op.drop_column', (['"""fileinfo"""', '"""update_date"""'], {}), "('fileinfo', 'update_date')\n", (1237, 1264), False, 'from alembic import op\n'), ((1269, 1312), 'alembic.op.drop_column', 'op.drop_column', (['"""fileinfo"""', '"""creation_date"""'], {}), "('fileinfo', 'creation_date')\n", (1283, 1312), False, 'from alembic import op\n'), ((484, 497), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (495, 497), True, 'import sqlalchemy as sa\n'), ((571, 584), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (582, 584), True, 'import sqlalchemy as sa\n'), ((653, 664), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (662, 664), True, 'import sqlalchemy as sa\n'), ((731, 743), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (741, 743), True, 'import sqlalchemy as sa\n'), ((1037, 1059), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (1057, 1059), False, 'from sqlalchemy.dialects import postgresql\n')]
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
from subprocess import PIPE
from psutil import Popen
def run_command(cmd, data, location, chw, env=None):
cmd_env = None
if env:
cmd_env = os.environ.copy()
cmd_env.update(env)
cwd = os.getcwd()
if location is not None and chw is True:
cwd = location
elif location is not None and chw is False:
cmd = "{0} {1}".format(cmd, location)
r = Popen(
shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd, env=cmd_env
)
if data is None:
output = r.communicate()[0].decode("utf-8")
else:
output = r.communicate(input=data)[0]
return output
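if __name__ == "__main__":
    # Minimal usage sketch; the command, directory and environment variable are
    # placeholders. With chw=True the command runs inside `location`; otherwise
    # `location` is appended to the command string.
    output = run_command("ls -la", data=None, location="/tmp", chw=True, env={"EXAMPLE": "1"})
    print(output)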
|
[
"os.getcwd",
"os.environ.copy",
"shlex.split"
] |
[((843, 854), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (852, 854), False, 'import os\n'), ((787, 804), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (802, 804), False, 'import os\n'), ((1040, 1056), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (1051, 1056), False, 'import shlex\n')]
|
from typing import List, Tuple
import torch
from torch import nn
from torch import Tensor
from convlstm import ConvLSTM, HiddenState
class ConvLSTMAutoencoder(nn.Module):
"""
This model is an implementation of the 'autoencoder' convolutional LSTM
model proposed in 'Convolutional LSTM Network: A Machine Learning Approach
for Precipitation Nowcasting', Shi et al., 2015, http://arxiv.org/abs/1506.04214
Instead of one decoding network, as proposed in the paper, this model has two
decoding networks as in 'Unsupervised Learning of Video Representations using LSTMs',
Srivastava et al., 2016.
The encoding network receives a sequence of images and outputs its hidden state that
should represent a compressed representation of the sequence. Its hidden state is then
used as initial hidden state for the two decoding networks that use the information
contained in it to respectively reconstruct the input sequence and to predict future
frames.
"""
def __init__(self, input_size: Tuple[int, int], input_dim: int,
hidden_dim: List[int], kernel_size: List[Tuple[int, int]],
batch_first: bool=True, bias: bool=True, decoding_steps: int=-1):
super(ConvLSTMAutoencoder, self).__init__()
self.decoding_steps = decoding_steps
self.input_size = input_size
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.batch_first = batch_first
self.num_layers = len(hidden_dim)
self.encoder = ConvLSTM(
input_size=input_size,
input_dim=input_dim,
hidden_dim=hidden_dim,
kernel_size=kernel_size,
num_layers=self.num_layers,
batch_first=False,
bias=bias,
mode=ConvLSTM.SEQUENCE
)
# reverse the order of hidden dimensions and kernels
decoding_hidden_dim = list(reversed(hidden_dim))
decoding_kernel_size = list(reversed(kernel_size))
decoding_hidden_dim .append(input_dim) # NOTE: we need a num_of_decoding_layers = num_of_encoding_layers+1
decoding_kernel_size.append((1,1)) # so we add a 1x1 ConvLSTM as last decoding layer
self.input_reconstruction = ConvLSTM(
input_size=input_size,
input_dim=input_dim,
hidden_dim=decoding_hidden_dim,
kernel_size=decoding_kernel_size,
num_layers=self.num_layers + 1,
batch_first=False,
bias=bias,
mode=ConvLSTM.STEP_BY_STEP
)
self.future_prediction = ConvLSTM(
input_size=input_size,
input_dim=input_dim,
hidden_dim=decoding_hidden_dim,
kernel_size=decoding_kernel_size,
num_layers=self.num_layers + 1,
batch_first=False,
bias=bias,
mode=ConvLSTM.STEP_BY_STEP
)
def forward(self, input_sequence: Tensor) -> Tuple[Tensor]:
sequence = input_sequence.transpose(0,1) if self.batch_first else input_sequence # always work in sequence-first mode
sequence_len = sequence.size(0)
steps = self.decoding_steps if self.decoding_steps != -1 else sequence_len
# encode
_, hidden_state = self.encoder(sequence)
last_frame = sequence[-1, :]
h_n, c_n = hidden_state
representation = (h_n[-1], c_n[-1])
# decode for input reconstruction
output_seq_recon = ConvLSTMAutoencoder._decode(self.input_reconstruction, last_frame,
representation, steps)
# decode for future prediction
output_seq_pred = ConvLSTMAutoencoder._decode(self.future_prediction, last_frame,
representation, steps)
if self.batch_first: # if input was batch_first restore dimension order
reconstruction = output_seq_recon.transpose(0,1)
prediction = output_seq_pred .transpose(0,1)
else:
reconstruction = output_seq_recon
prediction = output_seq_pred
return (reconstruction, prediction)
@staticmethod
def _decode(decoder: ConvLSTM, last_frame: Tensor, representation: HiddenState, steps: int) -> Tensor:
decoded_sequence = []
h_n, c_n = representation
h_0, c_0 = decoder.init_hidden(last_frame.size(0))
h_0[0], c_0[0] = h_n, c_n
state = (h_0, c_0)
output = last_frame
for t in range(steps):
output, state = decoder(output, state)
decoded_sequence.append(output)
return torch.stack(decoded_sequence, dim=0)
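if __name__ == '__main__':
    # Minimal smoke test; the shapes are illustrative and assume the accompanying
    # convlstm module is importable: 2 sequences of 5 frames, 1 channel, 16x16 pixels.
    model = ConvLSTMAutoencoder(input_size=(16, 16), input_dim=1,
                                hidden_dim=[8, 8], kernel_size=[(3, 3), (3, 3)],
                                batch_first=True, decoding_steps=5)
    sequences = torch.randn(2, 5, 1, 16, 16)
    reconstruction, prediction = model(sequences)
    print(reconstruction.shape, prediction.shape)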
|
[
"convlstm.ConvLSTM",
"torch.stack"
] |
[((1592, 1782), 'convlstm.ConvLSTM', 'ConvLSTM', ([], {'input_size': 'input_size', 'input_dim': 'input_dim', 'hidden_dim': 'hidden_dim', 'kernel_size': 'kernel_size', 'num_layers': 'self.num_layers', 'batch_first': '(False)', 'bias': 'bias', 'mode': 'ConvLSTM.SEQUENCE'}), '(input_size=input_size, input_dim=input_dim, hidden_dim=hidden_dim,\n kernel_size=kernel_size, num_layers=self.num_layers, batch_first=False,\n bias=bias, mode=ConvLSTM.SEQUENCE)\n', (1600, 1782), False, 'from convlstm import ConvLSTM, HiddenState\n'), ((2324, 2542), 'convlstm.ConvLSTM', 'ConvLSTM', ([], {'input_size': 'input_size', 'input_dim': 'input_dim', 'hidden_dim': 'decoding_hidden_dim', 'kernel_size': 'decoding_kernel_size', 'num_layers': '(self.num_layers + 1)', 'batch_first': '(False)', 'bias': 'bias', 'mode': 'ConvLSTM.STEP_BY_STEP'}), '(input_size=input_size, input_dim=input_dim, hidden_dim=\n decoding_hidden_dim, kernel_size=decoding_kernel_size, num_layers=self.\n num_layers + 1, batch_first=False, bias=bias, mode=ConvLSTM.STEP_BY_STEP)\n', (2332, 2542), False, 'from convlstm import ConvLSTM, HiddenState\n'), ((2927, 3145), 'convlstm.ConvLSTM', 'ConvLSTM', ([], {'input_size': 'input_size', 'input_dim': 'input_dim', 'hidden_dim': 'decoding_hidden_dim', 'kernel_size': 'decoding_kernel_size', 'num_layers': '(self.num_layers + 1)', 'batch_first': '(False)', 'bias': 'bias', 'mode': 'ConvLSTM.STEP_BY_STEP'}), '(input_size=input_size, input_dim=input_dim, hidden_dim=\n decoding_hidden_dim, kernel_size=decoding_kernel_size, num_layers=self.\n num_layers + 1, batch_first=False, bias=bias, mode=ConvLSTM.STEP_BY_STEP)\n', (2935, 3145), False, 'from convlstm import ConvLSTM, HiddenState\n'), ((5241, 5277), 'torch.stack', 'torch.stack', (['decoded_sequence'], {'dim': '(0)'}), '(decoded_sequence, dim=0)\n', (5252, 5277), False, 'import torch\n')]
|
# %%
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from xgboost import XGBClassifier
import pandas as pd
# %%
data = pd.read_csv("../data/iris.csv")
X = data.drop("class", axis=1)
y = data["class"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
# %%
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
# reuse the scaler fitted on the training split; do not refit it on the test data
X_test_scaled = scaler.transform(X_test)
# %%
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(y_train)
# likewise, reuse the encoder fitted on the training labels
y_test = label_encoder.transform(y_test)
# %%
model = XGBClassifier(
max_depth=3,
objective="multi:softprob",
eval_metric="merror",
use_label_encoder=False,
)
model.fit(X_train_scaled, y_train)
preds = model.predict(X_test_scaled)
print(model.score(X_test_scaled, y_test))
print(confusion_matrix(y_test, preds))
# %%
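# A quick follow-up sketch: per-class probabilities for a few test rows, relying on
# the multi:softprob objective configured above.
probabilities = model.predict_proba(X_test_scaled[:5])
print(probabilities.round(3))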
|
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.LabelEncoder",
"xgboost.XGBClassifier",
"sklearn.metrics.confusion_matrix"
] |
[((233, 264), 'pandas.read_csv', 'pd.read_csv', (['"""../data/iris.csv"""'], {}), "('../data/iris.csv')\n", (244, 264), True, 'import pandas as pd\n'), ((350, 405), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y, test_size=0.33, random_state=42)\n', (366, 405), False, 'from sklearn.model_selection import train_test_split\n'), ((428, 442), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (440, 442), False, 'from sklearn.preprocessing import MinMaxScaler, LabelEncoder\n'), ((558, 572), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (570, 572), False, 'from sklearn.preprocessing import MinMaxScaler, LabelEncoder\n'), ((680, 785), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'max_depth': '(3)', 'objective': '"""multi:softprob"""', 'eval_metric': '"""merror"""', 'use_label_encoder': '(False)'}), "(max_depth=3, objective='multi:softprob', eval_metric='merror',\n use_label_encoder=False)\n", (693, 785), False, 'from xgboost import XGBClassifier\n'), ((921, 952), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'preds'], {}), '(y_test, preds)\n', (937, 952), False, 'from sklearn.metrics import confusion_matrix\n')]
|
#
# Copyright 2011 Twitter, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PyCascading function decorators to be used with user-defined functions.
A user-defined function is a function that gets applied as a filter or an
Each function for each tuple, or the reduce-side function for tuples in a
grouping in an Every Cascading operation.
UDFs can emit a new set of tuples (as in a Function after an Each operation),
keep or filter out tuples (a Filter after an Each), or emit aggregate values
(an Aggregator or Buffer for a group after an Every).
We use globally or locally scoped Python functions to perform these
user-defined operations. When building the data processing pipeline, we can
simply stream data through a Python function with PyCascading if it was
decorated by one of the decorators.
* A 'udf_map' function is executed for each input tuple, and returns no, one, or
several new output tuples.
* A 'udf_filter' is a boolean-valued function, which should return true if the
input tuple should be kept for the output, and false if not.
* A 'udf_buffer' is a function that is applied to groups of tuples, and is the
equivalent of a Cascading Buffer. It returns an aggregate after iterating
through the tuples in the group.
Exports the following:
udf
yields
numargs_expected
python_list_expected
python_dict_expected
collects_output
produces_python_list
produces_tuples
udf_filter
udf_map
udf_buffer
"""
__author__ = '<NAME>'
import inspect
from pycascading.pipe import DecoratedFunction
from com.twitter.pycascading import CascadingBaseOperationWrapper
from com.twitter.pycascading import CascadingRecordProducerWrapper
def _function_decorator(args, kwargs, defaults={}):
"""
A decorator to recursively decorate a function with arbitrary attributes.
"""
def fun_decorator(function_or_callabledict):
if isinstance(function_or_callabledict, DecoratedFunction):
# Another decorator is next
dff = function_or_callabledict
else:
# The original function comes next
dff = DecoratedFunction.decorate_function(function_or_callabledict)
# Add the attributes to the decorated function
dff.decorators.update(additional_parameters)
return dff
additional_parameters = dict(defaults)
additional_parameters.update(kwargs)
if len(args) == 1 and not kwargs and (inspect.isroutine(args[0]) or isinstance(args[0], DecoratedFunction)):
# We used the decorator without ()s, the first argument is the
# function. We cannot use additional parameters in this case.
return fun_decorator(args[0])
else:
return fun_decorator
def udf(*args, **kwargs):
"""The function can receive tuples or groups of tuples from Cascading.
This is the decorator to use when we have a function that we want to use
in a Cascading job after an Each or Every.
"""
return _function_decorator(args, kwargs)
def yields(*args, **kwargs):
"""The function is a generator that yields output tuples.
PyCascading considers this function a generator that yields one or more
output tuples before returning. If this decorator is not used, the way the
function emits tuples is determined automatically at runtime the first time
the function is called. The alternative to yielding values is to return
one tuple with return.
We can safely yield Nones or not yield anything at all; no output tuples
will be emitted in this case.
"""
return _function_decorator(args, kwargs, \
{ 'output_method' : CascadingRecordProducerWrapper.OutputMethod.YIELDS })
def numargs_expected(num, *args, **kwargs):
"""The function expects a num number of fields in the input tuples.
Arguments:
num -- the exact number of fields that the input tuples must have
"""
return _function_decorator(args, kwargs, { 'numargs_expected' : num })
def python_list_expected(*args, **kwargs):
"""PyCascading will pass in the input tuples as Python lists.
There is some performance penalty as all the incoming tuples need to be
converted to Python lists.
"""
params = dict(kwargs)
params.update()
return _function_decorator(args, kwargs, { 'input_conversion' : \
CascadingBaseOperationWrapper.ConvertInputTuples.PYTHON_LIST })
def python_dict_expected(*args, **kwargs):
"""The input tuples are converted to Python dicts for this function.
PyCascading will convert all input tuples to a Python dict for this
function. The keys of the dict are the Cascading field names and the values
are the values read from the tuple.
There is some performance penalty as all the incoming tuples need to be
converted to Python dicts.
"""
return _function_decorator(args, kwargs, { 'input_conversion' : \
CascadingBaseOperationWrapper.ConvertInputTuples.PYTHON_DICT })
def collects_output(*args, **kwargs):
"""The function expects an output collector where output tuples are added.
PyCascading will pass in a Cascading TupleEntryCollector to which the
function can add output tuples by calling its 'add' method.
Use this if performance is important, as no conversion takes place between
Python objects and Cascading tuples.
"""
return _function_decorator(args, kwargs, { 'output_method' : \
CascadingRecordProducerWrapper.OutputMethod.COLLECTS })
def produces_python_list(*args, **kwargs):
"""The function emits Python lists as tuples.
These will be converted by PyCascading to Cascading Tuples, so this impacts
performance somewhat.
"""
return _function_decorator(args, kwargs, { 'output_type' : \
CascadingRecordProducerWrapper.OutputType.PYTHON_LIST })
def produces_tuples(*args, **kwargs):
"""The function emits native Cascading Tuples or TupleEntrys.
No conversion takes place so this is a fast way to add tuples to the
output.
"""
return _function_decorator(args, kwargs, { 'output_type' : \
CascadingRecordProducerWrapper.OutputType.TUPLE })
def udf_filter(*args, **kwargs):
"""This makes the function a filter.
The function should return 'true' for each input tuple that should stay
in the output stream, and 'false' if it is to be removed.
IMPORTANT: this behavior is the opposite of what Cascading expects, but
similar to how the Python filter works!
Note that the same effect can be attained by a map that returns the tuple
itself or None if it should be filtered out.
"""
return _function_decorator(args, kwargs, { 'type' : 'filter' })
def udf_map(*args, **kwargs):
"""The function decorated with this emits output tuples for each input one.
The function is called for all the tuples in the input stream as happens
in a Cascading Each. The function input tuple is passed in to the function
as the first parameter and is a native Cascading TupleEntry unless the
python_list_expected or python_dict_expected decorators are also used.
If collects_output is used, the 2nd parameter is a Cascading
TupleEntryCollector to which Tuples or TupleEntrys can be added. Otherwise,
the function may return an output tuple or yield any number of tuples if
it is a generator.
Whether the function yields or returns will be determined automatically if
no decorators used that specify this, and so will be the output tuple type
(it can be Python list or a Cascading Tuple).
Note that the meaning of 'map' used here is closer to the Python map()
builtin than the 'map' in MapReduce. It essentially means that each input
tuple needs to be transformed (mapped) by a custom function.
Arguments:
produces -- a list of output field names
"""
return _function_decorator(args, kwargs, { 'type' : 'map' })
def udf_buffer(*args, **kwargs):
"""The function decorated with this takes a group and emits aggregates.
A udf_buffer function must follow a Cascading Every operation, which comes
after a GroupBy. The function will be called for each grouping on a
different reducer. The first parameter passed to the function is the
value of the grouping field for this group, and the second is an iterator
to the tuples belonging to this group.
Note that the iterator always points to a static variable in Cascading
that holds a copy of the current TupleEntry, thus we cannot cache this for
subsequent operations in the function. Instead, take iterator.getTuple() or
create a new TupleEntry by deep copying the item in the loop.
Cascading also doesn't automatically add the group field to the output
tuples, so we need to do it manually. In fact a Cascading Buffer is more
powerful than an aggregator, although it can be used as one. It acts more
like a function emitting arbitrary tuples for groups, rather than just a
simple aggregator.
By default the output tuples will be what the buffer returns or yields,
and the grouping fields won't be included. This is different from the
aggregators' behavior, which add the output fields to the grouping fields.
Also, only one buffer may follow a GroupBy, in contrast to aggregators, of
which many may be present.
See http://groups.google.com/group/cascading-user/browse_thread/thread/f5e5f56f6500ed53/f55fdd6bba399dcf?lnk=gst&q=scope#f55fdd6bba399dcf
"""
return _function_decorator(args, kwargs, { 'type' : 'buffer' })
def unwrap(*args, **kwargs):
"""Unwraps the tuple into function parameters before calling the function.
This is not implemented on the Java side yet.
"""
return _function_decorator(args, kwargs, { 'parameters' : 'unwrap' })
def tuplein(*args, **kwargs):
return _function_decorator(args, kwargs, { 'parameters' : 'tuple' })
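# Minimal usage sketch; the field names, the tuple index and the `get` accessor are
# illustrative and assume a surrounding PyCascading flow. A map UDF emits one
# (word, length) tuple per input tuple, and a filter UDF keeps only longer words.
@udf_map(produces=['word', 'length'])
def word_with_length(entry):
    word = entry.get(0)
    yield [word, len(word)]
@udf_filter
def long_words_only(entry):
    return len(entry.get(0)) > 3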
|
[
"inspect.isroutine",
"pycascading.pipe.DecoratedFunction.decorate_function"
] |
[((2572, 2633), 'pycascading.pipe.DecoratedFunction.decorate_function', 'DecoratedFunction.decorate_function', (['function_or_callabledict'], {}), '(function_or_callabledict)\n', (2607, 2633), False, 'from pycascading.pipe import DecoratedFunction\n'), ((2888, 2914), 'inspect.isroutine', 'inspect.isroutine', (['args[0]'], {}), '(args[0])\n', (2905, 2914), False, 'import inspect\n')]
|
import package_to_document
import pyDocStr
import os
print(pyDocStr.__file__)
current_path = os.getcwd()
print(current_path)
pyDocStr.build_docstrings_package(
"./pyDocStr/package_to_document",
new_package_path="./pyDocStr/package_documented",
subpackages=True,
level_logger='debug'
)
|
[
"os.getcwd",
"pyDocStr.build_docstrings_package"
] |
[((96, 107), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (105, 107), False, 'import os\n'), ((128, 293), 'pyDocStr.build_docstrings_package', 'pyDocStr.build_docstrings_package', (['"""./pyDocStr/package_to_document"""'], {'new_package_path': '"""./pyDocStr/package_documented"""', 'subpackages': '(True)', 'level_logger': '"""debug"""'}), "('./pyDocStr/package_to_document',\n new_package_path='./pyDocStr/package_documented', subpackages=True,\n level_logger='debug')\n", (161, 293), False, 'import pyDocStr\n')]
|
import types
import json
from io import StringIO
from collections import OrderedDict
from defusedxml import ElementTree as ET
from django.core.serializers.json import DjangoJSONEncoder
from django.http.response import HttpResponseBase
from django.template.loader import get_template
from django.utils.encoding import force_text
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.module_loading import import_string
from django.utils.html import format_html
from pyston.utils.helpers import UniversalBytesIO, serialized_data_to_python
from pyston.utils.datastructures import FieldsetGenerator
from pyston.conf import settings
from .file_generators import CSVGenerator, XLSXGenerator, PDFGenerator, TXTGenerator
def is_collection(data):
return isinstance(data, (list, tuple, set, types.GeneratorType))
def get_default_converters():
"""
Register all converters from settings configuration.
"""
converters = OrderedDict()
for converter_class_path in settings.CONVERTERS:
converter_class = import_string(converter_class_path)()
converters[converter_class.format] = converter_class
return converters
def get_default_converter_name(converters=None):
"""
Gets default converter name
"""
converters = get_default_converters() if converters is None else converters
return list(converters.keys())[0]
def get_converter(result_format, converters=None):
"""
    Gets a converter, returns the class and a content-type.
"""
converters = get_default_converters() if converters is None else converters
if result_format in converters:
return converters.get(result_format)
else:
raise ValueError('No converter found for type {}'.format(result_format))
def get_converter_name_from_request(request, converters=None, input_serialization=False):
"""
Function for determining which converter name to use
for output.
"""
try:
import mimeparse
except ImportError:
mimeparse = None
context_key = 'accept'
if input_serialization:
context_key = 'content_type'
converters = get_default_converters() if converters is None else converters
default_converter_name = get_default_converter_name(converters)
if mimeparse and context_key in request._rest_context:
supported_mime_types = set()
converter_map = {}
preferred_content_type = None
for name, converter_class in converters.items():
if name == default_converter_name:
preferred_content_type = converter_class.media_type
supported_mime_types.add(converter_class.media_type)
converter_map[converter_class.media_type] = name
supported_mime_types = list(supported_mime_types)
if preferred_content_type:
supported_mime_types.append(preferred_content_type)
try:
preferred_content_type = mimeparse.best_match(supported_mime_types,
request._rest_context[context_key])
except ValueError:
pass
default_converter_name = converter_map.get(preferred_content_type, default_converter_name)
return default_converter_name
def get_converter_from_request(request, converters=None, input_serialization=False):
"""
Function for determining which converter name to use
for output.
"""
return get_converter(get_converter_name_from_request(request, converters, input_serialization), converters)
def get_supported_mime_types(converters):
return [converter.media_type for _, converter in converters.items()]
class Converter:
"""
Converter from standard data types to output format (JSON,YAML, Pickle) and from input to python objects
"""
charset = 'utf-8'
media_type = None
format = None
allow_tags = False
@property
def content_type(self):
return '{}; charset={}'.format(self.media_type, self.charset)
def _encode(self, data, options=None, **kwargs):
"""
        Encodes data to an output string. You must implement this method or override the _encode_to_stream method.
"""
raise NotImplementedError
def _decode(self, data, **kwargs):
"""
        Decodes string input data
"""
raise NotImplementedError
def _encode_to_stream(self, output_stream, data, options=None, **kwargs):
"""
Encodes data and writes it to the output stream
"""
output_stream.write(self._encode(data, options=options, **kwargs))
def encode_to_stream(self, output_stream, data, options=None, **kwargs):
self._encode_to_stream(self._get_output_stream(output_stream), data, options=options, **kwargs)
def decode(self, data, **kwargs):
return self._decode(data, **kwargs)
def _get_output_stream(self, output_stream):
return output_stream if isinstance(output_stream, UniversalBytesIO) else UniversalBytesIO(output_stream)
class XMLConverter(Converter):
"""
Converter for XML.
Supports only output conversion
"""
media_type = 'text/xml'
format = 'xml'
root_element_name = 'response'
def _to_xml(self, xml, data):
from pyston.serializer import LAZY_SERIALIZERS
if isinstance(data, LAZY_SERIALIZERS):
self._to_xml(xml, data.serialize())
elif is_collection(data):
for item in data:
xml.startElement('resource', {})
self._to_xml(xml, item)
xml.endElement('resource')
elif isinstance(data, dict):
for key, value in data.items():
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key)
else:
xml.characters(force_text(data))
def _encode(self, data, **kwargs):
if data is not None:
stream = StringIO()
xml = SimplerXMLGenerator(stream, 'utf-8')
xml.startDocument()
xml.startElement(self.root_element_name, {})
self._to_xml(xml, data)
xml.endElement(self.root_element_name)
xml.endDocument()
return stream.getvalue()
else:
return ''
def _decode(self, data, **kwargs):
return ET.fromstring(data)
class LazyDjangoJSONEncoder(DjangoJSONEncoder):
def default(self, o):
from pyston.serializer import LAZY_SERIALIZERS
if isinstance(o, types.GeneratorType):
return tuple(o)
elif isinstance(o, LAZY_SERIALIZERS):
return o.serialize()
else:
return super(LazyDjangoJSONEncoder, self).default(o)
class JSONConverter(Converter):
"""
JSON emitter, understands timestamps.
"""
media_type = 'application/json'
format = 'json'
def _encode_to_stream(self, output_stream, data, options=None, **kwargs):
options = settings.JSON_CONVERTER_OPTIONS if options is None else options
if data is not None:
json.dump(data, output_stream, cls=LazyDjangoJSONEncoder, ensure_ascii=False, **options)
def _decode(self, data, **kwargs):
return json.loads(data)
class GeneratorConverter(Converter):
"""
    Generator converter is more complicated.
    It contains user readable information (headers).
    It supports only output conversion and its output is flat.
    Subclasses must set generator_class as a class attribute.
    This class contains a somewhat lower-level implementation.
"""
generator_class = None
def _render_headers(self, field_name_list):
result = []
if len(field_name_list) == 1 and '' in field_name_list:
return result
for field_name in field_name_list:
result.append(field_name)
return result
def _get_recursive_value_from_row(self, data, key_path):
from pyston.serializer import LAZY_SERIALIZERS
if isinstance(data, LAZY_SERIALIZERS):
return self._get_recursive_value_from_row(data.serialize(), key_path)
elif len(key_path) == 0:
return data
elif isinstance(data, dict):
return self._get_recursive_value_from_row(data.get(key_path[0], ''), key_path[1:])
elif is_collection(data):
return [self._get_recursive_value_from_row(val, key_path) for val in data]
else:
return ''
def _render_dict(self, value, first):
if first:
return '\n'.join(('{}: {}'.format(key, self.render_value(val, False)) for key, val in value.items()))
else:
return '({})'.format(
', '.join(('{}: {}'.format(key, self.render_value(val, False)) for key, val in value.items()))
)
def _render_iterable(self, value, first):
if first:
return '\n'.join((self.render_value(val, False) for val in value))
else:
return '({})'.format(', '.join((self.render_value(val, False) for val in value)))
def render_value(self, value, first=True):
if isinstance(value, dict):
return self._render_dict(value, first)
elif is_collection(value):
return self._render_iterable(value, first)
else:
return force_text(value)
def _get_value_from_row(self, data, field):
return self.render_value(self._get_recursive_value_from_row(data, field.key_path) or '')
def _render_row(self, row, field_name_list):
return (self._get_value_from_row(row, field) for field in field_name_list)
def _render_content(self, field_name_list, converted_data):
constructed_data = converted_data
if not is_collection(constructed_data):
constructed_data = [constructed_data]
return (self._render_row(row, field_name_list) for row in constructed_data)
def _encode_to_stream(self, output_stream, data, resource=None, requested_fields=None, direct_serialization=False,
**kwargs):
fieldset = FieldsetGenerator(
resource,
force_text(requested_fields) if requested_fields is not None else None,
direct_serialization=direct_serialization
).generate()
self.generator_class().generate(
self._render_headers(fieldset),
self._render_content(fieldset, data),
output_stream
)
class CSVConverter(GeneratorConverter):
"""
Converter for CSV response.
Supports only output conversion
"""
generator_class = CSVGenerator
media_type = 'text/csv'
format = 'csv'
allow_tags = True
class XLSXConverter(GeneratorConverter):
"""
Converter for XLSX response.
    The xlsxwriter library must be installed to use it.
Supports only output conversion
"""
generator_class = XLSXGenerator
media_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
format = 'xlsx'
allow_tags = True
class PDFConverter(GeneratorConverter):
"""
Converter for PDF response.
For its use must be installed library pisa
Supports only output conversion
"""
generator_class = PDFGenerator
media_type = 'application/pdf'
format = 'pdf'
class TXTConverter(GeneratorConverter):
"""
Converter for TXT response.
Supports only output conversion
"""
generator_class = TXTGenerator
media_type = 'plain/text'
format = 'txt'
allow_tags = True
class HTMLConverter(Converter):
"""
Converter for HTML.
Supports only output conversion and should be used only for debug
"""
media_type = 'text/html'
format = 'html'
template_name = 'pyston/html_converter.html'
def _get_put_form(self, resource, obj):
from pyston.resource import BaseObjectResource
return (
resource._get_form(inst=obj)
if isinstance(resource, BaseObjectResource) and resource.has_put_permission(obj=obj)
else None
)
def _get_post_form(self, resource, obj):
from pyston.resource import BaseObjectResource
return (
resource._get_form(inst=obj)
if isinstance(resource, BaseObjectResource) and resource.has_post_permission(obj=obj)
else None
)
def _get_forms(self, resource, obj):
return {
'post': self._get_post_form(resource, obj),
'put': self._get_put_form(resource, obj),
}
def _get_converter(self, resource):
return JSONConverter()
def _get_permissions(self, resource, obj):
return {
'post': resource.has_post_permission(obj=obj),
'get': resource.has_get_permission(obj=obj),
'put': resource.has_put_permission(obj=obj),
'delete': resource.has_delete_permission(obj=obj),
'head': resource.has_head_permission(obj=obj),
'options': resource.has_options_permission(obj=obj),
} if resource else {}
def _update_headers(self, http_headers, resource, converter):
http_headers['Content-Type'] = converter.content_type
return http_headers
def encode_to_stream(self, output_stream, data, options=None, **kwargs):
assert output_stream is not HttpResponseBase, 'Output stream must be http response'
self._get_output_stream(output_stream).write(
self._encode(data, response=output_stream, options=options, **kwargs)
)
def _convert_url_to_links(self, data):
if isinstance(data, list):
return [self._convert_url_to_links(val) for val in data]
elif isinstance(data, dict):
return OrderedDict((
(key, format_html('<a href=\'{0}\'>{0}</a>', val) if key == 'url' else self._convert_url_to_links(val))
for key, val in data.items()
))
else:
return data
def _encode(self, data, response=None, http_headers=None, resource=None, result=None, **kwargs):
from pyston.resource import BaseObjectResource
http_headers = {} if http_headers is None else http_headers.copy()
converter = self._get_converter(resource)
http_headers = self._update_headers(http_headers, resource, converter)
obj = (
resource._get_obj_or_none() if isinstance(resource, BaseObjectResource) and resource.has_permission()
else None
)
kwargs.update({
'http_headers': http_headers,
'resource': resource,
})
data_stream = UniversalBytesIO()
converter._encode_to_stream(data_stream, self._convert_url_to_links(serialized_data_to_python(data)), **kwargs)
context = kwargs.copy()
context.update({
'permissions': self._get_permissions(resource, obj),
'forms': self._get_forms(resource, obj),
'output': data_stream.getvalue(),
'name': resource._get_name() if resource and resource.has_permission() else response.status_code
})
        # All responses get status code 200 here, because a response could otherwise carry a status code
        # without content (e.g. 204) and the browser wouldn't render it
response.status_code = 200
return get_template(self.template_name).render(context, request=resource.request if resource else None)
|
[
"django.utils.module_loading.import_string",
"io.StringIO",
"json.dump",
"json.loads",
"pyston.utils.helpers.serialized_data_to_python",
"django.utils.html.format_html",
"pyston.utils.helpers.UniversalBytesIO",
"django.utils.xmlutils.SimplerXMLGenerator",
"collections.OrderedDict",
"django.utils.encoding.force_text",
"django.template.loader.get_template",
"mimeparse.best_match",
"defusedxml.ElementTree.fromstring"
] |
[((954, 967), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (965, 967), False, 'from collections import OrderedDict\n'), ((6359, 6378), 'defusedxml.ElementTree.fromstring', 'ET.fromstring', (['data'], {}), '(data)\n', (6372, 6378), True, 'from defusedxml import ElementTree as ET\n'), ((7239, 7255), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (7249, 7255), False, 'import json\n'), ((14619, 14637), 'pyston.utils.helpers.UniversalBytesIO', 'UniversalBytesIO', ([], {}), '()\n', (14635, 14637), False, 'from pyston.utils.helpers import UniversalBytesIO, serialized_data_to_python\n'), ((1047, 1082), 'django.utils.module_loading.import_string', 'import_string', (['converter_class_path'], {}), '(converter_class_path)\n', (1060, 1082), False, 'from django.utils.module_loading import import_string\n'), ((2942, 3020), 'mimeparse.best_match', 'mimeparse.best_match', (['supported_mime_types', 'request._rest_context[context_key]'], {}), '(supported_mime_types, request._rest_context[context_key])\n', (2962, 3020), False, 'import mimeparse\n'), ((5002, 5033), 'pyston.utils.helpers.UniversalBytesIO', 'UniversalBytesIO', (['output_stream'], {}), '(output_stream)\n', (5018, 5033), False, 'from pyston.utils.helpers import UniversalBytesIO, serialized_data_to_python\n'), ((5955, 5965), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5963, 5965), False, 'from io import StringIO\n'), ((5985, 6021), 'django.utils.xmlutils.SimplerXMLGenerator', 'SimplerXMLGenerator', (['stream', '"""utf-8"""'], {}), "(stream, 'utf-8')\n", (6004, 6021), False, 'from django.utils.xmlutils import SimplerXMLGenerator\n'), ((7095, 7188), 'json.dump', 'json.dump', (['data', 'output_stream'], {'cls': 'LazyDjangoJSONEncoder', 'ensure_ascii': '(False)'}), '(data, output_stream, cls=LazyDjangoJSONEncoder, ensure_ascii=\n False, **options)\n', (7104, 7188), False, 'import json\n'), ((9316, 9333), 'django.utils.encoding.force_text', 'force_text', (['value'], {}), '(value)\n', (9326, 9333), False, 'from django.utils.encoding import force_text\n'), ((14714, 14745), 'pyston.utils.helpers.serialized_data_to_python', 'serialized_data_to_python', (['data'], {}), '(data)\n', (14739, 14745), False, 'from pyston.utils.helpers import UniversalBytesIO, serialized_data_to_python\n'), ((15305, 15337), 'django.template.loader.get_template', 'get_template', (['self.template_name'], {}), '(self.template_name)\n', (15317, 15337), False, 'from django.template.loader import get_template\n'), ((5847, 5863), 'django.utils.encoding.force_text', 'force_text', (['data'], {}), '(data)\n', (5857, 5863), False, 'from django.utils.encoding import force_text\n'), ((10132, 10160), 'django.utils.encoding.force_text', 'force_text', (['requested_fields'], {}), '(requested_fields)\n', (10142, 10160), False, 'from django.utils.encoding import force_text\n'), ((13763, 13804), 'django.utils.html.format_html', 'format_html', (['"""<a href=\'{0}\'>{0}</a>"""', 'val'], {}), '("<a href=\'{0}\'>{0}</a>", val)\n', (13774, 13804), False, 'from django.utils.html import format_html\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 22:20:52 2018
@author: Srinivas
"""
import numpy as np
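# Sum all integers in [1, 1000) that are divisible by 3 or 5.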
X = np.arange(1, 1000)
Y = X[(X % 3 == 0) | (X % 5 == 0)]
Z = sum(Y)
print(Z)
|
[
"numpy.arange"
] |
[((121, 139), 'numpy.arange', 'np.arange', (['(1)', '(1000)'], {}), '(1, 1000)\n', (130, 139), True, 'import numpy as np\n')]
|
from plenum.server.replica import Replica
from plenum.test import waits
from plenum.test.delayers import cDelay, chk_delay
from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count
from stp_core.loop.eventually import eventually
nodeCount = 4
CHK_FREQ = 5
# LOG_SIZE in checkpoints corresponds to the catch-up lag in checkpoints
LOG_SIZE = 2 * CHK_FREQ
def test_stashed_messages_processed_on_backup_replica_ordering_resumption(
looper, chkFreqPatched, reqs_for_checkpoint,
one_replica_and_others_in_backup_instance,
sdk_pool_handle, sdk_wallet_client, view_change_done,
txnPoolNodeSet):
"""
Verifies resumption of ordering 3PC-batches on a backup replica
on detection of a lag in checkpoints in case it is detected after
some 3PC-messages related to the next checkpoint have already been stashed
    as lying outside of the watermarks.
Please note that to verify this case the config is set up so that
LOG_SIZE == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
"""
slow_replica, other_replicas = one_replica_and_others_in_backup_instance
view_no = slow_replica.viewNo
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Don't receive Commits from two replicas
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[0].node.name))
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[1].node.name))
# Send a request for which the replica will not be able to order the batch
# due to an insufficient count of Commits
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Receive further Commits from now on
slow_replica.node.nodeIbStasher.drop_delayeds()
slow_replica.node.nodeIbStasher.resetDelays()
# Send requests but in a quantity insufficient
# for catch-up number of checkpoints
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
reqs_for_checkpoint - 3)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Don't receive Checkpoints
slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1))
# Send more requests to reach catch-up number of checkpoints
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
reqs_for_checkpoint)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Ensure that there are no 3PC-messages stashed
# as laying outside of the watermarks
assert slow_replica.stasher.num_stashed_watermarks == 0
# Send a request for which the batch will be outside of the watermarks
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Ensure that the replica has not ordered any batches
# after the very first one
assert slow_replica.last_ordered_3pc == (view_no, 2)
# Ensure that the watermarks have not been shifted since the view start
assert slow_replica.h == 0
assert slow_replica.H == LOG_SIZE
# Ensure that there are some quorumed stashed checkpoints
assert slow_replica.stashed_checkpoints_with_quorum()
# Ensure that now there are 3PC-messages stashed
    # as lying outside of the watermarks
assert slow_replica.stasher.num_stashed_watermarks == incoming_3pc_msgs_count(len(txnPoolNodeSet))
# Receive belated Checkpoints
slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds()
# Ensure that the replica has ordered the batch for the last sent request
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Ensure that the watermarks have been shifted so that the lower watermark
# now equals to the end of the last stable checkpoint in the instance
assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE
# Ensure that now there are no quorumed stashed checkpoints
assert not slow_replica.stashed_checkpoints_with_quorum()
# Ensure that now there are no 3PC-messages stashed
    # as lying outside of the watermarks
assert slow_replica.stasher.num_stashed_watermarks == 0
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
|
[
"plenum.test.delayers.chk_delay",
"plenum.test.helper.assertExp",
"plenum.test.waits.expectedTransactionExecutionTime",
"plenum.test.helper.sdk_send_random_requests",
"plenum.test.delayers.cDelay"
] |
[((1268, 1339), 'plenum.test.helper.sdk_send_random_requests', 'sdk_send_random_requests', (['looper', 'sdk_pool_handle', 'sdk_wallet_client', '(1)'], {}), '(looper, sdk_pool_handle, sdk_wallet_client, 1)\n', (1292, 1339), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((1993, 2064), 'plenum.test.helper.sdk_send_random_requests', 'sdk_send_random_requests', (['looper', 'sdk_pool_handle', 'sdk_wallet_client', '(1)'], {}), '(looper, sdk_pool_handle, sdk_wallet_client, 1)\n', (2017, 2064), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((2376, 2519), 'plenum.test.helper.sdk_send_random_requests', 'sdk_send_random_requests', (['looper', 'sdk_pool_handle', 'sdk_wallet_client', '(Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_for_checkpoint - 3)'], {}), '(looper, sdk_pool_handle, sdk_wallet_client, \n Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_for_checkpoint - 3)\n', (2400, 2519), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((2808, 2901), 'plenum.test.helper.sdk_send_random_requests', 'sdk_send_random_requests', (['looper', 'sdk_pool_handle', 'sdk_wallet_client', 'reqs_for_checkpoint'], {}), '(looper, sdk_pool_handle, sdk_wallet_client,\n reqs_for_checkpoint)\n', (2832, 2901), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((3231, 3302), 'plenum.test.helper.sdk_send_random_requests', 'sdk_send_random_requests', (['looper', 'sdk_pool_handle', 'sdk_wallet_client', '(1)'], {}), '(looper, sdk_pool_handle, sdk_wallet_client, 1)\n', (3255, 3302), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((5228, 5299), 'plenum.test.helper.sdk_send_random_requests', 'sdk_send_random_requests', (['looper', 'sdk_pool_handle', 'sdk_wallet_client', '(1)'], {}), '(looper, sdk_pool_handle, sdk_wallet_client, 1)\n', (5252, 5299), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((1690, 1749), 'plenum.test.delayers.cDelay', 'cDelay', ([], {'instId': '(1)', 'sender_filter': 'other_replicas[0].node.name'}), '(instId=1, sender_filter=other_replicas[0].node.name)\n', (1696, 1749), False, 'from plenum.test.delayers import cDelay, chk_delay\n'), ((1802, 1861), 'plenum.test.delayers.cDelay', 'cDelay', ([], {'instId': '(1)', 'sender_filter': 'other_replicas[1].node.name'}), '(instId=1, sender_filter=other_replicas[1].node.name)\n', (1808, 1861), False, 'from plenum.test.delayers import cDelay, chk_delay\n'), ((2083, 2132), 'plenum.test.waits.expectedTransactionExecutionTime', 'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (2121, 2132), False, 'from plenum.test import waits\n'), ((2591, 2640), 'plenum.test.waits.expectedTransactionExecutionTime', 'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (2629, 2640), False, 'from plenum.test import waits\n'), ((2717, 2736), 'plenum.test.delayers.chk_delay', 'chk_delay', ([], {'instId': '(1)'}), '(instId=1)\n', (2726, 2736), False, 'from plenum.test.delayers import cDelay, chk_delay\n'), ((2945, 2994), 'plenum.test.waits.expectedTransactionExecutionTime', 'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (2983, 2994), False, 'from plenum.test import waits\n'), ((3321, 3370), 'plenum.test.waits.expectedTransactionExecutionTime', 
'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (3359, 3370), False, 'from plenum.test import waits\n'), ((1390, 1446), 'plenum.test.helper.assertExp', 'assertExp', (['(slow_replica.last_ordered_3pc == (view_no, 2))'], {}), '(slow_replica.last_ordered_3pc == (view_no, 2))\n', (1399, 1446), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((1540, 1589), 'plenum.test.waits.expectedTransactionExecutionTime', 'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (1578, 1589), False, 'from plenum.test import waits\n'), ((4220, 4343), 'plenum.test.helper.assertExp', 'assertExp', (['(slow_replica.last_ordered_3pc == (view_no, (Replica.\n STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1))'], {}), '(slow_replica.last_ordered_3pc == (view_no, (Replica.\n STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1))\n', (4229, 4343), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((4469, 4518), 'plenum.test.waits.expectedTransactionExecutionTime', 'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (4507, 4518), False, 'from plenum.test import waits\n'), ((5350, 5473), 'plenum.test.helper.assertExp', 'assertExp', (['(slow_replica.last_ordered_3pc == (view_no, (Replica.\n STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2))'], {}), '(slow_replica.last_ordered_3pc == (view_no, (Replica.\n STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2))\n', (5359, 5473), False, 'from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count\n'), ((5599, 5648), 'plenum.test.waits.expectedTransactionExecutionTime', 'waits.expectedTransactionExecutionTime', (['nodeCount'], {}), '(nodeCount)\n', (5637, 5648), False, 'from plenum.test import waits\n')]
|
from mpi4py import MPI
import numpy as np
import mpids.MPInumpy as mpi_np
if __name__ == "__main__":
#Capture default communicator, MPI process rank, and number of MPI processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
note = "Note: creation routines are using their default MPI related kwargs."
note += "\nDefault kwargs:"
note += " routine(..., comm=MPI.COMM_WORLD, root=0, dist='b')\n"
print(note) if rank == 0 else None
    # arange: evenly spaced values within a specified interval
print('From arange(start, stop, step) Routine') if rank == 0 else None
mpi_arange = mpi_np.arange(size * 5)
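    # The call above uses the defaults described in the note; written out explicitly it reads
    #   mpi_np.arange(size * 5, comm=MPI.COMM_WORLD, root=0, dist='b')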
print('Local Arange Result Rank {}: {}'.format(rank, mpi_arange))
print() if rank == 0 else None
|
[
"mpids.MPInumpy.arange"
] |
[((640, 663), 'mpids.MPInumpy.arange', 'mpi_np.arange', (['(size * 5)'], {}), '(size * 5)\n', (653, 663), True, 'import mpids.MPInumpy as mpi_np\n')]
|
from discord.ext import commands
from peony import PeonyClient
from datetime import datetime
import discord
import asyncio
import json
from lxml import html
import html as htmlc
import traceback
class Twitter:
""" Twitter stream commands """
def __init__(self, bot):
self.bot = bot
self.tweetson = True
with open("twitter.json") as f:
self.track = json.load(f)
self.pclient = PeonyClient(**self.bot.credentials['Twitter'])
self.bot.twitask = self.bot.loop.create_task(self.twat())
def __unload(self):
self.tweetson = False
self.bot.twitask.cancel()
async def _save(self):
with await self.bot.configlock:
with open('twitter.json',"w",encoding='utf-8') as f:
json.dump(self.track,f,ensure_ascii=True,
sort_keys=True,indent=4, separators=(',',':'))
async def twat(self):
""" Twitter tracker function """
await self.bot.wait_until_ready()
# Retrieve list of IDs to track
ids = ",".join([str(i[1]["id"]) for i in self.track.items()])
footericon = "https://abs.twimg.com/icons/apple-touch-icon-192x192.png"
ts = self.pclient.stream.statuses.filter.post(follow=ids)
async with ts as stream:
print(f"Tracking {len(self.track.items())} twitter users.")
async for t in stream:
# Break loop if bot not running.
if self.bot.is_closed():
break
# if tweet output is disabled, break the loop.
if not self.tweetson:
break
# discard malformed tweets
if not hasattr(t,"user"):
continue
# Set destination or discard non-tracked
u = t.user
if u.id_str in ids:
s = self.track.items()
chanid = [i[1]["channel"] for i in s if i[1]["id"] == int(u.id_str)][0]
destin = self.bot.get_channel(chanid)
else:
continue
# discard retweets & adverts
if hasattr(t,'retweeted_status') or t.text.startswith(("rt",'ad')):
continue
# discard replies
if t["in_reply_to_status_id"] is not None:
continue
if t.truncated:
txt = htmlc.unescape(t.extended_tweet.full_text)
ents = dict(t.entities)
ents.update(dict(t.extended_tweet.entities))
else:
ents = t.entities
txt = htmlc.unescape(t.text)
if "coral" in txt:
continue
if "hashtags" in ents:
for i in ents["hashtags"]:
frnt = f"[#{i.text}]"
bk = f"(https://twitter.com/hashtag/{i.text})"
rpl = frnt + bk
txt = txt.replace(f'#{i.text}',rpl)
if "urls" in ents:
for i in ents["urls"]:
txt = txt.replace(i.url,i.expanded_url)
if "user_mentions" in ents:
for i in ents["user_mentions"]:
frnt = f"[@{i.screen_name}]"
bk = f"(https://twitter.com/{i.screen_name})"
rpl = frnt+bk
txt = txt.replace(f'@{i.screen_name}',rpl)
e = discord.Embed(description=txt)
if hasattr(u,"url"):
e.url = u.url
if hasattr(u,"profile_link_color"):
e.color = int(u.profile_link_color,16)
e.set_thumbnail(url=u.profile_image_url)
e.timestamp = datetime.strptime(t.created_at,"%a %b %d %H:%M:%S %z %Y")
e.set_footer(icon_url=footericon,text="Twitter")
lk = f"http://www.twitter.com/{u.screen_name}/status/{t.id_str}"
e.title = f"{u.name} (@{u.screen_name})"
e.url = lk
# Extract entities to lists
photos = []
videos = []
def extract_entities(alist):
for i in alist:
if i.type in ["photo","animated_gif"]:
photos.append(i.media_url)
elif i.type == "video":
videos.append(i.video_info.variants[1].url)
else:
print("Unrecognised TWITTER MEDIA TYPE")
print(i)
# Fuck this nesting kthx.
if hasattr(t,"extended_entities") and hasattr (t.extended_entities,"media"):
extract_entities(t.extended_entities.media)
if hasattr(t,"quoted_status"):
if hasattr(t.quoted_status,"extended_entities"):
if hasattr(t.quoted_status.extended_entities,"media"):
extract_entities(t.quoted_status.extended_entities.media)
# Set image if one image, else add embed field.
if len(photos) == 1:
e.set_image(url=photos[0])
elif len(photos) > 1:
en = enumerate(photos,start=1)
v = ", ".join([f"[{i}]({j})" for i, j in en])
e.add_field(name="Attached Photos",value=v,inline=True)
# Add embed field for videos
if videos:
if len(videos) > 1:
en = enumerate(videos,start=1)
v = ", ".join([f"[{i}]({j})" for i, j in en])
e.add_field(name="Attached Videos",value=v,inline=True)
else:
await destin.send(embed=e)
await destin.send(videos[0])
else:
await destin.send(embed=e)
@commands.group(aliases=["tweet","tweets","checkdelay","twstatus"],invoke_without_command=True)
@commands.is_owner()
async def twitter(self,ctx):
""" Check delay and status of twitter tracker """
e = discord.Embed(title="Twitter Status",color=0x7EB3CD)
e.set_thumbnail(url="https://i.imgur.com/jSEtorp.png")
if self.tweetson:
e.description = "```diff\n+ ENABLED```"
else:
e.description = "```diff\n- DISABLED```"
e.color = 0xff0000
footer = "Tweets are not currently being output."
e.set_footer(text=footer)
for i in set([i[1]["channel"] for i in self.track.items()]):
# Get Channel name from ID in JSON
fname = f"#{self.bot.get_channel(int(i)).name} Tracker"
# Find all tracks for this channel.
fvalue = "\n".join([c[0] for c in self.track.items() if c[1]["channel"] == i])
e.add_field(name=fname,value=fvalue)
        if await self.bot.is_owner(ctx.author):
x = self.bot.twitask._state
if x == "PENDING":
v = "✅ Task running."
elif x == "CANCELLED":
v = "⚠ Task Cancelled."
elif x == "FINISHED":
self.bot.twitask.print_stack()
v = "⁉ Task Finished"
z = self.bot.twitask.exception()
else:
v = f"❔ `{self.bot.twitask._state}`"
e.add_field(name="Debug Info",value=v,inline=False)
try:
e.add_field(name="Exception",value=z,inline=False)
except NameError:
pass
await ctx.send(embed=e)
@twitter.command(name="on",aliases=["start"])
@commands.is_owner()
async def _on(self,ctx):
""" Turn tweet output on """
if not self.tweetson:
self.tweetson = True
await ctx.send("<:tweet:332196044769198093> Twitter output has been enabled.")
self.bot.twitask = self.bot.loop.create_task(self.twat())
elif self.bot.twitask._state in ["FINISHED","CANCELLED"]:
e = discord.Embed(color=0x7EB3CD)
e.description = f"<:tweet:332196044769198093> Restarting {self.bot.twitask._state}\
task after exception {self.bot.twitask.exception()}."
await ctx.send(embed=e)
self.bot.twitask = self.bot.loop.create_task(self.twat())
else:
await ctx.send("<:tweet:332196044769198093> Twitter output already enabled.")
@twitter.command(name="off",aliases=["stop"])
@commands.is_owner()
async def _off(self,ctx):
""" Turn tweet output off """
if self.tweetson:
self.tweetson = False
await ctx.send("<:tweet:332196044769198093> Twitter output has been disabled.")
else:
await ctx.send("<:tweet:332196044769198093> Twitter output already disabled.")
@twitter.command(name="add")
@commands.is_owner()
async def _add(self,ctx,username):
""" Add user to track for this channel """
params = {"user_name":username,"submit":"GET+USER+ID"}
async with self.bot.session.get("http://gettwitterid.com/",params=params) as resp:
if resp.status != 200:
await ctx.send("🚫 HTTP Error {resp.status} try again later.")
return
tree = html.fromstring(await resp.text())
try:
id = tree.xpath('.//tr[1]/td[2]/p/text()')[0]
except IndexError:
await ctx.send("🚫 Couldn't find user with that name.")
self.track[username] = {"id":int(id),"channel":ctx.channel.id}
await self._save()
await ctx.send(f"<:tweet:332196044769198093> {username} will be tracked in {ctx.channel.mention} from next restart.")
@twitter.command(name="del")
@commands.is_owner()
async def _del(self,ctx,username):
""" Deletes a user from the twitter tracker """
        trk = {k.lower(): k for k in self.track.keys()}
if username.lower() in trk:
self.track.pop(trk[username.lower()])
await self._save()
def setup(bot):
bot.add_cog(Twitter(bot))
|
[
"json.dump",
"json.load",
"html.unescape",
"discord.Embed",
"datetime.datetime.strptime",
"discord.ext.commands.group",
"peony.PeonyClient",
"discord.ext.commands.is_owner"
] |
[((4798, 4900), 'discord.ext.commands.group', 'commands.group', ([], {'aliases': "['tweet', 'tweets', 'checkdelay', 'twstatus']", 'invoke_without_command': '(True)'}), "(aliases=['tweet', 'tweets', 'checkdelay', 'twstatus'],\n invoke_without_command=True)\n", (4812, 4900), False, 'from discord.ext import commands\n'), ((4896, 4915), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (4913, 4915), False, 'from discord.ext import commands\n'), ((6263, 6282), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (6280, 6282), False, 'from discord.ext import commands\n'), ((7022, 7041), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (7039, 7041), False, 'from discord.ext import commands\n'), ((7364, 7383), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (7381, 7383), False, 'from discord.ext import commands\n'), ((8153, 8172), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (8170, 8172), False, 'from discord.ext import commands\n'), ((412, 458), 'peony.PeonyClient', 'PeonyClient', ([], {}), "(**self.bot.credentials['Twitter'])\n", (423, 458), False, 'from peony import PeonyClient\n'), ((5007, 5059), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Twitter Status"""', 'color': '(8303565)'}), "(title='Twitter Status', color=8303565)\n", (5020, 5059), False, 'import discord\n'), ((381, 393), 'json.load', 'json.load', (['f'], {}), '(f)\n', (390, 393), False, 'import json\n'), ((723, 819), 'json.dump', 'json.dump', (['self.track', 'f'], {'ensure_ascii': '(True)', 'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ':')"}), "(self.track, f, ensure_ascii=True, sort_keys=True, indent=4,\n separators=(',', ':'))\n", (732, 819), False, 'import json\n'), ((2885, 2915), 'discord.Embed', 'discord.Embed', ([], {'description': 'txt'}), '(description=txt)\n', (2898, 2915), False, 'import discord\n'), ((3119, 3177), 'datetime.datetime.strptime', 'datetime.strptime', (['t.created_at', '"""%a %b %d %H:%M:%S %z %Y"""'], {}), "(t.created_at, '%a %b %d %H:%M:%S %z %Y')\n", (3136, 3177), False, 'from datetime import datetime\n'), ((6606, 6634), 'discord.Embed', 'discord.Embed', ([], {'color': '(8303565)'}), '(color=8303565)\n', (6619, 6634), False, 'import discord\n'), ((2084, 2126), 'html.unescape', 'htmlc.unescape', (['t.extended_tweet.full_text'], {}), '(t.extended_tweet.full_text)\n', (2098, 2126), True, 'import html as htmlc\n'), ((2255, 2277), 'html.unescape', 'htmlc.unescape', (['t.text'], {}), '(t.text)\n', (2269, 2277), True, 'import html as htmlc\n')]
|
"""
Unit test for table_suppression module
Original Issues: DC-1360
As part of the controlled tier, some table data will be entirely suppressed. When suppression happens, the table
needs to maintain it’s expected schema, but drop all of its data.
Apply table suppression to note, location, provider, and care_site tables.
table schemas should remain intact and match their data_steward/resource_files/schemas/<table>.json schema definition.
Should be added to list of CONTROLLED_TIER_DEID_CLEANING_CLASSES in data_steward/cdr_cleaner/clean_cdr.py
all data should be dropped from the tables
sandboxing not required
"""
# Python imports
import unittest
# Project imports
from cdr_cleaner.cleaning_rules.table_suppression import TableSuppression, tables, TABLE_SUPPRESSION_QUERY
from constants.cdr_cleaner import clean_cdr as clean_consts
import constants.cdr_cleaner.clean_cdr as cdr_consts
class TableSuppressionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.project_id = 'test_project'
self.dataset_id = 'test_dataset'
self.sandbox_id = 'test_sandbox'
self.client = None
self.rule_instance = TableSuppression(self.project_id, self.dataset_id,
self.sandbox_id)
self.assertEqual(self.rule_instance.project_id, self.project_id)
self.assertEqual(self.rule_instance.dataset_id, self.dataset_id)
self.assertEqual(self.rule_instance.sandbox_dataset_id, self.sandbox_id)
def test_setup_rule(self):
# Test
self.rule_instance.setup_rule(self.client)
def test_get_query_specs(self):
# Pre conditions
self.assertEqual(self.rule_instance.affected_datasets,
[clean_consts.CONTROLLED_TIER_DEID])
# Test
results_list = self.rule_instance.get_query_specs()
# Post conditions
expected_query_list = []
for table in tables:
query = dict()
query[cdr_consts.QUERY] = TABLE_SUPPRESSION_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
table=table,
)
expected_query_list.append(query)
self.assertEqual(results_list, expected_query_list)
|
[
"cdr_cleaner.cleaning_rules.table_suppression.TableSuppression",
"cdr_cleaner.cleaning_rules.table_suppression.TABLE_SUPPRESSION_QUERY.render"
] |
[((1378, 1445), 'cdr_cleaner.cleaning_rules.table_suppression.TableSuppression', 'TableSuppression', (['self.project_id', 'self.dataset_id', 'self.sandbox_id'], {}), '(self.project_id, self.dataset_id, self.sandbox_id)\n', (1394, 1445), False, 'from cdr_cleaner.cleaning_rules.table_suppression import TableSuppression, tables, TABLE_SUPPRESSION_QUERY\n'), ((2236, 2340), 'cdr_cleaner.cleaning_rules.table_suppression.TABLE_SUPPRESSION_QUERY.render', 'TABLE_SUPPRESSION_QUERY.render', ([], {'project_id': 'self.project_id', 'dataset_id': 'self.dataset_id', 'table': 'table'}), '(project_id=self.project_id, dataset_id=self.\n dataset_id, table=table)\n', (2266, 2340), False, 'from cdr_cleaner.cleaning_rules.table_suppression import TableSuppression, tables, TABLE_SUPPRESSION_QUERY\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 22:00:03 2016
@author: Sirindil
"""
import os
import sys
import time
import shlex
import random
import string
import struct
import platform
import subprocess
import ctypes
from ctypes import windll, byref, wintypes, Structure, c_ulong
from ctypes.wintypes import SMALL_RECT
from colorama import init, Fore, Back, Style, Cursor
import win32com.client
import win32api, win32con
import re
from functools import partial
import winsound
#import pywinauto
init(strip=not sys.stdout.isatty()) # strip colors if stdout is redirected
user32 = ctypes.WinDLL('user32', use_last_error=True)
class POINT(Structure):
_fields_ = [("x", c_ulong), ("y", c_ulong)]
def queryMousePosition():
pt = POINT()
windll.user32.GetCursorPos(byref(pt))
return { "x": pt.x, "y": pt.y}
def click(x,y):
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_UNICODE = 0x0004
KEYEVENTF_SCANCODE = 0x0008
MAPVK_VK_TO_VSC = 0
# msdn.microsoft.com/en-us/library/dd375731
VK_TAB = 0x09
VK_MENU = 0x12
VK_RETURN = 0x0D
VK_CONTROL = 0x11
VK_D = 0x44
# C struct definitions
wintypes.ULONG_PTR = wintypes.WPARAM
class MOUSEINPUT(ctypes.Structure):
_fields_ = (("dx", wintypes.LONG),
("dy", wintypes.LONG),
("mouseData", wintypes.DWORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (("wVk", wintypes.WORD),
("wScan", wintypes.WORD),
("dwFlags", wintypes.DWORD),
("time", wintypes.DWORD),
("dwExtraInfo", wintypes.ULONG_PTR))
def __init__(self, *args, **kwds):
super(KEYBDINPUT, self).__init__(*args, **kwds)
# some programs use the scan code even if KEYEVENTF_SCANCODE
        # isn't set in dwFlags, so attempt to map the correct code.
if not self.dwFlags & KEYEVENTF_UNICODE:
self.wScan = user32.MapVirtualKeyExW(self.wVk,
MAPVK_VK_TO_VSC, 0)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (("uMsg", wintypes.DWORD),
("wParamL", wintypes.WORD),
("wParamH", wintypes.WORD))
class INPUT(ctypes.Structure):
class _INPUT(ctypes.Union):
_fields_ = (("ki", KEYBDINPUT),
("mi", MOUSEINPUT),
("hi", HARDWAREINPUT))
_anonymous_ = ("_input",)
_fields_ = (("type", wintypes.DWORD),
("_input", _INPUT))
LPINPUT = ctypes.POINTER(INPUT)
def _check_count(result, func, args):
if result == 0:
raise ctypes.WinError(ctypes.get_last_error())
return args
user32.SendInput.errcheck = _check_count
user32.SendInput.argtypes = (wintypes.UINT, # nInputs
LPINPUT, # pInputs
ctypes.c_int) # cbSize
# Functions
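# PressKey/ReleaseKey fill an INPUT structure with the given virtual-key code and hand it to
# user32.SendInput, so the key press/release is injected at the OS level.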
def PressKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
ki=KEYBDINPUT(wVk=hexKeyCode))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
x = INPUT(type=INPUT_KEYBOARD,
ki=KEYBDINPUT(wVk=hexKeyCode,
dwFlags=KEYEVENTF_KEYUP))
user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))
def AltEnter():
"""Press Alt+Tab and hold Alt key for 2 seconds
in order to see the overlay.
"""
PressKey(VK_MENU) # Alt
PressKey(VK_RETURN) # Enter
ReleaseKey(VK_RETURN) # Enter~
time.sleep(0.5)
ReleaseKey(VK_MENU) # Alt~
def CtlD():
    PressKey(VK_CONTROL) # Ctrl
    PressKey(VK_D) # D
    ReleaseKey(VK_D) # D~
    time.sleep(0.5)
    ReleaseKey(VK_CONTROL) # Ctrl~
user32 = ctypes.windll.user32
screensize = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
def terminalSize(): # Windows only
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
    - works on Linux, OS X, Windows, and Cygwin (Windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if tuple_xy is None:
print("default")
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def clear():
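    # ANSI escape sequence: clear the whole screen (2J) and move the cursor to the home position (H).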
sys.stderr.write("\x1b[2J\x1b[H")
mypicture = """\
888888OZZO888888888888MMMMMMMMNNMMMMMMMMMMMMMMMMMMMMMMNNMND8OOZZ$$ZOOOOOOOZZZZZZ
8888888OZO88888888888NMMMMMNNNMMMMMMMMMMMMMMMMMMMMMMMMMMMMNDOOOO$ZZZZZZZZZZZZZ$Z
8888888OOO8888888888NMMMMMMN8888888NNNNMMMMMMMMMMMMMMMMMMMMMN8OO$ZZOOOOOOOOOOOZZ
8888888OZ88888888888MMMMMND8ZZ$7777$O88888DNMMMMMMMMMMMMMMMMMN8OZZZZZZZZZZZZ$Z$$
8888888OOO888888888DMMMNDOZ$7777I???????I77$$DNMMMMMMMMMMMMMMMDOZ$ZOOOOOOOOZZZZZ
8888888OOO888888888NMMNOZ$7II??++++=+++++++?I7ZDNNMMMMMMMMMMMMM8ZZZZOZZZZZZZZ$Z$
88888888O88888OOOOOMMNOZ$7III??++==========++?I$8DDNNMMMMMMMMMMNZZZZOZZZOOZZOZZZ
888888888888888888DNNOZ$$77I??+++============++?IODDNNMMMMMMMMMN8ZZOZOOZZZZZZZZ$
888888888888888888DN8Z$$7I????++=============++??I7$ODMMMMMMMMMMDOZZZZZZZZZZZZZZ
88888888888888NMNDND8NNO$I++====~~~===++++++++???I77Z8NNMMMMMMMMD8OZZZZZZZZZZZZ$
8888888888888MMMMMNNMNNNND87=~~~~~~~~~===++???I???I7ZODNMMMMMMMMNOOOOOOOZZZZZZZ$
DD8888888888DMMMMMNZI?+?$8D8+~~~:::::~~=+????IIIII77$ODNNMMMMMMMM88OOOOOOOOOZZZZ
DD8888888888DDNMMN8I?++==+?+?=~::::+$ODDDDDD88O$77777ODNNMMMMMMMN8888888OOOOOOOO
DDD888888888888NNNO77$7I++=?++=~::~+7$$$ZZOODNNDOZ$$7ZDNNMMMMMMMDDD88888888OOOOO
DDDD88D8DD8DDD8888$ON8O$$II++??=:~~~=+=~==~~~?78D8Z$$$ODNMMMMMMMD88888888888OOOO
NDDDD8DDDDD88D888$78O+~D8D7II7I???=~=+?+===~~~+IODO$77$8DNMMMMMN8888888888888OOO
MNDD8DDDD888DD888$$Z7$7Z$Z7777I7$7+=II7II7II+==?I8O$$$$ONNNNMMMD88888888888888OO
MNNNDDDDDDDDDDD8O7III?+??I7Z7II7$$7?$=::8DNOZ7I?I$$7$$Z8NNNMMMNDD888888888888888
MMNNNDDDDDDDDDD8Z7IIII?II7$ZII$$$$$I++IIOD8ODO7III777$ODNNNNMMNDDDD8888888888888
MMMNNDDDDDDDDDD8$I??????I7?=+?$Z$$7?+++===?I7II???I77$ONMNMNNND888D8888888888888
MMNNNDDDDDDDDDD87I?++++???=::~?ZO$I====++==+?????II77Z8NMMNNMDDDDD88888888888888
MMMNNDNNDDDDDND87?+===~=+I+~===?OZ+====~==+++???II777Z8NMD$$77OD88D8888888888888
MMMNNNNNNNNNNNND7==~:~~+?II=IZD=I$~~~~~~==~===+?I7$$$Z8D8$7I77?8D888D88888888888
MMMMNNNNNNNNNNND7+=~~=+?7I=++++I7I~~~~~~~~~~==+?I7$$ZO8DOO?+I7?8DDDDD88888888888
MMMMMNNNNNNNNNND7+==+?II?~:::=?I?+=~::::~~===++?I7$ZZO8Z$$=:7?8DD8DDDDDDDDDDDDD8
MMMMMMMMNNNNNNND$?++=?I+=~~~:~+?++=~:,,::~==++??I7ZZO8$$$7=:7ZDDD8DD8D8888888888
MMMMMMMMMMNNNNNN7I+=+7$7II??=~~~~===~:,::~=+++?I7$OO8O7I?$I78DDD88DDDD8888888888
MMMMMMMMMMNNNNNN77?=+I$$7$OZ77I+~:~=~::::~==++?I7$O887?+IZ$8DDDDDDDDDDD888888888
MMMMMMMMMMNNNNNNO7I++++??IIIII$Z$?=~~:::~~==+??I7ZODDNO$8DDDDDDDDDDDDD8DD8D88888
MMMMMMMMMMMNMMMMM7I?++====~~:::~~~~~~~~~=====+?I$O8DDNNNNDDDDDDDDDDDDDDDDDD8D888
MMMMMMMMMMMMMMMMM$7I?======~::::~~~~~~~~===+++?7$888DNNNNNDDDDDDDDDDDDDDDDDDDD88
MMMMMMMMMMMMMMMMMNZ$7?++++++=~::~~~~~=~====++?IZOOO8NNNNNNNNNDDDDDDDDDDDDDDDDDD8
MMMMMMMMMMMMMMMMMMZ$I?=~~=====~~~==========+?7$88OODNNNNNNNNNNDDDDDDDDDDDDDDDDD8
MMMMMMMMMMMMMMMMMN7$I+~:::::~=~====+====++?I7$8OOO8NNNNNNNNNNNNDDDDDDDDDDDDDDDD8
MMMMMMMMMMMMMMMMND$?$?==::::::~=++??+???I7$ZOO$$Z8DNNNNNNNNNNNNDDDDDDDDDDDDDDDDD
MMMMMMMMMMMMMMMMMO$??77??+===~++?I77777$$ZZ7777$ZDNNNNNNNNNNNNNNDDDDDDDDDDDDDDDD
MMMMMMNNNMMMMMMMN$$++77?+?I77$$$$$777II?????II7Z8MMMMNNNNNNNNNNNDDDDNDDDDDDDDDDD
MMMMMMNNMMMMMMMMNOZ+=+7======+??????++====+??I$8DMMMMNNNNNNNNNNNDNNNNNDDDDDDDDDD
MMMMMMNNMMMMMMMMMM8+==??=~~~=~~~===~~~~~~==+I7Z8NMMMMMNNNNNNNNNNNNNNNNNNDDDDDDDD
NNMMMMMMMMMMNNNNNMM$=~=??===~====~:::~~~=++?I$ONMMMMMMMMNNNNNNNNNNNNNNNNNDDNDDDD
MMMMNMMMMMMNNNNNNMMMM8$??I7$ZO8888OOZ7III?II$DNNNMMMMMMMMNNNNNNNNNNNNNNNNNDNNDDD
MMMNMMMMMMMMMNNNNNMMMMNNNNNNNNNNNNNNNNNNNNNMMNNMMMMMMMMMMMMNNNNNNNNNNNNNNDDNNDDD
"""
castle = """\
~~~~~~~~~~~~~~~~~~~~~~~~~~~~O?~~+$+?~~~~~~~~~~=?O$~Z7~ZI++~~~~8$Z=$O~$+I?=~~~~~=~==N$$=~IO=$7I=================~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~::8=8$ZI?+I$=?~~~~~~~~~ONN87II7$7?=$+8788?III7ZI+$I~~~~~~~~~~NON+8D8D7$?ZI?~==~~~===~~~~~~~~~~~~:I
~~~~~~~~~~~~~:~:::::::::N8Z$7?+?++I~+~:::::::O8DOMNNMMM=~ZO88D8IMMM7NN?7+~~~~~~~~~~NODDDDD??II+~~~~~~~~~~~~~~~~~~~~~~~:N
~~~~~~~~~~~~::::::::::::D8887I?+I+==$::::::::8NNO$7=O?D7DOMNMDN?8N7ION=NN?~~~~~~~~~NODDOOD$$7?++~~~~~~~~~~~~~~~~~~~~~~ND
:~~~~~~~~~::::::::::::::888$$I??++~:O:~?I=::::DDN=II777~$O8DDND?ZI+~877$$~~~~~~~~$~MD88DOD$Z?+?=~~~~~~~~~~~~~~~~~~~~:MDN
~~~~~~~:::::::::::::::::88O$I???+=++Z$77~~::::DDN+:?=I$~$88N8DNIII?+$I7+?:::~~~~~O?MDDDD8ZZ$7?++~~~~~~~~~~~~~~~~~~~~D7OM
~~~~~~~~::::::::::::::::8OO7??+=~===$777?~::::8DD+++I7$=Z88D8DD?7??I777778D8?~:~:8NMDD88O$Z7I?+~~~~~~~~~~~~~~~~~~~:DMMDN
~~~~~~~:::::::::::::::::OOO$+??=~~~~ONOOD8N:8N8DD+II$$7=Z888DDD?7I?II??$7DDZ8DDDDNDNDDD8ZZZI7+=+~~~~~~~~~~~~~~~~~~:D8NMN
~~~~~~~~::::::::::::::::ZOO7I+==+~~~ZO88ZOOD8D8DD+I77M$=OOONODD+7I7IZ?+OIZDDZZ8DD8DNDD88DZ$II===~::~~~~~~~~:~~~~~~~IMMMM
~~~~~~~~~~~~::::::::::::Z8Z7?+=~Z~~=ONOO88O8DND8D?I?IMZ~OOOD8DD?$I7?M??$?$ZZ8ZDD8DDMDDDDOZ$7IN+=~~:~~~~~~~~~~~~~~::DNMMM
~~~~~~~~~~~~~~~~:::::::=O8Z7?==~=~~+OD8D88DDNN8DD???I77=Z$8O8ONM$DI+I?+$IZZO88ZOODDNDDDOOO7I?I+=OO~~::~~~~~~~~~~~~~N7MDM
==~~~~+I=+=~~~~~~~~~~~:?OOZ7?+=~=~:=OD$888ODDD88NI?I?7I+ODDDDNM??NII??I+II7D8Z88DDDNDD8DOZ$7I?+~Z$+:~~:~I=~:~=8=8MMMMMMM
=====8O8O?$$~~$=$+~=:I$NOOZ7?+=8+~~=8D$OOOZO88O8N+?I?I?~OZZD88N$7?O?+?III$ZOD8ZZZ8DNDDD88O$7++==8MND+~ZD8D=:~=$?8MMMMND8
O88DMNMNNNZ887NMMNDZ88MNOZOI?==N=+~=88Z$ZOOODN8DD???I?I~ZZNMMMMO?$??+?+++$$8D8$ZZO8NDD888Z7I?I=~DMNMMMNMNNI~~IMMMMMMMMMM
8ODDMMMMMNMM8NMMMMMN8ODN88O$?+=+~~==OD7O8Z88DDOON?II??I~OONDNDN?MDI+I+++?778DZZO$ODND8D88O$I?+=:DMMMMMMMMMMNNNMMMMMMMMMM
DNDOMMMONNM7OMMMMMMM7777N8OI??+=~:~=OD$Z$7$Z888DD+?7+II~OODNNNNOMD?I?+=?I$ZONOOZ$88NNDD88OI??+=~DMMMMMMMMMMMMMMMMMMMMMMM
DNDNDNM8NMNNMODMNNDNNNNMO887?=7:::=~Z8Z$Z$8O8DODN??+?$$~NDMMMMO8MOI7I7+?7O88OOODOONNDDDD8$7I7==~NMMMMMMMMMMMMMMMMMMMMMMM
8DN88DDDDNDDONNNDNNDD88DDNDZOII??$Z7MMMDNMMMMMNNDI$7ND8ON88DDNNNO8DO8ZZZODMMMMMMNDMMMMMMNDZZ7ZZ77MMMMMMMMMNNMMMMMMMMMMMM
Z$$ZZOMOMDN778MMMMMNZI7ONND8OO7$$ODDMMMMMMDZZOOOOOOOOIDD8III778MNDZOOOOOO$O8DMMMMMMMMMMMNMD8OZZZOMMMMMMMMMMMMMMMMMMMMMMM
+++INMNMMMM++ZMMMMMZI+ONNND88O7$$$ZONMNMNNMMMMMMMN$O88MD$77777$DNDDODDDMDN8ND8NMMMMMMMNMDMDZOZ77ONMMMMMMMMMMOMMMMMMMMMMM
+++?NMONND87+INMMDO+==ONDDDZ7IZ+=++?8N888NNMMMMM?IM8DNOZ7777II7$8DN8ODDDDN8II7DD8DNNNNNDD8O$I??+DMMMMMMMMMM?++NMMMMMMMMM
++++7DNND8+++==?I=====78DD8Z7I??++=+DN888MMMMM=8ZINDNO$IIII??+???ONNDD8ON8D8DN888DDMNNNDDDZZ$7I+DMMNZ8NNNNZ++?MMMMMMMMMM
++++++88O7I?++++++++===?DD8Z77II???IDN8DDMM$=+8MO7NN8$$77I77777I778DDDNO$ND88D8DDDNNNNNDDDOZ$7??DDI+++I8NOI++$I7NMMMMM8D
++++++++++++++++++++++??DDDZ$77D?IIIDNDD8I=:N+8M?DNO8O$77777777$ZZODDN88ZDO88N88DDNMNNNDNDOZ77I?DDI++?+?+????7N$$MMMMMMM
++++++++++++++++++++++++DDDZZ7INII?IDND7?MD=M+DZDNO8OZ77$7$7777777ZZ8OD8ONONDNDD8NNMNNNNNN8OZ$I???????????????7??$NDDMMM
????????++++++++++++++++DDDOZ$7IIIII$7?D7D8?NINNN$OZ7III7I?II$$III777ZO87N8N8DN8NNDOMNNNND8O$$7I?I??I????I????????IMMMMM
??????????????????+?+?+?DND8Z$7I7I?I+MN8$NO7IONNZ$77???????+???I$$I??I$$ODOMZNNDNNNN88NNNDD8OZ7IIIIIIIIIIIIIIIIIIZZMNMMM
????????????????????????NND8Z$7O77I8ONNDON$7N8NOZ$7?????+?I7$7??????IIIZ7D8DMZNNNN88NMND8ND8ZD$IIIIIIIIII7IIIIIIIIIMMMMM
I?IIIII?????????????????DNN8Z$Z=7I7DONNNOI88MO$$$$$II7$I??II?IIIIIII77I7$Z8N8DMDNMNZNDNNMDNOO8$$7777777777777777IIINMNMM
IIIIIIIIIIIIIIIIIIIIIIIINNN$I$?7$77DONNZ8N8ONI888O$77777777777I7777777ZODDZ$DN$MOMNOMMMNNDNNNZZ77777777777777777777ZNNNM
IIIIIIIIIIIIIIIIIIIIIIIID777?D$ZZ$$O8$?ZNZZO$OOOO7I777ZZ8ZZ$$$Z$$$777I7777$O8$$OD8MOMMMMNDODMDNZ$$$$$$$$$$$$$$$7$$$$77DM
7777777777777777777777O$$I?D8DOOOZZOZ77ZN7N788DDZZZZ$$ZZZZOO8Z$$777III77ZZ8NI$$D$ZNOMMMMNDODD8DNMNZ$$$$$$$$$$$$$$$$$ZMNM
77777777777777777777O7$7INNDDD$OOOZ?N$7$N$$7$$$$I??I$$7??II??III??IIII7IIIII$8ZDZZOM8NMMMD8DD8OOMMNM8ZZZZZZZZZZZZZZZ$OMM
7$$$$$77777777777$7ZIZ77NNNDDDZ8O$7DM$$$O$$$$$$7I???IIII?IIIIIII?I7ZO$IIII77$OO7ZZO8NM$MMD8NDD88ZZNMMNMZZZZZZZZZZZZZZZZD
$$$$$$$$$$$$$$$O$O7Z7I$$MMNNDD7O77O8N$$7DOOZZ$Z7IIIII?I?IIIII7II?IIIII7II7I777$ZOOO8MMM8M88DDD8O8ONDDNNMM8OZOOOZOOZOZZDM
$$$$$$$$$$$$$$Z$7$$NDIZZNNNND877O8O8N$$$ZZZZZ$III$$$Z$IIIIIII77I7III7I777$7$$$ZO8OODMMNNMODNDD88DON8DODNNNNMOOOOOOOOOOOD
ZZZZZZZZZZZOZZ$7$ZZDDIZZZZDNDO7OODO8N8$$ZZZZZ7IIIIIIIIIIIII7ZZ$Z$II7I777I77777$$O8D8NM8MMM8NNDND8OMDDOOO8NMNMMDOOOOOOOOO
ZZZZZZZZOZOZ$$$ZZZ8NNIZZZZZO?8DZOOZM$ZOOOZ$$77I77$IIIIIIIIII777I77I7I77IOZZOO$$$ZZM7NND8MDMNO88888MDD8888ODMMDNMD8O88OOO
ZZOOZODOOZZ7ZZZZOODMOIOOOOO7ZDOZOOOMZOOOZZZZ7IIII7IIIIIIII77III777I777777777$$$$$Z$8D888MD8NMDD888MDN88888888MMNNNNZ8888
OOOOZZOZD7OOOOOOOO8NO7OOO7Z8OD8OOO8OZOOZOZZZ$ZZZZ$7I7I7II7I7I77777777777777777$$$ZZO8$8DM888DMMDDDMDDDDDDD88888MMMNNND88
ODZO8ZO7OOOOOOOOOO8D8IOO+D8O8D888Z$$ZOOOOZZ7II77I77I7ZZZOD$777I7777777$I777777777$$$Z8ZDM8DDDDNMNDM8DDDDDDDDDDDDDNMMMMMN
8Z7$$ZOOOOOOOOOOO88D8I$7O8888DDO8ZZZOOOZZ$7777I777II777777777$ZZZZZZ7I7$$7$$7$$$ZO8OD8DIM8DDDDDDMMMDDDDDDDDDDDDDDDDNNMNM
Z7Z?OOOOOOOOO8O8888O8I$Z88888DDOOOOOOOOZZ77777777$777777I777777777777777ZOZZOOZ7$$$$ZZODN8DDDDDDDMMDDDDDDDDDDDDDDDDDDNMM
"""
welcome = """\
▄█ █▄ ▄████████ ▄█ ▄████████ ▄██████▄ ▄▄▄▄███▄▄▄▄ ▄████████
███ ███ ███ ███ ███ ███ ███ ███ ███ ▄██▀▀▀███▀▀▀██▄ ███ ███
███ ███ ███ █▀ ███ ███ █▀ ███ ███ ███ ███ ███ ███ █▀
███ ███ ▄███▄▄▄ ███ ███ ███ ███ ███ ███ ███ ▄███▄▄▄
███ ███ ▀▀███▀▀▀ ███ ███ ███ ███ ███ ███ ███ ▀▀███▀▀▀
███ ███ ███ █▄ ███ ███ █▄ ███ ███ ███ ███ ███ ███ █▄
███ ▄█▄ ███ ███ ███ ███▌ ▄ ███ ███ ███ ███ ███ ███ ███ ███ ███
▀███▀███▀ ██████████ █████▄▄██ ████████▀ ▀██████▀ ▀█ ███ █▀ ██████████
▀
"""
Welcome = """\
,ggg, gg ,gg
dP""Y8a 88 ,8P ,dPYb,
Yb, `88 88 d8' IP'`Yb
`" 88 88 88 I8 8I
88 88 88 I8 8'
88 88 88 ,ggg, I8 dP ,gggg, ,ggggg, ,ggg,,ggg,,ggg, ,ggg,
88 88 88 i8" "8i I8dP dP" "Yb dP" "Y8ggg ,8" "8P" "8P" "8, i8" "8i
Y8 ,88, 8P I8, ,8I I8P i8' i8' ,8I I8 8I 8I 8I I8, ,8I
Yb,,d8""8b,,dP `YbadP' ,d8b,_ ,d8,_ _,d8, ,d8' ,dP 8I 8I Yb, `YbadP'
"88" "88" 888P"Y8888P'"Y88P""Y8888PPP"Y8888P" 8P' 8I 8I `Y8888P"Y888
"""
to = """\
▄▀▀▀█▀▀▄ ▄▀▀▀▀▄
█ █ ▐ █ █
▐ █ █ █
█ ▀▄ ▄▀
▄▀ ▀▀▀▀
█
▐
"""
To = """\
.
.o8
.o888oo .ooooo.
888 d88' `88b
888 888 888
888 . 888 888
"888" `Y8bod8P'
"""
nimbh = """\
███▄ █ ██▓ ███▄ ▄███▓ ▄▄▄▄ ██░ ██
██ ▀█ █ ▓██▒▓██▒▀█▀ ██▒▓█████▄ ▓██░ ██▒
▓██ ▀█ ██▒▒██▒▓██ ▓██░▒██▒ ▄██▒██▀▀██░
▓██▒ ▐▌██▒░██░▒██ ▒██ ▒██░█▀ ░▓█ ░██
▒██░ ▓██░░██░▒██▒ ░██▒░▓█ ▀█▓░▓█▒░██▓
░ ▒░ ▒ ▒ ░▓ ░ ▒░ ░ ░░▒▓███▀▒ ▒ ░░▒░▒
░ ░░ ░ ▒░ ▒ ░░ ░ ░▒░▒ ░ ▒ ░▒░ ░
░ ░ ░ ▒ ░░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░
░
"""
Nimbh = """\
... ... . ..
.=*8888n.."%888: @88> . uW8" .uef^"
X ?8888f '8888 %8P .. . : `t888 :d88E
88x. '8888X 8888> . .888: x888 x888. 8888 . `888E
'8888k 8888X '"*8h. .@88u ~`8888~'888X`?888f` 9888.z88N 888E .z8k
"8888 X888X .xH8 ''888E` X888 888X '888> 9888 888E 888E~?888L
`8" X888!:888X 888E X888 888X '888> 9888 888E 888E 888E
=~` X888 X888X 888E X888 888X '888> 9888 888E 888E 888E
:h. X8*` !888X 888E X888 888X '888> 9888 888E 888E 888E
X888xX" '8888..: 888& "*88%""*88" '888!` .8888 888" 888E 888E
:~`888f '*888*" R888" `~ " `"` `%888*%" m888N= 888>
"" `"` "" "` `Y" 888
J88"
@%
:"
"""
fullgreet = """\
,ggg, gg ,gg
dP""Y8a 88 ,8P ,dPYb,
Yb, `88 88 d8' IP'`Yb
`" 88 88 88 I8 8I
88 88 88 I8 8'
88 88 88 ,ggg, I8 dP ,gggg, ,ggggg, ,ggg,,ggg,,ggg, ,ggg,
88 88 88 i8" "8i I8dP dP" "Yb dP" "Y8ggg ,8" "8P" "8P" "8, i8" "8i
Y8 ,88, 8P I8, ,8I I8P i8' i8' ,8I I8 8I 8I 8I I8, ,8I
Yb,,d8""8b,,dP `YbadP' ,d8b,_ ,d8,_ _,d8, ,d8' ,dP 8I 8I Yb, `YbadP'
"88" "88" 888P"Y8888P'"Y88P""Y8888PPP"Y8888P" 8P' 8I 8I `Y8888P"Y888
.
.o8
.o888oo .ooooo.
888 d88' `88b
888 888 888
888 . 888 888
"888" `Y8bod8P'
... ... . ..
.=*8888n.."%888: @88> . uW8" .uef^"
X ?8888f '8888 %8P .. . : `t888 :d88E
88x. '8888X 8888> . .888: x888 x888. 8888 . `888E
'8888k 8888X '"*8h. .@88u ~`8888~'888X`?888f` 9888.z88N 888E .z8k
"8888 X888X .xH8 ''888E` X888 888X '888> 9888 888E 888E~?888L
`8" X888!:888X 888E X888 888X '888> 9888 888E 888E 888E
=~` X888 X888X 888E X888 888X '888> 9888 888E 888E 888E
:h. X8*` !888X 888E X888 888X '888> 9888 888E 888E 888E
X888xX" '8888..: 888& "*88%""*88" '888!` .8888 888" 888E 888E
:~`888f '*888*" R888" `~ " `"` `%888*%" m888N= 888>
"" `"` "" "` `Y" 888
J88"
@%
:"
"""
greet = """\
\ / _ | _ _ _ _ _
\/\/ (/_|(_(_)| | |(/_
_|_ _
| (_)
|\ |. _ _ |_ |_
| \||| | ||_)| |
"""
dead = """\
__ __ _____ _ _ _______ ______ _______ ______ _______ _______ ______
\\_/ | | | | |_____| |_____/ |______ | \\ |______ |_____| | \\
| |_____| |_____| | | | \\_ |______ |_____/ |______ | | |_____/
"""
cinfo = """\
NIMBH
Copyright (c) 2016 <NAME>. All rights reserved.
NIMBH is held under the Attribution-NonCommercial-ShareAlike (CC BY-NC-SA) License.
Version 0.1 Alpha
"""
info = """\
Remember: You may press Control+D or ALT+F4 at any time to exit.
ALT+ENTER toggles fullscreen, although doing this may distort formatting.
Formatting is designed around a 1920x1080 fullscreen monitor.
Press ALT+TAB to switch in and out of the game.
"""
def maxSize(x):
if x < 10:
return 9
if x < 100:
return 99
if x < 1000:
return 999
if x < 10000:
return 9999
else:
return 99
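# Random string helpers: a string of y random digits / y random ASCII letters.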
def randomDigits(y):
return ''.join(str(random.randint(0,9)) for x in range(y))
def randomChars(y):
return ''.join(random.choice(string.ascii_letters) for x in range(y))
def isInt(c):
try:
int(c)
return True
except:
return False
LF_FACESIZE = 32
STD_OUTPUT_HANDLE = -11
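# nprint: print each line of a multi-line string centered to width x (fill character c).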
def nprint(s, x=0, c=" "):
for line in s.splitlines():
print(line.center(x, c), end="\r")
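# replaceNumbers: substitute every digit in s with a fresh random digit, leaving other characters alone.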
def replaceNumbers(s):
    return re.sub(r'\d', lambda m: str(random.randint(0,9)), s)
class COORD(ctypes.Structure):
_fields_ = [("X", ctypes.c_short), ("Y", ctypes.c_short)]
class CONSOLE_FONT_INFOEX(ctypes.Structure):
_fields_ = [("cbSize", ctypes.c_ulong),
("nFont", ctypes.c_ulong),
("dwFontSize", COORD),
("FontFamily", ctypes.c_uint),
("FontWeight", ctypes.c_uint),
("FaceName", ctypes.c_wchar * LF_FACESIZE)]
def printXY(x, y, text):
sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text))
time.sleep(1)
sys.stdout.flush()
def beep(sound):
winsound.PlaySound('%s.wav' % sound, winsound.SND_FILENAME)
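# blood(): dissolve animation - a full-width line of random digits is blanked out at drifting random
# positions (with extra random erasures per frame when fast is set) until nothing is left, then the
# remnants are scrolled off the screen.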
def blood(decay=15, dur=100, fast=True):
sizex, sizey = get_terminal_size()
#os.system("mode con: cols="+str(sizex)+ "lines="+str(sizey))
positions = []
text = "0"*sizex
for i in range(sizex//2* + decay*2):
positions.append(random.randint(0, sizex))
if len(positions) >= sizex:
break
if 0 in positions:
positions = [x for x in positions if x != 0]
# positions.append(int(random.gauss(sizex//2, sizex//4)))
for i in range(dur):
if all(x == text[0] for x in text) and text[0] != "0":
for i in range(sizey):
start_time = time.time()
print("")
if time.time() - start_time < 0.008:
time.sleep(0.008 - (time.time() - start_time))
            break # break out once everything looks done
#positions.append(random.randint(0, sizex))
lenp = len(positions)
#text = str(randomDigits(x))
for index, j in enumerate(positions):
if all(x == text[0] for x in text) and text[0] != "0":
break
found = False
count = 0
while found == False and fast == True and count < decay:
count += 1
if all(x == text[0] for x in text):
found = True
break
pos = random.randint(0,sizex-1)
# print("pos:", pos)
if text[pos].isdigit() == True:
# print("True!")
text = text[:pos] + ' ' + text[pos + 1:]
                    found = True
                    break # Not sure why this loop won't end without breaking
# else:
# print("False :(")
# print("break")
#positions.append(random.randint(0, sizex))
text = replaceNumbers(text)
shift = random.randint(0,1)
text = text[:j] + ' ' + text[j + 1:]
if shift == 0 and j == 0:
positions[index] += 1
elif shift == 1 and j == lenp - 1:
positions[index] -= 1
else:
if shift == 0:
positions[index] -= 1
else:
positions[index] += 1
print(Fore.RED, Style.DIM, text, end="\r")
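# youdied(): same dissolve effect as blood(), then centers the big "dead" banner and prompts the
# player to quit or return to the main menu, returning their answer.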
def youdied(decay=15, dur=100, fast=True):
sizex, sizey = get_terminal_size()
#os.system("mode con: cols="+str(sizex)+ "lines="+str(sizey))
positions = []
text = "0"*sizex
for i in range(sizex//2* + decay*2):
positions.append(random.randint(0, sizex))
if len(positions) >= sizex:
break
if 0 in positions:
positions = [x for x in positions if x != 0]
# positions.append(int(random.gauss(sizex//2, sizex//4)))
for i in range(dur):
if all(x == text[0] for x in text) and text[0] != "0":
for i in range(sizey):
start_time = time.time()
print("")
if time.time() - start_time < 0.01:
time.sleep(0.01 - (time.time() - start_time))
            break # break out once everything looks done
#positions.append(random.randint(0, sizex))
lenp = len(positions)
#text = str(randomDigits(x))
for index, j in enumerate(positions):
if all(x == text[0] for x in text) and text[0] != "0":
break
found = False
count = 0
while found == False and fast == True and count < decay:
count += 1
if all(x == text[0] for x in text):
found = True
break
pos = random.randint(0,sizex-1)
# print("pos:", pos)
if text[pos].isdigit() == True:
# print("True!")
text = text[:pos] + ' ' + text[pos + 1:]
                    found = True
                    break # Not sure why this loop won't end without breaking
# else:
# print("False :(")
# print("break")
#positions.append(random.randint(0, sizex))
text = replaceNumbers(text)
shift = random.randint(0,1)
text = text[:j] + ' ' + text[j + 1:]
if shift == 0 and j == 0:
positions[index] += 1
elif shift == 1 and j == lenp - 1:
positions[index] -= 1
else:
if shift == 0:
positions[index] -= 1
else:
positions[index] += 1
print(Fore.RED, Style.DIM, text, end="\r")
print(Style.BRIGHT)
nprint(dead, sizex)
for i in range(sizey//2-2):
print("")
time.sleep(0.03)
print(Fore.WHITE, Style.DIM)
# time.sleep(1.5)
sys.stdout.write("\r")
ret = input("Enter 'q' to quit, or anything else to return to the main menu.".center(sizex) + Fore.RED + Style.BRIGHT)
return ret
def rain(dur=10**5): # pretend you're upside down ;)
sizex, sizey = get_terminal_size()
os.system("mode con: cols="+str(sizex)+ "lines="+str(sizey))
positions = []
#bolt = x//2 + random.randint(-x//3, x//3)
#boltf = bolt
time1 = 250
time2 = 491
time3 = 599
time4 = 759
time5 = 956
nextbolt = time5 + random.randint(5,sizex)
bl1 = random.gauss(sizey//2, sizey//4)
bl2 = random.gauss(sizey//2, sizey//4)
bl3 = random.gauss(sizey//2, sizey//4)
bl4 = random.gauss(sizey//2, sizey//4)
bl5 = random.gauss(sizey//2, sizey//4)
bln = random.gauss(sizey//2, sizey//4)
fade = 0
def lightning(bolt, text):
boltf = bolt
boltf += random.randint(-1,1)
if boltf == bolt:
text = text[:boltf] + '|' + text[boltf + 1:]
elif boltf > bolt:
text = text[:boltf] + '\\' + text[boltf + 1:]
else:
text = text[:boltf] + '/' + text[boltf + 1:]
#p = str(Fore.BLUE, text[:bolt], Fore.YELLOW, text[bolt], Fore.BLUE, text[boltf + 1:])
p = Fore.BLUE + text[:boltf] + Fore.YELLOW + Style.BRIGHT + text[boltf] + Fore.BLUE + Style.NORMAL + text[boltf + 1:]
print(p, end="\r")
return boltf
for i in range(sizex*3):
positions.append(random.randint(0, sizex))
for i in range(dur):
text = "o"*sizex
#positions.append(random.randint(0, sizex))
lenp = len(positions)
#text = str(randomDigits(x))
for index, j in enumerate(positions):
shift = random.randint(0,1)
text = text[:j] + ' ' + text[j + 1:]
if shift == 0 and j == 0:
positions[index] += 1
elif shift == 1 and j == lenp - 1:
positions[index] -= 1
else:
if shift == 0:
positions[index] -= 1
else:
positions[index] += 1
if i >= time1 and i < time1 + bl1:
if i == time1:
bolt1 = sizex//2 + random.randint(-sizex//3, sizex//3)
bolt1 = lightning(bolt1, text)
elif i >= time2 and i < time2 + bl2:
if i == time2:
bolt2 = sizex//2 + random.randint(-sizex//3, sizex//3)
bolt2 = lightning(bolt2, text)
elif i >= time3 and i < time3 + bl3:
if i == time3:
bolt3 = sizex//2 + random.randint(-sizex//3, sizex//3)
bolt3 = lightning(bolt3, text)
elif i >= time4 and i < time4 + bl4:
if i == time4:
bolt4 = sizex//2 + random.randint(-sizex//3, sizex//3)
bolt4 = lightning(bolt4, text)
elif i >= time5 and i < time5 + bl5:
if i == time5:
bolt5 = sizex//2 + random.randint(-sizex//3, sizex//3)
bolt5 = lightning(bolt5, text)
elif i >= nextbolt and i < nextbolt + bln:
if i == nextbolt:
boltn = sizex//2 + random.randint(-sizex//3, sizex//3)
boltn = lightning(boltn, text)
if i == nextbolt + (sizey)//2:
nextbolt += sizey + fade + random.randint(1,sizex)
bln = random.gauss(sizey//2, sizey//4)
fade += 5
else:
print(Fore.BLUE, end="\r")
print(text, end="\r")
def tendrils():
sizex, sizey = get_terminal_size()
    os.system("mode con: cols=" + str(sizex) + " lines=" + str(sizey))
positions = []
for i in range(sizey * 2):
start_time = time.time()
positions.append(random.randint(0, sizex))
positions.append(random.randint(0, sizex))
lenp = len(positions)
text = " " * sizex
for index, j in enumerate(positions):
shift = random.randint(0,1)
text = text[:j] + str(random.randint(0,9)) + text[j + 1:]
if shift == 0 and j == 0:
positions[index] += 1
elif shift == 1 and j == lenp - 1:
positions[index] -= 1
else:
if shift == 0:
positions[index] -= 1
else:
positions[index] += 1
print(text, end="\r")
if time.time() - start_time < 0.01:
time.sleep(0.01 - (time.time() - start_time))
def bloodText1(x, y):
positions = []
for i in range(int(x//2)):
positions.append(random.randint(0, x))
lenp = len(positions)
for i in range(y):
text = str(randomDigits(x))
for index, j in enumerate(positions):
shift = random.randint(0,1)
text = text[:j] + ' ' + text[j + 1:]
if shift == 0 and j == 0:
positions[index] += 1
elif shift == 1 and j == lenp - 1:
positions[index] -= 1
else:
if shift == 0:
positions[index] -= 1
else:
positions[index] += 1
print(text, end="\r")
def intro():
ctypes.windll.kernel32.SetConsoleTitleA("NIMBH")
font = CONSOLE_FONT_INFOEX()
font.cbSize = ctypes.sizeof(CONSOLE_FONT_INFOEX)
font.nFont = 12
font.dwFontSize.X = 12
font.dwFontSize.Y = 12
font.FontFamily = 54
font.FontWeight = 400
font.FaceName = "Lucida Console"
handle1 = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.SetCurrentConsoleFontEx(
handle1, ctypes.c_long(False), ctypes.pointer(font))
AltEnter()
sizex, sizey = get_terminal_size()
#mode con: cols=sizex lines=sizey
#system("mode CON: COLS=",str(sizey))
#bufsize = wintypes._COORD(sizex, sizey) # rows, columns
#STDERR = -12
#h = windll.kernel32.GetStdHandle(STDERR)
#windll.kernel32.SetConsoleScreenBufferSize(h, bufsize)
#subprocess.Popen(["mode", "con:", "cols=",str(sizex), "lines=",str(sizey)])
#sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=32, cols=100))
    os.system("mode con: cols=" + str(sizex) + " lines=" + str(sizey))
#print("Terminal size:", get_terminal_size())
#pause = input("Press enter to begin.\n")
#clear()
count = 0
fullgreetsize = len(re.findall("\n", fullgreet))
for i in range((sizey- 1)):
print("| {0:<{1}} |".format("", sizex-4), end = "\r")
for line in fullgreet.splitlines():
count += 1
print("| {0:<{1}} |".format(line.center(sizex-4), sizex-4), end = "\r")
time.sleep(0.015)
for i in range((sizey-fullgreetsize)//2):
count += 1
print("| {0:<{1}} |".format("", sizex-4), end = "\r")
time.sleep(0.03)
#tendrils()
#clear()
print(Fore.RED, Style.DIM, end="\r"),
time.sleep(3)
print("".center(sizex, "_"), end="\r")
blood(40)
clear()
toPrint = replaceNumbers(fullgreet)
print(Style.RESET_ALL),
#print(Style.DIM),
print(Style.DIM),
for i in cinfo.splitlines():
print(i.rjust(sizex))
print(Fore.RED),
print(Style.BRIGHT),
#print(Back.WHITE)
for i in range((sizey - 63)//2-3):
print("")
for i in toPrint.splitlines():
print(i.center(sizex), end = "\r")
sys.stdout.write('\r')
sys.stdout.flush()
for i in range((sizey - 63)//2-1):
print("")
print(Fore.BLUE)
pause = input("Press enter to continue.\n")
clear()
for i in range((sizey)//2-6):
print("")
print(Fore.CYAN)
print(Style.DIM),
nprint(info, sizex)
for i in range((sizey)//2-6):
print("")
pause = input(Fore.RED + "Press enter to begin.\n")
if __name__ == "__main__":
intro()
clear()
youdied()
|
[
"sys.stdout.write",
"win32api.SetCursorPos",
"winsound.PlaySound",
"ctypes.create_string_buffer",
"sys.stdout.flush",
"ctypes.windll.kernel32.GetConsoleScreenBufferInfo",
"win32api.mouse_event",
"random.gauss",
"ctypes.WinDLL",
"random.randint",
"ctypes.byref",
"ctypes.sizeof",
"shlex.split",
"ctypes.pointer",
"re.findall",
"ctypes.get_last_error",
"ctypes.POINTER",
"ctypes.windll.kernel32.GetStdHandle",
"struct.unpack",
"time.sleep",
"sys.stdout.isatty",
"ctypes.c_long",
"platform.system",
"ctypes.windll.kernel32.SetConsoleTitleA",
"random.choice",
"time.time",
"sys.stderr.write"
] |
[((666, 710), 'ctypes.WinDLL', 'ctypes.WinDLL', (['"""user32"""'], {'use_last_error': '(True)'}), "('user32', use_last_error=True)\n", (679, 710), False, 'import ctypes\n'), ((3082, 3103), 'ctypes.POINTER', 'ctypes.POINTER', (['INPUT'], {}), '(INPUT)\n', (3096, 3103), False, 'import ctypes\n'), ((928, 957), 'win32api.SetCursorPos', 'win32api.SetCursorPos', (['(x, y)'], {}), '((x, y))\n', (949, 957), False, 'import win32api, win32con\n'), ((961, 1024), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTDOWN', 'x', 'y', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n', (981, 1024), False, 'import win32api, win32con\n'), ((1025, 1086), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTUP', 'x', 'y', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)\n', (1045, 1086), False, 'import win32api, win32con\n'), ((4052, 4067), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4062, 4067), False, 'import time\n'), ((4210, 4225), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4220, 4225), False, 'import time\n'), ((5290, 5307), 'platform.system', 'platform.system', ([], {}), '()\n', (5305, 5307), False, 'import platform\n'), ((6688, 6721), 'sys.stderr.write', 'sys.stderr.write', (['"""\x1b[2J\x1b[H"""'], {}), "('\\x1b[2J\\x1b[H')\n", (6704, 6721), False, 'import sys\n'), ((23327, 23385), 'sys.stdout.write', 'sys.stdout.write', (["('\\x1b7\\x1b[%d;%df%s\\x1b8' % (x, y, text))"], {}), "('\\x1b7\\x1b[%d;%df%s\\x1b8' % (x, y, text))\n", (23343, 23385), False, 'import sys\n'), ((23391, 23404), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (23401, 23404), False, 'import time\n'), ((23410, 23428), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (23426, 23428), False, 'import sys\n'), ((23452, 23511), 'winsound.PlaySound', 'winsound.PlaySound', (["('%s.wav' % sound)", 'winsound.SND_FILENAME'], {}), "('%s.wav' % sound, winsound.SND_FILENAME)\n", (23470, 23511), False, 'import winsound\n'), ((28390, 28412), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (28406, 28412), False, 'import sys\n'), ((28931, 28967), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (28943, 28967), False, 'import random\n'), ((28974, 29010), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (28986, 29010), False, 'import random\n'), ((29017, 29053), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (29029, 29053), False, 'import random\n'), ((29060, 29096), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (29072, 29096), False, 'import random\n'), ((29103, 29139), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (29115, 29139), False, 'import random\n'), ((29146, 29182), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (29158, 29182), False, 'import random\n'), ((33583, 33631), 'ctypes.windll.kernel32.SetConsoleTitleA', 'ctypes.windll.kernel32.SetConsoleTitleA', (['"""NIMBH"""'], {}), "('NIMBH')\n", (33622, 33631), False, 'import ctypes\n'), ((33683, 33717), 'ctypes.sizeof', 'ctypes.sizeof', (['CONSOLE_FONT_INFOEX'], {}), '(CONSOLE_FONT_INFOEX)\n', (33696, 33717), False, 'import ctypes\n'), ((33895, 33949), 'ctypes.windll.kernel32.GetStdHandle', 'ctypes.windll.kernel32.GetStdHandle', 
(['STD_OUTPUT_HANDLE'], {}), '(STD_OUTPUT_HANDLE)\n', (33930, 33949), False, 'import ctypes\n'), ((35263, 35276), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (35273, 35276), False, 'import time\n'), ((860, 869), 'ctypes.byref', 'byref', (['pt'], {}), '(pt)\n', (865, 869), False, 'from ctypes import windll, byref, wintypes, Structure, c_ulong\n'), ((3581, 3596), 'ctypes.byref', 'ctypes.byref', (['x'], {}), '(x)\n', (3593, 3596), False, 'import ctypes\n'), ((3598, 3614), 'ctypes.sizeof', 'ctypes.sizeof', (['x'], {}), '(x)\n', (3611, 3614), False, 'import ctypes\n'), ((3802, 3817), 'ctypes.byref', 'ctypes.byref', (['x'], {}), '(x)\n', (3814, 3817), False, 'import ctypes\n'), ((3819, 3835), 'ctypes.sizeof', 'ctypes.sizeof', (['x'], {}), '(x)\n', (3832, 3835), False, 'import ctypes\n'), ((4565, 4598), 'ctypes.windll.kernel32.GetStdHandle', 'windll.kernel32.GetStdHandle', (['(-12)'], {}), '(-12)\n', (4593, 4598), False, 'from ctypes import windll, create_string_buffer\n'), ((4614, 4638), 'ctypes.create_string_buffer', 'create_string_buffer', (['(22)'], {}), '(22)\n', (4634, 4638), False, 'from ctypes import windll, create_string_buffer\n'), ((4653, 4704), 'ctypes.windll.kernel32.GetConsoleScreenBufferInfo', 'windll.kernel32.GetConsoleScreenBufferInfo', (['h', 'csbi'], {}), '(h, csbi)\n', (4695, 4704), False, 'from ctypes import windll, create_string_buffer\n'), ((5870, 5903), 'ctypes.windll.kernel32.GetStdHandle', 'windll.kernel32.GetStdHandle', (['(-12)'], {}), '(-12)\n', (5898, 5903), False, 'from ctypes import windll, create_string_buffer\n'), ((5919, 5943), 'ctypes.create_string_buffer', 'create_string_buffer', (['(22)'], {}), '(22)\n', (5939, 5943), False, 'from ctypes import windll, create_string_buffer\n'), ((5958, 6009), 'ctypes.windll.kernel32.GetConsoleScreenBufferInfo', 'windll.kernel32.GetConsoleScreenBufferInfo', (['h', 'csbi'], {}), '(h, csbi)\n', (6000, 6009), False, 'from ctypes import windll, create_string_buffer\n'), ((28315, 28331), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (28325, 28331), False, 'import time\n'), ((28897, 28921), 'random.randint', 'random.randint', (['(5)', 'sizex'], {}), '(5, sizex)\n', (28911, 28921), False, 'import random\n'), ((29261, 29282), 'random.randint', 'random.randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (29275, 29282), False, 'import random\n'), ((32068, 32079), 'time.time', 'time.time', ([], {}), '()\n', (32077, 32079), False, 'import time\n'), ((34019, 34039), 'ctypes.c_long', 'ctypes.c_long', (['(False)'], {}), '(False)\n', (34032, 34039), False, 'import ctypes\n'), ((34041, 34061), 'ctypes.pointer', 'ctypes.pointer', (['font'], {}), '(font)\n', (34055, 34061), False, 'import ctypes\n'), ((34748, 34775), 're.findall', 're.findall', (['"""\n"""', 'fullgreet'], {}), "('\\n', fullgreet)\n", (34758, 34775), False, 'import re\n'), ((35018, 35035), 'time.sleep', 'time.sleep', (['(0.015)'], {}), '(0.015)\n', (35028, 35035), False, 'import time\n'), ((35171, 35187), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (35181, 35187), False, 'import time\n'), ((35734, 35756), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (35750, 35756), False, 'import sys\n'), ((35765, 35783), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (35781, 35783), False, 'import sys\n'), ((597, 616), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (614, 616), False, 'import sys\n'), ((3193, 3216), 'ctypes.get_last_error', 'ctypes.get_last_error', ([], {}), '()\n', (3214, 3216), False, 'import 
ctypes\n'), ((4831, 4869), 'struct.unpack', 'struct.unpack', (['"""hhhhHhhhhhh"""', 'csbi.raw'], {}), "('hhhhHhhhhhh', csbi.raw)\n", (4844, 4869), False, 'import struct\n'), ((6136, 6174), 'struct.unpack', 'struct.unpack', (['"""hhhhHhhhhhh"""', 'csbi.raw'], {}), "('hhhhHhhhhhh', csbi.raw)\n", (6149, 6174), False, 'import struct\n'), ((22495, 22530), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (22508, 22530), False, 'import random\n'), ((23766, 23790), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (23780, 23790), False, 'import random\n'), ((25420, 25440), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (25434, 25440), False, 'import random\n'), ((26117, 26141), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (26131, 26141), False, 'import random\n'), ((27769, 27789), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (27783, 27789), False, 'import random\n'), ((29844, 29868), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (29858, 29868), False, 'import random\n'), ((30105, 30125), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (30119, 30125), False, 'import random\n'), ((31733, 31769), 'random.gauss', 'random.gauss', (['(sizey // 2)', '(sizey // 4)'], {}), '(sizey // 2, sizey // 4)\n', (31745, 31769), False, 'import random\n'), ((32105, 32129), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (32119, 32129), False, 'import random\n'), ((32156, 32180), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (32170, 32180), False, 'import random\n'), ((32305, 32325), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (32319, 32325), False, 'import random\n'), ((32959, 32979), 'random.randint', 'random.randint', (['(0)', 'x'], {}), '(0, x)\n', (32973, 32979), False, 'import random\n'), ((33132, 33152), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (33146, 33152), False, 'import random\n'), ((6520, 6544), 'shlex.split', 'shlex.split', (['"""tput cols"""'], {}), "('tput cols')\n", (6531, 6544), False, 'import shlex\n'), ((6588, 6613), 'shlex.split', 'shlex.split', (['"""tput lines"""'], {}), "('tput lines')\n", (6599, 6613), False, 'import shlex\n'), ((22411, 22431), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (22425, 22431), False, 'import random\n'), ((22850, 22870), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (22864, 22870), False, 'import random\n'), ((24139, 24150), 'time.time', 'time.time', ([], {}), '()\n', (24148, 24150), False, 'import time\n'), ((24894, 24922), 'random.randint', 'random.randint', (['(0)', '(sizex - 1)'], {}), '(0, sizex - 1)\n', (24908, 24922), False, 'import random\n'), ((26490, 26501), 'time.time', 'time.time', ([], {}), '()\n', (26499, 26501), False, 'import time\n'), ((27243, 27271), 'random.randint', 'random.randint', (['(0)', '(sizex - 1)'], {}), '(0, sizex - 1)\n', (27257, 27271), False, 'import random\n'), ((31691, 31715), 'random.randint', 'random.randint', (['(1)', 'sizex'], {}), '(1, sizex)\n', (31705, 31715), False, 'import random\n'), ((32769, 32780), 'time.time', 'time.time', ([], {}), '()\n', (32778, 32780), False, 'import time\n'), ((30595, 30634), 'random.randint', 'random.randint', (['(-sizex // 3)', '(sizex // 3)'], {}), '(-sizex // 3, sizex // 3)\n', (30609, 30634), False, 'import random\n'), ((24196, 24207), 
'time.time', 'time.time', ([], {}), '()\n', (24205, 24207), False, 'import time\n'), ((26547, 26558), 'time.time', 'time.time', ([], {}), '()\n', (26556, 26558), False, 'import time\n'), ((30781, 30820), 'random.randint', 'random.randint', (['(-sizex // 3)', '(sizex // 3)'], {}), '(-sizex // 3, sizex // 3)\n', (30795, 30820), False, 'import random\n'), ((32359, 32379), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (32373, 32379), False, 'import random\n'), ((32833, 32844), 'time.time', 'time.time', ([], {}), '()\n', (32842, 32844), False, 'import time\n'), ((30967, 31006), 'random.randint', 'random.randint', (['(-sizex // 3)', '(sizex // 3)'], {}), '(-sizex // 3, sizex // 3)\n', (30981, 31006), False, 'import random\n'), ((24270, 24281), 'time.time', 'time.time', ([], {}), '()\n', (24279, 24281), False, 'import time\n'), ((26619, 26630), 'time.time', 'time.time', ([], {}), '()\n', (26628, 26630), False, 'import time\n'), ((31153, 31192), 'random.randint', 'random.randint', (['(-sizex // 3)', '(sizex // 3)'], {}), '(-sizex // 3, sizex // 3)\n', (31167, 31192), False, 'import random\n'), ((31339, 31378), 'random.randint', 'random.randint', (['(-sizex // 3)', '(sizex // 3)'], {}), '(-sizex // 3, sizex // 3)\n', (31353, 31378), False, 'import random\n'), ((31534, 31573), 'random.randint', 'random.randint', (['(-sizex // 3)', '(sizex // 3)'], {}), '(-sizex // 3, sizex // 3)\n', (31548, 31573), False, 'import random\n')]
|
"""Application class definition"""
import asyncio
import logging
import signal
from collections import namedtuple
import uvloop
from .factories import create_message_sink, create_message_source, \
create_router
from .exceptions import MessageSinkError
LOGGER = logging.getLogger(__name__)
#: Represents a message and the source it was received from
SourceMessagePair = namedtuple("SourceMessagePair", ["source_name", "message"])
SourceMessagePair.source_name.__doc__ = "Name of the message source"
SourceMessagePair.message.__doc__ = "The received message"
# pylint: disable=too-few-public-methods, too-many-instance-attributes
class Application:
"""Rabbit force application"""
def __init__(self, config, *, ignore_replay_storage_errors=False,
ignore_sink_errors=False,
source_connection_timeout=10.0):
"""
Application is the mediator class which is responsible for listening
for messages from the source objects and routing them to the right
message sinks.
.. note::
The application configures itself the first time :meth:`run` is
called. If you want to run the application with a different
configuration then a new Application instance should be created.
:param dict config: Application configuration
:param bool ignore_replay_storage_errors: If True then no exceptions \
        will be raised in case a network error occurs in the replay marker \
storage object
:param bool ignore_sink_errors: If True then no exceptions \
will be raised in case a message sink error occurs
:param source_connection_timeout: The maximum amount of time to wait \
for the message source to re-establish a connection with the server \
when the connection fails. If ``0`` then the message source will try \
to reconnect indefinitely.
:type source_connection_timeout: int, float or None
"""
#: The application's configuration
self.config = config
#: Marks whether to raise exceptions on replay storage errors or not
self.ignore_replay_storage_errors = ignore_replay_storage_errors
#: Marks whether to raise exceptions on message sink errors or not
self.ignore_sink_errors = ignore_sink_errors
#: Maximum allowed connection timeout for message source
self.source_connection_timeout = source_connection_timeout
#: Marks whether the application is already configured or not
self._configured = False
#: A message source object
self._source = None
#: A message sink object
self._sink = None
#: A message router object
self._router = None
#: The currently running message forwarding tasks
self._forwarding_tasks = {}
#: Event loop
self._loop = None
# The main task of the application
self._main_task = None
def run(self):
"""Run the Rabbit force application, listen for and forward messages
until a keyboard interrupt or a termination signal is received"""
# use the uvloop event loop policy
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# create an event loop and create the main task
self._loop = asyncio.get_event_loop()
self._main_task = asyncio.ensure_future(self._run(), loop=self._loop)
# add SIGTERM handler
self._loop.add_signal_handler(signal.SIGTERM,
self._on_termination_signal,
self._main_task)
# run the task until completion
try:
LOGGER.debug("Starting event loop")
self._loop.run_until_complete(self._main_task)
# on a keyboard interrupt cancel the main task and await its completion
except KeyboardInterrupt:
LOGGER.debug("Received keyboard interrupt")
self._main_task.cancel()
self._loop.run_until_complete(self._main_task)
finally:
LOGGER.debug("Event loop terminated")
@staticmethod
def _on_termination_signal(task):
"""Cancel the *task*"""
LOGGER.debug("Received termination signal")
task.cancel()
async def _run(self):
"""Configure the application and listen for incoming messages until
cancellation"""
LOGGER.info("Configuring application ...")
# configure the application
await self._configure()
LOGGER.debug("Start listening for messages")
# listen for incoming messages
await self._listen_for_messages()
async def _configure(self):
"""Create and configure collaborator objects"""
LOGGER.debug("Creating message source from configuration")
self._source = await create_message_source(
**self.config["source"],
ignore_replay_storage_errors=self.ignore_replay_storage_errors,
connection_timeout=self.source_connection_timeout,
loop=self._loop
)
LOGGER.debug("Creating message sink from configuration")
self._sink = await create_message_sink(
**self.config["sink"],
loop=self._loop
)
LOGGER.debug("Creating message router from configuration")
self._router = create_router(**self.config["router"])
self._configured = True
async def _listen_for_messages(self):
"""Listen for incoming messages and route them to the appropriate
brokers
This method will block until it's cancelled. On cancellation it'll
drain all the pending messages and forwarding tasks.
"""
try:
# open the message source
LOGGER.debug("Opening message source")
await self._source.open()
LOGGER.debug("Waiting for incoming messages")
            # consume messages while the message source is open, and keep
            # draining pending messages after the source has been closed
while not self._source.closed or self._source.has_pending_messages:
try:
# await an incoming message
source_name, message = await self._source.get_message()
LOGGER.debug("Received incoming message from source %r, "
"scheduling message forwarding",
source_name)
# forward the message in non blocking fashion
# (without awaiting the tasks result)
await self._schedule_message_forwarding(source_name,
message)
# on cancellation close the message source but continue to
                # consume pending messages until there are none left
except asyncio.CancelledError:
LOGGER.debug("Canceling wait for incoming messages")
await self._source.close()
LOGGER.info("Shutting down ...")
finally:
# close the source in case it wasn't closed in the inner loop
# (idempotent if already closed)
LOGGER.debug("Closing message source")
await self._source.close()
# if the source is closed and there are no more messages to
            # consume, await the completion of scheduled forwarding tasks
LOGGER.debug("Waiting for running forwarding tasks to complete")
await self._wait_scheduled_forwarding_tasks()
# when all the messages are forwarded close the message sink
LOGGER.debug("Closing message sink")
await self._sink.close()
async def _schedule_message_forwarding(self, source_name, message):
"""Create a task for forwarding the *message* from *source_name* and
add it to the map of active forwarding tasks
:param str source_name: Name of the message source
:param dict message: A message
"""
# create a task to forward the message
forwarding_task = asyncio.ensure_future(
self._forward_message(source_name, message),
loop=self._loop
)
# set a callback to consume the tasks result
forwarding_task.add_done_callback(self._forward_message_done)
# add the task and message to the map of running tasks
self._forwarding_tasks[forwarding_task] = \
SourceMessagePair(source_name, message)
async def _wait_scheduled_forwarding_tasks(self):
"""Wait for all the active forwarding tasks to complete"""
# check if there are any running forwarding tasks, and await them
if self._forwarding_tasks:
await asyncio.wait(self._forwarding_tasks, loop=self._loop)
async def _forward_message(self, source_name, message):
"""Forward the *message* from *source_name* with the appropriate route
:param str source_name: Name of the message source
:param dict message: A message
:return: The routing parameters used to forward the message or None \
if no suitable route was found
:rtype: Route or None
"""
# find a matching route for the message
route = self._router.find_route(source_name, message)
# if a route was found for the message then forward it using the
# routing parameters
if route is not None:
await self._sink.consume_message(message,
route.broker_name,
route.exchange_name,
route.routing_key,
route.properties)
        # return the routing parameters, or None if no suitable route was found
return route
def _forward_message_done(self, future):
"""Consume the result of a completed message forwarding task
:param asyncio.Future future: A future object
"""
# remove task from the map of running tasks
source_message_pair = self._forwarding_tasks.pop(future)
# extract message and source information
source_name = source_message_pair.source_name
channel = source_message_pair.message["channel"]
replay_id = source_message_pair.message["data"]["event"]["replayId"]
try:
route = future.result()
if route:
LOGGER.info("Forwarded message %r on channel %r "
"from %r to %r.",
replay_id, channel, source_name, route)
else:
LOGGER.warning("Dropped message %r on channel %r from %r, "
"no route found.",
replay_id, channel, source_name)
except MessageSinkError as error:
if self.ignore_sink_errors:
LOGGER.error("Dropped message %r on channel %r from %r. %s",
replay_id, channel, source_name, str(error))
else:
self._on_unexpected_error(error)
except Exception as error: # pylint: disable=broad-except
self._on_unexpected_error(error)
def _on_unexpected_error(self, error):
"""Handle unexpected errors of forwarding tasks
Sets the *error* as the exception of the application's main task.
"""
LOGGER.debug("An unexpected error occurred. Setting it as the "
"exception of the main task.")
self._main_task.set_exception(error)
# pylint: enable=too-few-public-methods, too-many-instance-attributes
|
[
"asyncio.get_event_loop",
"collections.namedtuple",
"uvloop.EventLoopPolicy",
"asyncio.wait",
"logging.getLogger"
] |
[((269, 296), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (286, 296), False, 'import logging\n'), ((378, 437), 'collections.namedtuple', 'namedtuple', (['"""SourceMessagePair"""', "['source_name', 'message']"], {}), "('SourceMessagePair', ['source_name', 'message'])\n", (388, 437), False, 'from collections import namedtuple\n'), ((3341, 3365), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3363, 3365), False, 'import asyncio\n'), ((3237, 3261), 'uvloop.EventLoopPolicy', 'uvloop.EventLoopPolicy', ([], {}), '()\n', (3259, 3261), False, 'import uvloop\n'), ((8859, 8912), 'asyncio.wait', 'asyncio.wait', (['self._forwarding_tasks'], {'loop': 'self._loop'}), '(self._forwarding_tasks, loop=self._loop)\n', (8871, 8912), False, 'import asyncio\n')]
|
if __package__:
from pluGET.utils.consoleoutput import consoleTitle, clearConsole, printMainMenu
from pluGET.utils.utilities import check_requirements
from pluGET.handlers.handle_input import createInputLists, getInput
from pluGET.handlers.handle_config import checkConfig
else:
from utils.consoleoutput import consoleTitle, clearConsole, printMainMenu
from utils.utilities import check_requirements
from handlers.handle_input import createInputLists, getInput
from handlers.handle_config import checkConfig
def mainFunction():
consoleTitle()
clearConsole()
checkConfig()
check_requirements()
createInputLists()
printMainMenu()
getInput()
mainFunction()
|
[
"utils.utilities.check_requirements",
"handlers.handle_input.createInputLists",
"utils.consoleoutput.printMainMenu",
"utils.consoleoutput.clearConsole",
"utils.consoleoutput.consoleTitle",
"handlers.handle_input.getInput",
"handlers.handle_config.checkConfig"
] |
[((566, 580), 'utils.consoleoutput.consoleTitle', 'consoleTitle', ([], {}), '()\n', (578, 580), False, 'from utils.consoleoutput import consoleTitle, clearConsole, printMainMenu\n'), ((585, 599), 'utils.consoleoutput.clearConsole', 'clearConsole', ([], {}), '()\n', (597, 599), False, 'from utils.consoleoutput import consoleTitle, clearConsole, printMainMenu\n'), ((604, 617), 'handlers.handle_config.checkConfig', 'checkConfig', ([], {}), '()\n', (615, 617), False, 'from handlers.handle_config import checkConfig\n'), ((622, 642), 'utils.utilities.check_requirements', 'check_requirements', ([], {}), '()\n', (640, 642), False, 'from utils.utilities import check_requirements\n'), ((647, 665), 'handlers.handle_input.createInputLists', 'createInputLists', ([], {}), '()\n', (663, 665), False, 'from handlers.handle_input import createInputLists, getInput\n'), ((670, 685), 'utils.consoleoutput.printMainMenu', 'printMainMenu', ([], {}), '()\n', (683, 685), False, 'from utils.consoleoutput import consoleTitle, clearConsole, printMainMenu\n'), ((690, 700), 'handlers.handle_input.getInput', 'getInput', ([], {}), '()\n', (698, 700), False, 'from handlers.handle_input import createInputLists, getInput\n')]
|
import pygame
from pygame import *
from .game_object import *
from .player import *
from .spawner import *
from .decoration import *
from .label import *
from .shared import *
from .enemy import *
from .tiled import *
from .collision_manager import *
from .resource_handler import *
from threading import Thread
import time
import random
import os
import code
import copy
import random
class App():
def __init__(self):
self.children = []
self.clock = Clock()
self.running = True
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (window_position[0], window_position[1])
self.surface = pygame.display.set_mode((window_size[0], window_size[1]), HWSURFACE | DOUBLEBUF)
#self.surface = pygame.display.set_mode((window_size[0], window_size[1]), HWSURFACE | DOUBLEBUF | FULLSCREEN)
self.collision_manager = CollisionManager()
pygame.init()
pygame.key.set_repeat(200,60)
self.run()
def handle_events(self, event):
if event.type == pygame.QUIT:
self.quit()
else:
for x in event_receiver_objects:
x.on_event(event, self)
def loop(self):
to_collide = []
for object in all_objects:
# w, h = pygame.display.get_surface().get_size()
# if not ( -object.size.x <= object.position.x <= w and -object.size.y <= object.position.y <= h ) and object.type != PLAYER:
# object.kill()
try:
object.every_tick()
except Exception as e:
print(e)
if object.layer == collision_layer:
to_collide.append(object)
self.collision_manager.handle_all_collisions(to_collide)
def render(self):
self.surface.fill(background_color)
try:
#TODO do better
camera_position.x, camera_position.y = [ ( obj.position.x - (window_size[0]/2) + (obj.size.x/2) , obj.position.y - (window_size[1]/2) + (obj.size.y/2) ) for obj in all_objects if obj.type == PLAYER ][0]
#draw object in layered order
for layer in range(min(object.layer for object in all_objects), max(object.layer for object in all_objects)+1):
for object in all_objects:
if object.layer == layer:
self.surface.blit(object.surface, (object.position.x - camera_position.x, object.position.y - camera_position.y))
except Exception as e:
print(e)
pass
pygame.display.flip()
def quit(self):
all_objects.clear()
self.running = False
pygame.quit()
def run(self):
while(self.running):
self.loop()
self.render()
self.clock.tick(tick)
for event in pygame.event.get():
self.handle_events(event)
def exec(self, cmd):
exec(cmd)
Thread(target=App).start()
time.sleep(1)
###########################################
#TODO music doesn't play if this file is imported from elsewhere
#TODO replace the time.sleep above with something that makes more sense
|
[
"threading.Thread",
"time.sleep"
] |
[((3029, 3042), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3039, 3042), False, 'import time\n'), ((3002, 3020), 'threading.Thread', 'Thread', ([], {'target': 'App'}), '(target=App)\n', (3008, 3020), False, 'from threading import Thread\n')]
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.contrib.node.targets.node_module import NodeModule
class NodePreinstalledModule(NodeModule):
"""A NodeModule which resolves deps by downloading an archived node_modules directory.
This is basically an example, to demonstrate how additional types of NodeModule targets with
their own resolvers (in this case NodePreinstalledModuleResolver), which still work with
  NodeTest, can be registered. To be generally correct, this target type and associated resolver
would have to use platform- and Node-version-specific node_modules archives, rather than just
a single dependencies_archive_url used verbatim. Consider NodePreinstalledModule and
NodePreinstalledModuleResolver subject to future change or removal for now.
"""
def __init__(self, dependencies_archive_url=None, sources=None,
address=None, payload=None, **kwargs):
"""
    :param string dependencies_archive_url: The location of a tar.gz file containing a node_modules directory.
"""
payload = payload or Payload()
payload.add_fields({
'dependencies_archive_url': PrimitiveField(dependencies_archive_url),
})
super(NodePreinstalledModule, self).__init__(sources=sources, address=address,
payload=payload, **kwargs)
@property
def dependencies_archive_url(self):
"""Where to download the archive containing the node_modules directory.
:rtype: string
"""
return self.payload.dependencies_archive_url
|
[
"pants.base.payload_field.PrimitiveField",
"pants.base.payload.Payload"
] |
[((1333, 1342), 'pants.base.payload.Payload', 'Payload', ([], {}), '()\n', (1340, 1342), False, 'from pants.base.payload import Payload\n'), ((1402, 1442), 'pants.base.payload_field.PrimitiveField', 'PrimitiveField', (['dependencies_archive_url'], {}), '(dependencies_archive_url)\n', (1416, 1442), False, 'from pants.base.payload_field import PrimitiveField\n')]
|
"""Wrapper for Torch Dataset class to enable contrastive training
"""
import torch
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio_augmentations import Compose
from typing import Tuple, List
class ContrastiveDataset(Dataset):
def __init__(self, dataset: Dataset, input_shape: List[int], transform: Compose):
self.dataset = dataset
self.transform = transform
self.input_shape = input_shape
self.ignore_idx = []
def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
if idx in self.ignore_idx:
return self[idx + 1]
audio, label = self.dataset[idx]
if audio.shape[1] < self.input_shape[1]:
self.ignore_idx.append(idx)
return self[idx + 1]
if self.transform:
audio = self.transform(audio)
return audio, label
def __len__(self) -> int:
return len(self.dataset)
def concat_clip(self, n: int, audio_length: float) -> Tensor:
audio, _ = self.dataset[n]
batch = torch.split(audio, audio_length, dim=1)
batch = torch.cat(batch[:-1])
batch = batch.unsqueeze(dim=1)
if self.transform:
batch = self.transform(batch)
return batch
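# A minimal usage sketch (hypothetical base dataset, input length and augmentations, for illustration only):
#
#     base = MyAudioDataset(...)                       # yields (waveform, label) pairs
#     transform = Compose([])                          # torchaudio_augmentations pipeline
#     ds = ContrastiveDataset(base, input_shape=[1, 22050], transform=transform)
#     audio, label = ds[0]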
class SiameseContrastiveDataset(Dataset):
def __init__(self, dataset: Dataset, input_shape: List[int], transform: Compose):
self.dataset = dataset
self.transform = transform
self.input_shape = input_shape
self.ignore_idx = []
def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
if idx in self.ignore_idx:
return self[idx + 1]
hum, song, label = self.dataset[idx]
if hum.shape[1] < self.input_shape[1] or song.shape[1] < self.input_shape[1]:
self.ignore_idx.append(idx)
return self[idx + 1]
if self.transform:
hum = self.transform(hum)
song = self.transform(song)
return hum, song, label
def __len__(self) -> int:
return len(self.dataset)
def concat_clip(self, n: int, audio_length: float) -> Tensor:
hum, song, _ = self.dataset[n]
hum_batch = torch.split(hum, audio_length, dim=1)
hum_batch = torch.cat(hum_batch[:-1])
hum_batch = hum_batch.unsqueeze(dim=1)
if self.transform:
hum_batch = self.transform(hum_batch)
song_batch = torch.split(song, audio_length, dim=1)
song_batch = torch.cat(song_batch[:-1])
song_batch = song_batch.unsqueeze(dim=1)
if self.transform:
song_batch = self.transform(song_batch)
return hum_batch, song_batch
|
[
"torch.split",
"torch.cat"
] |
[((1049, 1088), 'torch.split', 'torch.split', (['audio', 'audio_length'], {'dim': '(1)'}), '(audio, audio_length, dim=1)\n', (1060, 1088), False, 'import torch\n'), ((1105, 1126), 'torch.cat', 'torch.cat', (['batch[:-1]'], {}), '(batch[:-1])\n', (1114, 1126), False, 'import torch\n'), ((2194, 2231), 'torch.split', 'torch.split', (['hum', 'audio_length'], {'dim': '(1)'}), '(hum, audio_length, dim=1)\n', (2205, 2231), False, 'import torch\n'), ((2252, 2277), 'torch.cat', 'torch.cat', (['hum_batch[:-1]'], {}), '(hum_batch[:-1])\n', (2261, 2277), False, 'import torch\n'), ((2437, 2475), 'torch.split', 'torch.split', (['song', 'audio_length'], {'dim': '(1)'}), '(song, audio_length, dim=1)\n', (2448, 2475), False, 'import torch\n'), ((2497, 2523), 'torch.cat', 'torch.cat', (['song_batch[:-1]'], {}), '(song_batch[:-1])\n', (2506, 2523), False, 'import torch\n')]
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
def h2olog_and_echo():
"""
Python API test: h2o.log_and_echo(message=u'')
"""
try:
h2o.log_and_echo("Testing h2o.log_and_echo")
except Exception as e:
assert False, "h2o.log_and_echo() command is not working."
if __name__ == "__main__":
pyunit_utils.standalone_test(h2olog_and_echo)
else:
h2olog_and_echo()
|
[
"h2o.log_and_echo",
"tests.pyunit_utils.standalone_test",
"sys.path.insert"
] |
[((49, 80), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../"""'], {}), "(1, '../../../')\n", (64, 80), False, 'import sys\n'), ((402, 447), 'tests.pyunit_utils.standalone_test', 'pyunit_utils.standalone_test', (['h2olog_and_echo'], {}), '(h2olog_and_echo)\n', (430, 447), False, 'from tests import pyunit_utils\n'), ((230, 274), 'h2o.log_and_echo', 'h2o.log_and_echo', (['"""Testing h2o.log_and_echo"""'], {}), "('Testing h2o.log_and_echo')\n", (246, 274), False, 'import h2o\n')]
|
# -*- coding:utf-8 -*-
# ------------------------
# written by <NAME>
# 2018-10
# ------------------------
import math
import torch
def get_IoU(ground_truth, region):
# xmin, ymin, xmax, ymax
x1 = max(ground_truth[0], region[0])
y1 = max(ground_truth[1], region[1])
x2 = min(ground_truth[2], region[0] + region[2])
y2 = min(ground_truth[3], region[1] + region[3])
if x2 - x1 < 0:
return 0
inter_area = (x2 - x1 + 1) * (y2 - y1 + 1)
outer_area = (region[2] - region[0] + 1) * (region[3] - region[1] + 1) \
+ (ground_truth[2] - ground_truth[0] + 1) * (ground_truth[3] - ground_truth[1] + 1) - inter_area
if outer_area == 0:
return 0
iou = inter_area / outer_area
return iou
def bbox_loss(bbox_output, rois, roi_labels, ground_truths):
# output: (20, 4) ground_truth: (, 4)
bbox_output = bbox_output.view(-1, 4)
roi_num = rois.size(0)
loss = 0
for i in range(roi_num):
label = roi_labels[i]
if label == 20:
continue
dx, dy, dw, dh = bbox_output[label, :].long()
Gx, Gy, Gw, Gh = ground_truths[i]
Px, Py, Pw, Ph = rois[i].long()
tx = (Gx - Px) / Pw
ty = (Gy - Py) / Ph
try:
tw = math.log(int(Gw) / int(Pw))
th = math.log(int(Gh) / int(Ph))
except:
print("******log exception******")
print(Gw, Pw, Gh, Ph)
print(Gw / Pw, Gh / Ph)
continue
t = torch.FloatTensor([tx, ty, tw, th])
d = torch.FloatTensor([dx, dy, dw, dh])
loss += sum((t - d) ** 2)
return loss / roi_num
def smooth(x):
if abs(x) < 1:
return 0.5 * x ** 2
else:
return abs(x) - 0.5
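# Note: smooth(x) above is the standard smooth-L1 (Huber) penalty:
#     0.5 * x**2   if |x| < 1
#     |x| - 0.5    otherwise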
|
[
"torch.FloatTensor"
] |
[((1507, 1542), 'torch.FloatTensor', 'torch.FloatTensor', (['[tx, ty, tw, th]'], {}), '([tx, ty, tw, th])\n', (1524, 1542), False, 'import torch\n'), ((1555, 1590), 'torch.FloatTensor', 'torch.FloatTensor', (['[dx, dy, dw, dh]'], {}), '([dx, dy, dw, dh])\n', (1572, 1590), False, 'import torch\n')]
|
#!/usr/bin/python2
"""
udev service for USB
transfers USB data to a ZeroMQ pull server via TCP
requires a pull socket at tcp://localhost:6372
Author: <NAME>. <<EMAIL>>
Python version: 2.7
"""
import argparse
import os
import signal
import struct
import sys
import time
import usb1 as _usb1
import zmq as _zmq
import hardware
from logger import getLogger
parser = argparse.ArgumentParser()
parser.add_argument('bus')
args = parser.parse_args()
_args = args.bus.split(':')
LOG = getLogger('usb2mq')
if len(_args) < 2:
LOG.error('invalid argument %s', args.bus)
sys.exit(1)
bus, address = _args[0:2]
# write pid into file
#pidfile = open('/opt/ca-hub-rpi/pid/{}-{}'.format(bus, address), 'w')
#pidfile.write(str(os.getpid()))
#pidfile.close()
# init zmq broker
zmq = _zmq.Context()
sender = zmq.socket(_zmq.PUSH)
sender.linger = 250
sender.connect('tcp://127.0.0.1:6372')
LOG.info('connect zmq pull server')
def send(*arr):
if not sender:
return
try:
data = bytearray()
for x in arr:
data += bytearray(x)
sender.send(struct.pack('>' + str(len(data)) + 'B', *data), flags=_zmq.NOBLOCK)
except _zmq.ZMQError:
pass
# register signal handler
running = False
def shutdown(signum, frame):
global running
LOG.info('Shutting down...')
if running and not sender.closed:
running = False
send([2, int(bus), int(address), productId])
running = False
if handle is not None:
handle.releaseInterface(0)
handle.close()
if type(device).__name__ == 'USBDevice':
device.close()
sender.close()
zmq.term()
os._exit(0)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
# get device from bus and address
usb1 = _usb1.USBContext()
device = None
handle = None
productId = hardware.INVALID_PRODUCT_ID
productName = ''
maxPacketSize = 64
try:
for _device in usb1.getDeviceIterator(skip_on_error=True):
if (_device.getBusNumber() == int(bus) and _device.getDeviceAddress() == int(address)):
LOG.info('Initialize device bus:%s address:%s', bus, address)
productName = _device.getProduct()
maxPacketSize = _device.getMaxPacketSize(hardware.ENDPOINT_ADDRESS)
LOG.info('%s (%s)', productName, _device.getManufacturer())
LOG.info('packet size: %d', maxPacketSize)
productId = hardware.getIdFromProductName(productName)
device = _device
break
except (RuntimeError, IOError, _usb1.USBError) as e:
LOG.error("Unexpected error 1: %s", str(e))
send([3, int(bus), int(address), productId], str(e))
shutdown(0, 0)
if device is None:
LOG.error('Device can not be initialized!')
shutdown(0, 0)
if productId == hardware.INVALID_PRODUCT_ID:
LOG.error('Unsupport USB device')
shutdown(0, 0)
# transfer callback function
def mainloop():
global handle
global running
# init device
try:
handle = device.open()
handle.claimInterface(0)
send([1, int(bus), int(address), productId])
running = True
#scheduler = sched.scheduler(time.time, time.sleep)
while running:
try:
data = handle.interruptRead(hardware.ENDPOINT_ADDRESS, maxPacketSize)
isValid = False
if productId == hardware.SPO2_PRODUCT_ID:
assert len(data) == 6
isValid = True
time.sleep(1.0/hardware.SPO2_SAMPLING_RATE_HZ/1.5)
elif productId == hardware.ECG_PRODUCT_ID:
assert len(data) == 27
isValid = True
time.sleep(1.0/hardware.ECG_SAMPLING_RATE_HZ/1.5)
if isValid and running:
send([0, int(bus), int(address), productId], data)
except _usb1.USBErrorInterrupted as e:
LOG.error("USB Error: %s", str(e))
send([3, int(bus), int(address), productId], str(e))
shutdown(0, 0)
except (RuntimeError, IOError, _usb1.USBError) as e:
LOG.error("Unexpected error 3: %s", str(e))
send([3, int(bus), int(address), productId], str(e))
if __name__ == '__main__':
mainloop()
|
[
"argparse.ArgumentParser",
"usb1.USBContext",
"logger.getLogger",
"time.sleep",
"hardware.getIdFromProductName",
"os._exit",
"sys.exit",
"signal.signal",
"zmq.Context"
] |
[((358, 383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (381, 383), False, 'import argparse\n'), ((472, 491), 'logger.getLogger', 'getLogger', (['"""usb2mq"""'], {}), "('usb2mq')\n", (481, 491), False, 'from logger import getLogger\n'), ((770, 784), 'zmq.Context', '_zmq.Context', ([], {}), '()\n', (782, 784), True, 'import zmq as _zmq\n'), ((1643, 1681), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'shutdown'], {}), '(signal.SIGINT, shutdown)\n', (1656, 1681), False, 'import signal\n'), ((1682, 1721), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'shutdown'], {}), '(signal.SIGTERM, shutdown)\n', (1695, 1721), False, 'import signal\n'), ((1765, 1783), 'usb1.USBContext', '_usb1.USBContext', ([], {}), '()\n', (1781, 1783), True, 'import usb1 as _usb1\n'), ((562, 573), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (570, 573), False, 'import sys\n'), ((1630, 1641), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (1638, 1641), False, 'import os\n'), ((2404, 2446), 'hardware.getIdFromProductName', 'hardware.getIdFromProductName', (['productName'], {}), '(productName)\n', (2433, 2446), False, 'import hardware\n'), ((3485, 3539), 'time.sleep', 'time.sleep', (['(1.0 / hardware.SPO2_SAMPLING_RATE_HZ / 1.5)'], {}), '(1.0 / hardware.SPO2_SAMPLING_RATE_HZ / 1.5)\n', (3495, 3539), False, 'import time\n'), ((3693, 3746), 'time.sleep', 'time.sleep', (['(1.0 / hardware.ECG_SAMPLING_RATE_HZ / 1.5)'], {}), '(1.0 / hardware.ECG_SAMPLING_RATE_HZ / 1.5)\n', (3703, 3746), False, 'import time\n')]
|
from typing import List, Dict, Any
from torch_tensorrt import _enums
import torch_tensorrt.ts
from torch_tensorrt import logging
import torch
from enum import Enum
class _IRType(Enum):
"""Enum to set the minimum required logging level to print a message to stdout
"""
ts = 0
fx = 1
def _module_ir(module: Any, ir: str) -> _IRType:
# Possible module types
module_is_tsable = any(isinstance(module, t) for t in [torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction])
module_is_fxable = any(isinstance(module, t) for t in [torch.nn.Module, torch.fx.GraphModule])
ir_targets_torchscript = any([ir == opt for opt in ["torchscript", "ts"]])
ir_targets_fx = ir == "fx"
if module_is_tsable and ir_targets_torchscript:
return _IRType.ts
elif module_is_fxable and ir_targets_fx:
if isinstance(module, torch.fx.GraphModule):
raise ValueError("Was given a torch.fx.GraphModule, fx is not currently supported by Torch-TensorRT")
elif ir_targets_fx:
raise ValueError("Preferred ir was set to \"fx\" which is currently not supported by Torch-TensorRT")
else:
raise ValueError("Torch-TensorRT currently does not support fx")
# return _IRType.fx
else:
if ir == "default":
# Options are listed in order of preference
if module_is_tsable:
logging.log(logging.Level.Info, "ir was set to default, using TorchScript as ir")
return _IRType.ts
elif module_is_fxable:
raise ValueError("Was given a torch.fx.GraphModule, fx is not currently supported by Torch-TensorRT")
#logging.log(logging.Level.Info, "ir was set to default, using TorchScript as fx")
#return _IRType.fx
else:
raise ValueError("Module was provided with in an unsupported format")
else:
raise ValueError("Unknown ir was requested")
def compile(module: Any,
ir="default",
inputs=[],
enabled_precisions=set([_enums.dtype.float]),
**kwargs):
target_ir = _module_ir(module, ir)
if target_ir == _IRType.ts:
ts_mod = module
if isinstance(module, torch.nn.Module):
logging.log("Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript")
ts_mod = torch.jit.script(module)
return torch_tensorrt.ts.compile(ts_mod, inputs=inputs, enabled_precisions=enabled_precisions, **kwargs)
elif target_ir == _IRType.fx:
raise RuntimeError("fx is currently not supported")
else:
raise RuntimeError("Module is an unknown format or the ir requested is unknown")
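# A minimal usage sketch (hypothetical module and input shape, for illustration only):
#
#     import torch_tensorrt
#     trt_module = torch_tensorrt.compile(my_module,
#                                         inputs=[torch_tensorrt.Input((1, 3, 224, 224))],
#                                         enabled_precisions={torch.half})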
def convert_method_to_trt_engine(module: Any,
method_name: str,
ir="default",
inputs=[],
enabled_precisions=set([_enums.dtype.float]),
**kwargs):
target_ir = _module_ir(module, ir)
if target_ir == _IRType.ts:
ts_mod = module
if isinstance(module, torch.nn.Module):
logging.log("Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript")
ts_mod = torch.jit.script(module)
return torch_tensorrt.ts.convert_method_to_trt_engine(ts_mod, method_name, inputs=inputs, enabled_precisions=enabled_precisions, **kwargs)
elif target_ir == _IRType.fx:
raise RuntimeError("fx is currently not supported")
else:
raise RuntimeError("Module is an unknown format or the ir requested is unknown")
|
[
"torch_tensorrt.logging.log",
"torch.jit.script"
] |
[((2054, 2242), 'torch_tensorrt.logging.log', 'logging.log', (['"""Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript"""'], {}), "(\n 'Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript'\n )\n", (2065, 2242), False, 'from torch_tensorrt import logging\n'), ((2245, 2269), 'torch.jit.script', 'torch.jit.script', (['module'], {}), '(module)\n', (2261, 2269), False, 'import torch\n'), ((2912, 3100), 'torch_tensorrt.logging.log', 'logging.log', (['"""Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript"""'], {}), "(\n 'Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript'\n )\n", (2923, 3100), False, 'from torch_tensorrt import logging\n'), ((3103, 3127), 'torch.jit.script', 'torch.jit.script', (['module'], {}), '(module)\n', (3119, 3127), False, 'import torch\n'), ((1299, 1384), 'torch_tensorrt.logging.log', 'logging.log', (['logging.Level.Info', '"""ir was set to default, using TorchScript as ir"""'], {}), "(logging.Level.Info,\n 'ir was set to default, using TorchScript as ir')\n", (1310, 1384), False, 'from torch_tensorrt import logging\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
email: <EMAIL>
GitHub: phuycke
"""
#%%
import matplotlib.pyplot as plt
import mne
import numpy as np
import os
import pandas as pd
import seaborn as sns
from scipy import ndimage
from matplotlib import ticker, rcParams, gridspec
#%%
TEXT_SIZE = 15
rcParams['font.family'] = 'Times New Roman'
rcParams['axes.titlesize'] = TEXT_SIZE
rcParams['axes.labelsize'] = TEXT_SIZE
rcParams['xtick.labelsize'] = TEXT_SIZE
rcParams['ytick.labelsize'] = TEXT_SIZE
#%%
# create grid for plots
fig = plt.figure(figsize=(10, 9))
gs = gridspec.GridSpec(2, 13)
# TFR plot
fig_3a = plt.subplot(gs[0, :8])
# topoplot
fig_3b = plt.subplot(gs[0, 8:])
# alpha on the fast timescale
fig_3c_l = plt.subplot(gs[1, 0:3]) # novel condition
fig_3c_r = plt.subplot(gs[1, 3:6]) # repeating condition
# alpha on the slow timescale
fig_3d_l = plt.subplot(gs[1, 7:10]) # novel condition
fig_3d_r = plt.subplot(gs[1, 10:13]) # repeating condition
#%%
"""
Figure 3A
"""
# path to the result of the permutation data
PERM_DATA = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Data\Stimulus-locked\Repetition 1 vs. repetition 8"
TIME_DATA = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\TF\Group level\data"
# define frequency bands (log spaced for setting the y-ticks later on)
FREQS = np.logspace(np.log10(4),
np.log10(30),
15)
# load the time data, and select everything between 0 and 1s
times = np.load(os.path.join(TIME_DATA, "stimulus_times.npy"))
times = times[np.where((times > 0) & (times <= 1))]
# the the difference between x[0] and x[1] for each value in times, and divide
# by 2 if len(times) is larger than 1s, else fix this at 0.0005
time_diff = np.diff(times) / 2. if len(times) > 1 else [0.0005]
# compute the limits of the time window (x-axis)
# start: first value of time (a bit larger than 0) - 0.00048828
# middle: all values except the last + 0.00048828
# final: last value of time (1) + 0.00048828
time_lims = np.concatenate([[times[0] - time_diff[0]], times[:-1] +
time_diff, [times[-1] + time_diff[-1]]])
# get the values that should be on the y-axis
yvals = FREQS
# compute the ratio: x[1] = x[0] * ratio (holds for all values)
ratio = yvals[1:] / yvals[:-1]
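# e.g. with 15 log-spaced values between 4 and 30 Hz, ratio = (30/4)**(1/14) ~ 1.1548 for every consecutive pair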
# compute the limits of the frequencies (y-axis)
# start: first value of yvals (4) / 1.15479362
# middle: the values of yvals
# last: the last value of yvals (30) * 1.15479362
log_yvals = np.concatenate([[yvals[0] / ratio[0]], yvals,
[yvals[-1] * ratio[0]]])
# get the limits of the y-axis
# note that yval_lims is in this case equal to yvals since yvals is
# log-spaced. This would not be true if linspace was used to get frequencies
yval_lims = np.sqrt(log_yvals[:-2] * log_yvals[2:])
time_lims = time_lims[:-1]
# create a meshgrid
# time_mesh: row values are the same, column values differ (time)
# yval_mesh: row values differ (freqs), column values are the same
time_mesh, yval_mesh = np.meshgrid(time_lims, yval_lims)
# load the permutation test result array + check dimensions of the data
f_obs = np.load(os.path.join(PERM_DATA, "f_obs.npy"))
assert f_obs.shape == (64, 15, 1024)
# 64: electrodes, 15: frequencies, 1024: time points
# we average over electrodes to retain the frequency and time information
f_obs_mean = np.mean(f_obs, axis = 0)
# apply a gaussian filter to the data, with SD = 1 for both axes
gauss = ndimage.filters.gaussian_filter(f_obs_mean,
[1, 1],
mode = 'constant')
# create a pseudocolor plot
fig_3a.pcolormesh(time_mesh,
yval_mesh,
gauss,
cmap = "RdBu_r",
shading = "gouraud")
# draw a contour around larger values
# we draw the contour around values that are percentile 97.5 or larger
fig_3a.contour(time_mesh,
yval_mesh,
gauss,
levels = [np.percentile(gauss, 97.5)],
colors = "black",
linewidths = 3,
linestyles = "solid")
# set the y-axis parameters, note that the y-axis needs to be converted to
# log, and that a ticker needs to be called to set the y-axis ticks
fig_3a.set_yscale('log')
fig_3a.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
fig_3a.yaxis.set_minor_formatter(ticker.NullFormatter())
fig_3a.yaxis.set_minor_locator(ticker.NullLocator())
# once the ticks are set, we assign the values of FREQS to the ticks
tick_vals = yvals[np.unique(np.linspace(0, len(yvals) - 1, 15).round().astype('int'))]
fig_3a.set_yticks(tick_vals)
# determine the y-ticks
ticks_str = []
for t in tick_vals:
if round(t) in [4, 8, 13, 19, 30]:
ticks_str.append("{0:.2f}".format(t))
else:
ticks_str.append(" ")
fig_3a.set_yticklabels(ticks_str)
# set the x-axis parameters: every 100 ms a label is placed
fig_3a.set_xticks(np.arange(0, 1.1, .25))
fig_3a.set_xticklabels([str(int(label)) for label in np.arange(0, 1001, 250)])
# set the general title, and the titles of the x-axis and the y-axis
fig_3a.set_xlabel('Time after stimulus (ms)')
fig_3a.set_ylabel('Frequency (Hz)')
fig_3a.set_title("Stimulus 1 vs. 8: permutation test TFR\nAlpha on the fast timescale (p = 0.001)")
# load the cluster data, and keep only the significant clusters
clust = np.load(os.path.join(PERM_DATA, "clust.npy"), allow_pickle = True)
clust_p_val = np.load(os.path.join(PERM_DATA, "clust_p_val.npy"))
f_obs_plot = np.zeros_like(f_obs)
for c, p_val in zip(clust, clust_p_val):
if p_val <= 0.05:
f_obs_plot[tuple(c)] = f_obs[tuple(c)]
# take the average (excluding NaNs) of the significant data
f_obs_plot_mean = np.nanmean(f_obs_plot, axis = 0)
# create a 2D raster of the significant data (no plot) to use for the colorbar
im = fig_3a.imshow(f_obs_plot_mean,
extent = [times[0], times[-1],
FREQS[0], FREQS[-1]],
aspect = "auto",
origin = "lower",
interpolation = "hanning",
cmap = "RdBu_r")
# get the colorbar of the above 2D raster, and paste it on the existing TFR plot
# note that this data is used to create the colorbar, and not the filtered data
# since the values become smaller due to the filtering process. The plot reflects
# the actual data, filtering is only done for visual appeal
cbar = fig.colorbar(im, ax = fig_3a)
# set some colorbar parameters, such as the title, ticks and tick labels
cbar.ax.set_title("F-statistic",
fontdict = {"fontsize": TEXT_SIZE})
cbar.ax.get_yaxis().set_ticks(np.arange(0, np.round(np.max(f_obs_plot_mean), 1) + 0.05, 4))
cbar.ax.tick_params(labelsize = TEXT_SIZE - 3)
# bug fix: make sure that the 0 is shown on the x-axis of the final plot
fig_3a.set_xbound(0, 1)
#%%
"""
Figure 3B
"""
# Determines which part of the analysis to run + some plotting parameters
STIM_LOCKED = True
COMPUTE_TFR = False
BAND = [(8, 12, "Alpha")]
TMIN, TMAX = .65, .9
VMIN, VMAX = 0.5, 4.5
rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 8
# these are the subjects that had all 512 epochs recorded and stored safely
full_epochs = ["sub-02", "sub-03", "sub-04", "sub-05", "sub-06", "sub-08",
"sub-10", "sub-12", "sub-13", "sub-15", "sub-16", "sub-17",
"sub-18", "sub-19", "sub-20", "sub-21", "sub-22", "sub-23",
"sub-25", "sub-26", "sub-27", "sub-28", "sub-29", "sub-30"]
# load the TFR data
rep1 = mne.time_frequency.read_tfrs(r"C:\Users\pieter\Downloads\repetition 1 (24 subs)-tfr.h5")[0]
rep8 = mne.time_frequency.read_tfrs(r"C:\Users\pieter\Downloads\repetition 8 (24 subs)-tfr.h5")[0]
# save rep8 in temp, dB transform
temp = rep8
temp._data = 10 * np.log10(rep8._data)
# save rep1 in temp2, dB transform
temp2 = rep1
temp2._data = 10 * np.log10(rep1._data)
temp._data -= temp2._data
# check whether the difference does not equal rep_1 or rep_8
assert np.all(temp._data != rep1._data)
assert not np.sum(temp._data != rep8._data)
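# note: temp and temp2 above are aliases of rep8 and rep1 (no copies are made), so the
# in-place dB transform and subtraction also modify rep8._data and rep1._data; the second
# assert passes precisely because temp and rep8 share the same underlying array.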
# colorbar with log scaled labels
def fmt_float(x, pos):
return r'${0:.2f}$'.format(x)
# define the data
avg_tfr = temp
# get the frequency bands
FMIN, FMAX, FNAME = BAND[0]
# make topoplot
avg_tfr.plot_topomap(tmin = TMIN,
tmax = TMAX,
fmin = FMIN,
fmax = FMAX,
vmin = VMIN,
vmax = VMAX,
unit = " ",
ch_type = "eeg",
cmap = "RdBu_r",
outlines = "head",
contours = 10,
colorbar = True,
cbar_fmt = fmt_float,
sensors = "ko",
axes = fig_3b,
title = " ")
# set a title which can be altered
fig_3b.set_title(r"$\alpha$ topography", size = TEXT_SIZE)
#%%
"""
Figure 3C
"""
# where to find the data files
ROOT = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Data\Stimulus-locked\Theta, alpha, beta + behavioral data"
# seaborn param
sns.set_style("ticks")
sns.set_context("paper")
# read the data
df = pd.read_csv(os.path.join(ROOT, "theta_alpha_beta_behavioural.csv"))
# change the column names to their appropriate label
df.columns = ['Reaction time (ms)', 'RT_log', 'Accuracy', 'Accuracy_int',
'Error_int', 'Theta power', 'Alpha power', 'Beta power',
'Subject nr', 'Repetitions_overall', 'Repetition count',
'Block_overall', 'Block number', 'Condition', 'Trial_overall',
'Trial_block', 'Response', 'Stimulus_ID']
x_title, y_title = "Repetition count", "Alpha power"
# Novel condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Novel"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "darkgrey",
ax = fig_3c_l)
# Recurring condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Recurring"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "black",
ax = fig_3c_r)
# figure parameters (left figure)
fig_3c_l.set_title(r"Novel condition", size = TEXT_SIZE)
fig_3c_l.set_ylim([-.5, -.1])
fig_3c_l.set_yticks(np.arange(-.5, -.09, .1))
fig_3c_l.set_xticks(np.arange(1, 9))
fig_3c_l.set_xlim(0.5, 8.5)
fig_3c_l.set_xlabel(r"Stimulus number")
fig_3c_l.set_ylabel(r"$\alpha$ power")
# figure parameters (right figure)
fig_3c_r.set_xlim(0.5, 8.5)
fig_3c_r.set_xticks(np.arange(1, 9))
fig_3c_r.set_ylim([-.5, -.1])
fig_3c_r.set_yticks(np.arange(-.5, -.09, .1))
fig_3c_r.set_yticklabels([])
fig_3c_r.set_title(r"Repeating condition", size = TEXT_SIZE)
fig_3c_r.set_xlabel(r"Stimulus number")
fig_3c_r.set_ylabel(" ")
#%%
"""
Figure 3D
"""
# new variables
x_title, y_title = "Block number", "Alpha power"
# Novel condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Novel"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "darkgrey",
ax = fig_3d_l)
# Recurring condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Recurring"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "black",
ax = fig_3d_r)
# figure parameters (left figure)
fig_3d_l.set_title(r"Novel condition", size = TEXT_SIZE)
fig_3d_l.set_ylim([-.5, -.1])
fig_3d_l.set_yticks(np.arange(-.5, -.09, .1))
fig_3d_l.set_xticks(np.arange(1, 9))
fig_3d_l.set_xlim(0.5, 8.5)
fig_3d_l.set_xlabel(r"Block number")
fig_3d_l.set_ylabel(r"$\alpha$ power")
# figure parameters (right figure)
fig_3d_r.set_xlim(0.5, 8.5)
fig_3d_r.set_xticks(np.arange(1, 9))
fig_3d_r.set_ylim([-.5, -.1])
fig_3d_r.set_yticks(np.arange(-.5, -.09, .1))
fig_3d_r.set_yticklabels([])
fig_3d_r.set_title(r"Repeating condition", size = TEXT_SIZE)
fig_3d_r.set_xlabel(r"Block number")
fig_3d_r.set_ylabel(" ")
#%%
"""
Save figure
"""
# define the Figure dir + set the size of the image
FIG = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Correct DPI plots"
# play around until the figure is satisfactory (difficult with high DPI)
plt.subplots_adjust(top=0.932, bottom=0.077, left=0.097, right=0.938,
hspace=0.5, wspace=0.35)
# letters indicating the panels
plt.text(-245, 5, "A", size = TEXT_SIZE+5)
plt.text(-85, 5, "B", size = TEXT_SIZE+5)
plt.text(-245, -1, "C", size = TEXT_SIZE+5)
plt.text(-115, -1, "D", size = TEXT_SIZE+5)
# dB label for panel B
plt.text(-1.5, 4.6, "dB", size = TEXT_SIZE)
# titles for panels C and D
plt.text(-200, -1.15, r"$\alpha$ power ~ fast timescale", size = TEXT_SIZE)
plt.text(-75, -1.15, r"$\alpha$ power ~ slow timescale", size = TEXT_SIZE)
# save as tiff and pdf
plt.savefig(fname = os.path.join(FIG, "Figure 3.tiff"), dpi = 300)
plt.savefig(fname = os.path.join(FIG, "Figure 3.pdf"), dpi = 300)
plt.close("all")
|
[
"numpy.sum",
"seaborn.regplot",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"os.path.join",
"numpy.nanmean",
"matplotlib.ticker.ScalarFormatter",
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"numpy.max",
"numpy.log10",
"seaborn.set_context",
"seaborn.set_style",
"mne.time_frequency.read_tfrs",
"matplotlib.pyplot.text",
"numpy.percentile",
"matplotlib.pyplot.subplots_adjust",
"numpy.all",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"scipy.ndimage.filters.gaussian_filter",
"numpy.where",
"matplotlib.ticker.NullFormatter",
"numpy.diff",
"matplotlib.gridspec.GridSpec",
"matplotlib.ticker.NullLocator",
"numpy.sqrt"
] |
[((607, 634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9)'}), '(figsize=(10, 9))\n', (617, 634), True, 'import matplotlib.pyplot as plt\n'), ((642, 666), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(13)'], {}), '(2, 13)\n', (659, 666), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((690, 712), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, :8]'], {}), '(gs[0, :8])\n', (701, 712), True, 'import matplotlib.pyplot as plt\n'), ((736, 758), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 8:]'], {}), '(gs[0, 8:])\n', (747, 758), True, 'import matplotlib.pyplot as plt\n'), ((801, 824), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0:3]'], {}), '(gs[1, 0:3])\n', (812, 824), True, 'import matplotlib.pyplot as plt\n'), ((860, 883), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 3:6]'], {}), '(gs[1, 3:6])\n', (871, 883), True, 'import matplotlib.pyplot as plt\n'), ((955, 979), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 7:10]'], {}), '(gs[1, 7:10])\n', (966, 979), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1039), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 10:13]'], {}), '(gs[1, 10:13])\n', (1025, 1039), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2296), 'numpy.concatenate', 'np.concatenate', (['[[times[0] - time_diff[0]], times[:-1] + time_diff, [times[-1] + time_diff[-1]]\n ]'], {}), '([[times[0] - time_diff[0]], times[:-1] + time_diff, [times[-\n 1] + time_diff[-1]]])\n', (2209, 2296), True, 'import numpy as np\n'), ((2682, 2752), 'numpy.concatenate', 'np.concatenate', (['[[yvals[0] / ratio[0]], yvals, [yvals[-1] * ratio[0]]]'], {}), '([[yvals[0] / ratio[0]], yvals, [yvals[-1] * ratio[0]]])\n', (2696, 2752), True, 'import numpy as np\n'), ((2980, 3019), 'numpy.sqrt', 'np.sqrt', (['(log_yvals[:-2] * log_yvals[2:])'], {}), '(log_yvals[:-2] * log_yvals[2:])\n', (2987, 3019), True, 'import numpy as np\n'), ((3232, 3265), 'numpy.meshgrid', 'np.meshgrid', (['time_lims', 'yval_lims'], {}), '(time_lims, yval_lims)\n', (3243, 3265), True, 'import numpy as np\n'), ((3571, 3593), 'numpy.mean', 'np.mean', (['f_obs'], {'axis': '(0)'}), '(f_obs, axis=0)\n', (3578, 3593), True, 'import numpy as np\n'), ((3670, 3738), 'scipy.ndimage.filters.gaussian_filter', 'ndimage.filters.gaussian_filter', (['f_obs_mean', '[1, 1]'], {'mode': '"""constant"""'}), "(f_obs_mean, [1, 1], mode='constant')\n", (3701, 3738), False, 'from scipy import ndimage\n'), ((5760, 5780), 'numpy.zeros_like', 'np.zeros_like', (['f_obs'], {}), '(f_obs)\n', (5773, 5780), True, 'import numpy as np\n'), ((5978, 6008), 'numpy.nanmean', 'np.nanmean', (['f_obs_plot'], {'axis': '(0)'}), '(f_obs_plot, axis=0)\n', (5988, 6008), True, 'import numpy as np\n'), ((8330, 8362), 'numpy.all', 'np.all', (['(temp._data != rep1._data)'], {}), '(temp._data != rep1._data)\n', (8336, 8362), True, 'import numpy as np\n'), ((9552, 9574), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (9565, 9574), True, 'import seaborn as sns\n'), ((9575, 9599), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (9590, 9599), True, 'import seaborn as sns\n'), ((10173, 10385), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Novel']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""darkgrey"""', 'ax': 'fig_3c_l'}), "(x=x_title, y=y_title, 
data=df.loc[df['Condition'] == 'Novel'],\n x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000, scatter_kws={'s': \n 15}, line_kws={'lw': 0.75}, color='darkgrey', ax=fig_3c_l)\n", (10184, 10385), True, 'import seaborn as sns\n'), ((10656, 10868), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Recurring']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""black"""', 'ax': 'fig_3c_r'}), "(x=x_title, y=y_title, data=df.loc[df['Condition'] ==\n 'Recurring'], x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000,\n scatter_kws={'s': 15}, line_kws={'lw': 0.75}, color='black', ax=fig_3c_r)\n", (10667, 10868), True, 'import seaborn as sns\n'), ((11889, 12101), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Novel']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""darkgrey"""', 'ax': 'fig_3d_l'}), "(x=x_title, y=y_title, data=df.loc[df['Condition'] == 'Novel'],\n x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000, scatter_kws={'s': \n 15}, line_kws={'lw': 0.75}, color='darkgrey', ax=fig_3d_l)\n", (11900, 12101), True, 'import seaborn as sns\n'), ((12372, 12584), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Recurring']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""black"""', 'ax': 'fig_3d_r'}), "(x=x_title, y=y_title, data=df.loc[df['Condition'] ==\n 'Recurring'], x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000,\n scatter_kws={'s': 15}, line_kws={'lw': 0.75}, color='black', ax=fig_3d_r)\n", (12383, 12584), True, 'import seaborn as sns\n'), ((13751, 13849), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.932)', 'bottom': '(0.077)', 'left': '(0.097)', 'right': '(0.938)', 'hspace': '(0.5)', 'wspace': '(0.35)'}), '(top=0.932, bottom=0.077, left=0.097, right=0.938,\n hspace=0.5, wspace=0.35)\n', (13770, 13849), True, 'import matplotlib.pyplot as plt\n'), ((13900, 13942), 'matplotlib.pyplot.text', 'plt.text', (['(-245)', '(5)', '"""A"""'], {'size': '(TEXT_SIZE + 5)'}), "(-245, 5, 'A', size=TEXT_SIZE + 5)\n", (13908, 13942), True, 'import matplotlib.pyplot as plt\n'), ((13947, 13988), 'matplotlib.pyplot.text', 'plt.text', (['(-85)', '(5)', '"""B"""'], {'size': '(TEXT_SIZE + 5)'}), "(-85, 5, 'B', size=TEXT_SIZE + 5)\n", (13955, 13988), True, 'import matplotlib.pyplot as plt\n'), ((13993, 14036), 'matplotlib.pyplot.text', 'plt.text', (['(-245)', '(-1)', '"""C"""'], {'size': '(TEXT_SIZE + 5)'}), "(-245, -1, 'C', size=TEXT_SIZE + 5)\n", (14001, 14036), True, 'import matplotlib.pyplot as plt\n'), ((14041, 14084), 'matplotlib.pyplot.text', 'plt.text', (['(-115)', '(-1)', '"""D"""'], {'size': '(TEXT_SIZE + 5)'}), "(-115, -1, 'D', size=TEXT_SIZE + 5)\n", (14049, 14084), True, 'import matplotlib.pyplot as plt\n'), ((14113, 14154), 'matplotlib.pyplot.text', 'plt.text', (['(-1.5)', '(4.6)', '"""dB"""'], {'size': 'TEXT_SIZE'}), "(-1.5, 4.6, 'dB', size=TEXT_SIZE)\n", (14121, 14154), True, 'import matplotlib.pyplot as plt\n'), ((14190, 14263), 'matplotlib.pyplot.text', 'plt.text', (['(-200)', '(-1.15)', '"""$\\\\alpha$ power ~ fast timescale"""'], {'size': 'TEXT_SIZE'}), "(-200, -1.15, '$\\\\alpha$ power ~ fast 
timescale', size=TEXT_SIZE)\n", (14198, 14263), True, 'import matplotlib.pyplot as plt\n'), ((14266, 14338), 'matplotlib.pyplot.text', 'plt.text', (['(-75)', '(-1.15)', '"""$\\\\alpha$ power ~ slow timescale"""'], {'size': 'TEXT_SIZE'}), "(-75, -1.15, '$\\\\alpha$ power ~ slow timescale', size=TEXT_SIZE)\n", (14274, 14338), True, 'import matplotlib.pyplot as plt\n'), ((14499, 14515), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (14508, 14515), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1504), 'numpy.log10', 'np.log10', (['(4)'], {}), '(4)\n', (1501, 1504), True, 'import numpy as np\n'), ((1527, 1539), 'numpy.log10', 'np.log10', (['(30)'], {}), '(30)\n', (1535, 1539), True, 'import numpy as np\n'), ((1648, 1693), 'os.path.join', 'os.path.join', (['TIME_DATA', '"""stimulus_times.npy"""'], {}), "(TIME_DATA, 'stimulus_times.npy')\n", (1660, 1693), False, 'import os\n'), ((1713, 1749), 'numpy.where', 'np.where', (['((times > 0) & (times <= 1))'], {}), '((times > 0) & (times <= 1))\n', (1721, 1749), True, 'import numpy as np\n'), ((3355, 3391), 'os.path.join', 'os.path.join', (['PERM_DATA', '"""f_obs.npy"""'], {}), "(PERM_DATA, 'f_obs.npy')\n", (3367, 3391), False, 'import os\n'), ((4559, 4583), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {}), '()\n', (4581, 4583), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((4618, 4640), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (4638, 4640), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((4673, 4693), 'matplotlib.ticker.NullLocator', 'ticker.NullLocator', ([], {}), '()\n', (4691, 4693), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((5179, 5202), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.25)'], {}), '(0, 1.1, 0.25)\n', (5188, 5202), True, 'import numpy as np\n'), ((5621, 5657), 'os.path.join', 'os.path.join', (['PERM_DATA', '"""clust.npy"""'], {}), "(PERM_DATA, 'clust.npy')\n", (5633, 5657), False, 'import os\n'), ((5702, 5744), 'os.path.join', 'os.path.join', (['PERM_DATA', '"""clust_p_val.npy"""'], {}), "(PERM_DATA, 'clust_p_val.npy')\n", (5714, 5744), False, 'import os\n'), ((7860, 7956), 'mne.time_frequency.read_tfrs', 'mne.time_frequency.read_tfrs', (['"""C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 1 (24 subs)-tfr.h5"""'], {}), "(\n 'C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 1 (24 subs)-tfr.h5')\n", (7888, 7956), False, 'import mne\n'), ((7959, 8055), 'mne.time_frequency.read_tfrs', 'mne.time_frequency.read_tfrs', (['"""C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 8 (24 subs)-tfr.h5"""'], {}), "(\n 'C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 8 (24 subs)-tfr.h5')\n", (7987, 8055), False, 'import mne\n'), ((8120, 8140), 'numpy.log10', 'np.log10', (['rep8._data'], {}), '(rep8._data)\n', (8128, 8140), True, 'import numpy as np\n'), ((8209, 8229), 'numpy.log10', 'np.log10', (['rep1._data'], {}), '(rep1._data)\n', (8217, 8229), True, 'import numpy as np\n'), ((8374, 8406), 'numpy.sum', 'np.sum', (['(temp._data != rep8._data)'], {}), '(temp._data != rep8._data)\n', (8380, 8406), True, 'import numpy as np\n'), ((9634, 9688), 'os.path.join', 'os.path.join', (['ROOT', '"""theta_alpha_beta_behavioural.csv"""'], {}), "(ROOT, 'theta_alpha_beta_behavioural.csv')\n", (9646, 9688), False, 'import os\n'), ((11260, 11287), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (11269, 11287), True, 'import numpy as np\n'), ((11309, 11324), 'numpy.arange', 
'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (11318, 11324), True, 'import numpy as np\n'), ((11517, 11532), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (11526, 11532), True, 'import numpy as np\n'), ((11586, 11613), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (11595, 11613), True, 'import numpy as np\n'), ((12976, 13003), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (12985, 13003), True, 'import numpy as np\n'), ((13025, 13040), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (13034, 13040), True, 'import numpy as np\n'), ((13231, 13246), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (13240, 13246), True, 'import numpy as np\n'), ((13300, 13327), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (13309, 13327), True, 'import numpy as np\n'), ((1908, 1922), 'numpy.diff', 'np.diff', (['times'], {}), '(times)\n', (1915, 1922), True, 'import numpy as np\n'), ((14385, 14419), 'os.path.join', 'os.path.join', (['FIG', '"""Figure 3.tiff"""'], {}), "(FIG, 'Figure 3.tiff')\n", (14397, 14419), False, 'import os\n'), ((14452, 14485), 'os.path.join', 'os.path.join', (['FIG', '"""Figure 3.pdf"""'], {}), "(FIG, 'Figure 3.pdf')\n", (14464, 14485), False, 'import os\n'), ((4214, 4240), 'numpy.percentile', 'np.percentile', (['gauss', '(97.5)'], {}), '(gauss, 97.5)\n', (4227, 4240), True, 'import numpy as np\n'), ((5256, 5279), 'numpy.arange', 'np.arange', (['(0)', '(1001)', '(250)'], {}), '(0, 1001, 250)\n', (5265, 5279), True, 'import numpy as np\n'), ((6978, 7001), 'numpy.max', 'np.max', (['f_obs_plot_mean'], {}), '(f_obs_plot_mean)\n', (6984, 7001), True, 'import numpy as np\n')]
|
import sys
from setuptools import setup, find_packages # pylint: disable=no-name-in-module,import-error
def dependencies(file):
with open(file) as f:
return f.read().splitlines()
setup(
name='log_symbols',
packages=find_packages(exclude=('tests', 'examples')),
version='0.0.14',
license='MIT',
description='Colored symbols for various log levels for Python',
long_description='Colored symbols for various log levels for Python. Find the documentation here: https://github.com/manrajgrover/py-log-symbols.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/manrajgrover/py-log-symbols',
keywords=[
'log symbols',
'symbols',
'log'
],
install_requires=dependencies('requirements.txt'),
extras_require={
':python_version < "3.4"': [
'enum34==1.1.6',
],
},
tests_require=dependencies('requirements-dev.txt'),
include_package_data=True
)
|
[
"setuptools.find_packages"
] |
[((240, 284), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'examples')"}), "(exclude=('tests', 'examples'))\n", (253, 284), False, 'from setuptools import setup, find_packages\n')]
|
"""
Date modification
"""
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.inf)
root_dir = '/media/xiayule/bdcp/other'
def modify_date():
img_path = os.path.join(root_dir, '3.jpg')
img = cv2.imread(img_path)
# _, img1 = cv2.threshold(img, 150, 200, cv2.THRESH_BINARY)
hue_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_range = np.array([140, 43, 46])
h_range = np.array([180, 255, 255])
th = cv2.inRange(hue_img, l_range, h_range)
index1 = th == 255
img1 = np.zeros(img.shape, np.uint8)
img1[:, :] = (255, 255, 255)
img1[index1] = img[index1]
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', img1)
cv2.waitKey()
cv2.destroyAllWindows()
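# note: OpenCV stores hue in [0, 179], so the inRange window above (H in 140-180 with
# moderate-to-high saturation and value) keeps saturated magenta-to-red pixels; which
# image feature (e.g. a red date stamp) this isolates is an assumption, not verified here.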
def get_print():
"""
:return:
"""
img_path = os.path.join(root_dir, 'zhuangbei2.jpg')
img = cv2.imread(img_path)
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8)*255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
if 80 <= b < 160 and 80 <= g < 150 and 140 <= r < 240:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r
# dst_img[i, j, 3] = [b, g, r]
else:
dst_img[i, j, 3] = 0
cv2.imwrite(os.path.join(root_dir, 'zhuangbei2.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
def get_print1():
"""
:return:
"""
img_path = os.path.join(root_dir, 'zhuangbei2.jpg')
img = cv2.imread(img_path)
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8)*255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
m = (int(b)+int(g)+int(r))/3
if abs(b-m) < 20 and abs(g-m) < 20 and abs(r-m) < 20:
dst_img[i, j, 3] = 0
else:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r
cv2.imwrite(os.path.join(root_dir, 'zhuangbei2.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
def get_touming():
"""
:return:
"""
img_path = os.path.join(root_dir, '26.jpg')
img = cv2.imread(img_path)
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8)*255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
if 0 <= b < 50 and 0 <= g < 50 and 0 <= r < 50:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r
# dst_img[i, j, 3] = [b, g, r]
else:
dst_img[i, j, 3] = 0
cv2.imwrite(os.path.join(root_dir, '26_1.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
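# A vectorized alternative to the per-pixel loops above (a sketch using the get_touming
# thresholds; not part of the original script):
#   mask = (img < 50).all(axis=2)      # pixels that are dark in all three channels
#   dst_img[mask, :3] = img[mask]      # keep their colour
#   dst_img[~mask, 3] = 0              # make everything else fully transparent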
def myfunc1(x):
if x >= 0:
return x
else:
return 2*x/(1+np.exp(-x))
def myfunc1_der1(x):
if x >= 0:
return 1
else:
return 2*(1 + np.exp(-x) + x * np.exp(-x)) / pow(1 + np.exp(-x), 2)
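# myfunc1 is a piecewise, swish-like activation: the identity for x >= 0 and
# 2*x*sigmoid(x) for x < 0; myfunc1_der1 is its analytic first derivative.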
def plot_swish():
"""
    Plot of the swish-style function and its derivative
:return:
"""
x = np.linspace(-4, 4, 1001)
y = np.array([myfunc1(i) for i in x])
y_d1 = np.array([myfunc1_der1(i) for i in x])
plt.plot(x, y, x, y_d1)
plt.show()
def modify_pixel():
img_path = os.path.join(root_dir, '51.png')
img = cv2.imread(img_path).astype('int')
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8) * 255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
if b < 255 and g < 255 and r < 255:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r+15
# dst_img[i, j, 3] = [b, g, r]
else:
dst_img[i, j, 3] = 0
dst_img[dst_img > 255] = 255
cv2.imwrite(os.path.join(root_dir, '5_1.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
get_touming()
# plot_swish()
# modify_pixel()
|
[
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"numpy.ones",
"cv2.imread",
"cv2.inRange",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"cv2.imshow",
"os.path.join",
"cv2.namedWindow"
] |
[((85, 122), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (104, 122), True, 'import numpy as np\n'), ((199, 230), 'os.path.join', 'os.path.join', (['root_dir', '"""3.jpg"""'], {}), "(root_dir, '3.jpg')\n", (211, 230), False, 'import os\n'), ((241, 261), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (251, 261), False, 'import cv2\n'), ((340, 376), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (352, 376), False, 'import cv2\n'), ((391, 414), 'numpy.array', 'np.array', (['[140, 43, 46]'], {}), '([140, 43, 46])\n', (399, 414), True, 'import numpy as np\n'), ((429, 454), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (437, 454), True, 'import numpy as np\n'), ((464, 502), 'cv2.inRange', 'cv2.inRange', (['hue_img', 'l_range', 'h_range'], {}), '(hue_img, l_range, h_range)\n', (475, 502), False, 'import cv2\n'), ((537, 566), 'numpy.zeros', 'np.zeros', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (545, 566), True, 'import numpy as np\n'), ((636, 675), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (651, 675), False, 'import cv2\n'), ((680, 701), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'img1'], {}), "('1', img1)\n", (690, 701), False, 'import cv2\n'), ((706, 719), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (717, 719), False, 'import cv2\n'), ((724, 747), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (745, 747), False, 'import cv2\n'), ((812, 852), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.jpg"""'], {}), "(root_dir, 'zhuangbei2.jpg')\n", (824, 852), False, 'import os\n'), ((863, 883), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (873, 883), False, 'import cv2\n'), ((970, 1011), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (982, 1011), False, 'import cv2\n'), ((1499, 1538), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (1514, 1538), False, 'import cv2\n'), ((1543, 1567), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (1553, 1567), False, 'import cv2\n'), ((1572, 1585), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1583, 1585), False, 'import cv2\n'), ((1590, 1613), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1611, 1613), False, 'import cv2\n'), ((1678, 1718), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.jpg"""'], {}), "(root_dir, 'zhuangbei2.jpg')\n", (1690, 1718), False, 'import os\n'), ((1729, 1749), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1739, 1749), False, 'import cv2\n'), ((1836, 1877), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (1848, 1877), False, 'import cv2\n'), ((2358, 2397), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (2373, 2397), False, 'import cv2\n'), ((2402, 2426), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (2412, 2426), False, 'import cv2\n'), ((2431, 2444), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2442, 2444), False, 'import cv2\n'), ((2449, 2472), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2470, 2472), False, 'import cv2\n'), ((2539, 2571), 'os.path.join', 'os.path.join', 
(['root_dir', '"""26.jpg"""'], {}), "(root_dir, '26.jpg')\n", (2551, 2571), False, 'import os\n'), ((2582, 2602), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2592, 2602), False, 'import cv2\n'), ((2689, 2730), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (2701, 2730), False, 'import cv2\n'), ((3205, 3244), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (3220, 3244), False, 'import cv2\n'), ((3249, 3273), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (3259, 3273), False, 'import cv2\n'), ((3278, 3291), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (3289, 3291), False, 'import cv2\n'), ((3296, 3319), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3317, 3319), False, 'import cv2\n'), ((3623, 3647), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(1001)'], {}), '(-4, 4, 1001)\n', (3634, 3647), True, 'import numpy as np\n'), ((3744, 3767), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'x', 'y_d1'], {}), '(x, y, x, y_d1)\n', (3752, 3767), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3780, 3782), True, 'import matplotlib.pyplot as plt\n'), ((3820, 3852), 'os.path.join', 'os.path.join', (['root_dir', '"""51.png"""'], {}), "(root_dir, '51.png')\n", (3832, 3852), False, 'import os\n'), ((3986, 4027), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (3998, 4027), False, 'import cv2\n'), ((4525, 4564), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (4540, 4564), False, 'import cv2\n'), ((4569, 4593), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (4579, 4593), False, 'import cv2\n'), ((4598, 4611), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (4609, 4611), False, 'import cv2\n'), ((4616, 4639), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4637, 4639), False, 'import cv2\n'), ((923, 951), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (930, 951), True, 'import numpy as np\n'), ((1444, 1484), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.png"""'], {}), "(root_dir, 'zhuangbei2.png')\n", (1456, 1484), False, 'import os\n'), ((1789, 1817), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (1796, 1817), True, 'import numpy as np\n'), ((2303, 2343), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.png"""'], {}), "(root_dir, 'zhuangbei2.png')\n", (2315, 2343), False, 'import os\n'), ((2642, 2670), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (2649, 2670), True, 'import numpy as np\n'), ((3156, 3190), 'os.path.join', 'os.path.join', (['root_dir', '"""26_1.png"""'], {}), "(root_dir, '26_1.png')\n", (3168, 3190), False, 'import os\n'), ((3937, 3965), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (3944, 3965), True, 'import numpy as np\n'), ((4477, 4510), 'os.path.join', 'os.path.join', (['root_dir', '"""5_1.png"""'], {}), "(root_dir, '5_1.png')\n", (4489, 4510), False, 'import os\n'), ((3863, 3883), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3873, 3883), False, 'import cv2\n'), ((3401, 3411), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3407, 3411), True, 
'import numpy as np\n'), ((3539, 3549), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3545, 3549), True, 'import numpy as np\n'), ((3500, 3510), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3506, 3510), True, 'import numpy as np\n'), ((3517, 3527), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3523, 3527), True, 'import numpy as np\n')]
|
"""User fixtures"""
# pylint: disable=unused-argument, redefined-outer-name
from io import BytesIO
import pytest
from PIL import Image
from rest_framework.test import APIClient
from rest_framework_jwt.settings import api_settings
from open_discussions.factories import UserFactory
from sites.factories import AuthenticatedSiteFactory
@pytest.fixture
def user(db, use_betamax, request):
"""Create a user"""
if use_betamax:
return request.getfixturevalue("reddit_user")
return UserFactory.create()
@pytest.fixture
def staff_user(db, use_betamax, request):
"""Create a staff user"""
if use_betamax:
request.getfixturevalue("configure_betamax")
return request.getfixturevalue("reddit_staff_user")
return UserFactory.create(is_staff=True)
@pytest.fixture()
def index_user(db, use_betamax, request):
"""Create a user to be used for indexing"""
if use_betamax:
request.getfixturevalue("configure_betamax")
return request.getfixturevalue("reddit_index_user")
user = UserFactory.create(is_staff=True)
return user
@pytest.fixture()
def logged_in_user(client, user):
"""Log the user in and yield the user object"""
client.force_login(user)
return user
@pytest.fixture()
def logged_in_profile(client):
"""Add a Profile and logged-in User"""
user = UserFactory.create(username="george")
client.force_login(user)
return user.profile
@pytest.fixture
def jwt_token(db, user, client, rf, settings):
"""Creates a JWT token for a regular user"""
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
client.cookies[settings.OPEN_DISCUSSIONS_COOKIE_NAME] = token
rf.cookies.load({settings.OPEN_DISCUSSIONS_COOKIE_NAME: token})
return token
@pytest.fixture
def client(db):
"""
Similar to the builtin client but this provides the DRF client instead of the Django test client.
"""
return APIClient()
@pytest.fixture
def user_client(client, user):
"""Version of the client that is authenticated with the user"""
client.force_login(user)
return client
@pytest.fixture
def staff_client(client, staff_user):
"""Version of the client that is authenticated with the staff_user"""
client.force_login(staff_user)
return client
@pytest.fixture
def authenticated_site(db, settings):
"""The authenticated site"""
return AuthenticatedSiteFactory.create(
key=settings.OPEN_DISCUSSIONS_DEFAULT_SITE_KEY
)
@pytest.fixture
def profile_image():
""" Create a PNG image """
image_file = BytesIO()
image = Image.new("RGBA", size=(250, 250), color=(256, 0, 0))
image.save(image_file, "png")
image_file.seek(0)
return image_file
|
[
"io.BytesIO",
"open_discussions.factories.UserFactory.create",
"PIL.Image.new",
"sites.factories.AuthenticatedSiteFactory.create",
"pytest.fixture",
"rest_framework.test.APIClient"
] |
[((791, 807), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (805, 807), False, 'import pytest\n'), ((1095, 1111), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1109, 1111), False, 'import pytest\n'), ((1246, 1262), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1260, 1262), False, 'import pytest\n'), ((499, 519), 'open_discussions.factories.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (517, 519), False, 'from open_discussions.factories import UserFactory\n'), ((754, 787), 'open_discussions.factories.UserFactory.create', 'UserFactory.create', ([], {'is_staff': '(True)'}), '(is_staff=True)\n', (772, 787), False, 'from open_discussions.factories import UserFactory\n'), ((1042, 1075), 'open_discussions.factories.UserFactory.create', 'UserFactory.create', ([], {'is_staff': '(True)'}), '(is_staff=True)\n', (1060, 1075), False, 'from open_discussions.factories import UserFactory\n'), ((1348, 1385), 'open_discussions.factories.UserFactory.create', 'UserFactory.create', ([], {'username': '"""george"""'}), "(username='george')\n", (1366, 1385), False, 'from open_discussions.factories import UserFactory\n'), ((2063, 2074), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (2072, 2074), False, 'from rest_framework.test import APIClient\n'), ((2522, 2601), 'sites.factories.AuthenticatedSiteFactory.create', 'AuthenticatedSiteFactory.create', ([], {'key': 'settings.OPEN_DISCUSSIONS_DEFAULT_SITE_KEY'}), '(key=settings.OPEN_DISCUSSIONS_DEFAULT_SITE_KEY)\n', (2553, 2601), False, 'from sites.factories import AuthenticatedSiteFactory\n'), ((2703, 2712), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2710, 2712), False, 'from io import BytesIO\n'), ((2725, 2778), 'PIL.Image.new', 'Image.new', (['"""RGBA"""'], {'size': '(250, 250)', 'color': '(256, 0, 0)'}), "('RGBA', size=(250, 250), color=(256, 0, 0))\n", (2734, 2778), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python
import rospy
from ros.rosPathFinderServer import RosPathFinderServer
if __name__ == '__main__':
server = RosPathFinderServer()
rospy.spin()
|
[
"rospy.spin",
"ros.rosPathFinderServer.RosPathFinderServer"
] |
[((133, 154), 'ros.rosPathFinderServer.RosPathFinderServer', 'RosPathFinderServer', ([], {}), '()\n', (152, 154), False, 'from ros.rosPathFinderServer import RosPathFinderServer\n'), ((159, 171), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (169, 171), False, 'import rospy\n')]
|
"""
This file mimics the keras.datasets download functions.
For parallel and distributed training, we need to account
for multiple processes (one per GPU) per agent.
For more information on data in Determined, read our data-access tutorial.
"""
import gzip
import tempfile
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
def load_training_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train)`.
License:
The copyright for Fashion-MNIST is held by Zalando SE.
Fashion-MNIST is licensed under the [MIT license](
https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
"""
download_directory = tempfile.mkdtemp()
base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
files = [
"train-labels-idx1-ubyte.gz",
"train-images-idx3-ubyte.gz",
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=download_directory))
with gzip.open(paths[0], "rb") as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], "rb") as imgpath:
x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
return x_train, y_train
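# Example usage (a sketch; the standard Fashion-MNIST training split holds 60,000 images):
#   x_train, y_train = load_training_data()
#   assert x_train.shape == (60000, 28, 28) and y_train.shape == (60000,)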
def load_validation_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_test, y_test)`.
License:
The copyright for Fashion-MNIST is held by Zalando SE.
Fashion-MNIST is licensed under the [MIT license](
https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
"""
download_directory = tempfile.mkdtemp()
base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
files = [
"t10k-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=download_directory))
with gzip.open(paths[0], "rb") as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], "rb") as imgpath:
x_test = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return x_test, y_test
|
[
"tensorflow.python.keras.utils.data_utils.get_file",
"tempfile.mkdtemp",
"gzip.open"
] |
[((740, 758), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (756, 758), False, 'import tempfile\n'), ((1735, 1753), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1751, 1753), False, 'import tempfile\n'), ((1071, 1096), 'gzip.open', 'gzip.open', (['paths[0]', '"""rb"""'], {}), "(paths[0], 'rb')\n", (1080, 1096), False, 'import gzip\n'), ((1185, 1210), 'gzip.open', 'gzip.open', (['paths[1]', '"""rb"""'], {}), "(paths[1], 'rb')\n", (1194, 1210), False, 'import gzip\n'), ((2064, 2089), 'gzip.open', 'gzip.open', (['paths[0]', '"""rb"""'], {}), "(paths[0], 'rb')\n", (2073, 2089), False, 'import gzip\n'), ((2177, 2202), 'gzip.open', 'gzip.open', (['paths[1]', '"""rb"""'], {}), "(paths[1], 'rb')\n", (2186, 2202), False, 'import gzip\n'), ((990, 1059), 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['fname'], {'origin': '(base + fname)', 'cache_subdir': 'download_directory'}), '(fname, origin=base + fname, cache_subdir=download_directory)\n', (998, 1059), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n'), ((1983, 2052), 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['fname'], {'origin': '(base + fname)', 'cache_subdir': 'download_directory'}), '(fname, origin=base + fname, cache_subdir=download_directory)\n', (1991, 2052), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n')]
|
"""
:mod:`zsl.tasks.asl.sum_task`
-----------------------------
Created on 22.12.2012
..moduleauthor:: <NAME>
"""
from __future__ import unicode_literals
from builtins import object
from injector import inject
from zsl import Zsl
from zsl.task.task_data import TaskData
from zsl.task.task_decorator import json_input, json_output
class SumTask(object):
@inject(app=Zsl)
def __init__(self, app):
self._app = app
@json_input
@json_output
def perform(self, data):
# type: (TaskData)->str
payload = data.payload
self._app.logger.debug("Sum task with data '{0}'.".format(payload))
return {"input": payload, "result": sum(payload)}
|
[
"injector.inject"
] |
[((365, 380), 'injector.inject', 'inject', ([], {'app': 'Zsl'}), '(app=Zsl)\n', (371, 380), False, 'from injector import inject\n')]
|
#-
# ==========================================================================
# Copyright (C) 1995 - 2006 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of
# the Software, unless such copies or derivative works are solely
# in the form of machine-executable object code generated by a
# source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
# AUTODESK DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED
# WARRANTIES INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF
# NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
# PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE, OR
# TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS LICENSORS
# BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK
# AND/OR ITS LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY
# OR PROBABILITY OF SUCH DAMAGES.
#
# ==========================================================================
#+
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import sys
import polyModifier
def statusError(message):
fullMsg = "Status failed: %s\n" % message
sys.stderr.write(fullMsg)
OpenMaya.MGlobal.displayError(fullMsg)
raise # called from exception handlers only, reraise exception
kPluginCmdName = "spSplitUV"
kPluginNodeTypeName = "spSplitUVNode"
kPluginNodeId = OpenMaya.MTypeId(0x87013)
#####################################################################
## COMMAND ##########################################################
#####################################################################
# Overview:
#
# The purpose of the splitUV command is to unshare (split) any selected UVs
# on a given object.
#
# How it works:
#
# This command is based on the polyModifierCmd. It relies on the polyModifierCmd
# to manage "how" the effects of the splitUV operation are applied (ie. directly
# on the mesh or through a modifier node). See polyModifier.py for more details
#
# To understand the algorithm behind the splitUV operation, refer to splitUVFty
#
# Limitations:
#
# (1) Can only operate on a single mesh at a given time. If there are more than one
# mesh with selected UVs, only the first mesh found in the selection list is
# operated on.
#
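# Typical invocation (a sketch, assuming the plug-in has been loaded; the object and UV
# indices below are hypothetical):
#
#   import maya.cmds as cmds
#   cmds.select("pPlane1.map[12]", r=True)   # select one or more UV components
#   cmds.spSplitUV()
#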
class splitUV(polyModifier.polyModifierCmd):
def __init__(self):
polyModifier.polyModifierCmd.__init__(self)
# Selected UVs
#
# Note: The MObject, fComponentList, is only ever accessed on a single call to the plugin.
# It is never accessed between calls and is stored on the class for access in the
# overriden initModifierNode() method.
#
self.__fComponentList = OpenMaya.MObject()
self.__fSelUVs = OpenMaya.MIntArray()
self.__fSplitUVFactory = splitUVFty()
def isUndoable(self):
return True
def doIt(self, args):
"""
implements the scripted splitUV command.
Arguments:
			args - the argument list that was passed to the command from MEL
"""
# Parse the selection list for objects with selected UV components.
# To simplify things, we only take the first object that we find with
# selected UVs and operate on that object alone.
#
# All other objects are ignored and return warning messages indicating
# this limitation.
#
selList = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList(selList)
selListIter = OpenMaya.MItSelectionList(selList)
# The splitUV node only accepts a component list input, so we build
# a component list using MFnComponentListData.
#
# MIntArrays could also be passed into the node to represent the uvIds,
# but are less storage efficient than component lists, since consecutive
# components are bundled into a single entry in component lists.
#
compListFn = OpenMaya.MFnComponentListData()
compListFn.create()
found = False
foundMultiple = False
while not selListIter.isDone():
dagPath = OpenMaya.MDagPath()
component = OpenMaya.MObject()
itemMatches = True
selListIter.getDagPath(dagPath, component)
# Check for selected UV components
#
if itemMatches and (component.apiType() == OpenMaya.MFn.kMeshMapComponent):
if not found:
# The variable 'component' holds all selected components on the selected
# object, thus only a single call to MFnComponentListData::add() is needed
# to store the selected components for a given object.
#
compListFn.add(component)
# Copy the component list created by MFnComponentListData into our local
# component list MObject member.
#
self.__fComponentList = compListFn.object()
# Locally store the actual uvIds of the selected UVs so that this command
# can directly modify the mesh in the case when there is no history and
# history is turned off.
#
compFn = OpenMaya.MFnSingleIndexedComponent(component)
compFn.getElements(self.__fSelUVs)
# Ensure that this DAG path will point to the shape of our object.
# Set the DAG path for the polyModifierCmd.
#
dagPath.extendToShape()
self._setMeshNode(dagPath)
found = True
else:
# Break once we have found a multiple object holding selected UVs, since
# we are not interested in how many multiple objects there are, only
# the fact that there are multiple objects.
#
foundMultiple = True
break
selListIter.next()
if foundMultiple:
self.displayWarning("Found more than one object with selected UVs - Only operating on first found object.")
# Initialize the polyModifierCmd node type - mesh node already set
#
self._setModifierNodeType(kPluginNodeId)
if found:
if self.__validateUVs():
# Now, pass control over to the polyModifierCmd._doModifyPoly() method
# to handle the operation.
#
try:
self._doModifyPoly()
except:
self.displayError("splitUV command failed!")
raise
else:
self.setResult("splitUV command succeeded!")
else:
self.displayError("splitUV command failed: Selected UVs are not splittable")
else:
self.displayError("splitUV command failed: Unable to find selected UVs")
def redoIt(self):
"""
Implements redo for the scripted splitUV command.
This method is called when the user has undone a command of this type
and then redoes it. No arguments are passed in as all of the necessary
information is cached by the doIt method.
"""
try:
self._redoModifyPoly()
self.setResult("splitUV command succeeded!")
except:
self.displayError("splitUV command failed!")
raise
def undoIt(self):
"""
implements undo for the scripted splitUV command.
This method is called to undo a previous command of this type. The
		system should be returned to the exact state it was in prior
to this command being executed. That includes the selection state.
"""
try:
self._undoModifyPoly()
self.setResult("splitUV undo succeeded!")
except:
self.displayError("splitUV undo failed!")
raise
def _initModifierNode(self, modifierNode):
# We need to tell the splitUV node which UVs to operate on. By overriding
# the polyModifierCmd._initModifierNode() method, we can insert our own
# modifierNode initialization code.
#
depNodeFn = OpenMaya.MFnDependencyNode(modifierNode)
uvListAttr = depNodeFn.attribute("inputComponents")
# Pass the component list down to the splitUV node
#
uvListPlug = OpenMaya.MPlug(modifierNode, uvListAttr)
uvListPlug.setMObject(self.__fComponentList)
def _directModifier(self, mesh):
self.__fSplitUVFactory.setMesh(mesh)
self.__fSplitUVFactory.setUVIds(self.__fSelUVs)
# Now, perform the splitUV
#
self.__fSplitUVFactory.doIt()
def __validateUVs(self):
"""
Validate the UVs for the splitUV operation. UVs are valid only if they are shared
by more than one face. While the splitUVNode is smart enough to not process the
split if a UV is not splittable, a splitUV node is still created by the polyModifierCmd.
So call this method to validate the UVs before calling _doModifyPoly().
validateUVs() will return true so long as there is at least one valid UV. It will
also prune out any invalid UVs from both the component list and UVId array.
"""
# Get the mesh that we are operating on
#
dagPath = self._getMeshNode()
mesh = dagPath.node()
# Get the number of faces sharing the selected UVs
#
meshFn = OpenMaya.MFnMesh(mesh)
polyIter = OpenMaya.MItMeshPolygon(mesh)
selUVFaceCountArray = OpenMaya.MIntArray()
indexParam = OpenMaya.MScriptUtil(0)
indexPtr = indexParam.asIntPtr()
count = 0
selUVsCount = self.__fSelUVs.length()
for i in range(selUVsCount):
while not polyIter.isDone():
if polyIter.hasUVs():
polyVertCount = polyIter.polygonVertexCount()
for j in range(polyVertCount):
polyIter.getUVIndex(j, indexPtr)
UVIndex = indexParam.getInt(indexPtr)
if UVIndex == self.__fSelUVs[i]:
count += 1
break
polyIter.next()
selUVFaceCountArray.append(count)
# Now, check to make sure that at least one UV is being shared by more than one
# face. So long as we have one UV that we can operate on, we should proceed and let
# the splitUVNode ignore the UVs which are only shared by one face.
#
isValid = False
validUVIndices = OpenMaya.MIntArray()
for i in range(selUVsCount):
if selUVFaceCountArray[i] > 1:
isValid = True
validUVIndices.append(i)
if isValid:
self.__pruneUVs(validUVIndices)
return isValid
def __pruneUVs(self, validUVIndices):
"""
This method will remove any invalid UVIds from the component list and UVId array.
The benefit of this is to reduce the amount of extra processing that the node would
have to perform. It will result in less iterations through the mesh as there are
less UVs to search for.
"""
validUVIds = OpenMaya.MIntArray()
for i in range(validUVIndices.length()):
uvIndex = validUVIndices[i]
validUVIds.append(self.__fSelUVs[uvIndex])
# Replace the local int array of UVIds
#
self.__fSelUVs.clear()
self.__fSelUVs = validUVIds
# Build the list of valid components
#
compFn = OpenMaya.MFnSingleIndexedComponent()
try:
compFn.create(OpenMaya.MFn.kMeshMapComponent)
except:
statusError("compFn.create( MFn::kMeshMapComponent )")
try:
compFn.addElements(validUVIds)
except:
statusError("compFn.addElements( validUVIds )")
# Replace the component list
#
component = compFn.object()
compListFn = OpenMaya.MFnComponentListData()
compListFn.create()
try:
compListFn.add(component)
except:
statusError("compListFn.add( component )")
self.__fComponentList = compListFn.object()
#####################################################################
## FACTORY ##########################################################
#####################################################################
# Overview:
#
# The splitUV factory implements the actual splitUV operation. It takes in
# only two parameters:
#
# 1) A polygonal mesh
# 2) An array of selected UV Ids
#
# The algorithm works as follows:
#
# 1) Parse the mesh for the selected UVs and collect:
#
# (a) Number of faces sharing each UV
# (stored as two arrays: face array, indexing/offset array)
# (b) Associated vertex Id
#
# 2) Create (N-1) new UVIds for each selected UV, where N represents the number of faces
# sharing the UV.
#
# 3) Set each of the new UVs to the same 2D location on the UVmap.
#
# 4) Arbitrarily let the last face in the list of faces sharing this UV keep the original
#    UV.
#
# 5) Assign each other face one of the new UVIds.
#
#
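# A small illustration of the two-array layout described in step (1a) above (illustrative
# only, not part of the factory): the faces sharing the i-th selected UV are
#
#   selUVFaceIdMap[selUVFaceOffsetMap[i] : selUVFaceOffsetMap[i + 1]]
#
# e.g. with selUVFaceOffsetMap = [0, 2, 5] and selUVFaceIdMap = [3, 7, 1, 4, 9],
# UV 0 is shared by faces [3, 7] and UV 1 by faces [1, 4, 9].
#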
class splitUVFty(polyModifier.polyModifierFty):
def __init__(self):
polyModifier.polyModifierFty.__init__(self)
# Mesh Node
# Note: We only make use of this MObject during a single call of
# the splitUV plugin. It is never maintained and used between
# calls to the plugin as the MObject handle could be invalidated
# between calls to the plugin.
#
self.__fMesh = OpenMaya.MObject()
self.__fSelUVs = OpenMaya.MIntArray()
self.__fSelUVs.clear()
def setMesh(self, mesh):
self.__fMesh = mesh
def setUVIds(self, uvIds):
self.__fSelUVs = uvIds
def doIt(self):
"""
Performs the actual splitUV operation on the given object and UVs
"""
####################################
# Declare our processing variables #
####################################
# Face Id and Face Offset map to the selected UVs
#
selUVFaceIdMap = OpenMaya.MIntArray()
selUVFaceOffsetMap = OpenMaya.MIntArray()
# Local Vertex Index map to the selected UVs
#
selUVLocalVertIdMap = OpenMaya.MIntArray()
#################################################
# Collect necessary information for the splitUV #
# #
# - uvSet #
# - faceIds / localVertIds per selected UV #
#################################################
meshFn = OpenMaya.MFnMesh(self.__fMesh)
selUVSet = meshFn.currentUVSetName()
indexParam = OpenMaya.MScriptUtil(0)
indexPtr = indexParam.asIntPtr()
offset = 0
selUVsCount = self.__fSelUVs.length()
polyIter = OpenMaya.MItMeshPolygon(self.__fMesh)
for i in range(selUVsCount):
selUVFaceOffsetMap.append(offset)
polyIter.reset()
while not polyIter.isDone():
if polyIter.hasUVs():
polyVertCount = polyIter.polygonVertexCount()
for j in range(polyVertCount):
polyIter.getUVIndex(j, indexPtr)
UVIndex = indexParam.getInt(indexPtr)
if UVIndex == self.__fSelUVs[i]:
selUVFaceIdMap.append(polyIter.index())
selUVLocalVertIdMap.append(j)
offset += 1
break
polyIter.next()
# Store total length of the faceId map in the last element of
# the offset map so that there is a way to get the number of faces
# sharing each of the selected UVs
#
selUVFaceOffsetMap.append(offset)
###############################
# Begin the splitUV operation #
###############################
currentUVCount = meshFn.numUVs(selUVSet)
for i in range(selUVsCount):
# Get the current FaceId map offset
#
offset = selUVFaceOffsetMap[i]
# Get the U and V values of the current UV
#
uvId = self.__fSelUVs[i]
uParam = OpenMaya.MScriptUtil(0.0)
uPtr = uParam.asFloatPtr()
vParam = OpenMaya.MScriptUtil(0.0)
vPtr = vParam.asFloatPtr()
meshFn.getUV(uvId, uPtr, vPtr, selUVSet)
u = uParam.getFloat(uPtr)
v = vParam.getFloat(vPtr)
# Get the number of faces sharing the current UV
#
faceCount = selUVFaceOffsetMap[i + 1] - selUVFaceOffsetMap[i]
# Arbitrarily choose that the last faceId in the list of faces
# sharing this UV, will keep the original UV.
#
for j in range(faceCount-1):
meshFn.setUV(currentUVCount, u, v, selUVSet)
localVertId = selUVLocalVertIdMap[offset]
faceId = selUVFaceIdMap[offset]
meshFn.assignUV(faceId, localVertId, currentUVCount, selUVSet)
currentUVCount += 1
offset += 1
#####################################################################
## NODE #############################################################
#####################################################################
class splitUVNode(polyModifier.polyModifierNode):
uvList = OpenMaya.MObject()
def __init__(self):
polyModifier.polyModifierNode.__init__(self)
self.fSplitUVFactory = splitUVFty()
def compute(self, plug, data):
"""
Description:
This method computes the value of the given output plug based
on the values of the input attributes.
Arguments:
plug - the plug to compute
data - object that provides access to the attributes for this node
"""
stateData = 0
state = OpenMayaMPx.cvar.MPxNode_state
try:
stateData = data.outputValue(state)
except:
statusError("ERROR getting state")
# Check for the HasNoEffect/PassThrough flag on the node.
#
# (stateData is an enumeration standard in all depend nodes - stored as short)
#
# (0 = Normal)
# (1 = HasNoEffect/PassThrough)
# (2 = Blocking)
# ...
#
if stateData.asShort() == 1:
try:
inputData = data.inputValue(splitUVNode.inMesh)
except:
statusError("ERROR getting inMesh")
try:
outputData = data.outputValue(splitUVNode.outMesh)
except:
statusError("ERROR getting outMesh")
# Simply redirect the inMesh to the outMesh for the PassThrough effect
#
outputData.setMObject(inputData.asMesh())
else:
# Check which output attribute we have been asked to
# compute. If this node doesn't know how to compute it,
# we must return MS::kUnknownParameter
#
if plug == splitUVNode.outMesh:
try:
inputData = data.inputValue(splitUVNode.inMesh)
except:
statusError("ERROR getting inMesh")
try:
outputData = data.outputValue(splitUVNode.outMesh)
except:
statusError("ERROR getting outMesh")
# Now, we get the value of the uvList and use it to perform
# the operation on this mesh
#
try:
inputUVs = data.inputValue(splitUVNode.uvList)
except:
statusError("ERROR getting uvList")
# Copy the inMesh to the outMesh, and now you can
# perform operations in-place on the outMesh
#
outputData.setMObject(inputData.asMesh())
mesh = outputData.asMesh()
# Retrieve the UV list from the component list.
#
# Note, we use a component list to store the components
# because it is more compact memory wise. (ie. comp[81:85]
# is smaller than comp[81], comp[82],...,comp[85])
#
compList = inputUVs.data()
compListFn = OpenMaya.MFnComponentListData(compList)
uvIds = OpenMaya.MIntArray()
for i in range(compListFn.length()):
comp = compListFn[i]
if comp.apiType() == OpenMaya.MFn.kMeshMapComponent:
uvComp = OpenMaya.MFnSingleIndexedComponent(comp)
for j in range(uvComp.elementCount()):
uvId = uvComp.element(j)
uvIds.append(uvId)
# Set the mesh object and uvList on the factory
#
self.fSplitUVFactory.setMesh(mesh)
self.fSplitUVFactory.setUVIds(uvIds)
# Now, perform the splitUV
#
try:
self.fSplitUVFactory.doIt()
except:
statusError("ERROR in splitUVFty.doIt()")
# Mark the output mesh as clean
#
outputData.setClean()
else:
return OpenMaya.kUnknownParameter
return None
#####################################################################
## REGISTRATION #####################################################
#####################################################################
def cmdCreator():
return OpenMayaMPx.asMPxPtr(splitUV())
def nodeCreator():
return OpenMayaMPx.asMPxPtr(splitUVNode())
def nodeInitializer():
attrFn = OpenMaya.MFnTypedAttribute()
splitUVNode.uvList = attrFn.create("inputComponents", "ics", OpenMaya.MFnComponentListData.kComponentList)
attrFn.setStorable(True) # To be stored during file-save
splitUVNode.inMesh = attrFn.create("inMesh", "im", OpenMaya.MFnMeshData.kMesh)
attrFn.setStorable(True) # To be stored during file-save
# Attribute is read-only because it is an output attribute
#
splitUVNode.outMesh = attrFn.create("outMesh", "om", OpenMaya.MFnMeshData.kMesh)
attrFn.setStorable(False)
attrFn.setWritable(False)
# Add the attributes we have created to the node
#
splitUVNode.addAttribute(splitUVNode.uvList)
splitUVNode.addAttribute(splitUVNode.inMesh)
splitUVNode.addAttribute(splitUVNode.outMesh)
# Set up a dependency between the input and the output. This will cause
# the output to be marked dirty when the input changes. The output will
# then be recomputed the next time the value of the output is requested.
#
splitUVNode.attributeAffects(splitUVNode.inMesh, splitUVNode.outMesh)
splitUVNode.attributeAffects(splitUVNode.uvList, splitUVNode.outMesh)
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject, "Autodesk", "1.0", "Any")
try:
mplugin.registerCommand(kPluginCmdName, cmdCreator)
except:
sys.stderr.write( "Failed to register command: %s\n" % kPluginCmdName)
raise
try:
mplugin.registerNode(kPluginNodeTypeName, kPluginNodeId, nodeCreator, nodeInitializer)
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeTypeName)
raise
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterCommand(kPluginCmdName)
except:
sys.stderr.write("Failed to unregister command: %s\n" % kPluginCmdName)
raise
try:
mplugin.deregisterNode(kPluginNodeId)
except:
sys.stderr.write("Failed to deregister node: %s" % kPluginNodeTypeName)
raise
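# A minimal usage sketch, assuming the stock devkit names: kPluginCmdName is
# defined earlier in this file and is assumed here to be 'splitUV'; the plug-in
# file name, object and UV component below are hypothetical.
# import maya.cmds as cmds
# cmds.loadPlugin('splitUV.py')      # hypothetical plug-in file name
# cmds.select('pPlane1.map[12]')     # select one or more UVs to split
# cmds.splitUV()                     # invokes the command registered above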
|
[
"maya.OpenMaya.MGlobal.getActiveSelectionList",
"maya.OpenMaya.MObject",
"maya.OpenMaya.MScriptUtil",
"maya.OpenMaya.MItSelectionList",
"maya.OpenMaya.MPlug",
"maya.OpenMayaMPx.MFnPlugin",
"maya.OpenMaya.MIntArray",
"polyModifier.polyModifierNode.__init__",
"polyModifier.polyModifierCmd.__init__",
"maya.OpenMaya.MGlobal.displayError",
"maya.OpenMaya.MFnDependencyNode",
"maya.OpenMaya.MItMeshPolygon",
"maya.OpenMaya.MFnSingleIndexedComponent",
"maya.OpenMaya.MFnComponentListData",
"maya.OpenMaya.MDagPath",
"polyModifier.polyModifierFty.__init__",
"maya.OpenMaya.MTypeId",
"maya.OpenMaya.MSelectionList",
"maya.OpenMaya.MFnMesh",
"maya.OpenMaya.MFnTypedAttribute",
"sys.stderr.write"
] |
[((2204, 2228), 'maya.OpenMaya.MTypeId', 'OpenMaya.MTypeId', (['(552979)'], {}), '(552979)\n', (2220, 2228), True, 'import maya.OpenMaya as OpenMaya\n'), ((1989, 2014), 'sys.stderr.write', 'sys.stderr.write', (['fullMsg'], {}), '(fullMsg)\n', (2005, 2014), False, 'import sys\n'), ((2016, 2054), 'maya.OpenMaya.MGlobal.displayError', 'OpenMaya.MGlobal.displayError', (['fullMsg'], {}), '(fullMsg)\n', (2045, 2054), True, 'import maya.OpenMaya as OpenMaya\n'), ((16098, 16116), 'maya.OpenMaya.MObject', 'OpenMaya.MObject', ([], {}), '()\n', (16114, 16116), True, 'import maya.OpenMaya as OpenMaya\n'), ((19553, 19581), 'maya.OpenMaya.MFnTypedAttribute', 'OpenMaya.MFnTypedAttribute', ([], {}), '()\n', (19579, 19581), True, 'import maya.OpenMaya as OpenMaya\n'), ((20693, 20749), 'maya.OpenMayaMPx.MFnPlugin', 'OpenMayaMPx.MFnPlugin', (['mobject', '"""Autodesk"""', '"""1.0"""', '"""Any"""'], {}), "(mobject, 'Autodesk', '1.0', 'Any')\n", (20714, 20749), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((21132, 21162), 'maya.OpenMayaMPx.MFnPlugin', 'OpenMayaMPx.MFnPlugin', (['mobject'], {}), '(mobject)\n', (21153, 21162), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((3175, 3218), 'polyModifier.polyModifierCmd.__init__', 'polyModifier.polyModifierCmd.__init__', (['self'], {}), '(self)\n', (3212, 3218), False, 'import polyModifier\n'), ((3492, 3510), 'maya.OpenMaya.MObject', 'OpenMaya.MObject', ([], {}), '()\n', (3508, 3510), True, 'import maya.OpenMaya as OpenMaya\n'), ((3530, 3550), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (3548, 3550), True, 'import maya.OpenMaya as OpenMaya\n'), ((4099, 4124), 'maya.OpenMaya.MSelectionList', 'OpenMaya.MSelectionList', ([], {}), '()\n', (4122, 4124), True, 'import maya.OpenMaya as OpenMaya\n'), ((4127, 4175), 'maya.OpenMaya.MGlobal.getActiveSelectionList', 'OpenMaya.MGlobal.getActiveSelectionList', (['selList'], {}), '(selList)\n', (4166, 4175), True, 'import maya.OpenMaya as OpenMaya\n'), ((4192, 4226), 'maya.OpenMaya.MItSelectionList', 'OpenMaya.MItSelectionList', (['selList'], {}), '(selList)\n', (4217, 4226), True, 'import maya.OpenMaya as OpenMaya\n'), ((4587, 4618), 'maya.OpenMaya.MFnComponentListData', 'OpenMaya.MFnComponentListData', ([], {}), '()\n', (4616, 4618), True, 'import maya.OpenMaya as OpenMaya\n'), ((8067, 8107), 'maya.OpenMaya.MFnDependencyNode', 'OpenMaya.MFnDependencyNode', (['modifierNode'], {}), '(modifierNode)\n', (8093, 8107), True, 'import maya.OpenMaya as OpenMaya\n'), ((8235, 8275), 'maya.OpenMaya.MPlug', 'OpenMaya.MPlug', (['modifierNode', 'uvListAttr'], {}), '(modifierNode, uvListAttr)\n', (8249, 8275), True, 'import maya.OpenMaya as OpenMaya\n'), ((9219, 9241), 'maya.OpenMaya.MFnMesh', 'OpenMaya.MFnMesh', (['mesh'], {}), '(mesh)\n', (9235, 9241), True, 'import maya.OpenMaya as OpenMaya\n'), ((9255, 9284), 'maya.OpenMaya.MItMeshPolygon', 'OpenMaya.MItMeshPolygon', (['mesh'], {}), '(mesh)\n', (9278, 9284), True, 'import maya.OpenMaya as OpenMaya\n'), ((9309, 9329), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (9327, 9329), True, 'import maya.OpenMaya as OpenMaya\n'), ((9346, 9369), 'maya.OpenMaya.MScriptUtil', 'OpenMaya.MScriptUtil', (['(0)'], {}), '(0)\n', (9366, 9369), True, 'import maya.OpenMaya as OpenMaya\n'), ((10126, 10146), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (10144, 10146), True, 'import maya.OpenMaya as OpenMaya\n'), ((10676, 10696), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (10694, 10696), True, 'import maya.OpenMaya as OpenMaya\n'), ((10974, 11010), 'maya.OpenMaya.MFnSingleIndexedComponent', 'OpenMaya.MFnSingleIndexedComponent', ([], {}), '()\n', (11008, 11010), True, 'import maya.OpenMaya as OpenMaya\n'), ((11323, 11354), 'maya.OpenMaya.MFnComponentListData', 'OpenMaya.MFnComponentListData', ([], {}), '()\n', (11352, 11354), True, 'import maya.OpenMaya as OpenMaya\n'), ((12560, 12603), 'polyModifier.polyModifierFty.__init__', 'polyModifier.polyModifierFty.__init__', (['self'], {}), '(self)\n', (12597, 12603), False, 'import polyModifier\n'), ((12873, 12891), 'maya.OpenMaya.MObject', 'OpenMaya.MObject', ([], {}), '()\n', (12889, 12891), True, 'import maya.OpenMaya as OpenMaya\n'), ((12911, 12931), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (12929, 12931), True, 'import maya.OpenMaya as OpenMaya\n'), ((13354, 13374), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (13372, 13374), True, 'import maya.OpenMaya as OpenMaya\n'), ((13398, 13418), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (13416, 13418), True, 'import maya.OpenMaya as OpenMaya\n'), ((13495, 13515), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (13513, 13515), True, 'import maya.OpenMaya as OpenMaya\n'), ((13773, 13803), 'maya.OpenMaya.MFnMesh', 'OpenMaya.MFnMesh', (['self.__fMesh'], {}), '(self.__fMesh)\n', (13789, 13803), True, 'import maya.OpenMaya as OpenMaya\n'), ((13859, 13882), 'maya.OpenMaya.MScriptUtil', 'OpenMaya.MScriptUtil', (['(0)'], {}), '(0)\n', (13879, 13882), True, 'import maya.OpenMaya as OpenMaya\n'), ((13985, 14022), 'maya.OpenMaya.MItMeshPolygon', 'OpenMaya.MItMeshPolygon', (['self.__fMesh'], {}), '(self.__fMesh)\n', (14008, 14022), True, 'import maya.OpenMaya as OpenMaya\n'), ((16142, 16186), 'polyModifier.polyModifierNode.__init__', 'polyModifier.polyModifierNode.__init__', (['self'], {}), '(self)\n', (16180, 16186), False, 'import polyModifier\n'), ((4729, 4748), 'maya.OpenMaya.MDagPath', 'OpenMaya.MDagPath', ([], {}), '()\n', (4746, 4748), True, 'import maya.OpenMaya as OpenMaya\n'), ((4764, 4782), 'maya.OpenMaya.MObject', 'OpenMaya.MObject', ([], {}), '()\n', (4780, 4782), True, 'import maya.OpenMaya as OpenMaya\n'), ((15079, 15104), 'maya.OpenMaya.MScriptUtil', 'OpenMaya.MScriptUtil', (['(0.0)'], {}), '(0.0)\n', (15099, 15104), True, 'import maya.OpenMaya as OpenMaya\n'), ((15147, 15172), 'maya.OpenMaya.MScriptUtil', 'OpenMaya.MScriptUtil', (['(0.0)'], {}), '(0.0)\n', (15167, 15172), True, 'import maya.OpenMaya as OpenMaya\n'), ((20821, 20890), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to register command: %s\\n' % kPluginCmdName)"], {}), "('Failed to register command: %s\\n' % kPluginCmdName)\n", (20837, 20890), False, 'import sys\n'), ((21007, 21076), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to register node: %s' % kPluginNodeTypeName)"], {}), "('Failed to register node: %s' % kPluginNodeTypeName)\n", (21023, 21076), False, 'import sys\n'), ((21224, 21295), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to unregister command: %s\\n' % kPluginCmdName)"], {}), "('Failed to unregister command: %s\\n' % kPluginCmdName)\n", (21240, 21295), False, 'import sys\n'), ((21362, 21433), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to deregister node: %s' % kPluginNodeTypeName)"], {}), "('Failed to deregister node: %s' % kPluginNodeTypeName)\n", (21378, 21433), False, 'import sys\n'), ((18415, 18454), 'maya.OpenMaya.MFnComponentListData', 'OpenMaya.MFnComponentListData', (['compList'], {}), '(compList)\n', (18444, 18454), True, 'import maya.OpenMaya as OpenMaya\n'), ((18468, 18488), 'maya.OpenMaya.MIntArray', 'OpenMaya.MIntArray', ([], {}), '()\n', (18486, 18488), True, 'import maya.OpenMaya as OpenMaya\n'), ((5631, 5676), 'maya.OpenMaya.MFnSingleIndexedComponent', 'OpenMaya.MFnSingleIndexedComponent', (['component'], {}), '(component)\n', (5665, 5676), True, 'import maya.OpenMaya as OpenMaya\n'), ((18629, 18669), 'maya.OpenMaya.MFnSingleIndexedComponent', 'OpenMaya.MFnSingleIndexedComponent', (['comp'], {}), '(comp)\n', (18663, 18669), True, 'import maya.OpenMaya as OpenMaya\n')]
|
from cosymlib.shape import maps
import numpy as np
import sys
def plot_minimum_distortion_path_shape(shape_label1, shape_label2, num_points=20, output=sys.stdout, show_plot=True):
import matplotlib.pyplot as plt
path = get_shape_path(shape_label1, shape_label2, num_points)
shape_map_txt = " {:6} {:6}\n".format(shape_label1, shape_label2)
for idx, value in enumerate(path[0]):
shape_map_txt += '{:6.3f}, {:6.3f}'.format(path[0][idx], path[1][idx])
shape_map_txt += '\n'
print(shape_map_txt)
if show_plot:
plt.plot(path[0], path[1], 'k', linewidth=2.0)
plt.xlabel(shape_label1)
plt.ylabel(shape_label2)
plt.show()
def get_shape_path(shape_label1, shape_label2, num_points):
return maps.get_shape_map(shape_label1, shape_label2, num_points)
def plot_molecular_orbital_diagram(molecule, wfnsym, mo_range=None):
import matplotlib.pyplot as plt
labels = wfnsym.IRLab
if mo_range is not None:
ird_a_max = [np.argmax(ird_a_orb) for ird_a_orb in wfnsym.mo_IRd_a][mo_range[0]:mo_range[1]]
energies = molecule.electronic_structure.alpha_energies[mo_range[0]:mo_range[1]]
else:
ird_a_max = [np.argmax(ird_a_orb) for ird_a_orb in wfnsym.mo_IRd_a]
energies = molecule.electronic_structure.alpha_energies
ax1 = plt.axes()
ax1.axes.get_xaxis().set_visible(False) # Hide x axis
# ax1.axes.get_yaxis().set_visible(True)
degeneracy = [[energies[0]]]
for energy in energies[1:]:
if abs(energy - degeneracy[-1][-1]) < 1e-3:
degeneracy[-1].append(energy)
else:
degeneracy.append([energy])
max_value = 5e-3
x_center = []
for ix in degeneracy:
if len(ix) == 1:
x_center.append([0])
else:
x_center.append(np.linspace(-max_value, max_value, len(ix)))
x_center = [y for x in x_center for y in x]
plt.scatter(x_center, energies, s=500, marker="_", linewidth=3)
for i in range(len(energies)):
plt.text(-max_value * 2, energies[i], labels[ird_a_max[i]])
plt.show()
def swap_vectors(v1, v2, position):
vector1 = v1.get_copy()
vector2 = v2.get_copy()
for i in range(len(v1)):
if i >= position:
vector1[i] = v2[i]
vector2[i] = v1[i]
return vector1, vector2
def plot_symmetry_energy_evolution(molecules, wfnsym, mo_range=None):
import matplotlib.pyplot as plt
energies = []
ird_a_max = []
for idm, molecule in enumerate(molecules):
labels = wfnsym[idm].IRLab
if mo_range is not None:
ird_a_max.append(np.array([np.argmax(ird_a_orb) for ird_a_orb in wfnsym[idm].mo_IRd_a]
[mo_range[0]:mo_range[1]]))
energies.append(molecule.electronic_structure.alpha_energies[mo_range[0]:mo_range[1]])
else:
ird_a_max.append(np.array([np.argmax(ird_a_orb) for ird_a_orb in wfnsym[idm].mo_IRd_a]))
energies.append(molecule.electronic_structure.alpha_energies)
energies_x_orbital = np.array(energies).T
ird_a_x_orbital = np.array(ird_a_max).T
for i in range(len(ird_a_x_orbital)):
for j in range(len(ird_a_x_orbital[i])):
if j == 0:
old_ird = ird_a_x_orbital[i][0]
else:
if old_ird != ird_a_x_orbital[i][j]:
for k in range(len(ird_a_x_orbital) - i):
if old_ird == ird_a_x_orbital[k + i][j]:
ird_a_x_orbital[i], ird_a_x_orbital[k + i] = swap_vectors(ird_a_x_orbital[i],
ird_a_x_orbital[k + i], j)
energies_x_orbital[i], energies_x_orbital[k + i] = swap_vectors(energies_x_orbital[i],
energies_x_orbital[k + i],
j)
break
old_ird = ird_a_x_orbital[i][j]
for ide, energy in enumerate(energies_x_orbital):
x = np.arange(len(energy))
plt.plot(x, energy, marker='_')
for i in range(len(energy)):
plt.text(x[i], energy[i] + abs(energy[i])*0.001, labels[ird_a_x_orbital[ide][i]])
plt.show()
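# A minimal usage sketch; 'SP-4' and 'T-4' are assumed to be valid cosymlib
# shape labels (square planar and tetrahedron) and may differ between versions.
# plot_minimum_distortion_path_shape('SP-4', 'T-4', num_points=20)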
|
[
"cosymlib.shape.maps.get_shape_map",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.argmax",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.text",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((764, 822), 'cosymlib.shape.maps.get_shape_map', 'maps.get_shape_map', (['shape_label1', 'shape_label2', 'num_points'], {}), '(shape_label1, shape_label2, num_points)\n', (782, 822), False, 'from cosymlib.shape import maps\n'), ((1337, 1347), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1345, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1993), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_center', 'energies'], {'s': '(500)', 'marker': '"""_"""', 'linewidth': '(3)'}), "(x_center, energies, s=500, marker='_', linewidth=3)\n", (1941, 1993), True, 'import matplotlib.pyplot as plt\n'), ((2102, 2112), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2110, 2112), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4418, 4420), True, 'import matplotlib.pyplot as plt\n'), ((559, 605), 'matplotlib.pyplot.plot', 'plt.plot', (['path[0]', 'path[1]', '"""k"""'], {'linewidth': '(2.0)'}), "(path[0], path[1], 'k', linewidth=2.0)\n", (567, 605), True, 'import matplotlib.pyplot as plt\n'), ((614, 638), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['shape_label1'], {}), '(shape_label1)\n', (624, 638), True, 'import matplotlib.pyplot as plt\n'), ((647, 671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['shape_label2'], {}), '(shape_label2)\n', (657, 671), True, 'import matplotlib.pyplot as plt\n'), ((680, 690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (688, 690), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2096), 'matplotlib.pyplot.text', 'plt.text', (['(-max_value * 2)', 'energies[i]', 'labels[ird_a_max[i]]'], {}), '(-max_value * 2, energies[i], labels[ird_a_max[i]])\n', (2045, 2096), True, 'import matplotlib.pyplot as plt\n'), ((3092, 3110), 'numpy.array', 'np.array', (['energies'], {}), '(energies)\n', (3100, 3110), True, 'import numpy as np\n'), ((3135, 3154), 'numpy.array', 'np.array', (['ird_a_max'], {}), '(ird_a_max)\n', (3143, 3154), True, 'import numpy as np\n'), ((4242, 4273), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'energy'], {'marker': '"""_"""'}), "(x, energy, marker='_')\n", (4250, 4273), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1227), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (1216, 1227), True, 'import numpy as np\n'), ((1007, 1027), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (1016, 1027), True, 'import numpy as np\n'), ((2930, 2950), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (2939, 2950), True, 'import numpy as np\n'), ((2652, 2672), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (2661, 2672), True, 'import numpy as np\n')]
|
import asyncio
from pycallgraph2 import PyCallGraph
from pycallgraph2.output import GraphvizOutput
async def gen_1():
for value in range(0, 10):
await asyncio.sleep(1) # Could be a slow HTTP request
yield value
async def gen_2(it):
async for value in it:
await asyncio.sleep(1) # Could be a slow HTTP request
yield value * 2
async def gen_3(it):
async for value in it:
await asyncio.sleep(1) # Could be a slow HTTP request
yield value + 3
async def run():
file_path = '/'.join([
'data/output/images',
'0201_0101_asyncio.png'
])
graphviz = GraphvizOutput()
graphviz.output_file = file_path
with PyCallGraph(output=graphviz):
it_1 = gen_1()
it_2 = gen_2(it_1)
it_3 = gen_3(it_2)
async for val in it_3:
print(val)
if __name__ == '__main__':
asyncio.run(run())
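# Worked example of the pipeline above: gen_1 yields 0..9, gen_2 doubles each
# value and gen_3 adds 3, so the loop prints 3, 5, 7, ..., 21, roughly one value
# every three seconds because each stage awaits asyncio.sleep(1) per item.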
|
[
"pycallgraph2.output.GraphvizOutput",
"pycallgraph2.PyCallGraph",
"asyncio.sleep"
] |
[((666, 682), 'pycallgraph2.output.GraphvizOutput', 'GraphvizOutput', ([], {}), '()\n', (680, 682), False, 'from pycallgraph2.output import GraphvizOutput\n'), ((729, 757), 'pycallgraph2.PyCallGraph', 'PyCallGraph', ([], {'output': 'graphviz'}), '(output=graphviz)\n', (740, 757), False, 'from pycallgraph2 import PyCallGraph\n'), ((165, 181), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (178, 181), False, 'import asyncio\n'), ((297, 313), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (310, 313), False, 'import asyncio\n'), ((433, 449), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (446, 449), False, 'import asyncio\n')]
|
from core.enum.menu_type import MenuType
from ui.shell.menu_factory import MenuFactory
class CarRental:
def __init__(self):
self.done = False
self.ui = MenuFactory.get(MenuType.MAIN)
self.prev_ui = self.ui
def change_ui(self, menu_type: MenuType):
self.prev_ui = self.ui
self.ui = MenuFactory.get(menu_type)
def run(self):
while not self.done:
if self.ui:
next_ui = self.ui.execute()
if next_ui is None or next_ui == MenuType.EXIT_MENU:
self.done = True
else:
self.change_ui(next_ui)
else:
self.done = True
|
[
"ui.shell.menu_factory.MenuFactory.get"
] |
[((174, 204), 'ui.shell.menu_factory.MenuFactory.get', 'MenuFactory.get', (['MenuType.MAIN'], {}), '(MenuType.MAIN)\n', (189, 204), False, 'from ui.shell.menu_factory import MenuFactory\n'), ((332, 358), 'ui.shell.menu_factory.MenuFactory.get', 'MenuFactory.get', (['menu_type'], {}), '(menu_type)\n', (347, 358), False, 'from ui.shell.menu_factory import MenuFactory\n')]
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/11/2 9:23 PM
# 1.2 How TensorFlow Works
import tensorflow as tf
# 1. Import or generate the sample dataset.
# 2. Transform and normalize the data.
# data = tf.nn.batch_norm_with_global_normalization(...)
# 3. Split the dataset into training, test, and validation sets.
# 4. Set the machine learning parameters (hyperparameters).
learning_rate = 0.01
batch_size = 100
iterations = 1000
# 5. Initialize variables and placeholders.
a_var = tf.constant(42)
# x_input = tf.placeholder(tf.float32, [None, input_size])
# y_input = tf.placeholder(tf.float32, [None, num_classes])
# 6. Define the model structure.
# y_pred = tf.add(tf.mul(x_input, weight_matrix), b_matrix)
# 7. Declare the loss function.
# loss = tf.reduce_mean(tf.square(y_actual - y_pred))
# 8. Initialize and train the model.
# with tf.Session(graph=graph) as session:
# ...
# session.run(...)
# ...
# 9. Evaluate the machine learning model.
# 10. Tune the hyperparameters.
# 11. Publish/predict results.
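# A minimal sketch of steps 5-8 above, assuming TensorFlow 1.x; x_data, y_data,
# W and b are hypothetical names, and numpy is assumed to be imported as np.
# x_data = np.random.rand(100, 1).astype(np.float32)
# y_data = 3.0 * x_data + 2.0
# x_input = tf.placeholder(tf.float32, [None, 1])
# y_input = tf.placeholder(tf.float32, [None, 1])
# W = tf.Variable(tf.zeros([1, 1]))
# b = tf.Variable(tf.zeros([1]))
# y_pred = tf.add(tf.matmul(x_input, W), b)
# loss = tf.reduce_mean(tf.square(y_input - y_pred))
# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# with tf.Session() as session:
#     session.run(tf.global_variables_initializer())
#     for _ in range(iterations):
#         session.run(train_step, feed_dict={x_input: x_data, y_input: y_data})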
|
[
"tensorflow.constant"
] |
[((368, 383), 'tensorflow.constant', 'tf.constant', (['(42)'], {}), '(42)\n', (379, 383), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 17:38:37 2018
@author: simao
"""
import numpy as np
from scipy import stats
def onehotencoder(tind, *args):
if len(args) == 0:
maxclasses = max(tind)+1
elif len(args) == 1:
maxclasses = args[0]
else:
raise NotImplementedError
t = np.zeros((tind.shape[0], maxclasses))
    t[np.arange(tind.shape[0]), tind.astype(int).reshape((-1,))] = 1
return t
def onehotnoise(tind, maxclasses, maxprob=0.5):
tind = tind.astype('int')
t = np.zeros((tind.shape[0], maxclasses))
t = t + (1 - maxprob) / (maxclasses - 1)
t[np.arange(tind.shape[0]), tind.reshape((-1,))] = maxprob
return t
def label_noise(t, pmin=0.8, pmax=1.0):
j = np.argmax(t, 1)
n = t.shape[0]
phigh = np.random.uniform(pmin, pmax, (n,))
plow = (1 - phigh) / (t.shape[1] - 1)
for i in range(n):
t[i] = plow[i]
t[i,j[i]] = phigh[i]
return t
def targetmode(tar_sequence):
idx = stats.mode(tar_sequence)[0][0]
return np.tile(idx, len(tar_sequence))
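# Small worked examples of the encoders above (illustrative, assuming integer
# class indices as input).
if __name__ == '__main__':
    # one-hot encoding of class indices [0, 2, 1] with 3 classes ->
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]
    print(onehotencoder(np.array([0, 2, 1]), 3))
    # soft targets: the true class gets maxprob, the rest share the remainder ->
    # [[0.25 0.5  0.25]]
    print(onehotnoise(np.array([1]), 3, maxprob=0.5))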
|
[
"numpy.random.uniform",
"scipy.stats.mode",
"numpy.argmax",
"numpy.zeros",
"numpy.arange"
] |
[((346, 383), 'numpy.zeros', 'np.zeros', (['(tind.shape[0], maxclasses)'], {}), '((tind.shape[0], maxclasses))\n', (354, 383), True, 'import numpy as np\n'), ((555, 592), 'numpy.zeros', 'np.zeros', (['(tind.shape[0], maxclasses)'], {}), '((tind.shape[0], maxclasses))\n', (563, 592), True, 'import numpy as np\n'), ((763, 778), 'numpy.argmax', 'np.argmax', (['t', '(1)'], {}), '(t, 1)\n', (772, 778), True, 'import numpy as np\n'), ((810, 845), 'numpy.random.uniform', 'np.random.uniform', (['pmin', 'pmax', '(n,)'], {}), '(pmin, pmax, (n,))\n', (827, 845), True, 'import numpy as np\n'), ((390, 414), 'numpy.arange', 'np.arange', (['tind.shape[0]'], {}), '(tind.shape[0])\n', (399, 414), True, 'import numpy as np\n'), ((644, 668), 'numpy.arange', 'np.arange', (['tind.shape[0]'], {}), '(tind.shape[0])\n', (653, 668), True, 'import numpy as np\n'), ((1018, 1042), 'scipy.stats.mode', 'stats.mode', (['tar_sequence'], {}), '(tar_sequence)\n', (1028, 1042), False, 'from scipy import stats\n')]
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from model_mommy import mommy
from rolepermissions.roles import AbstractUserRole
from rolepermissions.checkers import has_role, has_permission, has_object_permission
from rolepermissions.permissions import register_object_checker
class VerRole1(AbstractUserRole):
available_permissions = {
'permission1': True,
'permission2': True,
}
class VerRole2(AbstractUserRole):
available_permissions = {
'permission3': True,
'permission4': False,
}
class VerRole3(AbstractUserRole):
role_name = 'ver_new_name'
available_permissions = {
'permission5': False,
'permission6': False,
}
class HasRoleTests(TestCase):
def setUp(self):
self.user = mommy.make(get_user_model())
VerRole1.assign_role_to_user(self.user)
def test_user_has_VerRole1(self):
user = self.user
self.assertTrue(has_role(user, VerRole1))
def test_user_does_not_have_VerRole2(self):
user = self.user
self.assertFalse(has_role(user, VerRole2))
def test_user_has_VerRole1_or_VerRole2(self):
user = self.user
self.assertTrue(has_role(user, [VerRole1, VerRole2]))
def test_has_role_by_name(self):
user = self.user
self.assertTrue(has_role(user, 'ver_role1'))
def test_user_has_VerRole1_or_VerRole3_by_name(self):
user = self.user
VerRole3.assign_role_to_user(user)
self.assertTrue(has_role(user, ['ver_role1', 'ver_new_name']))
def test_not_existent_role(self):
user = self.user
self.assertFalse(has_role(user, 'not_a_role'))
def test_none_user_param(self):
self.assertFalse(has_role(None, 'ver_role1'))
class HasPermissionTests(TestCase):
def setUp(self):
self.user = mommy.make(get_user_model())
VerRole1.assign_role_to_user(self.user)
def test_has_VerRole1_permission(self):
user = self.user
self.assertTrue(has_permission(user, 'permission1'))
def test_dos_not_have_VerRole1_permission(self):
user = self.user
VerRole1.assign_role_to_user(user)
self.assertFalse(has_permission(user, 'permission3'))
def test_not_existent_permission(self):
user = self.user
self.assertFalse(has_permission(user, 'not_a_permission'))
def test_user_with_no_role(self):
user = mommy.make(get_user_model())
self.assertFalse(has_permission(user, 'permission1'))
def test_none_user_param(self):
self.assertFalse(has_permission(None, 'ver_role1'))
class HasObjectPermissionTests(TestCase):
def setUp(self):
self.user = mommy.make(get_user_model())
VerRole1.assign_role_to_user(self.user)
@register_object_checker()
def obj_checker(role, user, obj):
return obj and True
def test_has_object_permission(self):
user = self.user
self.assertTrue(has_object_permission('obj_checker', user, True))
def test_does_not_have_object_permission(self):
user = self.user
self.assertFalse(has_object_permission('obj_checker', user, False))
def test_check_none_role_if_user_has_no_role(self):
user = mommy.make(get_user_model())
self.assertTrue(has_object_permission('obj_checker', user, True))
|
[
"rolepermissions.checkers.has_role",
"django.contrib.auth.get_user_model",
"rolepermissions.permissions.register_object_checker",
"rolepermissions.checkers.has_permission",
"rolepermissions.checkers.has_object_permission"
] |
[((2825, 2850), 'rolepermissions.permissions.register_object_checker', 'register_object_checker', ([], {}), '()\n', (2848, 2850), False, 'from rolepermissions.permissions import register_object_checker\n'), ((820, 836), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (834, 836), False, 'from django.contrib.auth import get_user_model\n'), ((976, 1000), 'rolepermissions.checkers.has_role', 'has_role', (['user', 'VerRole1'], {}), '(user, VerRole1)\n', (984, 1000), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1102, 1126), 'rolepermissions.checkers.has_role', 'has_role', (['user', 'VerRole2'], {}), '(user, VerRole2)\n', (1110, 1126), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1229, 1265), 'rolepermissions.checkers.has_role', 'has_role', (['user', '[VerRole1, VerRole2]'], {}), '(user, [VerRole1, VerRole2])\n', (1237, 1265), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1355, 1382), 'rolepermissions.checkers.has_role', 'has_role', (['user', '"""ver_role1"""'], {}), "(user, 'ver_role1')\n", (1363, 1382), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1537, 1582), 'rolepermissions.checkers.has_role', 'has_role', (['user', "['ver_role1', 'ver_new_name']"], {}), "(user, ['ver_role1', 'ver_new_name'])\n", (1545, 1582), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1674, 1702), 'rolepermissions.checkers.has_role', 'has_role', (['user', '"""not_a_role"""'], {}), "(user, 'not_a_role')\n", (1682, 1702), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1766, 1793), 'rolepermissions.checkers.has_role', 'has_role', (['None', '"""ver_role1"""'], {}), "(None, 'ver_role1')\n", (1774, 1793), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((1885, 1901), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1899, 1901), False, 'from django.contrib.auth import get_user_model\n'), ((2047, 2082), 'rolepermissions.checkers.has_permission', 'has_permission', (['user', '"""permission1"""'], {}), "(user, 'permission1')\n", (2061, 2082), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((2233, 2268), 'rolepermissions.checkers.has_permission', 'has_permission', (['user', '"""permission3"""'], {}), "(user, 'permission3')\n", (2247, 2268), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((2366, 2406), 'rolepermissions.checkers.has_permission', 'has_permission', (['user', '"""not_a_permission"""'], {}), "(user, 'not_a_permission')\n", (2380, 2406), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((2473, 2489), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2487, 2489), False, 'from django.contrib.auth import get_user_model\n'), ((2517, 2552), 'rolepermissions.checkers.has_permission', 'has_permission', (['user', '"""permission1"""'], {}), "(user, 'permission1')\n", (2531, 2552), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((2616, 2649), 'rolepermissions.checkers.has_permission', 'has_permission', (['None', '"""ver_role1"""'], {}), "(None, 'ver_role1')\n", (2630, 2649), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((2748, 2764), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2762, 2764), False, 'from django.contrib.auth import get_user_model\n'), ((3018, 3066), 'rolepermissions.checkers.has_object_permission', 'has_object_permission', (['"""obj_checker"""', 'user', '(True)'], {}), "('obj_checker', user, True)\n", (3039, 3066), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((3172, 3221), 'rolepermissions.checkers.has_object_permission', 'has_object_permission', (['"""obj_checker"""', 'user', '(False)'], {}), "('obj_checker', user, False)\n", (3193, 3221), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n'), ((3306, 3322), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3320, 3322), False, 'from django.contrib.auth import get_user_model\n'), ((3349, 3397), 'rolepermissions.checkers.has_object_permission', 'has_object_permission', (['"""obj_checker"""', 'user', '(True)'], {}), "('obj_checker', user, True)\n", (3370, 3397), False, 'from rolepermissions.checkers import has_role, has_permission, has_object_permission\n')]
|
import re
fh = open('data.txt')
def sumNums(line):
"""
    Sum the numbers found in a line
"""
s = 0
nums = re.findall('[0-9]+', line)
for num in nums:
s += int(num)
return s
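# Worked example: sumNums("abc12, def7") returns 19, because
# re.findall('[0-9]+', ...) yields ['12', '7'] and the loop adds 12 + 7.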
s = 0
for line in fh:
s += sumNums(line.rstrip())
print ("Sum of numbers in file:\t %d" % s)
|
[
"re.findall"
] |
[((125, 151), 're.findall', 're.findall', (['"""[0-9]+"""', 'line'], {}), "('[0-9]+', line)\n", (135, 151), False, 'import re\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy.optimize import curve_fit
import matplotlib.colors as mcolors
#Write with LaTeX
rc('text', usetex=True)
rc('font', family='serif')
def func(x, a, b):
return (a * x) + b
# Data
B1 = np.array([9.38, 12.46, 15.57])
dB1 = np.array([0.04, 0.04, 0.04])
r1 = np.array([0.217, 0.28, 0.38])
dr1 = np.array([0.024, 0.04, 0.07])
B2 = np.array([9.38, 12.46, 15.57])
dB2 = np.array([0.04, 0.04, 0.04])
r2 = np.array([0.2, 0.2500, 0.33])
dr2 = np.array([0.02, 0.03, 0.06])
# Fitting
x = np.linspace(0.15, 0.4, 5)
popt1, pcov1 = curve_fit(func, r1, B1, sigma=1./(dB1*dB1))
perr1 = np.sqrt(np.diag(pcov1))
popt2, pcov2 = curve_fit(func, r2, B2, sigma=1./(dB2*dB2))
perr2 = np.sqrt(np.diag(pcov2))
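# Note: scipy's curve_fit interprets `sigma` as the measurement uncertainty
# (weights ~ 1/sigma**2 when absolute_sigma=False), so passing 1./(dB*dB)
# weights the points by dB**4; if plain error-bar weighting was intended,
# sigma=dB1 and sigma=dB2 would likely be the better choice.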
# Plot
fig, ax = plt.subplots(1, 1)
# B1 = B1(1/r1)
ax.errorbar(r1, B1, xerr = dr1, yerr = dB1, capsize=3, color='black', elinewidth=1, markeredgewidth=1, linestyle='None', marker='o', label='Calculated \n Values of $B_1$')
ax.plot(x, func(x, *popt1), color='orange', label='$B1 = B1(1/r_1)$', linewidth=1.5)
# B2 = B2(1/r2)
ax.errorbar(r2, B2, xerr = dr2, yerr = dB2, capsize=3, color='black', elinewidth=1, markeredgewidth=1, linestyle='None', marker='s', label='Calculated \n Values of $B_2$')
ax.plot(x, func(x, *popt2), color='royalblue', label='$B2 = B2(1/r_2)$', linewidth=1.5)
# Figure Specifications
ax.set_ylabel('$B$ $(\mathrm{10^{-4}\,\mathrm{T}})$')
ax.set_xlabel('$1/r$ $(\mathrm{1/\mathrm{cm}})$')
ax.legend(loc = 'upper left', prop={'size': 11})
# Show the major grid lines with dark grey lines
ax.grid(b=True, which='major', color='#666666', linestyle='--')
# Show the minor grid lines
ax.minorticks_on()
ax.grid(b=True, which='minor', color='#999999', linestyle='--', alpha=0.2)
# fix quality
fig.tight_layout()
plt.show()
# Print lines' slopes and constant coefficients
print(f"\n\n a1 = {'%0.5f'%popt1[0]} ± {'%0.5f'%perr1[0]}", f",b1 = {'%0.5f'%popt1[1]} ± {'%0.5f'%perr1[1]}")
print(f"\n\n a2 = {'%0.5f'%popt2[0]} ± {'%0.5f'%perr2[0]}", f",b2 = {'%0.5f'%popt2[1]} ± {'%0.5f'%perr2[1]}")
|
[
"matplotlib.rc",
"matplotlib.pyplot.show",
"scipy.optimize.curve_fit",
"numpy.array",
"numpy.linspace",
"numpy.diag",
"matplotlib.pyplot.subplots"
] |
[((169, 192), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (171, 192), False, 'from matplotlib import rc\n'), ((193, 219), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (195, 219), False, 'from matplotlib import rc\n'), ((276, 306), 'numpy.array', 'np.array', (['[9.38, 12.46, 15.57]'], {}), '([9.38, 12.46, 15.57])\n', (284, 306), True, 'import numpy as np\n'), ((313, 341), 'numpy.array', 'np.array', (['[0.04, 0.04, 0.04]'], {}), '([0.04, 0.04, 0.04])\n', (321, 341), True, 'import numpy as np\n'), ((347, 376), 'numpy.array', 'np.array', (['[0.217, 0.28, 0.38]'], {}), '([0.217, 0.28, 0.38])\n', (355, 376), True, 'import numpy as np\n'), ((383, 412), 'numpy.array', 'np.array', (['[0.024, 0.04, 0.07]'], {}), '([0.024, 0.04, 0.07])\n', (391, 412), True, 'import numpy as np\n'), ((419, 449), 'numpy.array', 'np.array', (['[9.38, 12.46, 15.57]'], {}), '([9.38, 12.46, 15.57])\n', (427, 449), True, 'import numpy as np\n'), ((456, 484), 'numpy.array', 'np.array', (['[0.04, 0.04, 0.04]'], {}), '([0.04, 0.04, 0.04])\n', (464, 484), True, 'import numpy as np\n'), ((490, 517), 'numpy.array', 'np.array', (['[0.2, 0.25, 0.33]'], {}), '([0.2, 0.25, 0.33])\n', (498, 517), True, 'import numpy as np\n'), ((526, 554), 'numpy.array', 'np.array', (['[0.02, 0.03, 0.06]'], {}), '([0.02, 0.03, 0.06])\n', (534, 554), True, 'import numpy as np\n'), ((570, 595), 'numpy.linspace', 'np.linspace', (['(0.15)', '(0.4)', '(5)'], {}), '(0.15, 0.4, 5)\n', (581, 595), True, 'import numpy as np\n'), ((612, 660), 'scipy.optimize.curve_fit', 'curve_fit', (['func', 'r1', 'B1'], {'sigma': '(1.0 / (dB1 * dB1))'}), '(func, r1, B1, sigma=1.0 / (dB1 * dB1))\n', (621, 660), False, 'from scipy.optimize import curve_fit\n'), ((703, 751), 'scipy.optimize.curve_fit', 'curve_fit', (['func', 'r2', 'B2'], {'sigma': '(1.0 / (dB2 * dB2))'}), '(func, r2, B2, sigma=1.0 / (dB2 * dB2))\n', (712, 751), False, 'from scipy.optimize import curve_fit\n'), ((797, 815), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (809, 815), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1826, 1828), True, 'import matplotlib.pyplot as plt\n'), ((672, 686), 'numpy.diag', 'np.diag', (['pcov1'], {}), '(pcov1)\n', (679, 686), True, 'import numpy as np\n'), ((763, 777), 'numpy.diag', 'np.diag', (['pcov2'], {}), '(pcov2)\n', (770, 777), True, 'import numpy as np\n')]
|
import os
import numpy as np
import logging
from ..base import float_, int_
from .util import dataset_home, download, checksum, archive_extract, checkpoint
log = logging.getLogger(__name__)
_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
_SHA1 = 'b22ebbd7f3c4384ebc9ba3152939186d3750b902'
class STL10(object):
'''
The STL-10 dataset [1]
http://cs.stanford.edu/~acoates/stl10
References:
[1]: An Analysis of Single Layer Networks in Unsupervised Feature Learning,
<NAME>, <NAME>, <NAME>, AISTATS, 2011.
'''
def __init__(self):
self.name = 'stl10'
self.n_classes = 10
self.n_train = 5000
self.n_test = 8000
self.n_unlabeled = 100000
self.img_shape = (3, 96, 96)
self.data_dir = os.path.join(dataset_home, self.name)
self._npz_path = os.path.join(self.data_dir, 'stl10.npz')
self._install()
self._arrays, self.folds = self._load()
def arrays(self, dp_dtypes=False):
x_train, y_train, x_test, y_test, x_unlabeled = self._arrays
if dp_dtypes:
x_train = x_train.astype(float_)
y_train = y_train.astype(int_)
x_test = x_test.astype(float_)
y_test = y_test.astype(int_)
x_unlabeled = x_unlabeled.astype(float_)
return x_train, y_train, x_test, y_test, x_unlabeled
def _install(self):
checkpoint_file = os.path.join(self.data_dir, '__install_check')
with checkpoint(checkpoint_file) as exists:
if exists:
return
log.info('Downloading %s', _URL)
filepath = download(_URL, self.data_dir)
if _SHA1 != checksum(filepath, method='sha1'):
raise RuntimeError('Checksum mismatch for %s.' % _URL)
log.info('Unpacking %s', filepath)
archive_extract(filepath, self.data_dir)
unpack_dir = os.path.join(self.data_dir, 'stl10_binary')
log.info('Converting data to Numpy arrays')
filenames = ['train_X.bin', 'train_y.bin', 'test_X.bin',
'test_y.bin', 'unlabeled_X.bin']
def bin2numpy(filepath):
with open(filepath, 'rb') as f:
arr = np.fromfile(f, dtype=np.uint8)
if '_X' in filepath:
arr = np.reshape(arr, (-1,) + self.img_shape)
return arr
filepaths = [os.path.join(unpack_dir, f) for f in filenames]
x_train, y_train, x_test, y_test, x_unlabeled = map(bin2numpy,
filepaths)
folds = []
with open(os.path.join(unpack_dir, 'fold_indices.txt'), 'r') as f:
for line in f:
folds.append([int(s) for s in line.strip().split(' ')])
folds = np.array(folds)
with open(self._npz_path, 'wb') as f:
np.savez(f, x_train=x_train, y_train=y_train, x_test=x_test,
y_test=y_test, x_unlabeled=x_unlabeled, folds=folds)
def _load(self):
with open(self._npz_path, 'rb') as f:
dic = np.load(f)
return ((dic['x_train'], dic['y_train'], dic['x_test'],
dic['y_test'], dic['x_unlabeled']), dic['folds'])
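# A minimal usage sketch, assuming the download URL above is reachable; the
# first instantiation downloads and converts the archive before loading it.
# dataset = STL10()
# x_train, y_train, x_test, y_test, x_unlabeled = dataset.arrays(dp_dtypes=True)
# fold0 = dataset.folds[0]   # index array for the first predefined training fold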
|
[
"numpy.load",
"numpy.fromfile",
"numpy.array",
"numpy.reshape",
"numpy.savez",
"os.path.join",
"logging.getLogger"
] |
[((165, 192), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'import logging\n'), ((796, 833), 'os.path.join', 'os.path.join', (['dataset_home', 'self.name'], {}), '(dataset_home, self.name)\n', (808, 833), False, 'import os\n'), ((859, 899), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stl10.npz"""'], {}), "(self.data_dir, 'stl10.npz')\n", (871, 899), False, 'import os\n'), ((1440, 1486), 'os.path.join', 'os.path.join', (['self.data_dir', '"""__install_check"""'], {}), "(self.data_dir, '__install_check')\n", (1452, 1486), False, 'import os\n'), ((1938, 1981), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stl10_binary"""'], {}), "(self.data_dir, 'stl10_binary')\n", (1950, 1981), False, 'import os\n'), ((2902, 2917), 'numpy.array', 'np.array', (['folds'], {}), '(folds)\n', (2910, 2917), True, 'import numpy as np\n'), ((3209, 3219), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3216, 3219), True, 'import numpy as np\n'), ((2475, 2502), 'os.path.join', 'os.path.join', (['unpack_dir', 'f'], {}), '(unpack_dir, f)\n', (2487, 2502), False, 'import os\n'), ((2984, 3101), 'numpy.savez', 'np.savez', (['f'], {'x_train': 'x_train', 'y_train': 'y_train', 'x_test': 'x_test', 'y_test': 'y_test', 'x_unlabeled': 'x_unlabeled', 'folds': 'folds'}), '(f, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,\n x_unlabeled=x_unlabeled, folds=folds)\n', (2992, 3101), True, 'import numpy as np\n'), ((2277, 2307), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint8'}), '(f, dtype=np.uint8)\n', (2288, 2307), True, 'import numpy as np\n'), ((2718, 2762), 'os.path.join', 'os.path.join', (['unpack_dir', '"""fold_indices.txt"""'], {}), "(unpack_dir, 'fold_indices.txt')\n", (2730, 2762), False, 'import os\n'), ((2379, 2418), 'numpy.reshape', 'np.reshape', (['arr', '((-1,) + self.img_shape)'], {}), '(arr, (-1,) + self.img_shape)\n', (2389, 2418), True, 'import numpy as np\n')]
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.immunization_evaluation_dose_status_codes import (
ImmunizationEvaluationDoseStatusCodes as ImmunizationEvaluationDoseStatusCodes_,
)
__all__ = ["ImmunizationEvaluationDoseStatusCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class ImmunizationEvaluationDoseStatusCodes(ImmunizationEvaluationDoseStatusCodes_):
"""
Immunization Evaluation Dose Status codes
The value set to instantiate this attribute should be drawn from a
terminologically robust code system that consists of or contains
concepts to support describing the validity of a dose relative to a
particular recommended schedule. This value set is provided as a
suggestive example.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/immunization-evaluation-dose-status
"""
class Meta:
resource = _resource
|
[
"pathlib.Path"
] |
[((380, 394), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (384, 394), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 21:46:14 2021
@author: Raj
"""
import sidpy as sid
from sidpy.sid import Reader
from sidpy.sid import Dimension
import os
import numpy as np
import h5py
from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs
class PiFMTranslator(Reader):
"""
Class that writes images, spectrograms, point spectra and associated ancillary data sets to h5 file in pyUSID data
structure.
"""
    def read(self):
"""
Parameters
----------
file_path : String / unicode
            Absolute path of the ANFATEC parameter (.txt) file
verbose : Boolean (Optional)
Whether or not to show print statements for debugging
Returns
-------
sidpy.Dataset : List of sidpy.Dataset objects.
Image layers are saved as separate Dataset objects
"""
self.get_path()
self.read_anfatec_params()
self.read_file_desc()
self.read_spectrograms()
self.read_imgs()
self.read_spectra()
self.datasets = self.make_datasets()
return self.datasets
def create_h5(self, append_path='', overwrite=False):
"""
Writes a new HDF5 file with the translated data
append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""
self.create_hdf5_file(append_path, overwrite)
self.write_datasets_hdf5()
return
def get_path(self):
"""writes full path, directory, and file name as attributes to class"""
self.path = self._input_file_path
full_path = os.path.realpath(self.path)
directory = os.path.dirname(full_path)
# file name
basename = os.path.basename(self.path)
self.full_path = full_path
self.directory = directory
self.basename = basename
def read_anfatec_params(self):
"""reads the scan parameters and writes them to a dictionary"""
params_dictionary = {}
params = True
with open(self.path, 'r', encoding="ISO-8859-1") as f:
for line in f:
if params:
sline = [val.strip() for val in line.split(':')]
if len(sline) == 2 and sline[0][0] != ';':
params_dictionary[sline[0]] = sline[1]
#in ANFATEC parameter files, all attributes are written before file references.
if sline[0].startswith('FileDesc'):
params = False
f.close()
self.params_dictionary = params_dictionary
self.x_len, self.y_len = int(params_dictionary['xPixel']), int(params_dictionary['yPixel'])
def read_file_desc(self):
"""reads spectrogram, image, and spectra file descriptions and stores all to dictionary where
the key:value pairs are filename:[all descriptors]"""
spectrogram_desc = {}
img_desc = {}
spectrum_desc = {}
pspectrum_desc = {}
with open(self.path,'r', encoding="ISO-8859-1") as f:
lines = f.readlines()
for index, line in enumerate(lines):
sline = [val.strip() for val in line.split(':')]
#if true, then file describes image.
if sline[0].startswith('FileDescBegin'):
no_descriptors = 5
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#img_desc['filename'] = caption, scale, physical unit, offset
img_desc[file_desc[0]] = file_desc[1:]
#if true, file describes spectrogram (ie hyperspectral image)
if sline[0].startswith('FileDesc2Begin'):
no_descriptors = 10
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#caption, bytes perpixel, scale, physical unit, offset, offset, datatype, bytes per reading
#filename wavelengths, phys units wavelengths.
spectrogram_desc[file_desc[0]] = file_desc[1:]
if sline[0].startswith('AFMSpectrumDescBegin'):
file_desc = []
line_desc = [val.strip() for val in lines[index+1].split(':')][1]
if 'powerspectrum' in line_desc:
no_descriptors = 2
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
pspectrum_desc[file_desc[0]] = file_desc[1:]
else:
no_descriptors = 7
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
spectrum_desc[file_desc[0]] = file_desc[1:]
f.close()
self.img_desc = img_desc
self.spectrogram_desc = spectrogram_desc
self.spectrum_desc = spectrum_desc
self.pspectrum_desc = pspectrum_desc
def read_spectrograms(self):
"""reads spectrograms, associated spectral values, and saves them in two dictionaries"""
spectrograms = {}
spectrogram_spec_vals = {}
for file_name, descriptors in self.spectrogram_desc.items():
spec_vals_i = np.loadtxt(os.path.join(self.directory, file_name.strip('.int') + 'Wavelengths.txt'))
#if true, data is acquired with polarizer, with an attenuation data column
if np.array(spec_vals_i).ndim == 2:
spectrogram_spec_vals[file_name] = spec_vals_i[:, 0]
attenuation = {}
attenuation[file_name] = spec_vals_i[:, 1]
self.attenuation = attenuation
else:
spectrogram_spec_vals[file_name] = spec_vals_i
#load and save spectrograms
spectrogram_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
spectrograms[file_name] = np.zeros((self.x_len, self.y_len, len(spec_vals_i)))
for y, line in enumerate(np.split(spectrogram_i, self.y_len)):
for x, pt_spectrum in enumerate(np.split(line, self.x_len)):
spectrograms[file_name][x, y, :] = pt_spectrum * float(descriptors[2])
self.spectrograms = spectrograms
self.spectrogram_spec_vals = spectrogram_spec_vals
def read_imgs(self):
"""reads images and saves to dictionary"""
imgs = {}
for file_name, descriptors in self.img_desc.items():
img_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
imgs[file_name] = np.zeros((self.x_len, self.y_len))
for y, line in enumerate(np.split(img_i, self.y_len)):
for x, pixel in enumerate(np.split(line, self.x_len)):
imgs[file_name][x, y] = pixel * float(descriptors[1])
self.imgs = imgs
def read_spectra(self):
"""reads all point spectra and saves to dictionary"""
spectra = {}
spectra_spec_vals = {}
spectra_x_y_dim_name = {}
for file_name, descriptors in self.spectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
for file_name, descriptors in self.pspectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
self.spectra = spectra
self.spectra_spec_vals = spectra_spec_vals
self.spectra_x_y_dim_name = spectra_x_y_dim_name
def make_datasets(self):
datasets = []
self.make_dimensions()
# Spectrograms
if bool(self.spectrogram_desc):
for spectrogram_f, descriptors in self.spectrogram_desc.items():
# channel_i = create_indexed_group(self.h5_meas_grp, 'Channel_')
spec_vals_i = self.spectrogram_spec_vals[spectrogram_f]
spectrogram_data = self.spectrograms[spectrogram_f]
dset = sid.Dataset.from_array(spectrogram_data, name=descriptors[0])
dset.data_type = 'Spectrogram'
dset.set_dimension(0, self.dim0)
                dset.set_dimension(1, self.dim1)
# spectrogram_spec_dims = Dimension('Wavelength', descriptors[8], spec_vals_i)
spectrogram_dims = Dimension(values=spec_vals_i, name='Spectrogram',
units=descriptors[3], quantity='Wavelength', type='spectral' )
dset.set_dimension(2, spectrogram_dims)
dset.metadata = {'Caption': descriptors[0],
'Bytes_Per_Pixel': descriptors[1],
'Scale': descriptors[2],
'Physical_Units': descriptors[3],
'Offset': descriptors[4],
'Datatype': descriptors[5],
'Bytes_Per_Reading': descriptors[6],
'Wavelength_File': descriptors[7],
'Wavelength_Units': descriptors[8]}
datasets.append(dset)
# Images
if bool(self.img_desc):
for img_f, descriptors in self.img_desc.items():
img_data = self.imgs[img_f]
dset = sid.Dataset.from_array(img_data, name = descriptors[0])
dset.data_type = 'Image'
dset.set_dimension(0, self.dim0)
dset.set_dimension(1, self.dim1)
dset.units = descriptors[2]
dset.quantity = descriptors[0]
dset.metadata = {'Caption': descriptors[0],
'Scale': descriptors[1],
'Physical_Units': descriptors[2],
'Offset': descriptors[3]}
datasets.append(dset)
# Spectra
if bool(self.spectrum_desc):
for spec_f, descriptors in self.spectrum_desc.items():
#create new measurement group for each spectrum
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
dset = sid.Dataset.from_array(self.spectra[spec_f], name = 'Raw_Spectrum')
dset.set_dimension(0, Dimension(np.array([float(descriptors[1])]),
name='X',units=self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
quantity = 'X_position'))
dset.set_dimension(1, Dimension(np.array([float(descriptors[2])]),
name='Y',units=self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
quantity = 'Y_position'))
dset.data_type = 'Spectrum'
dset.units = y_unit
dset.quantity = y_name
spectra_dims = Dimension(values=self.spectra_spec_vals[spec_f], name='Wavelength',
units=x_unit, quantity=x_name, type='spectral' )
dset.set_dimension(2, spectra_dims)
dset.metadata = {'XLoc': descriptors[1], 'YLoc': descriptors[2]}
datasets.append(dset)
# Power Spectra
if bool(self.pspectrum_desc):
for spec_f, descriptors in self.pspectrum_desc.items():
#create new measurement group for each spectrum
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
dset = sid.Dataset.from_array(self.spectra[spec_f], name = 'Power_Spectrum')
dset.set_dimension(0, Dimension(np.array([0]),
name='X',units=self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
quantity = 'X_position'))
dset.set_dimension(1, Dimension(np.array([0]),
name='Y',units=self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
quantity = 'Y_position'))
dset.data_type = 'Spectrum'
dset.units = y_unit
dset.quantity = y_name
spectra_dims = Dimension(values=self.spectra_spec_vals[spec_f], name='Wavelength',
units=x_unit, quantity=x_name, type='spectral' )
dset.set_dimension(2, spectra_dims)
dset.metadata = {'XLoc': 0, 'YLoc': 0}
datasets.append(dset)
return datasets
def make_dimensions(self):
x_range = float(self.params_dictionary['XScanRange'])
y_range = float(self.params_dictionary['YScanRange'])
x_center = float(self.params_dictionary['xCenter'])
y_center = float(self.params_dictionary['yCenter'])
x_start = x_center-(x_range/2); x_end = x_center+(x_range/2)
y_start = y_center-(y_range/2); y_end = y_center+(y_range/2)
dx = x_range/self.x_len
dy = y_range/self.y_len
#assumes y scan direction:down; scan angle: 0 deg
y_linspace = -np.arange(y_start, y_end, step=dy)
x_linspace = np.arange(x_start, x_end, step=dx)
qtyx = self.params_dictionary['XPhysUnit'].replace('\xb5', 'u')
qtyy = self.params_dictionary['YPhysUnit'].replace('\xb5', 'u')
self.dim0 = Dimension(x_linspace, name = 'x', units = qtyx,
dimension_type = 'spatial', quantity='Length')
self.dim1 = Dimension(y_linspace, name = 'y', units = qtyy,
dimension_type = 'spatial', quantity='Length')
# self.pos_ind, self.pos_val, self.pos_dims = pos_ind, pos_val, pos_dims
return
# HDF5 creation
def create_hdf5_file(self, append_path='', overwrite=False):
""" Sets up the HDF5 file for writing
append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""
if not append_path:
h5_path = os.path.join(self.directory, self.basename.replace('.txt', '.h5'))
if os.path.exists(h5_path):
if not overwrite:
                    raise FileExistsError('This file already exists. Set attribute overwrite to True.')
else:
print('Overwriting file', h5_path)
#os.remove(h5_path)
self.h5_f = h5py.File(h5_path, mode='w')
else:
if not os.path.exists(append_path):
raise Exception('File does not exist. Check pathname.')
self.h5_f = h5py.File(append_path, mode='r+')
self.h5_img_grp = create_indexed_group(self.h5_f, "Images")
self.h5_spectra_grp = create_indexed_group(self.h5_f, "Spectra")
self.h5_spectrogram_grp = create_indexed_group(self.h5_f, "Spectrogram")
write_simple_attrs(self.h5_img_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectra_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectrogram_grp, self.params_dictionary)
return
def write_datasets_hdf5(self):
""" Writes the datasets as pyNSID datasets to the HDF5 file"""
for dset in self.datasets:
if 'IMAGE' in dset.data_type.name:
write_nsid_dataset(dset, self.h5_img_grp)
elif 'SPECTRUM' in dset.data_type.name:
write_nsid_dataset(dset, self.h5_spectra_grp)
else:
write_nsid_dataset(dset, self.h5_spectrogram_grp)
self.h5_f.file.close()
return
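# A minimal usage sketch, assuming sidpy's Reader takes the ANFATEC parameter
# (.txt) file path as its first argument; the path below is hypothetical.
# reader = PiFMTranslator('/path/to/measurement.txt')
# datasets = reader.read()     # list of sidpy.Dataset objects
# reader.create_h5()           # writes <measurement>.h5 next to the .txt file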
|
[
"h5py.File",
"pyNSID.io.hdf_io.write_simple_attrs",
"os.path.basename",
"pyNSID.io.hdf_io.write_nsid_dataset",
"os.path.realpath",
"os.path.dirname",
"numpy.zeros",
"pyNSID.io.hdf_io.create_indexed_group",
"os.path.exists",
"numpy.split",
"sidpy.Dataset.from_array",
"numpy.arange",
"numpy.array",
"os.path.join",
"sidpy.sid.Dimension"
] |
[((1882, 1909), 'os.path.realpath', 'os.path.realpath', (['self.path'], {}), '(self.path)\n', (1898, 1909), False, 'import os\n'), ((1930, 1956), 'os.path.dirname', 'os.path.dirname', (['full_path'], {}), '(full_path)\n', (1945, 1956), False, 'import os\n'), ((1996, 2023), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (2012, 2023), False, 'import os\n'), ((15564, 15598), 'numpy.arange', 'np.arange', (['x_start', 'x_end'], {'step': 'dx'}), '(x_start, x_end, step=dx)\n', (15573, 15598), True, 'import numpy as np\n'), ((15781, 15873), 'sidpy.sid.Dimension', 'Dimension', (['x_linspace'], {'name': '"""x"""', 'units': 'qtyx', 'dimension_type': '"""spatial"""', 'quantity': '"""Length"""'}), "(x_linspace, name='x', units=qtyx, dimension_type='spatial',\n quantity='Length')\n", (15790, 15873), False, 'from sidpy.sid import Dimension\n'), ((15927, 16019), 'sidpy.sid.Dimension', 'Dimension', (['y_linspace'], {'name': '"""y"""', 'units': 'qtyy', 'dimension_type': '"""spatial"""', 'quantity': '"""Length"""'}), "(y_linspace, name='y', units=qtyy, dimension_type='spatial',\n quantity='Length')\n", (15936, 16019), False, 'from sidpy.sid import Dimension\n'), ((17306, 17347), 'pyNSID.io.hdf_io.create_indexed_group', 'create_indexed_group', (['self.h5_f', '"""Images"""'], {}), "(self.h5_f, 'Images')\n", (17326, 17347), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17378, 17420), 'pyNSID.io.hdf_io.create_indexed_group', 'create_indexed_group', (['self.h5_f', '"""Spectra"""'], {}), "(self.h5_f, 'Spectra')\n", (17398, 17420), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17455, 17501), 'pyNSID.io.hdf_io.create_indexed_group', 'create_indexed_group', (['self.h5_f', '"""Spectrogram"""'], {}), "(self.h5_f, 'Spectrogram')\n", (17475, 17501), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17519, 17578), 'pyNSID.io.hdf_io.write_simple_attrs', 'write_simple_attrs', (['self.h5_img_grp', 'self.params_dictionary'], {}), '(self.h5_img_grp, self.params_dictionary)\n', (17537, 17578), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17587, 17650), 'pyNSID.io.hdf_io.write_simple_attrs', 'write_simple_attrs', (['self.h5_spectra_grp', 'self.params_dictionary'], {}), '(self.h5_spectra_grp, self.params_dictionary)\n', (17605, 17650), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17659, 17726), 'pyNSID.io.hdf_io.write_simple_attrs', 'write_simple_attrs', (['self.h5_spectrogram_grp', 'self.params_dictionary'], {}), '(self.h5_spectrogram_grp, self.params_dictionary)\n', (17677, 17726), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((7650, 7684), 'numpy.zeros', 'np.zeros', (['(self.x_len, self.y_len)'], {}), '((self.x_len, self.y_len))\n', (7658, 7684), True, 'import numpy as np\n'), ((15508, 15542), 'numpy.arange', 'np.arange', (['y_start', 'y_end'], {'step': 'dy'}), '(y_start, y_end, step=dy)\n', (15517, 15542), True, 'import numpy as np\n'), ((16732, 16755), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (16746, 16755), False, 'import os\n'), ((17057, 17085), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""w"""'}), "(h5_path, mode='w')\n", (17066, 17085), False, 'import h5py\n'), ((17245, 17278), 'h5py.File', 'h5py.File', (['append_path'], {'mode': '"""r+"""'}), "(append_path, mode='r+')\n", (17254, 17278), False, 'import h5py\n'), ((6867, 6906), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (6879, 6906), False, 'import os\n'), ((7061, 7096), 'numpy.split', 'np.split', (['spectrogram_i', 'self.y_len'], {}), '(spectrogram_i, self.y_len)\n', (7069, 7096), True, 'import numpy as np\n'), ((7567, 7606), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (7579, 7606), False, 'import os\n'), ((7731, 7758), 'numpy.split', 'np.split', (['img_i', 'self.y_len'], {}), '(img_i, self.y_len)\n', (7739, 7758), True, 'import numpy as np\n'), ((8241, 8280), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8253, 8280), False, 'import os\n'), ((8681, 8720), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8693, 8720), False, 'import os\n'), ((9659, 9720), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['spectrogram_data'], {'name': 'descriptors[0]'}), '(spectrogram_data, name=descriptors[0])\n', (9681, 9720), True, 'import sidpy as sid\n'), ((9992, 10107), 'sidpy.sid.Dimension', 'Dimension', ([], {'values': 'spec_vals_i', 'name': '"""Spectrogram"""', 'units': 'descriptors[3]', 'quantity': '"""Wavelength"""', 'type': '"""spectral"""'}), "(values=spec_vals_i, name='Spectrogram', units=descriptors[3],\n quantity='Wavelength', type='spectral')\n", (10001, 10107), False, 'from sidpy.sid import Dimension\n'), ((11052, 11105), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['img_data'], {'name': 'descriptors[0]'}), '(img_data, name=descriptors[0])\n', (11074, 11105), True, 'import sidpy as sid\n'), ((12188, 12253), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['self.spectra[spec_f]'], {'name': '"""Raw_Spectrum"""'}), "(self.spectra[spec_f], name='Raw_Spectrum')\n", (12210, 12253), True, 'import sidpy as sid\n'), ((12980, 13100), 'sidpy.sid.Dimension', 'Dimension', ([], {'values': 'self.spectra_spec_vals[spec_f]', 'name': '"""Wavelength"""', 'units': 'x_unit', 'quantity': 'x_name', 'type': '"""spectral"""'}), "(values=self.spectra_spec_vals[spec_f], name='Wavelength', units=\n x_unit, quantity=x_name, type='spectral')\n", (12989, 13100), False, 'from sidpy.sid import Dimension\n'), ((13841, 13908), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['self.spectra[spec_f]'], {'name': '"""Power_Spectrum"""'}), "(self.spectra[spec_f], name='Power_Spectrum')\n", (13863, 13908), True, 'import sidpy as sid\n'), ((14595, 14715), 'sidpy.sid.Dimension', 'Dimension', ([], {'values': 'self.spectra_spec_vals[spec_f]', 'name': '"""Wavelength"""', 'units': 'x_unit', 'quantity': 'x_name', 'type': '"""spectral"""'}), "(values=self.spectra_spec_vals[spec_f], name='Wavelength', units=\n x_unit, quantity=x_name, type='spectral')\n", (14604, 14715), False, 'from sidpy.sid import Dimension\n'), ((17120, 17147), 'os.path.exists', 'os.path.exists', (['append_path'], {}), '(append_path)\n', (17134, 17147), False, 'import os\n'), ((17982, 18023), 'pyNSID.io.hdf_io.write_nsid_dataset', 'write_nsid_dataset', (['dset', 'self.h5_img_grp'], {}), '(dset, self.h5_img_grp)\n', (18000, 18023), False, 'from pyNSID.io.hdf_io import write_nsid_dataset\n'), ((6435, 6456), 'numpy.array', 'np.array', (['spec_vals_i'], {}), '(spec_vals_i)\n', (6443, 6456), True, 'import numpy as np\n'), ((7148, 7174), 'numpy.split', 'np.split', (['line', 'self.x_len'], {}), '(line, self.x_len)\n', (7156, 7174), True, 'import numpy as np\n'), ((7816, 7842), 'numpy.split', 'np.split', (['line', 'self.x_len'], {}), '(line, self.x_len)\n', (7824, 7842), True, 'import numpy as np\n'), ((8434, 8473), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8446, 8473), False, 'import os\n'), ((8874, 8913), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8886, 8913), False, 'import os\n'), ((18110, 18155), 'pyNSID.io.hdf_io.write_nsid_dataset', 'write_nsid_dataset', (['dset', 'self.h5_spectra_grp'], {}), '(dset, self.h5_spectra_grp)\n', (18128, 18155), False, 'from pyNSID.io.hdf_io import write_nsid_dataset\n'), ((18225, 18274), 'pyNSID.io.hdf_io.write_nsid_dataset', 'write_nsid_dataset', (['dset', 'self.h5_spectrogram_grp'], {}), '(dset, self.h5_spectrogram_grp)\n', (18243, 18274), False, 'from pyNSID.io.hdf_io import write_nsid_dataset\n'), ((13960, 13973), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13968, 13973), True, 'import numpy as np\n'), ((14235, 14248), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (14243, 14248), True, 'import numpy as np\n')]
|
from django.urls import resolve, set_urlconf
from routes.falcon import falcon_router
from routes.sanic import sanic_router
from routes.werkzeug import werkzeug_router
from routes.yrouter import y_router
def bench():
y_router.match("/")
y_router.match("/articles/2020/")
y_router.match("/articles/2015/")
y_router.match("/articles/2015/04/12/")
y_router.match("/articles/categories/sport/newest/")
y_router.match("/users/extra")
y_router.match("catchall")
y_router.match("/int/92")
y_router.match("/articles/2015/04/12/98/")
y_router.match("/users/extra/bog")
def bench_dj():
resolve("/")
resolve("/articles/2020/")
resolve("/articles/2015/")
resolve("/articles/2015/04/12/")
resolve("/articles/categories/sport/newest/")
resolve("/users/extra/")
resolve("/catchall")
resolve("/int/92")
try:
resolve("/articles/2015/04/12/98/")
except:
pass
try:
resolve("/users/extra/bog")
except:
pass
def bench_sanic():
sanic_router.get("/", method="BASE")
sanic_router.get("/articles/2020/", method="BASE")
sanic_router.get("/articles/2015/", method="BASE")
sanic_router.get("/articles/2015/04/12/", method="BASE")
sanic_router.get("/articles/categories/sport/newest/", method="BASE")
sanic_router.get("/users/extra", method="BASE")
sanic_router.get("/catchall", method="BASE")
sanic_router.get("/int/92", method="BASE")
try:
sanic_router.get("/articles/2015/04/12/98/", method="BASE")
except:
pass
try:
sanic_router.get("/users/extra/bog", method="BASE")
except:
pass
def bench_falcon():
falcon_router.find("/")
falcon_router.find("/articles/2020/")
falcon_router.find("/articles/2015/")
falcon_router.find("/articles/2015/04/12/")
falcon_router.find("/articles/categories/sport/newest/")
falcon_router.find("/users/extra")
falcon_router.find("/catchall")
falcon_router.find("/int/92")
falcon_router.find("/articles/2015/04/12/98")
falcon_router.find("/users/extra/bog")
def bench_werkzeug():
werkzeug_router.match("/")
werkzeug_router.match("/articles/2020/")
werkzeug_router.match("/articles/2015/")
werkzeug_router.match("/articles/2015/04/12/")
werkzeug_router.match("/articles/categories/sport/newest/")
werkzeug_router.match("/users/extra/")
werkzeug_router.match("/catchall/")
werkzeug_router.match("/int/92/")
try:
werkzeug_router.match("/articles/2015/04/12/98")
except:
pass
try:
werkzeug_router.match("/users/extra/bog")
except:
pass
if __name__ == "__main__":
import timeit
set_urlconf("routes.django")
print("yrouter is running...")
ytime = timeit.timeit("bench()", globals=globals(), number=10000)
print(f"Took {ytime} seconds.\n")
print("django is running...")
djtime = timeit.timeit("bench_dj()", globals=globals(), number=10000)
print(f"Took {djtime} seconds.\n")
print("sanic is running...")
sanic_time = timeit.timeit("bench_sanic()", globals=globals(), number=10000)
print(f"Took {sanic_time} seconds.\n")
print("falcon is running...")
falcon_time = timeit.timeit("bench_falcon()", globals=globals(), number=10000)
print(f"Took {falcon_time} seconds.\n")
print("werkzeug is running...")
werkzeug_time = timeit.timeit("bench_werkzeug()", globals=globals(), number=10000)
print(f"Took {werkzeug_time} seconds.\n")
|
[
"routes.sanic.sanic_router.get",
"routes.falcon.falcon_router.find",
"routes.yrouter.y_router.match",
"django.urls.set_urlconf",
"django.urls.resolve",
"routes.werkzeug.werkzeug_router.match"
] |
[((223, 242), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/"""'], {}), "('/')\n", (237, 242), False, 'from routes.yrouter import y_router\n'), ((247, 280), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/articles/2020/"""'], {}), "('/articles/2020/')\n", (261, 280), False, 'from routes.yrouter import y_router\n'), ((285, 318), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/articles/2015/"""'], {}), "('/articles/2015/')\n", (299, 318), False, 'from routes.yrouter import y_router\n'), ((323, 362), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/articles/2015/04/12/"""'], {}), "('/articles/2015/04/12/')\n", (337, 362), False, 'from routes.yrouter import y_router\n'), ((367, 419), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/articles/categories/sport/newest/"""'], {}), "('/articles/categories/sport/newest/')\n", (381, 419), False, 'from routes.yrouter import y_router\n'), ((424, 454), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/users/extra"""'], {}), "('/users/extra')\n", (438, 454), False, 'from routes.yrouter import y_router\n'), ((459, 485), 'routes.yrouter.y_router.match', 'y_router.match', (['"""catchall"""'], {}), "('catchall')\n", (473, 485), False, 'from routes.yrouter import y_router\n'), ((490, 515), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/int/92"""'], {}), "('/int/92')\n", (504, 515), False, 'from routes.yrouter import y_router\n'), ((521, 563), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/articles/2015/04/12/98/"""'], {}), "('/articles/2015/04/12/98/')\n", (535, 563), False, 'from routes.yrouter import y_router\n'), ((568, 602), 'routes.yrouter.y_router.match', 'y_router.match', (['"""/users/extra/bog"""'], {}), "('/users/extra/bog')\n", (582, 602), False, 'from routes.yrouter import y_router\n'), ((625, 637), 'django.urls.resolve', 'resolve', (['"""/"""'], {}), "('/')\n", (632, 637), False, 'from django.urls import resolve, set_urlconf\n'), ((642, 668), 'django.urls.resolve', 'resolve', (['"""/articles/2020/"""'], {}), "('/articles/2020/')\n", (649, 668), False, 'from django.urls import resolve, set_urlconf\n'), ((673, 699), 'django.urls.resolve', 'resolve', (['"""/articles/2015/"""'], {}), "('/articles/2015/')\n", (680, 699), False, 'from django.urls import resolve, set_urlconf\n'), ((704, 736), 'django.urls.resolve', 'resolve', (['"""/articles/2015/04/12/"""'], {}), "('/articles/2015/04/12/')\n", (711, 736), False, 'from django.urls import resolve, set_urlconf\n'), ((741, 786), 'django.urls.resolve', 'resolve', (['"""/articles/categories/sport/newest/"""'], {}), "('/articles/categories/sport/newest/')\n", (748, 786), False, 'from django.urls import resolve, set_urlconf\n'), ((791, 815), 'django.urls.resolve', 'resolve', (['"""/users/extra/"""'], {}), "('/users/extra/')\n", (798, 815), False, 'from django.urls import resolve, set_urlconf\n'), ((820, 840), 'django.urls.resolve', 'resolve', (['"""/catchall"""'], {}), "('/catchall')\n", (827, 840), False, 'from django.urls import resolve, set_urlconf\n'), ((845, 863), 'django.urls.resolve', 'resolve', (['"""/int/92"""'], {}), "('/int/92')\n", (852, 863), False, 'from django.urls import resolve, set_urlconf\n'), ((1039, 1075), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/"""'], {'method': '"""BASE"""'}), "('/', method='BASE')\n", (1055, 1075), False, 'from routes.sanic import sanic_router\n'), ((1080, 1130), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/articles/2020/"""'], {'method': '"""BASE"""'}), 
"('/articles/2020/', method='BASE')\n", (1096, 1130), False, 'from routes.sanic import sanic_router\n'), ((1135, 1185), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/articles/2015/"""'], {'method': '"""BASE"""'}), "('/articles/2015/', method='BASE')\n", (1151, 1185), False, 'from routes.sanic import sanic_router\n'), ((1190, 1246), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/articles/2015/04/12/"""'], {'method': '"""BASE"""'}), "('/articles/2015/04/12/', method='BASE')\n", (1206, 1246), False, 'from routes.sanic import sanic_router\n'), ((1251, 1320), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/articles/categories/sport/newest/"""'], {'method': '"""BASE"""'}), "('/articles/categories/sport/newest/', method='BASE')\n", (1267, 1320), False, 'from routes.sanic import sanic_router\n'), ((1325, 1372), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/users/extra"""'], {'method': '"""BASE"""'}), "('/users/extra', method='BASE')\n", (1341, 1372), False, 'from routes.sanic import sanic_router\n'), ((1377, 1421), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/catchall"""'], {'method': '"""BASE"""'}), "('/catchall', method='BASE')\n", (1393, 1421), False, 'from routes.sanic import sanic_router\n'), ((1426, 1468), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/int/92"""'], {'method': '"""BASE"""'}), "('/int/92', method='BASE')\n", (1442, 1468), False, 'from routes.sanic import sanic_router\n'), ((1692, 1715), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/"""'], {}), "('/')\n", (1710, 1715), False, 'from routes.falcon import falcon_router\n'), ((1720, 1757), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/articles/2020/"""'], {}), "('/articles/2020/')\n", (1738, 1757), False, 'from routes.falcon import falcon_router\n'), ((1762, 1799), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/articles/2015/"""'], {}), "('/articles/2015/')\n", (1780, 1799), False, 'from routes.falcon import falcon_router\n'), ((1804, 1847), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/articles/2015/04/12/"""'], {}), "('/articles/2015/04/12/')\n", (1822, 1847), False, 'from routes.falcon import falcon_router\n'), ((1852, 1908), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/articles/categories/sport/newest/"""'], {}), "('/articles/categories/sport/newest/')\n", (1870, 1908), False, 'from routes.falcon import falcon_router\n'), ((1913, 1947), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/users/extra"""'], {}), "('/users/extra')\n", (1931, 1947), False, 'from routes.falcon import falcon_router\n'), ((1952, 1983), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/catchall"""'], {}), "('/catchall')\n", (1970, 1983), False, 'from routes.falcon import falcon_router\n'), ((1988, 2017), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/int/92"""'], {}), "('/int/92')\n", (2006, 2017), False, 'from routes.falcon import falcon_router\n'), ((2023, 2068), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/articles/2015/04/12/98"""'], {}), "('/articles/2015/04/12/98')\n", (2041, 2068), False, 'from routes.falcon import falcon_router\n'), ((2073, 2111), 'routes.falcon.falcon_router.find', 'falcon_router.find', (['"""/users/extra/bog"""'], {}), "('/users/extra/bog')\n", (2091, 2111), False, 'from routes.falcon import falcon_router\n'), ((2140, 2166), 'routes.werkzeug.werkzeug_router.match', 
'werkzeug_router.match', (['"""/"""'], {}), "('/')\n", (2161, 2166), False, 'from routes.werkzeug import werkzeug_router\n'), ((2171, 2211), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/articles/2020/"""'], {}), "('/articles/2020/')\n", (2192, 2211), False, 'from routes.werkzeug import werkzeug_router\n'), ((2216, 2256), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/articles/2015/"""'], {}), "('/articles/2015/')\n", (2237, 2256), False, 'from routes.werkzeug import werkzeug_router\n'), ((2261, 2307), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/articles/2015/04/12/"""'], {}), "('/articles/2015/04/12/')\n", (2282, 2307), False, 'from routes.werkzeug import werkzeug_router\n'), ((2312, 2371), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/articles/categories/sport/newest/"""'], {}), "('/articles/categories/sport/newest/')\n", (2333, 2371), False, 'from routes.werkzeug import werkzeug_router\n'), ((2376, 2414), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/users/extra/"""'], {}), "('/users/extra/')\n", (2397, 2414), False, 'from routes.werkzeug import werkzeug_router\n'), ((2419, 2454), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/catchall/"""'], {}), "('/catchall/')\n", (2440, 2454), False, 'from routes.werkzeug import werkzeug_router\n'), ((2459, 2492), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/int/92/"""'], {}), "('/int/92/')\n", (2480, 2492), False, 'from routes.werkzeug import werkzeug_router\n'), ((2721, 2749), 'django.urls.set_urlconf', 'set_urlconf', (['"""routes.django"""'], {}), "('routes.django')\n", (2732, 2749), False, 'from django.urls import resolve, set_urlconf\n'), ((882, 917), 'django.urls.resolve', 'resolve', (['"""/articles/2015/04/12/98/"""'], {}), "('/articles/2015/04/12/98/')\n", (889, 917), False, 'from django.urls import resolve, set_urlconf\n'), ((961, 988), 'django.urls.resolve', 'resolve', (['"""/users/extra/bog"""'], {}), "('/users/extra/bog')\n", (968, 988), False, 'from django.urls import resolve, set_urlconf\n'), ((1487, 1546), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/articles/2015/04/12/98/"""'], {'method': '"""BASE"""'}), "('/articles/2015/04/12/98/', method='BASE')\n", (1503, 1546), False, 'from routes.sanic import sanic_router\n'), ((1589, 1640), 'routes.sanic.sanic_router.get', 'sanic_router.get', (['"""/users/extra/bog"""'], {'method': '"""BASE"""'}), "('/users/extra/bog', method='BASE')\n", (1605, 1640), False, 'from routes.sanic import sanic_router\n'), ((2511, 2559), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/articles/2015/04/12/98"""'], {}), "('/articles/2015/04/12/98')\n", (2532, 2559), False, 'from routes.werkzeug import werkzeug_router\n'), ((2602, 2643), 'routes.werkzeug.werkzeug_router.match', 'werkzeug_router.match', (['"""/users/extra/bog"""'], {}), "('/users/extra/bog')\n", (2623, 2643), False, 'from routes.werkzeug import werkzeug_router\n')]
|
#! /usr/bin/env python
# Script to export users from an existing system
import os, os.path
import sys
import tarfile
# Must be run as root.
if not os.geteuid() == 0:
sys.exit('This script must be run as root (or sudo)!')
def info_message(txtmessage):
print(txtmessage, end='')
def ok_message():
print(" [ OK ]")
# Open the password, group, and shadow files
# and store their contents in lists
info_message("Reading passwd file...")
with open('/etc/passwd') as fpwd:
fpwdfile = fpwd.readlines()
ok_message()
info_message("Reading group file...")
with open('/etc/group') as fgrp:
fgrpfile = fgrp.readlines()
ok_message()
info_message("Reading shadow file...")
with open('/etc/shadow') as fsha:
fshafile = fsha.readlines()
ok_message()
# Strip out newlines
fpwdfile = [x.strip() for x in fpwdfile]
fgrpfile = [x.strip() for x in fgrpfile]
fshafile = [x.strip() for x in fshafile]
# Parse the password file. Grab only UIDs between 1000 and 65534
info_message("Parsing passwd file. Looking for accounts with UIDs >= 1000...")
countl = 0
pwdlist = []
for line in fpwdfile:
countl += 1
fpwdsplit = (line.split(':'))
uidval = int(fpwdsplit[3])
if 1000 <= uidval < 65534:
pwdlist.append(line)
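# For reference, a matching /etc/passwd entry that is kept looks like the
# illustrative (made-up) record below: username, password placeholder, UID, GID,
# GECOS, home directory and login shell, separated by colons.
#   appuser:x:1001:1001:App User:/home/appuser:/bin/bash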
# Write the output to a new file
with open('passwd_mig.txt', "a") as fpwd_write:
for item in pwdlist:
fpwd_write.write(item + "\n")
ok_message()
# Parse the group file. Grab only GIDs between 1000 and 65534
info_message("Parsing group file. Looking for accounts with GID's >= 500...")
countl = 0
grplist = []
for line in fgrpfile:
countl += 1
fgrpsplit = (line.split(':'))
uidval = int(fgrpsplit[2])
if 1000 <= uidval < 65534:
grplist.append(line)
# Write the output to a new file
with open('group_mig.txt', "a") as fgrp_write:
for item in grplist:
fgrp_write.write(item + "\n")
ok_message()
# Create and write new shadow file
info_message("Parsing shadow file. Looking for accounts that match the UIDs...")
with open('shadow_mig.txt', 'a') as fsha_write:
    # Match only the shadow entries that belong to the exported users
    for pwdline in pwdlist:
        username = pwdline.split(':')[0]
        for entry in fshafile:
            if entry.split(':')[0] == username:
                fsha_write.write(entry + '\n')
ok_message()
# Tar it all up
with tarfile.open("user_export.tgz", "w:gz") as tar:
info_message('Creating tar archive...')
for file in ["passwd_mig.txt", "group_mig.txt", "shadow_mig.txt"]:
tar.add(os.path.basename(file))
ok_message()
# Cleanup
info_message('Cleaning up temp files...')
if os.path.exists("passwd_mig.txt"):
os.remove("passwd_mig.txt")
if os.path.exists("group_mig.txt"):
os.remove("group_mig.txt")
if os.path.exists("shadow_mig.txt"):
os.remove("shadow_mig.txt")
ok_message()
|
[
"os.remove",
"os.path.basename",
"os.path.exists",
"tarfile.open",
"os.geteuid",
"sys.exit"
] |
[((2811, 2843), 'os.path.exists', 'os.path.exists', (['"""passwd_mig.txt"""'], {}), "('passwd_mig.txt')\n", (2825, 2843), False, 'import os, os.path\n'), ((2880, 2911), 'os.path.exists', 'os.path.exists', (['"""group_mig.txt"""'], {}), "('group_mig.txt')\n", (2894, 2911), False, 'import os, os.path\n'), ((2947, 2979), 'os.path.exists', 'os.path.exists', (['"""shadow_mig.txt"""'], {}), "('shadow_mig.txt')\n", (2961, 2979), False, 'import os, os.path\n'), ((172, 226), 'sys.exit', 'sys.exit', (['"""This script must be run as root (or sudo)!"""'], {}), "('This script must be run as root (or sudo)!')\n", (180, 226), False, 'import sys\n'), ((2539, 2578), 'tarfile.open', 'tarfile.open', (['"""user_export.tgz"""', '"""w:gz"""'], {}), "('user_export.tgz', 'w:gz')\n", (2551, 2578), False, 'import tarfile\n'), ((2849, 2876), 'os.remove', 'os.remove', (['"""passwd_mig.txt"""'], {}), "('passwd_mig.txt')\n", (2858, 2876), False, 'import os, os.path\n'), ((2917, 2943), 'os.remove', 'os.remove', (['"""group_mig.txt"""'], {}), "('group_mig.txt')\n", (2926, 2943), False, 'import os, os.path\n'), ((2985, 3012), 'os.remove', 'os.remove', (['"""shadow_mig.txt"""'], {}), "('shadow_mig.txt')\n", (2994, 3012), False, 'import os, os.path\n'), ((149, 161), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (159, 161), False, 'import os, os.path\n'), ((2718, 2740), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (2734, 2740), False, 'import os, os.path\n')]
|
# Some pygame helper functions for simple image display
# and sound effect playback
# <NAME> July 2017
# Version 1.0
import pygame
surface = None
def setup(width=800, height=600, title=''):
'''
Sets up the pygame environment
'''
global window_size
global back_color
global text_color
global image
global surface
# Don't initialise if we already have
if surface is not None:
return
window_size = (width, height)
back_color = (255, 255, 255)
text_color = (255, 0, 0)
image = None
# pre initialise pyGame's audio engine to avoid sound latency issues
pygame.mixer.pre_init(frequency=44100)
pygame.init()
# initialise pyGame's audio engine
pygame.mixer.init()
# Create the game surface
surface = pygame.display.set_mode(window_size)
clear_display()
pygame.display.set_caption(title)
def handle_events():
'''
Consume events that are generated by the pygame window
    These are not presently used for anything
'''
setup()
for event in pygame.event.get():
pass
def play_sound(filepath):
'''
Plays the specified sound file
'''
pygame.mixer.init()
sound = pygame.mixer.Sound(filepath)
sound.play()
def display_image(filepath):
'''
Displays the image from the given filepath
Starts pygame if required
May throw exceptions
'''
global surface
global window_size
global image
handle_events()
image = pygame.image.load(filepath)
image = pygame.transform.smoothscale(image, window_size)
surface.blit(image, (0, 0))
pygame.display.flip()
def clear_display():
'''
    Clears the display to the background colour
    and redraws the image (if any) on top of it
'''
global surface
global image
global back_color
handle_events()
surface.fill(back_color)
if image is not None:
surface.blit(image, (0, 0))
def get_display_lines(text, font, width):
'''
Returns a list of strings which have been split
to fit the given window width using the supplied font
'''
space_width = font.size(' ')[0]
result = []
text_lines = text.splitlines()
for text_line in text_lines:
words = text_line.split()
x = 0
line = ''
for word in words:
word_width = font.size(word)[0]
if x + word_width > width:
# Remove the trailing space from the line
# before adding to the list of lines to return
line = line.strip()
result.append(line)
line = word + ' '
x = word_width + space_width
else:
line = line + word + ' '
x = x + word_width + space_width
if line != '':
# Got a partial line to add to the end
# Remove the trailing space from the line
# before adding to the list of lines to return
line = line.strip()
result.append(line)
return result
def display_message(text, size=200, margin=20, horiz='center', vert='center',
color=(255, 0, 0)):
'''
Displays the text as a message
    Size can be used to select the size of the text
'''
global window_size
global surface
handle_events()
clear_display()
# Get the text version of the input
text = str(text)
font = pygame.font.Font(None, size)
available_width = window_size[0] - (margin * 2)
lines = get_display_lines(text, font, available_width)
rendered_lines = []
height = 0
for line in lines:
rendered_line = font.render(line, 1, color)
height += rendered_line.get_height()
rendered_lines.append(rendered_line)
if height > window_size[1]:
raise Exception('Text too large for window')
if vert == 'center':
y = (window_size[1] - height) / 2.0
elif vert == 'top':
y = margin
elif vert == 'bottom':
        y = (window_size[1] - margin) - height
for rendered_line in rendered_lines:
width = rendered_line.get_width()
height = rendered_line.get_height()
if horiz == 'center':
x = (available_width - width) / 2.0 + margin
elif horiz == 'left':
x = margin
elif horiz == 'right':
            x = window_size[0] - width - margin
surface.blit(rendered_line, (x, y))
y += height
pygame.display.flip()
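# A minimal usage sketch for the display helpers above. The window size, file
# names and message are illustrative assumptions, not assets shipped with this
# module:
# setup(640, 480, title='Demo')
# display_message('Hello, world!', size=80, vert='top')
# display_image('photo.jpg')
# play_sound('beep.wav')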
import urllib.request
import xml.etree.ElementTree
def get_weather_temp(latitude,longitude):
'''
Uses forecast.weather.gov to get the weather
for the specified latitude and longitude
'''
url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
req=urllib.request.urlopen(url)
page=req.read()
doc=xml.etree.ElementTree.fromstring(page)
# I'm not proud of this, but by gum it works...
for child in doc:
if child.tag == 'data':
if child.attrib['type'] == 'current observations':
for item in child:
if item.tag == 'parameters':
for i in item:
if i.tag == 'temperature':
if i.attrib['type'] == 'apparent':
for t in i:
if t.tag =='value':
return int(t.text)
def get_weather_desciption(latitude,longitude):
'''
Uses forecast.weather.gov to get the weather
for the specified latitude and longitude
'''
url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
req=urllib.request.urlopen(url)
page=req.read()
doc=xml.etree.ElementTree.fromstring(page)
# I'm not proud of this, but by gum it works...
for child in doc:
if child.tag == 'data':
if child.attrib['type'] == 'current observations':
for item in child:
if item.tag == 'parameters':
for i in item:
if i.tag == 'weather':
for t in i:
if t.tag == 'weather-conditions':
if t.get('weather-summary') is not None:
return t.get('weather-summary')
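# Illustrative usage of the weather helpers above. The coordinates are an
# assumption for demonstration (any US location served by forecast.weather.gov
# should work), and the display call reuses display_message from this module:
# temp = get_weather_temp(38.89, -77.03)
# summary = get_weather_desciption(38.89, -77.03)
# display_message('{0}F and {1}'.format(temp, summary))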
|
[
"pygame.transform.smoothscale",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.mixer.init",
"pygame.mixer.pre_init",
"pygame.init",
"pygame.display.flip",
"pygame.font.Font",
"pygame.image.load",
"pygame.display.set_caption",
"pygame.mixer.Sound"
] |
[((627, 665), 'pygame.mixer.pre_init', 'pygame.mixer.pre_init', ([], {'frequency': '(44100)'}), '(frequency=44100)\n', (648, 665), False, 'import pygame\n'), ((670, 683), 'pygame.init', 'pygame.init', ([], {}), '()\n', (681, 683), False, 'import pygame\n'), ((728, 747), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (745, 747), False, 'import pygame\n'), ((793, 829), 'pygame.display.set_mode', 'pygame.display.set_mode', (['window_size'], {}), '(window_size)\n', (816, 829), False, 'import pygame\n'), ((856, 889), 'pygame.display.set_caption', 'pygame.display.set_caption', (['title'], {}), '(title)\n', (882, 889), False, 'import pygame\n'), ((1062, 1080), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1078, 1080), False, 'import pygame\n'), ((1178, 1197), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (1195, 1197), False, 'import pygame\n'), ((1210, 1238), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['filepath'], {}), '(filepath)\n', (1228, 1238), False, 'import pygame\n'), ((1497, 1524), 'pygame.image.load', 'pygame.image.load', (['filepath'], {}), '(filepath)\n', (1514, 1524), False, 'import pygame\n'), ((1537, 1585), 'pygame.transform.smoothscale', 'pygame.transform.smoothscale', (['image', 'window_size'], {}), '(image, window_size)\n', (1565, 1585), False, 'import pygame\n'), ((1622, 1643), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1641, 1643), False, 'import pygame\n'), ((3444, 3472), 'pygame.font.Font', 'pygame.font.Font', (['None', 'size'], {}), '(None, size)\n', (3460, 3472), False, 'import pygame\n'), ((4482, 4503), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4501, 4503), False, 'import pygame\n')]
|
# -*- coding: utf-8 -*-
from codecs import open
from os import path
from setuptools import setup, find_packages
import openkongqi as okq
# local path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as fd:
long_description = fd.read()
requirements = [
"beautifulsoup4==4.9.3",
"celery>=5.0.5",
"hiredis==1.1.0",
"html5lib==1.1",
"pytz>=2020.5",
"redis==3.5.3",
"six>=1.13.0",
"sqlalchemy>=1.3.23",
]
setup(
name=okq.__name__,
version=okq.__version__,
author=okq.__author__,
author_email=okq.__contact__,
license="Apache License 2.0",
packages=find_packages(exclude=['docs', 'test*']),
url="https://github.com/gams/openkongqi",
description="Outdoor air quality data",
long_description=long_description,
install_requires=requirements,
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Framework :: Sphinx',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: XML',
],
keywords="air quality",
package_data={
'openkongqi': [
'data/user_agent_strings.json',
'data/sources/pm25.in.json',
            'data/stations/cn/shanghai.json'
],
},
entry_points={
'console_scripts': [
"okq-server=openkongqi.bin:okq_server",
"okq-init=openkongqi.bin:okq_init",
"okq-source-test=utils.source_test:main",
]
},
include_package_data=True,
)
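# Usage note: after the package is installed (for example with `pip install .`),
# setuptools exposes the console_scripts entry points above as the okq-server,
# okq-init and okq-source-test commands on the PATH.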
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((173, 195), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (185, 195), False, 'from os import path\n'), ((256, 285), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (265, 285), False, 'from os import path\n'), ((693, 733), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['docs', 'test*']"}), "(exclude=['docs', 'test*'])\n", (706, 733), False, 'from setuptools import setup, find_packages\n')]
|
import sys,math
import numpy as np
import scipy.sparse.linalg as slin
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
from svddenseblock import *
from mytools.ioutil import myreadfile
from os.path import expanduser
home = expanduser("~")
def loadtensor2matricization(tensorfile, sumout=[], mtype=coo_matrix,
weighted=True, dtype=int):
    'sumout: marginalize (sum out) the given modes'
matcols={}
rindexcols={}
xs, ys, data = [], [], []
with myreadfile(tensorfile, 'rb') as f:
for line in f:
elems = line.strip().split(',')
elems = np.array(elems)
u = int(elems[0])
colidx = range(1,len(elems)-1) #remove sumout
colidx = set(colidx) - set(list(sumout))
colidx = sorted(list(colidx))
col=' '.join(elems[colidx])
if col not in matcols:
idx = len(matcols)
matcols[col] = idx
rindexcols[idx]=col
cid = matcols[col]
w = dtype(elems[-1])
xs.append(u)
ys.append(cid)
data.append(w)
nrow, ncol = max(xs)+1, max(ys)+1
sm = mtype( (data, (xs, ys)), shape=(nrow, ncol), dtype=dtype )
if weighted is False:
sm.data[0:] = dtype(1)
return sm, rindexcols
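# For illustration, each line of the tensor file is expected to be a
# comma-separated record like the made-up example below: the first field is the
# row (user) id, the last field is the weight, and the fields in between are the
# remaining tensor modes that may be summed out via `sumout`:
#   42,17,3,1593561600,5.0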
def matricizeSVDdenseblock(sm, rindexcols, rbd='avg'):
A, tmpB = svddenseblock(sm, rbd=rbd)
rows = A.nonzero()[0]
cols = tmpB.nonzero()[0]
bcols = set()
for col in cols:
'col name'
cnm = rindexcols[col]
cnm = cnm.strip().split(' ')
b = int(cnm[0])
bcols.add(b)
return set(rows), set(bcols)
if __name__=="__main__":
path = home+'/Data/BeerAdvocate/'
respath= path+'results/'
tsfile = path+'userbeerts.dict'
ratefile = path+'userbeerrate.dict'
tensorfile =respath+'userbeer.tensor'
sm, rindexcols = loadtensor2matricization(tensorfile,
sumout=[3],mtype=csr_matrix,
dtype=float,weighted=True)
A, B = matricizeSVDdenseblock(sm, rindexcols, rbd='avg')
|
[
"numpy.array",
"mytools.ioutil.myreadfile",
"os.path.expanduser"
] |
[((234, 249), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (244, 249), False, 'from os.path import expanduser\n'), ((499, 527), 'mytools.ioutil.myreadfile', 'myreadfile', (['tensorfile', '"""rb"""'], {}), "(tensorfile, 'rb')\n", (509, 527), False, 'from mytools.ioutil import myreadfile\n'), ((621, 636), 'numpy.array', 'np.array', (['elems'], {}), '(elems)\n', (629, 636), True, 'import numpy as np\n')]
|
# ----------------------------------------------------------------------------------
# # Calculating Word Frequencies
# ----------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from text.dataHandling import DataHandling
from text.vocabCounter import vocabCounter
from collections import Counter
from itertools import chain
def dailywordfreq(start, end, name_for_saving_processed):
print("Collecting Data")
dataObj = DataHandling()
dataObj.collectData(dataPath='/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/terms/', regexpr=r'^subTerms', verbose=True)
dataObj.aggData()
data = dataObj.selectFirstFrame()
del dataObj
print("REMOVE SUBMISSIONS BLANK AFTER PROCESSING")
data = data[data.text.apply(lambda x: x != [])]
print("CREATE VOCAB")
vc = vocabCounter(
rawData = data,
start = start,
end = end,
# start=1640023150,
# end=1640823600,
step = 86400
)
#vc.getRaw().to_csv('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/tokenFreq/daily8888.csv')
del data
print("AT MOST ONE TERM / SUBMISSION")
vc.oneTokenPerSubmission()
print(vc.getRaw())
print("CREATE COUNTER OBJECT")
vc.createCountData()
dataf = vc.getCountData()
dataf.to_pickle(name_for_saving_processed)
# df2 = pd.read_pickle('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/tokenFreq/dailyTokenFreq_041218.pkl')
# df2.to_csv('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/tokenFreq/dailyTokenFreq_0666.csv')
# dailywordfreq( start=1630023150,
# end=1640823600 )
|
[
"text.vocabCounter.vocabCounter",
"text.dataHandling.DataHandling"
] |
[((547, 561), 'text.dataHandling.DataHandling', 'DataHandling', ([], {}), '()\n', (559, 561), False, 'from text.dataHandling import DataHandling\n'), ((967, 1027), 'text.vocabCounter.vocabCounter', 'vocabCounter', ([], {'rawData': 'data', 'start': 'start', 'end': 'end', 'step': '(86400)'}), '(rawData=data, start=start, end=end, step=86400)\n', (979, 1027), False, 'from text.vocabCounter import vocabCounter\n')]
|
# -*- coding: UTF-8 -*-#
from __future__ import unicode_literals, print_function
import datetime
import pytz
from django.utils.translation import ugettext_lazy as _, ugettext
from rest_framework import serializers
from rest_framework.exceptions import APIException
from django.conf import settings
from validate_email import validate_email
from action.models import Action
from dataops.pandas_db import execute_select_on_table
from scheduler.models import ScheduledAction
class ScheduledActionSerializer(serializers.ModelSerializer):
"""
Serializer to take care of a few fields and the item column
"""
item_column = serializers.CharField(source='item_column_name',
required=False)
def instantiate_or_update(self,
validated_data,
action,
execute,
item_column,
exclude_values,
payload,
scheduled_obj=None):
"""
Given the validated data and a set of parameters that have been
validated, instantiate or update the object of class ScheduledAction.
:param validated_data: Data obtained by the serializer
:param action: Action object
:param execute: Execution date/time
:param item_column: Item column object (if given)
        :param exclude_values: List of values from item_column to exclude
:param payload: JSON object
:param scheduled_obj: Object to instantiate or update
:return: instantiated object
"""
if not scheduled_obj:
scheduled_obj = ScheduledAction()
scheduled_obj.user = self.context['request'].user
scheduled_obj.name = validated_data['name']
scheduled_obj.description_text = validated_data.get('description_text')
scheduled_obj.action = action
scheduled_obj.execute = execute
scheduled_obj.item_column = item_column
scheduled_obj.exclude_values = exclude_values
scheduled_obj.payload = payload
scheduled_obj.status = ScheduledAction.STATUS_PENDING
scheduled_obj.save()
return scheduled_obj
def extra_validation(self, validated_data):
"""
Checking for extra validation properties in the information contained in
the validated data. Namely:
- The action name corresponds with a valid action for the user.
- The execute time must be in the future
- The item_column, if present, must be a correct column name
- Exclude_values must be a list
- Exclude_values can only be non-empty if item_column is given.
- The received object has a payload
:param validated_data:
:return: action, execute, item_column, exclude_values, payload
"""
# Get the action
action = validated_data['action']
if action.workflow.user != self.context['request'].user:
            # The requesting user does not own the workflow of this action.
raise APIException(_('Incorrect permission to manipulate action.'))
# Execution date must be in the future
execute = validated_data.get('execute', None)
now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE))
if not execute or execute <= now:
raise APIException(_('Invalid date/time for execution'))
# Item_column, if present has to be a correct column name
item_column = validated_data.get('item_column_name')
if item_column:
item_column = action.workflow.columns.filter(
name=item_column
).first()
if not item_column:
raise APIException(_('Invalid column name for selecting items'))
exclude_values = validated_data.get('exclude_values')
# Exclude_values has to be a list
if exclude_values and not isinstance(exclude_values, list):
raise APIException(_('Exclude_values must be a list'))
# Exclude_values can only have content if item_column is given.
if not item_column and exclude_values:
raise APIException(_('Exclude items needs a value in item_column'))
# Check that the received object has a payload
payload = validated_data.get('payload', {})
if not payload:
raise APIException(_('Scheduled objects needs a payload.'))
return action, execute, item_column, exclude_values, payload
def create(self, validated_data, **kwargs):
action, execute, item_column, exclude_values, payload = \
self.extra_validation(validated_data)
try:
scheduled_obj = self.instantiate_or_update(validated_data,
action,
execute,
item_column,
exclude_values,
payload)
except Exception as e:
raise APIException(
ugettext('Scheduled action could not be created: {0}').format(
e.message)
)
return scheduled_obj
def update(self, instance, validated_data):
action, execute, item_column, exclude_values, payload = \
self.extra_validation(validated_data)
try:
instance = self.instantiate_or_update(validated_data,
action,
execute,
item_column,
exclude_values,
payload,
instance)
# Save the object
instance.save()
except Exception as e:
raise APIException(
ugettext('Unable to update scheduled action: {0}'.format(
e.message
))
)
return instance
class Meta:
model = ScheduledAction
fields = ('id', 'name', 'description_text', 'action', 'execute',
'item_column', 'exclude_values', 'payload')
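# For illustration only: a request body that would pass extra_validation above
# could look like the sketch below. The concrete field values are assumptions
# for demonstration, not taken from the API documentation.
# {
#     "name": "weekly reminder",
#     "action": 5,
#     "execute": "2030-01-01T09:00:00+00:00",
#     "item_column": "email",
#     "exclude_values": [],
#     "payload": {"subject": "Reminder", "cc_email": [], "bcc_email": []}
# }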
class ScheduledEmailSerializer(ScheduledActionSerializer):
def extra_validation(self, validated_data):
action, execute, item_column, exclude_values, payload = \
super(ScheduledEmailSerializer, self).extra_validation(
validated_data
)
if action.action_type != Action.PERSONALIZED_TEXT:
raise APIException(_('Incorrect type of action to schedule.'))
subject = payload.get('subject')
if not subject:
raise APIException(_('Personalized text needs a subject.'))
if not item_column:
raise APIException(_('Personalized text needs a item_column'))
# Check if the values in the email column are correct emails
try:
column_data = execute_select_on_table(
action.workflow.id,
[],
[],
column_names=[item_column.name])
if not all([validate_email(x[0]) for x in column_data]):
# column has incorrect email addresses
raise APIException(
_('The column with email addresses has incorrect values.')
)
except TypeError:
raise APIException(
_('The column with email addresses has incorrect values.')
)
if not all([validate_email(x)
for x in payload.get('cc_email', []) if x]):
raise APIException(
_('cc_email must be a comma-separated list of emails.')
)
if not all([validate_email(x)
for x in payload.get('bcc_email', []) if x]):
raise APIException(
_('bcc_email must be a comma-separated list of emails.')
)
return action, execute, item_column, exclude_values, payload
class ScheduledJSONSerializer(ScheduledActionSerializer):
def extra_validation(self, validated_data):
action, execute, item_column, exclude_values, payload = \
super(ScheduledJSONSerializer, self).extra_validation(
validated_data
)
if action.action_type != Action.PERSONALIZED_JSON:
raise APIException(_('Incorrect type of action to schedule.'))
token = payload.get('token')
if not token:
raise APIException(_('Personalized JSON needs a token in payload.'))
return action, execute, item_column, exclude_values, payload
|
[
"scheduler.models.ScheduledAction",
"validate_email.validate_email",
"dataops.pandas_db.execute_select_on_table",
"rest_framework.serializers.CharField",
"pytz.timezone",
"django.utils.translation.ugettext",
"django.utils.translation.ugettext_lazy"
] |
[((637, 701), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""item_column_name"""', 'required': '(False)'}), "(source='item_column_name', required=False)\n", (658, 701), False, 'from rest_framework import serializers\n'), ((1723, 1740), 'scheduler.models.ScheduledAction', 'ScheduledAction', ([], {}), '()\n', (1738, 1740), False, 'from scheduler.models import ScheduledAction\n'), ((3306, 3339), 'pytz.timezone', 'pytz.timezone', (['settings.TIME_ZONE'], {}), '(settings.TIME_ZONE)\n', (3319, 3339), False, 'import pytz\n'), ((7197, 7286), 'dataops.pandas_db.execute_select_on_table', 'execute_select_on_table', (['action.workflow.id', '[]', '[]'], {'column_names': '[item_column.name]'}), '(action.workflow.id, [], [], column_names=[\n item_column.name])\n', (7220, 7286), False, 'from dataops.pandas_db import execute_select_on_table\n'), ((3119, 3166), 'django.utils.translation.ugettext_lazy', '_', (['"""Incorrect permission to manipulate action."""'], {}), "('Incorrect permission to manipulate action.')\n", (3120, 3166), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((3414, 3450), 'django.utils.translation.ugettext_lazy', '_', (['"""Invalid date/time for execution"""'], {}), "('Invalid date/time for execution')\n", (3415, 3450), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((4034, 4068), 'django.utils.translation.ugettext_lazy', '_', (['"""Exclude_values must be a list"""'], {}), "('Exclude_values must be a list')\n", (4035, 4068), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((4221, 4268), 'django.utils.translation.ugettext_lazy', '_', (['"""Exclude items needs a value in item_column"""'], {}), "('Exclude items needs a value in item_column')\n", (4222, 4268), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((4433, 4472), 'django.utils.translation.ugettext_lazy', '_', (['"""Scheduled objects needs a payload."""'], {}), "('Scheduled objects needs a payload.')\n", (4434, 4472), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((6802, 6844), 'django.utils.translation.ugettext_lazy', '_', (['"""Incorrect type of action to schedule."""'], {}), "('Incorrect type of action to schedule.')\n", (6803, 6844), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((6943, 6982), 'django.utils.translation.ugettext_lazy', '_', (['"""Personalized text needs a subject."""'], {}), "('Personalized text needs a subject.')\n", (6944, 6982), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7044, 7086), 'django.utils.translation.ugettext_lazy', '_', (['"""Personalized text needs a item_column"""'], {}), "('Personalized text needs a item_column')\n", (7045, 7086), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7903, 7958), 'django.utils.translation.ugettext_lazy', '_', (['"""cc_email must be a comma-separated list of emails."""'], {}), "('cc_email must be a comma-separated list of emails.')\n", (7904, 7958), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((8126, 8182), 'django.utils.translation.ugettext_lazy', '_', (['"""bcc_email must be a comma-separated list of emails."""'], {}), "('bcc_email must be a comma-separated list of emails.')\n", (8127, 8182), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((8646, 8688), 'django.utils.translation.ugettext_lazy', '_', (['"""Incorrect 
type of action to schedule."""'], {}), "('Incorrect type of action to schedule.')\n", (8647, 8688), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((8781, 8829), 'django.utils.translation.ugettext_lazy', '_', (['"""Personalized JSON needs a token in payload."""'], {}), "('Personalized JSON needs a token in payload.')\n", (8782, 8829), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((3784, 3828), 'django.utils.translation.ugettext_lazy', '_', (['"""Invalid column name for selecting items"""'], {}), "('Invalid column name for selecting items')\n", (3785, 3828), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7527, 7585), 'django.utils.translation.ugettext_lazy', '_', (['"""The column with email addresses has incorrect values."""'], {}), "('The column with email addresses has incorrect values.')\n", (7528, 7585), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7678, 7736), 'django.utils.translation.ugettext_lazy', '_', (['"""The column with email addresses has incorrect values."""'], {}), "('The column with email addresses has incorrect values.')\n", (7679, 7736), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7772, 7789), 'validate_email.validate_email', 'validate_email', (['x'], {}), '(x)\n', (7786, 7789), False, 'from validate_email import validate_email\n'), ((7994, 8011), 'validate_email.validate_email', 'validate_email', (['x'], {}), '(x)\n', (8008, 8011), False, 'from validate_email import validate_email\n'), ((7371, 7391), 'validate_email.validate_email', 'validate_email', (['x[0]'], {}), '(x[0])\n', (7385, 7391), False, 'from validate_email import validate_email\n'), ((5204, 5258), 'django.utils.translation.ugettext', 'ugettext', (['"""Scheduled action could not be created: {0}"""'], {}), "('Scheduled action could not be created: {0}')\n", (5212, 5258), False, 'from django.utils.translation import ugettext_lazy as _, ugettext\n')]
|
from fanstatic import Library, Resource
import js.angular
import js.fullcalendar
library = Library('angular-ui-calendar', 'resources')
angular_ui_calendar = Resource(
library,
'calendar.js',
depends=[js.angular.angular, js.fullcalendar.fullcalendar])
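# Typical fanstatic usage (sketch): calling .need() while handling a request
# marks the resource and its dependencies for inclusion in the rendered page.
# angular_ui_calendar.need()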
|
[
"fanstatic.Library",
"fanstatic.Resource"
] |
[((92, 135), 'fanstatic.Library', 'Library', (['"""angular-ui-calendar"""', '"""resources"""'], {}), "('angular-ui-calendar', 'resources')\n", (99, 135), False, 'from fanstatic import Library, Resource\n'), ((159, 256), 'fanstatic.Resource', 'Resource', (['library', '"""calendar.js"""'], {'depends': '[js.angular.angular, js.fullcalendar.fullcalendar]'}), "(library, 'calendar.js', depends=[js.angular.angular, js.\n fullcalendar.fullcalendar])\n", (167, 256), False, 'from fanstatic import Library, Resource\n')]
|
import sys
import os
import numpy as np
from sklearn import metrics
from .model import SmileGAN
from .utils import highest_matching_clustering, consensus_clustering, parse_validation_data
from .clustering import Smile_GAN_train
__author__ = "<NAME>"
__copyright__ = "Copyright 2019-2020 The CBICA & SBIA Lab"
__credits__ = ["<NAME>"]
__license__ = "See LICENSE file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def model_filtering(model_dirs, ncluster, data, covariate=None):
"""
	Function used to filter out models whose clustering results differ significantly from the others.
	This function deals with rare failing cases of Smile-GAN
Args:
model_dirs: list, list of dirs of all saved models
ncluster: int, number of defined clusters
data, data_frame, dataframe with same format as training data. CN data must be exactly same as CN data in training dataframe while
PT data can be any samples in or out of the training set.
covariate, data_frame, dataframe with same format as training covariate. CN data must be exactly same as CN data in training covariate while
PT data can be any samples in or out of the training set.
	Returns: list of indices indicating outlier models
"""
_, validation_data = parse_validation_data(data, covariate)
all_prediction_labels = []
for models in model_dirs:
model = SmileGAN()
model.load(models)
all_prediction_labels.append(np.argmax(model.predict_cluster(validation_data), axis=1))
model_aris = [[] for _ in range(len(model_dirs))]
filtered_models = []
for i in range(len(model_dirs)):
for j in range(len(model_dirs)):
if i!=j:
model_aris[i].append(metrics.adjusted_rand_score(all_prediction_labels[i], all_prediction_labels[j]))
median_aris = np.median(model_aris, axis=1)
for j in range(median_aris.shape[0]):
rest_aris = np.delete(median_aris,j)
if (median_aris[j]-np.mean(rest_aris))/np.std(rest_aris)<-2:
filtered_models.append(j)
return filtered_models
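# Hypothetical usage sketch (the model paths, cluster count and dataframe are
# assumptions for illustration):
# model_dirs = ['output/model0', 'output/model1', 'output/model2']
# outliers = model_filtering(model_dirs, 3, data_frame)
# kept_models = [m for i, m in enumerate(model_dirs) if i not in outliers]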
def calculate_ari(prediction_labels):
model_aris = []
for i in range(len(prediction_labels)):
for j in range(i+1,len(prediction_labels)):
model_aris.append(metrics.adjusted_rand_score(prediction_labels[i], prediction_labels[j]))
return np.mean(model_aris), np.std(model_aris)
def clustering_result(model_dirs, ncluster, consensus_type, data, covariate=None):
"""
	Function used to derive clustering results from several saved models
Args:
model_dirs: list, list of dirs of all saved models
ncluster: int, number of defined clusters
consensus_type: string, the method used for deriving final clustering results with all models derived through CV
choose between 'highest_matching_clustering' and 'consensus_clustering'
data, data_frame, dataframe with same format as training data. CN data must be exactly same as CN data in training dataframe while
PT data can be any samples in or out of the training set.
covariate, data_frame, dataframe with same format as training covariate. CN data must be exactly same as CN data in training covariate while
PT data can be any samples in or out of the training set.
Returns: clustering outputs.
"""
_, validation_data = parse_validation_data(data, covariate)
all_prediction_labels = []
all_prediction_probabilities = []
for models in model_dirs:
model = SmileGAN()
model.load(models)
all_prediction_labels.append(np.argmax(model.predict_cluster(validation_data), axis=1))
all_prediction_probabilities.append(model.predict_cluster(validation_data))
if len(model_dirs) > 1:
mean_ari, std_ari = calculate_ari(all_prediction_labels)
print("Results have Adjuested_random_index (ARI) = %.2f+- %.2f" %(mean_ari, std_ari))
if mean_ari<0.3 and consensus_type == 'highest_matching_clustering':
print('mean ARI < 0.3, consensus_clustering is recommended')
if len(all_prediction_labels) == 1:
return np.array(all_prediction_labels[0]), np.array(all_prediction_probabilities[0]), 1, 0
elif consensus_type == 'highest_matching_clustering':
cluster_label, cluster_prob = highest_matching_clustering(all_prediction_labels, all_prediction_probabilities, ncluster)
return cluster_label, cluster_prob, mean_ari, std_ari
elif consensus_type == 'consensus_clustering':
return consensus_clustering(all_prediction_labels, ncluster), None, mean_ari, std_ari
else:
raise Exception("Please choose between 'highest_matching_clustering' and 'consensus_clustering'")
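# Hypothetical usage sketch (model paths, cluster count and dataframe are
# assumptions): combine several saved models into one labeling.
# labels, probs, ari_mean, ari_std = clustering_result(
#     ['output/model0', 'output/model1'], 3, 'highest_matching_clustering', data_frame)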
def single_model_clustering(data, ncluster, start_saving_epoch, max_epoch, output_dir, WD_threshold, AQ_threshold, \
cluster_loss_threshold, covariate=None, saved_model_name='converged_model', lam=9, mu=5, batchSize=25, lipschitz_k = 0.5, verbose = False, \
beta1 = 0.5, lr = 0.0002, max_gnorm = 100, eval_freq = 5, save_epoch_freq = 5):
"""
	one of the Smile-GAN core clustering functions. Only one model will be trained (not recommended since results may not be reproducible)
Args:
data: dataframe, dataframe file with all ROI (input features) The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be the extracted features. e.g., the ROI features"
covariate: dataframe, not required; dataframe file with all confounding covariates to be corrected. The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be all confounding covariates. e.g., age, sex"
ncluster: int, number of defined clusters
start_saving_epoch: int, epoch number from which model will be saved and training will be stopped if stopping criteria satisfied
		max_epoch: int, maximum training epoch: training will stop even if criteria not satisfied.
		output_dir: str, the directory under which the model and results will be saved
		WD_threshold: int, chosen WD threshold for stopping criteria
		AQ_threshold: int, chosen AQ threshold for stopping criteria
		cluster_loss_threshold: int, chosen cluster_loss threshold for stopping criteria
saved_model_name: str, the name of the saved model
lam: int, hyperparameter for cluster loss
mu: int, hyperparameter for change loss
		batchSize: int, batch size for the training procedure
		lipschitz_k: float, hyperparameter for weight clipping of the mapping and clustering functions
verbose: bool, choose whether to print out training procedure
beta1: float, parameter of ADAM optimization method
lr: float, learning rate
max_gnorm: float, maximum gradient norm for gradient clipping
eval_freq: int, the frequency at which the model is evaluated during training procedure
save_epoch_freq: int, the frequency at which the model is saved during training procedure
Returns: clustering outputs.
"""
print('Start Smile-GAN for semi-supervised clustering')
Smile_GAN_model = Smile_GAN_train(ncluster, start_saving_epoch, max_epoch, WD_threshold, AQ_threshold, \
cluster_loss_threshold, lam=lam, mu=mu, batchSize=batchSize, lipschitz_k = lipschitz_k,
beta1 = beta1, lr = lr, max_gnorm = max_gnorm, eval_freq = eval_freq, save_epoch_freq = save_epoch_freq)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, verbose = verbose)
while not converge:
print("****** Model not converging or not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, verbose = verbose)
cluster_label, cluster_prob, mean_ari, std_ari = clustering_result([os.path.join(output_dir,saved_model_name)], ncluster, 'highest_matching_clustering', data, covariate)
pt_data = data.loc[data['diagnosis'] == 1][['participant_id','diagnosis']]
pt_data['cluster_label'] = cluster_label + 1
for i in range(ncluster):
pt_data['p'+str(i+1)] = cluster_prob[:,i]
pt_data.to_csv(os.path.join(output_dir,'clustering_result.csv'), index = False)
return pt_data
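# Illustrative call of the function above (not part of the original source; the
# dataframes and all argument values below are placeholders, only the parameter
# names come from the signature):
#
#   pt_data = single_model_clustering(
#       data=roi_df, ncluster=3, start_saving_epoch=8000, max_epoch=25000,
#       output_dir='./output', WD_threshold=0.11, AQ_threshold=30,
#       cluster_loss_threshold=0.003, covariate=covariate_df)
#
# The returned dataframe contains participant_id, diagnosis, the assigned
# cluster_label and one probability column per cluster, and the same table is
# written to clustering_result.csv under output_dir.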
def cross_validated_clustering(data, ncluster, fold_number, fraction, start_saving_epoch, max_epoch, output_dir, WD_threshold, AQ_threshold, \
cluster_loss_threshold, consensus_type, covariate=None, lam=9, mu=5, batchSize=25, lipschitz_k = 0.5, verbose = False, \
beta1 = 0.5, lr = 0.0002, max_gnorm = 100, eval_freq = 5, save_epoch_freq = 5, start_fold = 0, stop_fold = None, check_outlier = True):
"""
cross_validated clustering function using Smile-GAN (recommended)
Args:
	data: dataframe, dataframe file with all ROI (input features). The dataframe contains
		 the following headers:
		 i) the first column is the participant_id;
		 ii) the second column should be the diagnosis;
		 the following columns should be the extracted features, e.g., the ROI features
	covariate: dataframe, not required; dataframe file with all confounding covariates to be corrected. The dataframe contains
		 the following headers:
		 i) the first column is the participant_id;
		 ii) the second column should be the diagnosis;
		 the following columns should be all confounding covariates, e.g., age, sex
ncluster: int, number of defined clusters
fold_number: int, number of folds for leave-out cross validation
fraction: float, fraction of data used for training in each fold
	start_saving_epoch: int, epoch number from which the model will be saved and training will be stopped if the stopping criteria are satisfied
	max_epoch: int, maximum training epoch: training will stop even if the criteria are not satisfied.
	output_dir: str, the directory under which the model and results will be saved
	WD_threshold: int, chosen WD threshold for the stopping criteria
	AQ_threshold: int, chosen AQ threshold for the stopping criteria
	cluster_loss_threshold: int, chosen cluster_loss threshold for the stopping criteria
	###load_model: bool, whether to load a pre-saved checkpoint
consensus_type: string, the method used for deriving final clustering results with all models saved during CV
choose between 'highest_matching_clustering' and 'consensus_clustering'
saved_model_name: str, the name of the saved model
lam: int, hyperparameter for cluster loss
mu: int, hyperparameter for change loss
	batchSize: int, batch size for the training procedure
	lipschitz_k: float, hyperparameter for weight clipping of the mapping and clustering functions
verbose: bool, choose whether to print out training procedure
beta1: float, parameter of ADAM optimization method
lr: float, learning rate
max_gnorm: float, maximum gradient norm for gradient clipping
eval_freq: int, the frequency at which the model is evaluated during training procedure
save_epoch_freq: int, the frequency at which the model is saved during training procedure
	start_fold: int, index of the last saved fold; used to restart a previously half-finished cross-validation.
		Defaults to 0, which indicates a new CV process.
	stop_fold: int, index of the fold at which the CV stops early; used to stop the CV process early and resume it later.
		Defaults to None, in which case the CV runs until the end.
	check_outlier: bool, whether to check for outlier models (potentially unsuccessful models) after the CV process and retrain those folds
Returns: clustering outputs.
"""
print('Start Smile-GAN for semi-supervised clustering')
Smile_GAN_model = Smile_GAN_train(ncluster, start_saving_epoch, max_epoch, WD_threshold, AQ_threshold, \
cluster_loss_threshold, lam=lam, mu=mu, batchSize=batchSize, \
lipschitz_k = lipschitz_k, beta1 = beta1, lr = lr, max_gnorm = max_gnorm, eval_freq = eval_freq, save_epoch_freq = save_epoch_freq)
	saved_models = [os.path.join(output_dir, 'converged_model_fold'+str(i)) for i in range(fold_number)]
if stop_fold == None:
stop_fold = fold_number
for i in range(start_fold, stop_fold):
print('****** Starting training of Fold '+str(i)+" ******")
		saved_model_name = 'converged_model_fold'+str(i)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
while not converge:
print("****** Model not converging or not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
if check_outlier:
print('****** Start Checking outlier models ******')
outlier_models = model_filtering(saved_models, ncluster, data, covariate)
if len(outlier_models) > 0:
print('Model', end=' ')
for model in outlier_models:
print(str(model),end=' ')
print('have low agreement with other models')
else:
print('****** There are no outlier models ******')
for i in outlier_models:
print('****** Starting training of Fold '+str(i)+" ******")
		saved_model_name = 'converged_model_fold'+str(i)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
while not converge:
print("****** Model not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
cluster_label, cluster_prob, mean_ari, std_ari = clustering_result(saved_models, ncluster, consensus_type, data, covariate)
pt_data = data.loc[data['diagnosis'] == 1][['participant_id','diagnosis']]
pt_data['cluster_label'] = cluster_label + 1
if consensus_type == "highest_matching_clustering":
for i in range(ncluster):
pt_data['p'+str(i+1)] = cluster_prob[:,i]
pt_data["ARI = %.2f+- %.2f" %(mean_ari, std_ari)] = ''
pt_data.to_csv(os.path.join(output_dir,'clustering_result.csv'), index = False)
print('****** Smile-GAN clustering finished ******')
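# Illustrative call of the cross-validated variant (not part of the original source;
# argument values are placeholders):
#
#   cross_validated_clustering(
#       data=roi_df, ncluster=3, fold_number=10, fraction=0.8,
#       start_saving_epoch=8000, max_epoch=25000, output_dir='./output',
#       WD_threshold=0.11, AQ_threshold=30, cluster_loss_threshold=0.003,
#       consensus_type='highest_matching_clustering', covariate=covariate_df)
#
# One model is trained per fold on `fraction` of the data; the saved fold models are
# then combined with the chosen consensus_type and the final labels are written to
# clustering_result.csv under output_dir.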
|
[
"numpy.median",
"numpy.std",
"numpy.mean",
"numpy.array",
"sklearn.metrics.adjusted_rand_score",
"os.path.join",
"numpy.delete"
] |
[((1793, 1822), 'numpy.median', 'np.median', (['model_aris'], {'axis': '(1)'}), '(model_aris, axis=1)\n', (1802, 1822), True, 'import numpy as np\n'), ((1876, 1901), 'numpy.delete', 'np.delete', (['median_aris', 'j'], {}), '(median_aris, j)\n', (1885, 1901), True, 'import numpy as np\n'), ((2263, 2282), 'numpy.mean', 'np.mean', (['model_aris'], {}), '(model_aris)\n', (2270, 2282), True, 'import numpy as np\n'), ((2284, 2302), 'numpy.std', 'np.std', (['model_aris'], {}), '(model_aris)\n', (2290, 2302), True, 'import numpy as np\n'), ((8038, 8087), 'os.path.join', 'os.path.join', (['output_dir', '"""clustering_result.csv"""'], {}), "(output_dir, 'clustering_result.csv')\n", (8050, 8087), False, 'import os\n'), ((13855, 13904), 'os.path.join', 'os.path.join', (['output_dir', '"""clustering_result.csv"""'], {}), "(output_dir, 'clustering_result.csv')\n", (13867, 13904), False, 'import os\n'), ((3930, 3964), 'numpy.array', 'np.array', (['all_prediction_labels[0]'], {}), '(all_prediction_labels[0])\n', (3938, 3964), True, 'import numpy as np\n'), ((3966, 4007), 'numpy.array', 'np.array', (['all_prediction_probabilities[0]'], {}), '(all_prediction_probabilities[0])\n', (3974, 4007), True, 'import numpy as np\n'), ((7724, 7766), 'os.path.join', 'os.path.join', (['output_dir', 'saved_model_name'], {}), '(output_dir, saved_model_name)\n', (7736, 7766), False, 'import os\n'), ((1942, 1959), 'numpy.std', 'np.std', (['rest_aris'], {}), '(rest_aris)\n', (1948, 1959), True, 'import numpy as np\n'), ((2182, 2253), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['prediction_labels[i]', 'prediction_labels[j]'], {}), '(prediction_labels[i], prediction_labels[j])\n', (2209, 2253), False, 'from sklearn import metrics\n'), ((1697, 1776), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['all_prediction_labels[i]', 'all_prediction_labels[j]'], {}), '(all_prediction_labels[i], all_prediction_labels[j])\n', (1724, 1776), False, 'from sklearn import metrics\n'), ((1922, 1940), 'numpy.mean', 'np.mean', (['rest_aris'], {}), '(rest_aris)\n', (1929, 1940), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import io
import re
import sys
import warnings
from scipy.stats import skew, skewtest
from scipy.stats import rankdata
from .plot_1var import *
# from plot_1var import * # for local testing only
from IPython.display import HTML
def print_list(l, br=', '):
o = ''
for e in l:
o += str(e) + br
return o[:-len(br)]
def summary(s, max_lev=10, br_way=', ', sum_num_like_cat_if_nunique_small=5):
'''
a function that takes a series and returns a summary string
'''
if s.nunique(dropna=False) == 1:
return(f'all the same: {s.unique()[0]}')
elif s.notnull().sum() == 0:
return(f'all are NaNs')
if s.dtype.name in ['object', 'bool', 'category'] or \
(('float' in s.dtype.name or 'int' in s.dtype.name) \
and s.nunique() <= sum_num_like_cat_if_nunique_small):
if len(s.unique()) <= max_lev:
# consider drop na?
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
for name, v in zip(vc.index, vc.values):
s += f'{name} {v*100:>2.0f}%' + br_way
return s[:-len(br_way)]
else:
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
i = 0
cur_sum_perc = 0
for name, v in zip(vc.index, vc.values):
                if i == max_lev or \
                    (i >= 5 and cur_sum_perc >= 0.8) or \
                    (i == 0 and v < 0.05):
                    # stop once at least 5 levels covering 80% of the data have been
                    # listed, or if even the most frequent level covers less than 5%
                    break
s += f'{name} {v*100:>2.0f}%' + br_way
i += 1
cur_sum_perc += v
s += f'other {(1-cur_sum_perc)*100:>2.0f}%'
# return s[:-len(br_way)]
return s
elif 'float' in s.dtype.name or 'int' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values.tolist()
cv = round(s.std()/s.mean(), 2) if s.mean() != 0 else 'nan'
sk = round(skew(s[s.notnull()]), 2) if len(s[s.notnull()]) > 0 else 'nan'
o = f'{qs}{br_way}\
mean: {s.mean():.2f} std: {s.std():.2f}{br_way}\
cv: {cv} skew: {sk}'
if sum(s.notnull()) > 8: # requirement of skewtest
p = skewtest(s[s.notnull()]).pvalue
o += f'*' if p <= 0.05 else ''
if min(s[s!=0]) > 0 and len(s[s!=0]) > 8: # take log
o += f'{br_way}log skew: {skew(np.log(s[s>0])):.2f}'
p = skewtest(np.log(s[s!=0])).pvalue
            o += f'*' if p == p and p <= 0.05 else ''  # p == p filters out NaN p-values
return o
elif 'datetime' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values
dt_range = (qs[-1]-qs[0]).astype('timedelta64[D]')
if dt_range > np.timedelta64(1, 'D'):
to_print = [np.datetime_as_string(q, unit='D') for q in qs]
else:
to_print = [np.datetime_as_string(q, unit='s') for q in qs]
return print_list(to_print, br=br_way)
else:
return ''
def possible_dup_lev(series, threshold=0.9, truncate=False):
try:
from fuzzywuzzy import fuzz
except ImportError:
sys.exit("""Please install fuzzywuzzy first
install it using: pip install fuzzywuzzy
if installing the dependency python-levenshtein is failed and you are using Anaconda, try
conda install -c conda-forge python-levenshtein""")
if series.dtype.name not in ['category', 'object']:
return ''
if series.nunique() > 100 and series.dtype.name == 'object' and truncate: # maybe should adjust
# warnings.warn('Checking duplicates on a long list will take a long time', RuntimeWarning)
# simplified = series.str.lower().replace(r'\W', '')
# if simplified.nunique() < series.nunique():
# return f"too many levls, didn't check, but didn't pass a quick check"
# else:
# return ''
return ''
threshold *= 100
l = series.unique().tolist()
l = [y for y in l if type(y) == str] # remove nan, True, False
candidate = []
for i in range(len(l)):
for j in range(i+1, len(l)):
if l[i].isdigit() or l[j].isdigit():
continue
if any([fuzz.ratio(l[i], l[j]) > threshold,
fuzz.partial_ratio(l[i], l[j]) > threshold,
fuzz.token_sort_ratio(l[i], l[j]) > threshold,
fuzz.token_set_ratio(l[i], l[j]) > threshold]):
candidate.append((l[i], l[j]))
o = '; '.join(['('+', '.join(can)+')' for can in candidate])
if truncate and len(o) > 1000:
o = o[:1000] + f'...truncated, call TEF.possible_dup_lev({series.name}) for a full result'
return o
def dfmeta(df, description=None, max_lev=10, transpose=True, sample=True,
style=True, color_bg_by_type=True, highlight_nan=0.5, in_cell_next_line=True,
drop=None,
check_possible_error=True, dup_lev_prop=0.9,
fitted_feat_imp=None,
plot=True,
standard=False):
# validation
assert max_lev > 2, 'max_lev should > 2'
assert sample < df.shape[0], 'sample should < nrows'
if sample == True and df.shape[0] < 3:
sample = df.shape[0]
assert drop is None or 'NaNs' not in drop, 'Cannot drop NaNs for now'
assert drop is None or 'dtype' not in drop, 'Cannot drop dtype for now'
warnings.simplefilter('ignore', RuntimeWarning) # caused from skewtest, unknown
if standard: # overwrite thise args
check_possible_error = False
sample = False
# drop=['unique levs']
# the first line, shape, dtypes, memory
buffer = io.StringIO()
df.info(verbose=False, buf=buffer)
s = buffer.getvalue()
if style == False:
print(f'shape: {df.shape}')
print(s.split('\n')[-3])
print(s.split('\n')[-2])
color_bg_by_type, highlight_nan, in_cell_next_line = False, False, False
br_way = "<br/> " if in_cell_next_line else ", " # notice a space here
o = pd.DataFrame(columns=df.columns)
o.loc['idx'] = list(range(df.shape[1]))
o.loc['dtype'] = df.dtypes
if description is not None:
o.loc['description'] = ''
for col, des in description.items():
if col in df.columns.tolist():
o.loc['description', col] = des
o.loc['NaNs'] = df.apply(lambda x: f'{sum(x.isnull())}{br_way}{sum(x.isnull())/df.shape[0]*100:.0f}%')
o.loc['unique counts'] = df.apply(lambda x: f'{len(x.unique())}{br_way}{len(x.unique())/df.shape[0]*100:.0f}%')
# def unique_index(s):
# if len(s.unique()) <= max_lev:
# o = ''
# for i in s.value_counts(dropna=False).index.tolist():
# o += str(i) + br_way
# return o[:-len(br_way)]
# else:
# return ''
# o.loc['unique levs'] = df.apply(unique_index, result_type='expand')
o.loc['summary'] = df.apply(summary, result_type='expand', max_lev=max_lev, br_way=br_way) # need result_type='true' or it will all convert to object dtype
# maybe us args=(arg1, ) or sth?
if plot and style:
o.loc['summary plot'] = ['__TO_PLOT_TO_FILL__'] * df.shape[1]
if fitted_feat_imp is not None:
def print_fitted_feat_imp(fitted_feat_imp, indices):
fitted_feat_imp = fitted_feat_imp[fitted_feat_imp.notnull()]
o = pd.Series(index=indices)
rank = len(fitted_feat_imp) - rankdata(fitted_feat_imp).astype(int) + 1
for i in range(len(fitted_feat_imp)):
o[fitted_feat_imp.index[i]] = f'{rank[i]:.0f}/{len(fitted_feat_imp)} {fitted_feat_imp[i]:.2f} {fitted_feat_imp[i]/sum(fitted_feat_imp)*100:.0f}%'
o.loc[o.isnull()] = ''
return o
o.loc['fitted feature importance'] = print_fitted_feat_imp(fitted_feat_imp, df.columns)
if check_possible_error:
def possible_nan(x):
if x.dtype.name not in ['category', 'object']:
return ''
check_list = ['NEED', 'nan', 'Nan', 'nAn', 'naN', 'NAn', 'nAN', 'NaN', 'NAN']
check_list_re = [r'^ +$', '^null$', r'^[^a-zA-Z0-9]*$']
o = ''
if sum(x==0) > 0:
o += f' "0": {sum(x==0)}, {sum(x==0)/df.shape[0]*100:.2f}%{br_way}'
for to_check in check_list:
if to_check in x.unique().tolist():
o += f' "{to_check}": {sum(x==to_check)}, {sum(x==to_check)/df.shape[0]*100:.2f}%{br_way}'
for to_check in check_list_re:
is_match = [re.match(to_check, str(lev), flags=re.IGNORECASE) is not None for lev in x]
if any(is_match):
to_print = ', '.join(x[is_match].unique())
o += f' "{to_print}": {sum(is_match)}, {sum(is_match)/df.shape[0]*100:.2f}%{br_way}'
if len(o) > 1000:
                o = o[:1000] + f'...truncated'
return o
o.loc['possible NaNs'] = df.apply(possible_nan)
o.loc['possible dup lev'] = df.apply(possible_dup_lev, args=(dup_lev_prop, True))
if sample != False:
if sample == True and type(sample) is not int:
sample_df = df.sample(3).sort_index()
elif sample == 'head':
sample_df = df.head(3)
elif type(sample) is int:
sample_df = df.sample(sample)
sample_df.index = ['row ' + str(x) for x in sample_df.index.tolist()]
o = o.append(sample_df)
if drop:
o = o.drop(labels=drop)
if transpose:
o = o.transpose()
o = o.rename_axis('col name').reset_index()
if color_bg_by_type or highlight_nan != False:
def style_rule(data, color='yellow'):
if color_bg_by_type:
cell_rule = 'border: 1px solid white;'
# https://www.w3schools.com/colors/colors_picker.asp
# saturation 92%, lightness 95%
cmap = {'object': '#f2f2f2',
'datetime64[ns]': '#e7feee',
'int8': '#fefee7',
'int16': '#fefee7',
'int32': '#fefee7',
'int64': '#fefee7',
'uint8': '#fefee7',
'uint16': '#fefee7',
'uint32': '#fefee7',
'uint64': '#fefee7',
'float16': '#fef2e7',
'float32': '#fef2e7',
'float64': '#fef2e7',
'bool': '#e7fefe',
'category': '#e7ecfe'}
# if data.iloc[2] not in cmap: # idx 2 is dtype
if data.loc['dtype'].name not in cmap:
cell_rule += "background-color: grey"
else:
cell_rule += "background-color: {}".format(cmap[data.loc['dtype'].name])
rule = [cell_rule] * len(data)
if transpose:
rule[0] = 'background-color: white;'
else:
rule = [''] * len(data)
# if float(data.iloc[3][-3:-1])/100 > highlight_nan or data.iloc[3][-4:] == '100%': # idx 3 is NaNs
if float(data.loc['NaNs'][-3:-1])/100 > highlight_nan or data.loc['NaNs'][-4:] == '100%':
rule[np.where(data.index=='NaNs')[0][0]] += '; color: red'
if data.loc['unique counts'][:(3+len(br_way))] == f'{df.shape[0]}{br_way}': # all unique
rule[np.where(data.index=='unique counts')[0][0]] += '; color: blue'
elif data.loc['unique counts'][:(1+len(br_way))] == f'1{br_way}': # all the same
rule[np.where(data.index=='unique counts')[0][0]] += '; color: red'
if fitted_feat_imp is not None:
if data.loc['fitted feature importance'][:2] in ['1/', '2/', '3/']:
rule[np.where(data.index=='fitted feature importance')[0][0]] += '; font-weight: bold'
return rule
o = o.style.apply(style_rule, axis=int(transpose)) # axis=1 for row-wise, for transpose=True
if transpose:
o = o.hide_index()
if style: # caption
s = print_list(s.split('\n')[-3:-1], br='; ')
o = o.set_caption(f"shape: {df.shape}; {s}")
o = o.render() # convert from pandas.io.formats.style.Styler to html code
if plot and style:
for c in range(df.shape[1]):
html_1var = plot_1var_series(df, c, max_lev, log_numeric=False, save_plt=None, return_html=True)
o = o.replace('__TO_PLOT_TO_FILL__', html_1var, 1)
o = HTML(o) # convert from html to IPython.core.display.HTML
return o
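# Illustrative usage of dfmeta (not part of the original source; the dataframe `df`
# and the description dict are placeholders):
#
#   desc = {'age': 'age in years', 'income': 'annual income in USD'}
#   meta = dfmeta(df, description=desc, max_lev=10)
#   dfmeta_to_htmlfile(meta, 'meta_report.html', head='My dataset')
#
# With style=True (the default) the result is an IPython HTML object, which is
# exactly what dfmeta_to_htmlfile below expects.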
def dfmeta_to_htmlfile(styled_df, filename, head=''):
'''
styled_df should be <class 'IPython.core.display.HTML'>
'''
r = f'<h1>{head}</h1>\n' + '<body>\n' + styled_df.data + '\n</body>'
with open(filename, 'w') as f:
f.write(r)
return f'{filename} saved'
# def print_html_standard(df, description):
# meta = dfmeta(df,
# description=description,
# check_possible_error=False, sample=False, drop=['unique levs'])
# dfmeta_verbose_html = ''
# buffer = io.StringIO()
# df.info(verbose=False, buf=buffer)
# s = buffer.getvalue().split('\n')
# dfmeta_verbose = f"shape: {df.shape}<br/>{s[-3]}<br/>{s[-2]}"
# dfmeta_verbose_html = '<p>' + dfmeta_verbose + '</p>'
# r = dfmeta_verbose_html + '<body>\n' + meta.data + '\n</body>'
# for e in r.split('\n'):
# print(e)
# def dfmeta_to_htmlfile_standard(df, description, filename, head):
# '''
# a function that call dfmeta and then dfmeta_to_htmlfile using a standard configuration
# '''
# meta = dfmeta(df,
# description=description,
# check_possible_error=False, sample=False, drop=['unique levs'])
# return dfmeta_to_htmlfile(meta, filename, head)
def get_desc_template(df, var_name='desc', suffix_idx=False):
print(var_name, '= {')
max_cn = max([len(x) for x in df.columns.tolist()]) + 1
len_cn = 25 if max_cn > 25 else max_cn
for i in range(df.shape[1]):
c = df.columns[i]
c += '"'
if c[:-1] != df.columns.tolist()[-1]:
if suffix_idx == False:
print(f' "{c:{len_cn}}: "",')
else:
print(f' "{c:{len_cn}}: "", # {i}')
else:
if suffix_idx == False:
print(f' "{c:{len_cn}}: ""')
else:
print(f' "{c:{len_cn}}: "" # {i}')
print('}')
def get_desc_template_file(df, filename='desc.py', var_name='desc', suffix_idx=False):
'''%run filename.py'''
max_cn = max([len(x) for x in df.columns.tolist()]) + 1
len_cn = 25 if max_cn > 25 else max_cn
o = var_name + ' = {' + '\n'
for i in range(df.shape[1]):
c = df.columns[i]
c += '"'
if c[:-1] != df.columns.tolist()[-1]:
o += f' "{c:{len_cn}}: "", # {i}' + '\n'
else:
o += f' "{c:{len_cn}}: "" # {i}' + '\n'
o += '}'
with open(filename, 'w') as f:
f.write(o)
return f'{filename} saved'
|
[
"pandas.DataFrame",
"fuzzywuzzy.fuzz.ratio",
"io.StringIO",
"fuzzywuzzy.fuzz.partial_ratio",
"fuzzywuzzy.fuzz.token_sort_ratio",
"warnings.simplefilter",
"numpy.log",
"numpy.datetime_as_string",
"scipy.stats.rankdata",
"numpy.timedelta64",
"numpy.where",
"pandas.Series",
"fuzzywuzzy.fuzz.token_set_ratio",
"IPython.display.HTML"
] |
[((5613, 5660), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (5634, 5660), False, 'import warnings\n'), ((5883, 5896), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5894, 5896), False, 'import io\n'), ((6257, 6289), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (6269, 6289), True, 'import pandas as pd\n'), ((12880, 12887), 'IPython.display.HTML', 'HTML', (['o'], {}), '(o)\n', (12884, 12887), False, 'from IPython.display import HTML\n'), ((7642, 7666), 'pandas.Series', 'pd.Series', ([], {'index': 'indices'}), '(index=indices)\n', (7651, 7666), True, 'import pandas as pd\n'), ((2942, 2964), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (2956, 2964), True, 'import numpy as np\n'), ((2990, 3024), 'numpy.datetime_as_string', 'np.datetime_as_string', (['q'], {'unit': '"""D"""'}), "(q, unit='D')\n", (3011, 3024), True, 'import numpy as np\n'), ((3076, 3110), 'numpy.datetime_as_string', 'np.datetime_as_string', (['q'], {'unit': '"""s"""'}), "(q, unit='s')\n", (3097, 3110), True, 'import numpy as np\n'), ((4457, 4479), 'fuzzywuzzy.fuzz.ratio', 'fuzz.ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4467, 4479), False, 'from fuzzywuzzy import fuzz\n'), ((4510, 4540), 'fuzzywuzzy.fuzz.partial_ratio', 'fuzz.partial_ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4528, 4540), False, 'from fuzzywuzzy import fuzz\n'), ((4570, 4603), 'fuzzywuzzy.fuzz.token_sort_ratio', 'fuzz.token_sort_ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4591, 4603), False, 'from fuzzywuzzy import fuzz\n'), ((4634, 4666), 'fuzzywuzzy.fuzz.token_set_ratio', 'fuzz.token_set_ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4654, 4666), False, 'from fuzzywuzzy import fuzz\n'), ((2667, 2684), 'numpy.log', 'np.log', (['s[s != 0]'], {}), '(s[s != 0])\n', (2673, 2684), True, 'import numpy as np\n'), ((7709, 7734), 'scipy.stats.rankdata', 'rankdata', (['fitted_feat_imp'], {}), '(fitted_feat_imp)\n', (7717, 7734), False, 'from scipy.stats import rankdata\n'), ((11596, 11626), 'numpy.where', 'np.where', (["(data.index == 'NaNs')"], {}), "(data.index == 'NaNs')\n", (11604, 11626), True, 'import numpy as np\n'), ((11773, 11812), 'numpy.where', 'np.where', (["(data.index == 'unique counts')"], {}), "(data.index == 'unique counts')\n", (11781, 11812), True, 'import numpy as np\n'), ((2616, 2632), 'numpy.log', 'np.log', (['s[s > 0]'], {}), '(s[s > 0])\n', (2622, 2632), True, 'import numpy as np\n'), ((11951, 11990), 'numpy.where', 'np.where', (["(data.index == 'unique counts')"], {}), "(data.index == 'unique counts')\n", (11959, 11990), True, 'import numpy as np\n'), ((12169, 12220), 'numpy.where', 'np.where', (["(data.index == 'fitted feature importance')"], {}), "(data.index == 'fitted feature importance')\n", (12177, 12220), True, 'import numpy as np\n')]
|
"""Definition of all runner classes."""
import multiprocessing
import os
import time
import warnings
from typing import List, Optional
from .abstract_runner import AbstractRunner
from .util import start_process
class SingleRunner(AbstractRunner):
"""Runner in a Single Machine.
The runner submits the jobs in parallel to the `num_workers'. While the workers are
working, it keeps on checking and spawns a new job every time a worker is freed up.
Parameters
----------
name: str.
Runner name.
    num_threads: int, optional. (default=1).
Number of threads to use.
num_workers: int, optional. (default = cpu_count() // num_threads - 1).
Number of workers where to run the process.
"""
num_workers: int
def __init__(
self, name: str, num_threads: int = 1, num_workers: Optional[int] = None
):
super().__init__(name, num_threads=num_threads)
if num_workers is None:
num_workers = max(1, multiprocessing.cpu_count() // num_threads - 1)
if (num_workers >= multiprocessing.cpu_count() // num_threads) and (
num_workers > 1
):
num_workers = max(1, multiprocessing.cpu_count() // num_threads - 1)
warnings.warn(f"Too many workers requested. Limiting them to {num_workers}")
self.num_workers = num_workers
def run(self, cmd_list: List[str]) -> List[str]:
"""See `AbstractRunner.run'."""
workers_idle = [False] * self.num_workers
pool = [start_process(lambda: None) for _ in range(self.num_workers)]
tasks = cmd_list[:]
while not all(workers_idle):
for i in range(self.num_workers):
if not pool[i].is_alive():
pool[i].terminate()
if len(tasks) > 0:
time.sleep(1)
cmd = tasks.pop(0)
pool[i] = start_process(lambda x: os.system(x), (cmd,))
else:
workers_idle[i] = True
return cmd_list
def run_batch(self, cmd_list: List[str]) -> str:
"""See `AbstractRunner.run_batch'."""
return "".join(self.run(cmd_list))
|
[
"warnings.warn",
"time.sleep",
"os.system",
"multiprocessing.cpu_count"
] |
[((1254, 1330), 'warnings.warn', 'warnings.warn', (['f"""Too many workers requested. Limiting them to {num_workers}"""'], {}), "(f'Too many workers requested. Limiting them to {num_workers}')\n", (1267, 1330), False, 'import warnings\n'), ((1072, 1099), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1097, 1099), False, 'import multiprocessing\n'), ((996, 1023), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1021, 1023), False, 'import multiprocessing\n'), ((1194, 1221), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1219, 1221), False, 'import multiprocessing\n'), ((1850, 1863), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1860, 1863), False, 'import time\n'), ((1965, 1977), 'os.system', 'os.system', (['x'], {}), '(x)\n', (1974, 1977), False, 'import os\n')]
|
"""GitLab merge requests collector."""
from typing import cast
from collector_utilities.functions import match_string_or_regular_expression
from collector_utilities.type import URL, Value
from source_model import Entities, Entity, SourceResponses
from .base import GitLabBase
class GitLabMergeRequests(GitLabBase):
"""Collector class to measure the number of merge requests."""
async def _api_url(self) -> URL:
"""Override to return the merge requests API."""
return await self._gitlab_api_url("merge_requests")
async def _landing_url(self, responses: SourceResponses) -> URL:
"""Extend to add the project branches."""
return URL(f"{str(await super()._landing_url(responses))}/{self._parameter('project')}/-/merge_requests")
async def _parse_entities(self, responses: SourceResponses) -> Entities:
"""Override to parse the merge requests."""
merge_requests = []
for response in responses:
merge_requests.extend(await response.json())
return Entities(self._create_entity(mr) for mr in merge_requests if self._include_merge_request(mr))
async def _parse_total(self, responses: SourceResponses) -> Value:
"""Override to parse the total number of merge requests."""
return str(sum([len(await response.json()) for response in responses]))
@staticmethod
def _create_entity(merge_request) -> Entity:
"""Create an entity from a GitLab JSON result."""
return Entity(
key=merge_request["id"],
title=merge_request["title"],
target_branch=merge_request["target_branch"],
url=merge_request["web_url"],
state=merge_request["state"],
created=merge_request.get("created_at"),
updated=merge_request.get("updated_at"),
merged=merge_request.get("merged_at"),
closed=merge_request.get("closed_at"),
downvotes=str(merge_request.get("downvotes", 0)),
upvotes=str(merge_request.get("upvotes", 0)),
)
def _include_merge_request(self, merge_request) -> bool:
"""Return whether the merge request should be counted."""
request_matches_state = merge_request["state"] in self._parameter("merge_request_state")
branches = self._parameter("target_branches_to_include")
target_branch = merge_request["target_branch"]
request_matches_branches = match_string_or_regular_expression(target_branch, branches) if branches else True
# If the required number of upvotes is zero, merge requests are included regardless of how many upvotes they
# actually have. If the required number of upvotes is more than zero then only merge requests that have fewer
# than the minimum number of upvotes are included in the count:
required_upvotes = int(cast(str, self._parameter("upvotes")))
request_has_fewer_than_min_upvotes = required_upvotes == 0 or int(merge_request["upvotes"]) < required_upvotes
return request_matches_state and request_matches_branches and request_has_fewer_than_min_upvotes
|
[
"collector_utilities.functions.match_string_or_regular_expression"
] |
[((2443, 2502), 'collector_utilities.functions.match_string_or_regular_expression', 'match_string_or_regular_expression', (['target_branch', 'branches'], {}), '(target_branch, branches)\n', (2477, 2502), False, 'from collector_utilities.functions import match_string_or_regular_expression\n')]
|
import io
import os
import sys
from tempfile import mktemp
def get_ipython_capture():
try:
# This will work inside IPython but not outside it.
name = get_ipython().__class__.__name__
if name.startswith('ZMQ'):
from IPython.utils.capture import capture_output
return capture_output
else:
return None
except NameError:
return None
class CaptureStream(object):
"""A context manager which captures any errors on a given stream (like
sys.stderr). The stream is captured and the outputs can be used.
We treat sys.stderr and stdout specially as very often these are
overridden by nose or IPython. We always wrap the underlying file
descriptors in this case as this is the intent of this context manager.
This is somewhat based on this question:
http://stackoverflow.com/questions/7018879/disabling-output-when-compiling-with-distutils
Examples
--------
See the tests in tests/test_capture_stream.py for example usage.
"""
def __init__(self, stream=sys.stderr):
self.stream = stream
if stream is sys.stderr:
self.fileno = 2
elif stream is sys.stdout:
self.fileno = 1
else:
self.fileno = stream.fileno()
self.orig_stream = None
self.tmp_stream = None
self.tmp_path = ''
self._cached_output = None
def __enter__(self):
if sys.platform.startswith('win32') and sys.version_info[:2] > (3, 5):
return self
self.orig_stream = os.dup(self.fileno)
self.tmp_path = mktemp()
self.tmp_stream = io.open(self.tmp_path, 'w+', encoding='utf-8')
os.dup2(self.tmp_stream.fileno(), self.fileno)
return self
def __exit__(self, type, value, tb):
if sys.platform.startswith('win32') and sys.version_info[:2] > (3, 5):
return
if self.orig_stream is not None:
os.dup2(self.orig_stream, self.fileno)
if self.tmp_stream is not None:
self._cache_output()
self.tmp_stream.close()
os.remove(self.tmp_path)
def _cache_output(self):
if self._cached_output is not None:
return
tmp_stream = self.tmp_stream
result = ''
if tmp_stream is not None:
tmp_stream.flush()
tmp_stream.seek(0)
result = tmp_stream.read()
self._cached_output = result
def get_output(self):
"""Return the captured output.
"""
if self._cached_output is None:
self._cache_output()
return self._cached_output
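# Illustrative usage (a minimal sketch, not taken from the project's test suite):
#
#   with CaptureStream(sys.stderr) as cap:
#       sys.stderr.write('hidden warning\n')
#   text = cap.get_output()   # -> 'hidden warning\n'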
class CaptureMultipleStreams(object):
"""This lets one capture multiple streams together.
"""
def __init__(self, streams=None):
streams = (sys.stdout, sys.stderr) if streams is None else streams
self.streams = streams
self.captures = [CaptureStream(x) for x in streams]
cap = get_ipython_capture()
if cap:
self.jcap = cap(stdout=True, stderr=True, display=True)
else:
self.jcap = None
self.joutput = None
def __enter__(self):
for capture in self.captures:
capture.__enter__()
if self.jcap:
self.joutput = self.jcap.__enter__()
return self
def __exit__(self, type, value, tb):
for capture in self.captures:
capture.__exit__(type, value, tb)
if self.jcap:
self.jcap.__exit__(type, value, tb)
def get_output(self):
out = list(x.get_output() for x in self.captures)
if self.joutput:
out[0] += self.joutput.stdout
out[1] += self.joutput.stderr
return out
|
[
"sys.platform.startswith",
"os.remove",
"os.dup2",
"os.dup",
"io.open",
"tempfile.mktemp"
] |
[((1591, 1610), 'os.dup', 'os.dup', (['self.fileno'], {}), '(self.fileno)\n', (1597, 1610), False, 'import os\n'), ((1635, 1643), 'tempfile.mktemp', 'mktemp', ([], {}), '()\n', (1641, 1643), False, 'from tempfile import mktemp\n'), ((1670, 1716), 'io.open', 'io.open', (['self.tmp_path', '"""w+"""'], {'encoding': '"""utf-8"""'}), "(self.tmp_path, 'w+', encoding='utf-8')\n", (1677, 1716), False, 'import io\n'), ((1472, 1504), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (1495, 1504), False, 'import sys\n'), ((1845, 1877), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (1868, 1877), False, 'import sys\n'), ((1985, 2023), 'os.dup2', 'os.dup2', (['self.orig_stream', 'self.fileno'], {}), '(self.orig_stream, self.fileno)\n', (1992, 2023), False, 'import os\n'), ((2145, 2169), 'os.remove', 'os.remove', (['self.tmp_path'], {}), '(self.tmp_path)\n', (2154, 2169), False, 'import os\n')]
|
import logging
from pathlib import Path
from sys import argv
import var
import telethon.utils
from telethon import TelegramClient
from telethon import events,Button
import os
from var import Var
from . import beast
from telethon.tl import functions
from beastx.Configs import Config
from telethon.tl.functions.messages import AddChatUserRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.functions.channels import LeaveChannelRequest
from telethon.tl.functions.account import UpdateProfileRequest
from beastx.utils import load_module, start_assistant
from importlib import import_module
from telethon.tl.types import InputMessagesFilterDocument
import asyncio
from . import bot,sedmrunal
bot = beast
# from . import semxx,semxxx
#####################################
plugin_channel = "@BeastX_Plugins"
#####################################
sur = Config.PRIVATE_GROUP_ID
UL = Config.TG_BOT_USER_NAME_BF_HER
VR = "Beast 0.1"
chat_id = sur
MSG = f"""
✨𝔹𝕖𝕒𝕤𝕥 ℍ𝕒𝕤 𝔹𝕖𝕖𝕟 𝔻𝕖𝕡𝕝𝕠𝕪𝕖𝕕!
☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎
┏━━━━━━━━━━━━━━━━━
┣•Assistant➠ {UL}
┣•Status➠ `Running`
┣•Version➠ {VR}
┗━━━━━━━━━━━━━━━━━
Do `.ping` or `/alive` to check that the userbot is working
"""
sed = logging.getLogger("beastx")
async def add_bot(bot_token):
await bot.start(bot_token)
bot.me = await bot.get_me()
bot.uid = telethon.utils.get_peer_id(bot.me)
await sedmrunal.send_message(sur, MSG,
buttons=[
[Button.url("⭐Updates", url="https://t.me/BeastX_Userbot")],
[ Button.url("⚡Support",url="https://t.me/BeastX_Support")]
])
await beast(functions.channels.JoinChannelRequest(channel="@BeastX_Userbot"))
await beast(functions.channels.JoinChannelRequest(channel="@BeastX_Support"))
if len(argv) not in (1, 3, 4):
bot.disconnect()
else:
bot.tgbot = None
if Var.TG_BOT_USER_NAME_BF_HER is not None:
bot.tgbot = TelegramClient(
"TG_BOT_TOKEN", api_id=Var.APP_ID, api_hash=Var.API_HASH
).start(bot_token=Var.TG_BOT_TOKEN_BF_HER)
bot.loop.run_until_complete(add_bot(Var.TG_BOT_USER_NAME_BF_HER))
else:
bot.start()
async def a():
sed.info("Connecting...") ;
o = ""
la = 0
try:
await bot.start() ; sed.info("beastx connected") ; o = "client"
except:
sed.info("Telegram String Session Wrong or Expired Please Add new one ") ; quit(1)
import glob
async def a():
test1 = await bot.get_messages(plugin_channel, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
        mxo = test1[ixo].id ; await bot.download_media(await bot.get_messages(plugin_channel, ids=mxo), "beastx/modules/")
ar = glob.glob("beastx/modules/*.py")
f = len(ar)
sed.info(f" loading {f} modules it may take 1 minute please wait")
for i in ar:
br = os.path.basename(i)
cr = (os.path.splitext(br)[0])
import_module(f"beastx.modules.{cr}")
la += 1
sed.info(f" loaded {la}/{f} modules")
path = "beastx/modules/*.py"
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
load_module(shortname.replace(".py", ""))
if Config.ENABLE_ASSISTANTBOT == "ENABLE":
path = "beastx/modules/assistant/*.py"
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
start_assistant(shortname.replace(".py", ""))
sed.info("beastx And Assistant Bot Have Been Installed Successfully !")
sed.info("---------------------------------------")
sed.info("------------@BeastX_Userbot------------")
sed.info("---------------------------------------")
else:
sed.info("beastx Has Been Installed Sucessfully !")
sed.info("Hope you will enjoy")
#await bot.send_message(chat_id,MSG)
#else:
# sed.info("your Get_Msg disable")
if len(argv) not in (1, 3, 4):
bot.disconnect()
else:
bot.run_until_disconnected()
|
[
"os.path.basename",
"telethon.TelegramClient",
"pathlib.Path",
"os.path.splitext",
"glob.glob",
"telethon.tl.functions.channels.JoinChannelRequest",
"telethon.Button.url",
"logging.getLogger"
] |
[((1182, 1209), 'logging.getLogger', 'logging.getLogger', (['"""beastx"""'], {}), "('beastx')\n", (1199, 1209), False, 'import logging\n'), ((3482, 3497), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (3491, 3497), False, 'import glob\n'), ((3774, 3789), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (3783, 3789), False, 'import glob\n'), ((2914, 2946), 'glob.glob', 'glob.glob', (['"""beastx/modules/*.py"""'], {}), "('beastx/modules/*.py')\n", (2923, 2946), False, 'import glob\n'), ((3562, 3574), 'pathlib.Path', 'Path', (['f.name'], {}), '(f.name)\n', (3566, 3574), False, 'from pathlib import Path\n'), ((1702, 1766), 'telethon.tl.functions.channels.JoinChannelRequest', 'functions.channels.JoinChannelRequest', ([], {'channel': '"""@BeastX_Userbot"""'}), "(channel='@BeastX_Userbot')\n", (1739, 1766), False, 'from telethon.tl import functions\n'), ((1785, 1849), 'telethon.tl.functions.channels.JoinChannelRequest', 'functions.channels.JoinChannelRequest', ([], {'channel': '"""@BeastX_Support"""'}), "(channel='@BeastX_Support')\n", (1822, 1849), False, 'from telethon.tl import functions\n'), ((3156, 3175), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (3172, 3175), False, 'import os\n'), ((3866, 3878), 'pathlib.Path', 'Path', (['f.name'], {}), '(f.name)\n', (3870, 3878), False, 'from pathlib import Path\n'), ((2012, 2084), 'telethon.TelegramClient', 'TelegramClient', (['"""TG_BOT_TOKEN"""'], {'api_id': 'Var.APP_ID', 'api_hash': 'Var.API_HASH'}), "('TG_BOT_TOKEN', api_id=Var.APP_ID, api_hash=Var.API_HASH)\n", (2026, 2084), False, 'from telethon import TelegramClient\n'), ((3219, 3239), 'os.path.splitext', 'os.path.splitext', (['br'], {}), '(br)\n', (3235, 3239), False, 'import os\n'), ((1512, 1569), 'telethon.Button.url', 'Button.url', (['"""⭐Updates"""'], {'url': '"""https://t.me/BeastX_Userbot"""'}), "('⭐Updates', url='https://t.me/BeastX_Userbot')\n", (1522, 1569), False, 'from telethon import events, Button\n'), ((1601, 1658), 'telethon.Button.url', 'Button.url', (['"""⚡Support"""'], {'url': '"""https://t.me/BeastX_Support"""'}), "('⚡Support', url='https://t.me/BeastX_Support')\n", (1611, 1658), False, 'from telethon import events, Button\n')]
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='The Aim is to predict forest fire before it happens based on dataset that contains tree observations from four areas of the Roosevelt National Forest in Colorado. All observations are cartographic variables (no remote sensing) from 30 meter x 30 meter sections of forest. There are over half a million measurements total!',
author='<NAME>',
license='MIT',
)
|
[
"setuptools.find_packages"
] |
[((81, 96), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (94, 96), False, 'from setuptools import find_packages, setup\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
from refinery.units import arg, Unit
class xfcc(Unit):
"""
The cross frame chunk count unit! It computes the number of times a chunk occurs across several frames
    of input. It consumes all frames in the current frame tree and counts the number of times each item occurs. It
converts a frame tree of depth 2 into a new frame tree of depth 2 where the parent of every leaf has
this leaf as its only child. The leaves of this tree have been enriched with a meta variable containing
the number of times the corresponding chunk has occurred in the input frame tree.
"""
def __init__(
self,
variable: arg(help='The variable which is used as the accumulator') = 'count',
relative: arg.switch('-r', help='Normalize the accumulator to a number between 0 and 1.') = False
):
super().__init__(variable=variable, relative=relative)
self._trunk = None
self._store = collections.defaultdict(int)
def finish(self):
if self.args.relative and self._store:
maximum = max(self._store.values())
for k, (chunk, count) in enumerate(self._store.items()):
if self.args.relative:
count /= maximum
chunk._meta[self.args.variable] = count
chunk._path = chunk.path[:-2] + (0, k)
yield chunk
self._store.clear()
def _getcount(self, chunk):
try:
count = int(chunk.meta[self.args.variable])
except (AttributeError, KeyError, TypeError):
return 1
else:
return count
def filter(self, chunks):
it = iter(chunks)
try:
head = next(it)
except StopIteration:
return
if len(head.path) < 2:
self.log_warn(F'the current frame is nested {len(head.path)} layers deep, at least two layers are required.')
yield head
for item in it:
self.log_debug(repr(item))
yield item
return
trunk = head.path[:-2]
store = self._store
if trunk != self._trunk:
yield from self.finish()
self._trunk = trunk
store[head] += self._getcount(head)
for chunk in it:
store[chunk] += self._getcount(chunk)
|
[
"collections.defaultdict",
"refinery.units.arg.switch",
"refinery.units.arg"
] |
[((994, 1022), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (1017, 1022), False, 'import collections\n'), ((700, 757), 'refinery.units.arg', 'arg', ([], {'help': '"""The variable which is used as the accumulator"""'}), "(help='The variable which is used as the accumulator')\n", (703, 757), False, 'from refinery.units import arg, Unit\n'), ((787, 866), 'refinery.units.arg.switch', 'arg.switch', (['"""-r"""'], {'help': '"""Normalize the accumulator to a number between 0 and 1."""'}), "('-r', help='Normalize the accumulator to a number between 0 and 1.')\n", (797, 866), False, 'from refinery.units import arg, Unit\n')]
|
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from anime_search.base import AnimeSearch
plugin = AnimeSearch()
|
[
"anime_search.base.AnimeSearch"
] |
[((145, 158), 'anime_search.base.AnimeSearch', 'AnimeSearch', ([], {}), '()\n', (156, 158), False, 'from anime_search.base import AnimeSearch\n')]
|
# Generated by Django 2.2.7 on 2019-11-20 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rr', '0049_statistics'),
]
operations = [
migrations.AlterModelOptions(
name='statistics',
options={'ordering': ['-date']},
),
migrations.AddField(
model_name='serviceprovider',
name='jwks',
field=models.TextField(blank=True, verbose_name='JSON Web Key Set'),
),
migrations.AddField(
model_name='serviceprovider',
name='jwks_uri',
field=models.URLField(blank=True, max_length=255, verbose_name='URL for the JSON Web Key Set'),
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.AlterModelOptions",
"django.db.models.URLField"
] |
[((222, 307), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""statistics"""', 'options': "{'ordering': ['-date']}"}), "(name='statistics', options={'ordering': ['-date']}\n )\n", (250, 307), False, 'from django.db import migrations, models\n'), ((453, 514), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""JSON Web Key Set"""'}), "(blank=True, verbose_name='JSON Web Key Set')\n", (469, 514), False, 'from django.db import migrations, models\n'), ((645, 738), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'max_length': '(255)', 'verbose_name': '"""URL for the JSON Web Key Set"""'}), "(blank=True, max_length=255, verbose_name=\n 'URL for the JSON Web Key Set')\n", (660, 738), False, 'from django.db import migrations, models\n')]
|
# Author: <NAME>
# Datetime:2021/7/3
# Copyright belongs to the author.
# Please indicate the source for reprinting.
import platform
import os
from distutils.sysconfig import get_python_lib
from qpt.kernel.qlog import Logging
def init_wrapper(var=True):
def i_wrapper(func):
if var:
@property
def render(self):
if func.__name__ in self.memory:
out = self.memory[func.__name__]
else:
out = func(self)
self.memory[func.__name__] = out
return out
else:
def render(self, *args, **kwargs):
if func.__name__ in self.memory:
out = self.memory[func.__name__]
else:
out = func(self, *args, **kwargs)
self.memory[func.__name__] = out
return out
return render
return i_wrapper
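# Caching behaviour of the decorator above (illustrative note, not original code): with
# var=True the decorated method becomes a memoised property, e.g.
#   QPT_MEMORY.platform_bit   # first access calls platform.machine() and stores the result
#   QPT_MEMORY.platform_bit   # later accesses come from QPT_MEMORY.memory['platform_bit']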
class QPTMemory:
def __init__(self):
self.memory = dict()
def set_mem(self, name, variable):
self.memory[name] = variable
return variable
def free_mem(self, name):
self.memory.pop(name)
@init_wrapper()
def platform_bit(self):
arc = platform.machine()
Logging.debug(f"操作系统位数:{arc}")
return arc
@init_wrapper()
def platform_os(self):
p_os = platform.system()
Logging.debug(f"操作系统类型:{p_os}")
return p_os
@init_wrapper()
def site_packages_path(self):
site_package_path = os.path.abspath(get_python_lib())
return site_package_path
@init_wrapper()
def pip_tool(self):
from qpt.kernel.qinterpreter import PipTools
pip_tools = PipTools()
return pip_tools
@init_wrapper()
def get_win32con(self):
import win32con
return win32con
@init_wrapper()
def get_win32api(self):
import win32api
return win32api
@init_wrapper(var=False)
def get_env_vars(self, work_dir="."):
return get_env_vars(work_dir)
QPT_MEMORY = QPTMemory()
def check_bit():
arc = QPT_MEMORY.platform_bit
assert "64" in arc, "当前QPT不支持32位操作系统"
def check_os():
p_os = QPT_MEMORY.platform_os
assert "Windows" in p_os, "当前QPT只支持Windows系统"
IGNORE_ENV_FIELD = ["conda", "Conda", "Python", "python"]
def get_env_vars(work_dir="."):
"""
获取当前待设置的环境变量字典
:param work_dir:
:return: dict
"""
env_vars = dict()
# Set PATH ENV
path_env = os.environ.get("PATH").split(";")
pre_add_env = os.path.abspath("./Python/Lib/site-packages") + ";" + \
os.path.abspath("./Python/Lib") + ";" + \
os.path.abspath("./Python/Lib/ext") + ";" + \
os.path.abspath("./Python") + ";" + \
os.path.abspath("./Python/Scripts") + ";"
for pe in path_env:
if pe:
add_flag = True
for ief in IGNORE_ENV_FIELD:
if ief in pe:
add_flag = False
break
if add_flag:
pre_add_env += pe + ";"
env_vars["PATH"] = pre_add_env + \
"%SYSTEMROOT%/System32/WindowsPowerShell/v1.0;" + \
"C:/Windows/System32/WindowsPowerShell/v1.0;" + \
"%ProgramFiles%/WindowsPowerShell/Modules;" + \
"%SystemRoot%/system32/WindowsPowerShell/v1.0/Modules;" + \
f"{os.path.join(os.path.abspath(work_dir), 'opt/CUDA')};"
# Set PYTHON PATH ENV
env_vars["PYTHONPATH"] = os.path.abspath("./Python/Lib/site-packages") + ";" + \
work_dir + ";" + \
os.path.abspath("./Python")
os_env = os.environ.copy()
os_env.update(env_vars)
if QPT_MODE and QPT_MODE.lower() == "debug":
Logging.debug(msg="Python所识别到的环境变量如下:\n" +
"".join([_ek + ":" + _e_v + " \n" for _ek, _ev in env_vars.items()
for _e_v in _ev.split(";")]))
return os_env
PYTHON_IGNORE_DIRS = [".idea", ".git", ".github", "venv"]
# Python packages that are ignored
IGNORE_PACKAGES = ["virtualenv", "pip", "setuptools", "cpython"]
# QPT run mode: Run/Debug
QPT_MODE = os.getenv("QPT_MODE")
# Run mode detected by QPT: Run/local Run - used to warn developers and avoid package bloat
QPT_RUN_MODE = None
class CheckRun:
@staticmethod
def make_run_file(configs_path):
with open(os.path.join(configs_path, "run_act.lock"), "w") as f:
f.write("Run Done")
@staticmethod
def check_run_file(configs_path):
global QPT_RUN_MODE
if QPT_RUN_MODE is None:
QPT_RUN_MODE = os.path.exists(os.path.join(configs_path, "run_act.lock"))
return QPT_RUN_MODE
def check_all():
    # check the operating system
check_os()
    # check the architecture (bitness)
check_bit()
check_all()
|
[
"os.path.abspath",
"qpt.kernel.qinterpreter.PipTools",
"os.environ.copy",
"qpt.kernel.qlog.Logging.debug",
"distutils.sysconfig.get_python_lib",
"os.environ.get",
"platform.system",
"platform.machine",
"os.path.join",
"os.getenv"
] |
[((4290, 4311), 'os.getenv', 'os.getenv', (['"""QPT_MODE"""'], {}), "('QPT_MODE')\n", (4299, 4311), False, 'import os\n'), ((3794, 3811), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (3809, 3811), False, 'import os\n'), ((1253, 1271), 'platform.machine', 'platform.machine', ([], {}), '()\n', (1269, 1271), False, 'import platform\n'), ((1280, 1310), 'qpt.kernel.qlog.Logging.debug', 'Logging.debug', (['f"""操作系统位数:{arc}"""'], {}), "(f'操作系统位数:{arc}')\n", (1293, 1310), False, 'from qpt.kernel.qlog import Logging\n'), ((1393, 1410), 'platform.system', 'platform.system', ([], {}), '()\n', (1408, 1410), False, 'import platform\n'), ((1419, 1450), 'qpt.kernel.qlog.Logging.debug', 'Logging.debug', (['f"""操作系统类型:{p_os}"""'], {}), "(f'操作系统类型:{p_os}')\n", (1432, 1450), False, 'from qpt.kernel.qlog import Logging\n'), ((1739, 1749), 'qpt.kernel.qinterpreter.PipTools', 'PipTools', ([], {}), '()\n', (1747, 1749), False, 'from qpt.kernel.qinterpreter import PipTools\n'), ((3753, 3780), 'os.path.abspath', 'os.path.abspath', (['"""./Python"""'], {}), "('./Python')\n", (3768, 3780), False, 'import os\n'), ((1570, 1586), 'distutils.sysconfig.get_python_lib', 'get_python_lib', ([], {}), '()\n', (1584, 1586), False, 'from distutils.sysconfig import get_python_lib\n'), ((2527, 2549), 'os.environ.get', 'os.environ.get', (['"""PATH"""'], {}), "('PATH')\n", (2541, 2549), False, 'import os\n'), ((2833, 2868), 'os.path.abspath', 'os.path.abspath', (['"""./Python/Scripts"""'], {}), "('./Python/Scripts')\n", (2848, 2868), False, 'import os\n'), ((4468, 4510), 'os.path.join', 'os.path.join', (['configs_path', '"""run_act.lock"""'], {}), "(configs_path, 'run_act.lock')\n", (4480, 4510), False, 'import os\n'), ((4715, 4757), 'os.path.join', 'os.path.join', (['configs_path', '"""run_act.lock"""'], {}), "(configs_path, 'run_act.lock')\n", (4727, 4757), False, 'import os\n'), ((2777, 2804), 'os.path.abspath', 'os.path.abspath', (['"""./Python"""'], {}), "('./Python')\n", (2792, 2804), False, 'import os\n'), ((3522, 3547), 'os.path.abspath', 'os.path.abspath', (['work_dir'], {}), '(work_dir)\n', (3537, 3547), False, 'import os\n'), ((3620, 3665), 'os.path.abspath', 'os.path.abspath', (['"""./Python/Lib/site-packages"""'], {}), "('./Python/Lib/site-packages')\n", (3635, 3665), False, 'import os\n'), ((2713, 2748), 'os.path.abspath', 'os.path.abspath', (['"""./Python/Lib/ext"""'], {}), "('./Python/Lib/ext')\n", (2728, 2748), False, 'import os\n'), ((2653, 2684), 'os.path.abspath', 'os.path.abspath', (['"""./Python/Lib"""'], {}), "('./Python/Lib')\n", (2668, 2684), False, 'import os\n'), ((2579, 2624), 'os.path.abspath', 'os.path.abspath', (['"""./Python/Lib/site-packages"""'], {}), "('./Python/Lib/site-packages')\n", (2594, 2624), False, 'import os\n')]
|
import os
import allel
import h5py
import numpy as np
import sys
import time
from fvTools import *
if not len(sys.argv) in [13,15]:
sys.exit("usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n")
if len(sys.argv) == 15:
chrArmFileName, chrArm, chrLen, targetPop, winSize, numSubWins, maskFileName, unmaskedFracCutoff, sampleToPopFileName, ancestralArmFaFileName, statFileName, outfn, segmentStart, segmentEnd = sys.argv[1:]
segmentStart, segmentEnd = int(segmentStart), int(segmentEnd)
else:
chrArmFileName, chrArm, chrLen, targetPop, winSize, numSubWins, maskFileName, unmaskedFracCutoff, sampleToPopFileName, ancestralArmFaFileName, statFileName, outfn = sys.argv[1:]
segmentStart = None
unmaskedFracCutoff = float(unmaskedFracCutoff)
chrLen, winSize, numSubWins = int(chrLen), int(winSize), int(numSubWins)
assert winSize % numSubWins == 0 and numSubWins > 1
subWinSize = int(winSize/numSubWins)
def getSubWinBounds(chrLen, subWinSize):
lastSubWinEnd = chrLen - chrLen % subWinSize
lastSubWinStart = lastSubWinEnd - subWinSize + 1
subWinBounds = []
for subWinStart in range(1, lastSubWinStart+1, subWinSize):
subWinEnd = subWinStart + subWinSize - 1
subWinBounds.append((subWinStart, subWinEnd))
return subWinBounds
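# Worked example (added for clarity): getSubWinBounds(10, 5) -> [(1, 5), (6, 10)]
# (1-based inclusive sub-window bounds; a trailing partial window is dropped)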
def getSnpIndicesInSubWins(subWinSize, lastSubWinEnd, snpLocs):
subWinStart = 1
subWinEnd = subWinStart + subWinSize - 1
snpIndicesInSubWins = [[]]
for i in range(len(snpLocs)):
while snpLocs[i] <= lastSubWinEnd and not (snpLocs[i] >= subWinStart and snpLocs[i] <= subWinEnd):
subWinStart += subWinSize
subWinEnd += subWinSize
snpIndicesInSubWins.append([])
if snpLocs[i] <= lastSubWinEnd:
snpIndicesInSubWins[-1].append(i)
while subWinEnd < lastSubWinEnd:
snpIndicesInSubWins.append([])
subWinStart += subWinSize
subWinEnd += subWinSize
return snpIndicesInSubWins
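# Worked example (added for clarity): getSnpIndicesInSubWins(5, 10, [2, 3, 7]) -> [[0, 1], [2]]
# (SNP indices grouped by the 1-based sub-window that contains their position)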
chrArmFile = allel.read_vcf(chrArmFileName)
chroms = chrArmFile["variants/CHROM"]
positions = np.extract(chroms == chrArm, chrArmFile["variants/POS"])
if maskFileName.lower() in ["none", "false"]:
sys.stderr.write("Warning: a mask.fa file for the chr arm with all masked sites N'ed out is strongly recommended" +
" (pass in the reference to remove Ns at the very least)!\n")
unmasked = [True] * chrLen
else:
unmasked = readMaskDataForScan(maskFileName, chrArm)
assert len(unmasked) == chrLen
if statFileName.lower() in ["none", "false"]:
statFileName = None
samples = chrArmFile["samples"]
if not sampleToPopFileName.lower() in ["none", "false"]:
sampleToPop = readSampleToPopFile(sampleToPopFileName)
sampleIndicesToKeep = [i for i in range(len(samples)) if sampleToPop.get(samples[i], "popNotFound!") == targetPop]
else:
sampleIndicesToKeep = [i for i in range(len(samples))]
rawgenos = np.take(chrArmFile["calldata/GT"], [i for i in range(len(chroms)) if chroms[i] == chrArm], axis=0)
genos = allel.GenotypeArray(rawgenos)
refAlleles = np.extract(chroms == chrArm, chrArmFile['variants/REF'])
altAlleles = np.extract(chroms == chrArm, chrArmFile['variants/ALT'])
if segmentStart != None:
snpIndicesToKeep = [i for i in range(len(positions)) if segmentStart <= positions[i] <= segmentEnd]
positions = [positions[i] for i in snpIndicesToKeep]
refAlleles = [refAlleles[i] for i in snpIndicesToKeep]
altAlleles = [altAlleles[i] for i in snpIndicesToKeep]
genos = allel.GenotypeArray(genos.subset(sel0=snpIndicesToKeep))
genos = allel.GenotypeArray(genos.subset(sel1=sampleIndicesToKeep))
alleleCounts = genos.count_alleles()
#remove all but mono/biallelic unmasked sites
isBiallelic = alleleCounts.is_biallelic()
for i in range(len(isBiallelic)):
if not isBiallelic[i]:
unmasked[positions[i]-1] = False
#polarize
if not ancestralArmFaFileName.lower() in ["none", "false"]:
sys.stderr.write("polarizing snps\n")
ancArm = readFaArm(ancestralArmFaFileName, chrArm).upper()
startTime = time.clock()
#NOTE: mapping specifies which alleles to swap counts for based on polarization; leaves unpolarized snps alone
#NOTE: those snps need to be filtered later on (as done below)!
# this will also remove sites that could not be polarized
mapping, unmasked = polarizeSnps(unmasked, positions, refAlleles, altAlleles, ancArm)
sys.stderr.write("took %s seconds\n" %(time.clock()-startTime))
statNames = ["pi", "thetaW", "tajD", "thetaH", "fayWuH", "maxFDA", "HapCount", "H1", "H12", "H2/H1", "ZnS", "Omega", "distVar", "distSkew", "distKurt"]
else:
statNames = ["pi", "thetaW", "tajD", "HapCount", "H1", "H12", "H2/H1", "ZnS", "Omega", "distVar", "distSkew", "distKurt"]
snpIndicesToKeep = [i for i in range(len(positions)) if unmasked[positions[i]-1]]
genos = allel.GenotypeArray(genos.subset(sel0=snpIndicesToKeep))
positions = [positions[i] for i in snpIndicesToKeep]
alleleCounts = allel.AlleleCountsArray([[alleleCounts[i][0], max(alleleCounts[i][1:])] for i in snpIndicesToKeep])
if not ancestralArmFaFileName.lower() in ["none", "false"]:
mapping = [mapping[i] for i in snpIndicesToKeep]
alleleCounts = alleleCounts.map_alleles(mapping)
haps = genos.to_haplotypes()
subWinBounds = getSubWinBounds(chrLen, subWinSize)
precomputedStats = {} #not using this
header = "chrom classifiedWinStart classifiedWinEnd bigWinRange".split()
statHeader = "chrom start end".split()
for statName in statNames:
statHeader.append(statName)
for i in range(numSubWins):
header.append("%s_win%d" %(statName, i))
statHeader = "\t".join(statHeader)
header = "\t".join(header)
outFile=open(outfn,'w')
outFile.write(header+"\n")
statVals = {}
for statName in statNames:
statVals[statName] = []
startTime = time.clock()
goodSubWins = []
lastSubWinEnd = chrLen - chrLen % subWinSize
snpIndicesInSubWins = getSnpIndicesInSubWins(subWinSize, lastSubWinEnd, positions)
subWinIndex = 0
lastSubWinStart = lastSubWinEnd - subWinSize + 1
if statFileName:
statFile = open(statFileName, "w")
statFile.write(statHeader + "\n")
for subWinStart in range(1, lastSubWinStart+1, subWinSize):
subWinEnd = subWinStart + subWinSize - 1
unmaskedFrac = unmasked[subWinStart-1:subWinEnd].count(True)/float(subWinEnd-subWinStart+1)
if segmentStart == None or subWinStart >= segmentStart and subWinEnd <= segmentEnd:
sys.stderr.write("%d-%d num unmasked snps: %d; unmasked frac: %f\n" %(subWinStart, subWinEnd, len(snpIndicesInSubWins[subWinIndex]), unmaskedFrac))
if len(snpIndicesInSubWins[subWinIndex]) > 0 and unmaskedFrac >= unmaskedFracCutoff:
hapsInSubWin = allel.HaplotypeArray(haps.subset(sel0=snpIndicesInSubWins[subWinIndex]))
statValStr = []
for statName in statNames:
calcAndAppendStatValForScan(alleleCounts, positions, statName, subWinStart, \
subWinEnd, statVals, subWinIndex, hapsInSubWin, unmasked, precomputedStats)
statValStr.append("%s: %s" %(statName, statVals[statName][-1]))
sys.stderr.write("\t".join(statValStr) + "\n")
goodSubWins.append(True)
if statFileName:
statFile.write("\t".join([chrArm, str(subWinStart), str(subWinEnd)] + [str(statVals[statName][-1]) for statName in statNames]) + "\n")
else:
for statName in statNames:
appendStatValsForMonomorphicForScan(statName, statVals, subWinIndex)
goodSubWins.append(False)
if goodSubWins[-numSubWins:].count(True) == numSubWins:
outVec = []
for statName in statNames:
outVec += normalizeFeatureVec(statVals[statName][-numSubWins:])
midSubWinEnd = subWinEnd - subWinSize*(numSubWins/2)
midSubWinStart = midSubWinEnd-subWinSize+1
outFile.write("%s\t%d\t%d\t%d-%d\t" %(chrArm, midSubWinStart, midSubWinEnd, subWinEnd-winSize+1, subWinEnd) + "\t".join([str(x) for x in outVec]))
outFile.write('\n')
subWinIndex += 1
if statFileName:
statFile.close()
outFile.close()
sys.stderr.write("completed in %g seconds\n" %(time.clock()-startTime))
|
[
"numpy.extract",
"allel.read_vcf",
"time.clock",
"sys.stderr.write",
"allel.GenotypeArray",
"sys.exit"
] |
[((2142, 2172), 'allel.read_vcf', 'allel.read_vcf', (['chrArmFileName'], {}), '(chrArmFileName)\n', (2156, 2172), False, 'import allel\n'), ((2223, 2279), 'numpy.extract', 'np.extract', (['(chroms == chrArm)', "chrArmFile['variants/POS']"], {}), "(chroms == chrArm, chrArmFile['variants/POS'])\n", (2233, 2279), True, 'import numpy as np\n'), ((3169, 3198), 'allel.GenotypeArray', 'allel.GenotypeArray', (['rawgenos'], {}), '(rawgenos)\n', (3188, 3198), False, 'import allel\n'), ((3212, 3268), 'numpy.extract', 'np.extract', (['(chroms == chrArm)', "chrArmFile['variants/REF']"], {}), "(chroms == chrArm, chrArmFile['variants/REF'])\n", (3222, 3268), True, 'import numpy as np\n'), ((3282, 3338), 'numpy.extract', 'np.extract', (['(chroms == chrArm)', "chrArmFile['variants/ALT']"], {}), "(chroms == chrArm, chrArmFile['variants/ALT'])\n", (3292, 3338), True, 'import numpy as np\n'), ((5953, 5965), 'time.clock', 'time.clock', ([], {}), '()\n', (5963, 5965), False, 'import time\n'), ((137, 384), 'sys.exit', 'sys.exit', (['"""usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n"""'], {}), '(\n """usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n"""\n )\n', (145, 384), False, 'import sys\n'), ((2331, 2521), 'sys.stderr.write', 'sys.stderr.write', (['("Warning: a mask.fa file for the chr arm with all masked sites N\'ed out is strongly recommended"\n + """ (pass in the reference to remove Ns at the very least)!\n""")'], {}), '(\n "Warning: a mask.fa file for the chr arm with all masked sites N\'ed out is strongly recommended"\n + """ (pass in the reference to remove Ns at the very least)!\n""")\n', (2347, 2521), False, 'import sys\n'), ((4083, 4120), 'sys.stderr.write', 'sys.stderr.write', (['"""polarizing snps\n"""'], {}), "('polarizing snps\\n')\n", (4099, 4120), False, 'import sys\n'), ((4200, 4212), 'time.clock', 'time.clock', ([], {}), '()\n', (4210, 4212), False, 'import time\n'), ((8246, 8258), 'time.clock', 'time.clock', ([], {}), '()\n', (8256, 8258), False, 'import time\n'), ((4591, 4603), 'time.clock', 'time.clock', ([], {}), '()\n', (4601, 4603), False, 'import time\n')]
|
# python getting_pixels.py --image obama.jpg
# import the necessary packages
import argparse
from collections import defaultdict
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
# load the image, grab its spatial dimensions (width and height),
# and then display the original image to our screen
image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
cv2.imshow("Original", image)
# images are simply NumPy arrays -- with the origin (0, 0) located at
# the top-left of the image
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r, g, b))
# access the pixel located at x=100, y=5
(b, g, r) = image[5, 100]
print("Pixel at (100, 5) - Red: {}, Green: {}, Blue: {}".format(r, g, b))
# access the pixel at x=100, y=50 and set it to blue (OpenCV stores pixels in BGR
# order); note that rebinding the (b, g, r) tuple alone would not modify the image,
# so we write the new value into the array and then read it back
image[50, 100] = (255, 0, 0)
(b, g, r) = image[50, 100]
print("Pixel at (100, 50) - Red: {}, Green: {}, Blue: {}".format(r, g, b))
# compute the center of the image, which is simply the width and height
# divided by two
(cX, cY) = (w // 2, h // 2)
print ("the centre of the image is cX: {}, cY: {}".format(cX, cY))
cv2.waitKey(0)
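# --- a minimal follow-on sketch (not part of the original script) ---
# Because the image is just a NumPy array, the same indexing extends from single
# pixels to rectangular regions via slicing, with the order image[y0:y1, x0:x1].
# This assumes the loaded image is at least 100x100 pixels.
corner = image[0:100, 0:100]
cv2.imshow("Top-left corner", corner)
image[0:100, 0:100] = (0, 255, 0)  # paint that region green (BGR order)
cv2.imshow("Updated", image)
cv2.waitKey(0)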
|
[
"cv2.waitKey",
"cv2.imread",
"cv2.imshow",
"argparse.ArgumentParser"
] |
[((211, 236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (234, 236), False, 'import argparse\n'), ((471, 496), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (481, 496), False, 'import cv2\n'), ((524, 553), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (534, 553), False, 'import cv2\n'), ((1286, 1300), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1297, 1300), False, 'import cv2\n')]
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import removals
from keystoneclient import base
from keystoneclient import exceptions
from keystoneclient.i18n import _
class Role(base.Resource):
"""Represents an Identity role.
Attributes:
* id: a uuid that identifies the role
* name: user-facing identifier
* domain: optional domain for the role
"""
pass
class InferenceRule(base.Resource):
"""Represents a rule that states one role implies another.
Attributes:
* prior_role: this role implies the other
* implied_role: this role is implied by the other
"""
pass
class RoleManager(base.CrudManager):
"""Manager class for manipulating Identity roles."""
resource_class = Role
collection_key = 'roles'
key = 'role'
deprecation_msg = 'keystoneclient.v3.roles.InferenceRuleManager'
def _role_grants_base_url(self, user, group, system, domain, project,
use_inherit_extension):
# When called, we have already checked that only one of user & group
# and one of domain & project have been specified
params = {}
if project:
params['project_id'] = base.getid(project)
base_url = '/projects/%(project_id)s'
elif domain:
params['domain_id'] = base.getid(domain)
base_url = '/domains/%(domain_id)s'
elif system:
if system == 'all':
base_url = '/system'
else:
# NOTE(lbragstad): If we've made it this far, a user is
# attempting to do something with system scope that isn't
# supported yet (e.g. 'all' is currently the only supported
# system scope). In the future that may change but until then
# we should fail like we would if a user provided a bogus
# project name or domain ID.
msg = _("Only a system scope of 'all' is currently supported")
raise exceptions.ValidationError(msg)
if use_inherit_extension:
base_url = '/OS-INHERIT' + base_url
if user:
params['user_id'] = base.getid(user)
base_url += '/users/%(user_id)s'
elif group:
params['group_id'] = base.getid(group)
base_url += '/groups/%(group_id)s'
return base_url % params
def _enforce_mutually_exclusive_group(self, system, domain, project):
if not system:
if domain and project:
msg = _('Specify either a domain or project, not both')
raise exceptions.ValidationError(msg)
elif not (domain or project):
msg = _('Must specify either system, domain, or project')
raise exceptions.ValidationError(msg)
elif system:
if domain and project:
msg = _(
'Specify either system, domain, or project, not all three.'
)
raise exceptions.ValidationError(msg)
if domain:
msg = _('Specify either system or a domain, not both')
raise exceptions.ValidationError(msg)
if project:
msg = _('Specify either a system or project, not both')
raise exceptions.ValidationError(msg)
def _require_user_xor_group(self, user, group):
if user and group:
msg = _('Specify either a user or group, not both')
raise exceptions.ValidationError(msg)
elif not (user or group):
msg = _('Must specify either a user or group')
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, **kwargs):
"""Create a role.
:param str name: the name of the role.
:param domain: the domain of the role. If a value is passed it is a
domain-scoped role, otherwise it's a global role.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param kwargs: any other attribute provided will be passed to the
server.
:returns: the created role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
domain_id = None
if domain:
domain_id = base.getid(domain)
return super(RoleManager, self).create(
name=name,
domain_id=domain_id,
**kwargs)
def get(self, role):
"""Retrieve a role.
:param role: the role to be retrieved from the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: the specified role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
return super(RoleManager, self).get(role_id=base.getid(role))
def list(self, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""List roles and role grants.
:param user: filter in role grants for the specified user on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: filter in role grants for the specified group on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param domain: filter in role grants on the specified domain. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: filter in role grants on the specified project. Either
user or group must be specified. Project, domain and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will filter roles on.
:returns: a list of roles.
:rtype: list of :class:`keystoneclient.v3.roles.Role`
"""
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
if user or group:
self._require_user_xor_group(user, group)
self._enforce_mutually_exclusive_group(system, domain, project)
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited
)
return super(RoleManager, self).list(base_url=base_url,
**kwargs)
return super(RoleManager, self).list(**kwargs)
def update(self, role, name=None, **kwargs):
"""Update a role.
:param role: the role to be updated on the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param str name: the new name of the role.
:param kwargs: any other attribute provided will be passed to server.
:returns: the updated role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
return super(RoleManager, self).update(
role_id=base.getid(role),
name=name,
**kwargs)
def delete(self, role):
"""Delete a role.
        When a role is deleted, all role inferences that have the deleted role
        as their prior role are deleted as well.
:param role: the role to be deleted on the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: Response object with 204 status.
:rtype: :class:`requests.models.Response`
"""
return super(RoleManager, self).delete(
role_id=base.getid(role))
def grant(self, role, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""Grant a role to a user or group on a domain or project.
:param role: the role to be granted on the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param user: the specified user to have the role granted on a resource.
Domain or project must be specified. User and group are
mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: the specified group to have the role granted on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param system: system information to grant the role on. Project,
domain, and system are mutually exclusive.
:type system: str
:param domain: the domain in which the role will be granted. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: the project in which the role will be granted. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will be passed to server.
:returns: the granted role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
self._enforce_mutually_exclusive_group(system, domain, project)
self._require_user_xor_group(user, group)
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited)
return super(RoleManager, self).put(base_url=base_url,
role_id=base.getid(role),
**kwargs)
def check(self, role, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""Check if a user or group has a role on a domain or project.
:param user: check for role grants for the specified user on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: check for role grants for the specified group on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param system: check for role grants on the system. Project, domain,
and system are mutually exclusive.
:type system: str
:param domain: check for role grants on the specified domain. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: check for role grants on the specified project. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will be passed to server.
:returns: the specified role returned from server if it exists.
:rtype: :class:`keystoneclient.v3.roles.Role`
:returns: Response object with 204 status if specified role
doesn't exist.
:rtype: :class:`requests.models.Response`
"""
self._enforce_mutually_exclusive_group(system, domain, project)
self._require_user_xor_group(user, group)
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited)
return super(RoleManager, self).head(
base_url=base_url,
role_id=base.getid(role),
os_inherit_extension_inherited=os_inherit_extension_inherited,
**kwargs)
def revoke(self, role, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""Revoke a role from a user or group on a domain or project.
:param user: revoke role grants for the specified user on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: revoke role grants for the specified group on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param system: revoke role grants on the system. Project, domain, and
system are mutually exclusive.
:type system: str
:param domain: revoke role grants on the specified domain. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: revoke role grants on the specified project. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will be passed to server.
:returns: the revoked role returned from server.
:rtype: list of :class:`keystoneclient.v3.roles.Role`
"""
self._enforce_mutually_exclusive_group(system, domain, project)
self._require_user_xor_group(user, group)
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited)
return super(RoleManager, self).delete(
base_url=base_url,
role_id=base.getid(role),
os_inherit_extension_inherited=os_inherit_extension_inherited,
**kwargs)
@removals.remove(message='Use %s.create instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def create_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).create(prior_role,
implied_role)
@removals.remove(message='Use %s.delete instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def delete_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).delete(prior_role,
implied_role)
@removals.remove(message='Use %s.get instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def get_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).get(prior_role,
implied_role)
@removals.remove(message='Use %s.check instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def check_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).check(prior_role,
implied_role)
@removals.remove(message='Use %s.list_inference_roles' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def list_role_inferences(self, **kwargs):
return InferenceRuleManager(self.client).list_inference_roles()
class InferenceRuleManager(base.CrudManager):
"""Manager class for manipulating Identity inference rules."""
resource_class = InferenceRule
collection_key = 'role_inferences'
key = 'role_inference'
def _implied_role_url_tail(self, prior_role, implied_role):
base_url = ('/%(prior_role_id)s/implies/%(implied_role_id)s' %
{'prior_role_id': base.getid(prior_role),
'implied_role_id': base.getid(implied_role)})
return base_url
def create(self, prior_role, implied_role):
"""Create an inference rule.
An inference rule is comprised of two roles, a prior role and an
implied role. The prior role will imply the implied role.
Valid HTTP return codes:
* 201: Resource is created successfully
* 404: A role cannot be found
* 409: The inference rule already exists
:param prior_role: the role which implies ``implied_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param implied_role: the role which is implied by ``prior_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: a newly created role inference returned from server.
:rtype: :class:`keystoneclient.v3.roles.InferenceRule`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
_resp, body = self.client.put("/roles" + url_tail)
return self._prepare_return_value(
_resp, self.resource_class(self, body['role_inference']))
def delete(self, prior_role, implied_role):
"""Delete an inference rule.
When deleting an inference rule, both roles are required. Note that
neither role is deleted, only the inference relationship is dissolved.
Valid HTTP return codes:
* 204: Delete request is accepted
* 404: A role cannot be found
:param prior_role: the role which implies ``implied_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param implied_role: the role which is implied by ``prior_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: Response object with 204 status.
:rtype: :class:`requests.models.Response`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
return self._delete("/roles" + url_tail)
def get(self, prior_role, implied_role):
"""Retrieve an inference rule.
Valid HTTP return codes:
* 200: Inference rule is returned
* 404: A role cannot be found
:param prior_role: the role which implies ``implied_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param implied_role: the role which is implied by ``prior_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: the specified role inference returned from server.
:rtype: :class:`keystoneclient.v3.roles.InferenceRule`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
_resp, body = self.client.get("/roles" + url_tail)
return self._prepare_return_value(
_resp, self.resource_class(self, body['role_inference']))
def list(self, prior_role):
"""List all roles that a role may imply.
Valid HTTP return codes:
* 200: List of inference rules are returned
* 404: A role cannot be found
:param prior_role: the role which implies ``implied_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: the specified role inference returned from server.
:rtype: :class:`keystoneclient.v3.roles.InferenceRule`
"""
url_tail = ('/%s/implies' % base.getid(prior_role))
_resp, body = self.client.get("/roles" + url_tail)
return self._prepare_return_value(
_resp, self.resource_class(self, body['role_inference']))
def check(self, prior_role, implied_role):
"""Check if an inference rule exists.
Valid HTTP return codes:
* 204: The rule inference exists
* 404: A role cannot be found
:param prior_role: the role which implies ``implied_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param implied_role: the role which is implied by ``prior_role``.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: response object with 204 status returned from server.
:rtype: :class:`requests.models.Response`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
return self._head("/roles" + url_tail)
def list_inference_roles(self):
"""List all rule inferences.
Valid HTTP return codes:
* 200: All inference rules are returned
:param kwargs: attributes provided will be passed to the server.
:returns: a list of inference rules.
:rtype: list of :class:`keystoneclient.v3.roles.InferenceRule`
"""
return super(InferenceRuleManager, self).list()
def update(self, **kwargs):
raise exceptions.MethodNotImplemented(
_('Update not supported for rule inferences'))
def find(self, **kwargs):
raise exceptions.MethodNotImplemented(
_('Find not supported for rule inferences'))
def put(self, **kwargs):
raise exceptions.MethodNotImplemented(
_('Put not supported for rule inferences'))
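# A rough usage sketch (illustrative only; the client construction is an assumption and
# is not part of this module). With an authenticated keystoneclient.v3 client, the
# managers above are typically reached as client.roles and client.inference_rules:
#
#     role = client.roles.create(name='observer')
#     client.roles.grant(role, user=some_user, project=some_project)
#     client.roles.check(role, user=some_user, project=some_project)
#     client.inference_rules.create(prior_role=admin_role, implied_role=role)
#     client.roles.revoke(role, user=some_user, project=some_project)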
|
[
"keystoneclient.exceptions.ValidationError",
"keystoneclient.base.getid",
"keystoneclient.i18n._",
"debtcollector.removals.remove"
] |
[((17591, 17705), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': "('Use %s.create instead.' % deprecation_msg)", 'version': '"""3.9.0"""', 'removal_version': '"""4.0.0"""'}), "(message='Use %s.create instead.' % deprecation_msg, version\n ='3.9.0', removal_version='4.0.0')\n", (17606, 17705), False, 'from debtcollector import removals\n'), ((17932, 18046), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': "('Use %s.delete instead.' % deprecation_msg)", 'version': '"""3.9.0"""', 'removal_version': '"""4.0.0"""'}), "(message='Use %s.delete instead.' % deprecation_msg, version\n ='3.9.0', removal_version='4.0.0')\n", (17947, 18046), False, 'from debtcollector import removals\n'), ((18273, 18384), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': "('Use %s.get instead.' % deprecation_msg)", 'version': '"""3.9.0"""', 'removal_version': '"""4.0.0"""'}), "(message='Use %s.get instead.' % deprecation_msg, version=\n '3.9.0', removal_version='4.0.0')\n", (18288, 18384), False, 'from debtcollector import removals\n'), ((18602, 18715), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': "('Use %s.check instead.' % deprecation_msg)", 'version': '"""3.9.0"""', 'removal_version': '"""4.0.0"""'}), "(message='Use %s.check instead.' % deprecation_msg, version=\n '3.9.0', removal_version='4.0.0')\n", (18617, 18715), False, 'from debtcollector import removals\n'), ((18939, 19057), 'debtcollector.removals.remove', 'removals.remove', ([], {'message': "('Use %s.list_inference_roles' % deprecation_msg)", 'version': '"""3.9.0"""', 'removal_version': '"""4.0.0"""'}), "(message='Use %s.list_inference_roles' % deprecation_msg,\n version='3.9.0', removal_version='4.0.0')\n", (18954, 19057), False, 'from debtcollector import removals\n'), ((1862, 1881), 'keystoneclient.base.getid', 'base.getid', (['project'], {}), '(project)\n', (1872, 1881), False, 'from keystoneclient import base\n'), ((2847, 2863), 'keystoneclient.base.getid', 'base.getid', (['user'], {}), '(user)\n', (2857, 2863), False, 'from keystoneclient import base\n'), ((4119, 4164), 'keystoneclient.i18n._', '_', (['"""Specify either a user or group, not both"""'], {}), "('Specify either a user or group, not both')\n", (4120, 4164), False, 'from keystoneclient.i18n import _\n'), ((4183, 4214), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (4209, 4214), False, 'from keystoneclient import exceptions\n'), ((5002, 5020), 'keystoneclient.base.getid', 'base.getid', (['domain'], {}), '(domain)\n', (5012, 5020), False, 'from keystoneclient import base\n'), ((23051, 23073), 'keystoneclient.base.getid', 'base.getid', (['prior_role'], {}), '(prior_role)\n', (23061, 23073), False, 'from keystoneclient import base\n'), ((24505, 24550), 'keystoneclient.i18n._', '_', (['"""Update not supported for rule inferences"""'], {}), "('Update not supported for rule inferences')\n", (24506, 24550), False, 'from keystoneclient.i18n import _\n'), ((24642, 24685), 'keystoneclient.i18n._', '_', (['"""Find not supported for rule inferences"""'], {}), "('Find not supported for rule inferences')\n", (24643, 24685), False, 'from keystoneclient.i18n import _\n'), ((24776, 24818), 'keystoneclient.i18n._', '_', (['"""Put not supported for rule inferences"""'], {}), "('Put not supported for rule inferences')\n", (24777, 24818), False, 'from keystoneclient.i18n import _\n'), ((1987, 2005), 'keystoneclient.base.getid', 'base.getid', (['domain'], {}), '(domain)\n', 
(1997, 2005), False, 'from keystoneclient import base\n'), ((2962, 2979), 'keystoneclient.base.getid', 'base.getid', (['group'], {}), '(group)\n', (2972, 2979), False, 'from keystoneclient import base\n'), ((3216, 3265), 'keystoneclient.i18n._', '_', (['"""Specify either a domain or project, not both"""'], {}), "('Specify either a domain or project, not both')\n", (3217, 3265), False, 'from keystoneclient.i18n import _\n'), ((3288, 3319), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (3314, 3319), False, 'from keystoneclient import exceptions\n'), ((4267, 4307), 'keystoneclient.i18n._', '_', (['"""Must specify either a user or group"""'], {}), "('Must specify either a user or group')\n", (4268, 4307), False, 'from keystoneclient.i18n import _\n'), ((4326, 4357), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (4352, 4357), False, 'from keystoneclient import exceptions\n'), ((5510, 5526), 'keystoneclient.base.getid', 'base.getid', (['role'], {}), '(role)\n', (5520, 5526), False, 'from keystoneclient import base\n'), ((8584, 8600), 'keystoneclient.base.getid', 'base.getid', (['role'], {}), '(role)\n', (8594, 8600), False, 'from keystoneclient import base\n'), ((9136, 9152), 'keystoneclient.base.getid', 'base.getid', (['role'], {}), '(role)\n', (9146, 9152), False, 'from keystoneclient import base\n'), ((11901, 11917), 'keystoneclient.base.getid', 'base.getid', (['role'], {}), '(role)\n', (11911, 11917), False, 'from keystoneclient import base\n'), ((14750, 14766), 'keystoneclient.base.getid', 'base.getid', (['role'], {}), '(role)\n', (14760, 14766), False, 'from keystoneclient import base\n'), ((17470, 17486), 'keystoneclient.base.getid', 'base.getid', (['role'], {}), '(role)\n', (17480, 17486), False, 'from keystoneclient import base\n'), ((19584, 19606), 'keystoneclient.base.getid', 'base.getid', (['prior_role'], {}), '(prior_role)\n', (19594, 19606), False, 'from keystoneclient import base\n'), ((19648, 19672), 'keystoneclient.base.getid', 'base.getid', (['implied_role'], {}), '(implied_role)\n', (19658, 19672), False, 'from keystoneclient import base\n'), ((3384, 3435), 'keystoneclient.i18n._', '_', (['"""Must specify either system, domain, or project"""'], {}), "('Must specify either system, domain, or project')\n", (3385, 3435), False, 'from keystoneclient.i18n import _\n'), ((3458, 3489), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (3484, 3489), False, 'from keystoneclient import exceptions\n'), ((3568, 3630), 'keystoneclient.i18n._', '_', (['"""Specify either system, domain, or project, not all three."""'], {}), "('Specify either system, domain, or project, not all three.')\n", (3569, 3630), False, 'from keystoneclient.i18n import _\n'), ((3691, 3722), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (3717, 3722), False, 'from keystoneclient import exceptions\n'), ((3768, 3816), 'keystoneclient.i18n._', '_', (['"""Specify either system or a domain, not both"""'], {}), "('Specify either system or a domain, not both')\n", (3769, 3816), False, 'from keystoneclient.i18n import _\n'), ((3839, 3870), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (3865, 3870), False, 'from keystoneclient import exceptions\n'), ((3917, 3966), 'keystoneclient.i18n._', '_', (['"""Specify either a system or project, not both"""'], {}), 
"('Specify either a system or project, not both')\n", (3918, 3966), False, 'from keystoneclient.i18n import _\n'), ((3989, 4020), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (4015, 4020), False, 'from keystoneclient import exceptions\n'), ((2603, 2659), 'keystoneclient.i18n._', '_', (['"""Only a system scope of \'all\' is currently supported"""'], {}), '("Only a system scope of \'all\' is currently supported")\n', (2604, 2659), False, 'from keystoneclient.i18n import _\n'), ((2682, 2713), 'keystoneclient.exceptions.ValidationError', 'exceptions.ValidationError', (['msg'], {}), '(msg)\n', (2708, 2713), False, 'from keystoneclient import exceptions\n')]
|
import time
import uuid
from pathlib import Path
from flask import Blueprint, abort, jsonify, request, url_for
from src import logger
from src.api.helpers import add
bp = Blueprint("api", __name__)
@bp.route("/ping")
def ping():
return jsonify({"status": "success", "message": "pong"})
@bp.route("/add", methods=["POST"])
def add_job():
"""
Runs sum.
Expected format of response:
{
"sum": "<sum of a and b>"
}
"""
logger.info("Serving add endpoint.")
data = request.json
logger.info(f"Calling celery worker with arguments {data}.")
add.delay(data)
return jsonify({'status': 'created'}), 201
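# For context, a minimal sketch of what the imported `add` task could look like in
# src/api/helpers.py (an assumption; that module is not shown here). Any callable
# registered as a Celery task exposes .delay() for asynchronous invocation:
#
#     from celery import shared_task
#
#     @shared_task
#     def add(data):
#         # compute the sum of the two submitted operands (keys are assumptions)
#         return data["a"] + data["b"]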
|
[
"flask.jsonify",
"flask.Blueprint",
"src.api.helpers.add.delay",
"src.logger.info"
] |
[((174, 200), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {}), "('api', __name__)\n", (183, 200), False, 'from flask import Blueprint, abort, jsonify, request, url_for\n'), ((245, 294), 'flask.jsonify', 'jsonify', (["{'status': 'success', 'message': 'pong'}"], {}), "({'status': 'success', 'message': 'pong'})\n", (252, 294), False, 'from flask import Blueprint, abort, jsonify, request, url_for\n'), ((461, 497), 'src.logger.info', 'logger.info', (['"""Serving add endpoint."""'], {}), "('Serving add endpoint.')\n", (472, 497), False, 'from src import logger\n'), ((526, 586), 'src.logger.info', 'logger.info', (['f"""Calling celery worker with arguments {data}."""'], {}), "(f'Calling celery worker with arguments {data}.')\n", (537, 586), False, 'from src import logger\n'), ((591, 606), 'src.api.helpers.add.delay', 'add.delay', (['data'], {}), '(data)\n', (600, 606), False, 'from src.api.helpers import add\n'), ((618, 648), 'flask.jsonify', 'jsonify', (["{'status': 'created'}"], {}), "({'status': 'created'})\n", (625, 648), False, 'from flask import Blueprint, abort, jsonify, request, url_for\n')]
|
import io
import json
from copy import deepcopy
import GetAwayUsers
import demistomock as demisto
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
away_user_data = util_load_json('test_data/away_user.json')
def test_script_valid(mocker):
"""
Given:
When:
- Calling to GetAwayUsers Script.
Then:
- Ensure expected outputs are returned.
"""
from GetAwayUsers import main
return_results_mock = mocker.patch.object(GetAwayUsers, 'return_results')
away_user = away_user_data
not_away_user = deepcopy(away_user_data)
not_away_user['isAway'] = False
mocker.patch.object(demisto, 'executeCommand', return_value=[{'Type': '1', 'Contents': [away_user, not_away_user]}])
main()
command_results = return_results_mock.call_args[0][0]
assert command_results.outputs == [{'email': '',
'id': 'admin',
'name': 'Admin',
'phone': '+650-123456',
'roles': {'demisto': ['Administrator']},
'username': 'admin'}]
def test_script_invalid(mocker):
"""
Given:
When:
- Calling to GetAwayUsers Script. Error during the demisto.executeCommand to getUsers.
Then:
- Ensure error is returned.
"""
from GetAwayUsers import main
error_entry_type: int = 4
mocker.patch.object(GetAwayUsers, 'return_error')
mocker.patch.object(demisto, 'error')
away_user = away_user_data
not_away_user = deepcopy(away_user_data)
not_away_user['isAway'] = False
mocker.patch.object(demisto, 'executeCommand',
return_value=[{'Type': error_entry_type, 'Contents': [away_user, not_away_user]}])
main()
assert GetAwayUsers.return_error.called
|
[
"copy.deepcopy",
"GetAwayUsers.main",
"io.open"
] |
[((611, 635), 'copy.deepcopy', 'deepcopy', (['away_user_data'], {}), '(away_user_data)\n', (619, 635), False, 'from copy import deepcopy\n'), ((797, 803), 'GetAwayUsers.main', 'main', ([], {}), '()\n', (801, 803), False, 'from GetAwayUsers import main\n'), ((1653, 1677), 'copy.deepcopy', 'deepcopy', (['away_user_data'], {}), '(away_user_data)\n', (1661, 1677), False, 'from copy import deepcopy\n'), ((1876, 1882), 'GetAwayUsers.main', 'main', ([], {}), '()\n', (1880, 1882), False, 'from GetAwayUsers import main\n'), ((137, 178), 'io.open', 'io.open', (['path'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(path, mode='r', encoding='utf-8')\n", (144, 178), False, 'import io\n')]
|
import os
from datetime import datetime
import jinja2
from flask import Flask, redirect, render_template
from raven.contrib.flask import Sentry
from werkzeug.middleware.proxy_fix import ProxyFix
from config import load_django
from api import (admin_api, copy_study_api, dashboard_api, data_access_api, data_pipeline_api,
mobile_api, participant_administration, survey_api)
from config.settings import SENTRY_ELASTIC_BEANSTALK_DSN, SENTRY_JAVASCRIPT_DSN
from libs.admin_authentication import is_logged_in
from libs.security import set_secret_key
from pages import (admin_pages, data_access_web_form, mobile_pages, survey_designer,
system_admin_pages)
def subdomain(directory):
app = Flask(__name__, static_folder=directory + "/static")
set_secret_key(app)
loader = [app.jinja_loader, jinja2.FileSystemLoader(directory + "/templates")]
app.jinja_loader = jinja2.ChoiceLoader(loader)
app.wsgi_app = ProxyFix(app.wsgi_app)
return app
# Register pages here
app = subdomain("frontend")
app.jinja_env.globals['current_year'] = datetime.now().strftime('%Y')
app.register_blueprint(mobile_api.mobile_api)
app.register_blueprint(admin_pages.admin_pages)
app.register_blueprint(mobile_pages.mobile_pages)
app.register_blueprint(system_admin_pages.system_admin_pages)
app.register_blueprint(survey_designer.survey_designer)
app.register_blueprint(admin_api.admin_api)
app.register_blueprint(participant_administration.participant_administration)
app.register_blueprint(survey_api.survey_api)
app.register_blueprint(data_access_api.data_access_api)
app.register_blueprint(data_access_web_form.data_access_web_form)
app.register_blueprint(copy_study_api.copy_study_api)
app.register_blueprint(data_pipeline_api.data_pipeline_api)
app.register_blueprint(dashboard_api.dashboard_api)
# Don't set up Sentry for local development
if os.environ['DJANGO_DB_ENV'] != 'local':
sentry = Sentry(app, dsn=SENTRY_ELASTIC_BEANSTALK_DSN)
@app.route("/<page>.html")
def strip_dot_html(page):
# Strips away the dot html from pages
return redirect("/%s" % page)
@app.context_processor
def inject_dict_for_all_templates():
return {"SENTRY_JAVASCRIPT_DSN": SENTRY_JAVASCRIPT_DSN}
# Extra Production settings
if not __name__ == '__main__':
# Points our custom 404 page (in /frontend/templates) to display on a 404 error
@app.errorhandler(404)
def e404(e):
return render_template("404.html", is_logged_in=is_logged_in()), 404
# Extra Debugging settings
if __name__ == '__main__':
# might be necessary if running on windows/linux subsystem on windows.
# from gevent.wsgi import WSGIServer
# http_server = WSGIServer(('', 8080), app)
# http_server.serve_forever()
app.run(host='0.0.0.0', port=int(os.getenv("PORT", "8080")), debug=True)
|
[
"raven.contrib.flask.Sentry",
"flask.redirect",
"flask.Flask",
"jinja2.FileSystemLoader",
"libs.admin_authentication.is_logged_in",
"werkzeug.middleware.proxy_fix.ProxyFix",
"libs.security.set_secret_key",
"jinja2.ChoiceLoader",
"datetime.datetime.now",
"os.getenv"
] |
[((699, 751), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': "(directory + '/static')"}), "(__name__, static_folder=directory + '/static')\n", (704, 751), False, 'from flask import Flask, redirect, render_template\n'), ((756, 775), 'libs.security.set_secret_key', 'set_secret_key', (['app'], {}), '(app)\n', (770, 775), False, 'from libs.security import set_secret_key\n'), ((882, 909), 'jinja2.ChoiceLoader', 'jinja2.ChoiceLoader', (['loader'], {}), '(loader)\n', (901, 909), False, 'import jinja2\n'), ((929, 951), 'werkzeug.middleware.proxy_fix.ProxyFix', 'ProxyFix', (['app.wsgi_app'], {}), '(app.wsgi_app)\n', (937, 951), False, 'from werkzeug.middleware.proxy_fix import ProxyFix\n'), ((1909, 1954), 'raven.contrib.flask.Sentry', 'Sentry', (['app'], {'dsn': 'SENTRY_ELASTIC_BEANSTALK_DSN'}), '(app, dsn=SENTRY_ELASTIC_BEANSTALK_DSN)\n', (1915, 1954), False, 'from raven.contrib.flask import Sentry\n'), ((2063, 2085), 'flask.redirect', 'redirect', (["('/%s' % page)"], {}), "('/%s' % page)\n", (2071, 2085), False, 'from flask import Flask, redirect, render_template\n'), ((808, 857), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (["(directory + '/templates')"], {}), "(directory + '/templates')\n", (831, 857), False, 'import jinja2\n'), ((1059, 1073), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1071, 1073), False, 'from datetime import datetime\n'), ((2764, 2789), 'os.getenv', 'os.getenv', (['"""PORT"""', '"""8080"""'], {}), "('PORT', '8080')\n", (2773, 2789), False, 'import os\n'), ((2453, 2467), 'libs.admin_authentication.is_logged_in', 'is_logged_in', ([], {}), '()\n', (2465, 2467), False, 'from libs.admin_authentication import is_logged_in\n')]
|
#!/usr/bin/env python
from misc.common import parse_play_args
from misc.config import load_config
from rl_server.server.agent import run_agent
from rl_server.server.run_agents import get_algo_and_agent_config
args = parse_play_args()
config = load_config(args.config)
algo_config, agent_config = get_algo_and_agent_config(
config,
args.algorithm_id,
args.agent_id,
args.seed
)
run_agent(
config,
agent_config,
checkpoint_path=args.checkpoint
)
|
[
"misc.common.parse_play_args",
"misc.config.load_config",
"rl_server.server.agent.run_agent",
"rl_server.server.run_agents.get_algo_and_agent_config"
] |
[((218, 235), 'misc.common.parse_play_args', 'parse_play_args', ([], {}), '()\n', (233, 235), False, 'from misc.common import parse_play_args\n'), ((245, 269), 'misc.config.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (256, 269), False, 'from misc.config import load_config\n'), ((299, 377), 'rl_server.server.run_agents.get_algo_and_agent_config', 'get_algo_and_agent_config', (['config', 'args.algorithm_id', 'args.agent_id', 'args.seed'], {}), '(config, args.algorithm_id, args.agent_id, args.seed)\n', (324, 377), False, 'from rl_server.server.run_agents import get_algo_and_agent_config\n'), ((397, 461), 'rl_server.server.agent.run_agent', 'run_agent', (['config', 'agent_config'], {'checkpoint_path': 'args.checkpoint'}), '(config, agent_config, checkpoint_path=args.checkpoint)\n', (406, 461), False, 'from rl_server.server.agent import run_agent\n')]
|
import os
import unittest
from eco_parser import EcoParser, ParseError
SCHEDULE_WITH_TABLE = (
"http://www.legislation.gov.uk/uksi/2017/1067/schedule/1/made/data.xml"
)
SCHEDULE_WITHOUT_TABLE = (
"http://www.legislation.gov.uk/uksi/2017/477/schedule/1/made/data.xml"
)
ARTICLE_WITHOUT_TABLE = (
"http://www.legislation.gov.uk/uksi/2017/1270/article/3/made/data.xml"
)
TABLE_WITHOUT_HEADER = (
"http://www.legislation.gov.uk/uksi/2015/1873/schedule/1/made/data.xml"
)
ONE_ROW_TABLE_VALID = (
"http://www.legislation.gov.uk/uksi/2016/1140/schedule/1/made/data.xml"
)
ONE_ROW_TABLE_INVALID = (
"http://www.legislation.gov.uk/uksi/2016/657/schedule/1/made/data.xml"
)
UNKNOWN_TABLE_FORMAT = (
"http://www.legislation.gov.uk/uksi/no-example-of-this/schedule/1/made/data.xml"
)
# stub parser implementation we can run tests against
class StubParser(EcoParser):
def get_data(self):
fixtures = {
SCHEDULE_WITH_TABLE: "fixtures/schedule_with_table.xml",
SCHEDULE_WITHOUT_TABLE: "fixtures/schedule_without_table.xml",
ARTICLE_WITHOUT_TABLE: "fixtures/article_without_table.xml",
TABLE_WITHOUT_HEADER: "fixtures/table_without_header.xml",
ONE_ROW_TABLE_VALID: "fixtures/one_row_table_valid.xml",
ONE_ROW_TABLE_INVALID: "fixtures/one_row_table_invalid.xml",
UNKNOWN_TABLE_FORMAT: "fixtures/unknown_table_format.xml",
}
dirname = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.abspath(os.path.join(dirname, fixtures[self.url]))
if self.url in fixtures:
return bytes(open(file_path, "r").read(), "utf-8")
else:
raise Exception("no test fixture defined for url '%s'" % self.url)
class ParserTest(unittest.TestCase):
def test_no_parser_found(self):
p = StubParser("foo.bar/baz")
with self.assertRaises(ParseError):
p.parse()
def test_schedule_with_table(self):
p = StubParser(SCHEDULE_WITH_TABLE)
self.assertSequenceEqual(
[
("(1) Name of borough ward", "(2) Number of councillors"),
("Crummock & Derwent Valley", "1"),
("<NAME>", "3"),
("Warnell", "1"),
("Westward Ho!", "2"),
("Audley & Queen’s Park", "2"),
],
p.parse(),
)
def test_table_without_header(self):
p = StubParser(TABLE_WITHOUT_HEADER)
self.assertSequenceEqual(
[
("Crummock & Derwent Valley", "1"),
("St John’s", "3"),
("Warnell", "1"),
("Westward Ho!", "2"),
("Audley & Queen’s Park", "2"),
],
p.parse(),
)
def test_schedule_without_table(self):
p = StubParser(SCHEDULE_WITHOUT_TABLE)
self.assertSequenceEqual(
[
("Crummock & Derwent Valley",),
("<NAME>",),
("Warnell",),
("Westward Ho!",),
("Audley & Queen’s Park",),
],
p.parse(),
)
def test_article_without_table(self):
p = StubParser(ARTICLE_WITHOUT_TABLE)
self.assertSequenceEqual(
[
("The existing wards of the borough of Foo Town are abolished",),
("The borough of Foo Town is divided into 5 wards as follows—",),
("Crummock & Derwent Valley",),
("<NAME>",),
("Warnell",),
("Westward Ho!",),
("Audley & Queen’s Park",),
(
"Each ward comprises the area identified on the map by reference to the name of the ward",
),
("Three councillors are to be elected for each ward",),
],
p.parse(),
)
def test_unknown_table_format(self):
p = StubParser(UNKNOWN_TABLE_FORMAT)
with self.assertRaises(ParseError):
p.parse()
def test_one_row_table_valid(self):
p = StubParser(ONE_ROW_TABLE_VALID)
self.assertSequenceEqual(
[
("(1) Name of borough ward", "(2) Number of councillors"),
("Crummock & Derwent Valley", "1"),
("<NAME>’s", "3"),
("Warnell", "1"),
("Westward Ho!", "2"),
("Audley & Queen’s Park", "2"),
],
p.parse(),
)
def test_one_row_table_invalid(self):
p = StubParser(ONE_ROW_TABLE_INVALID)
with self.assertRaises(ParseError):
p.parse()
|
[
"os.path.abspath",
"os.path.join"
] |
[((1477, 1502), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1492, 1502), False, 'import os\n'), ((1540, 1581), 'os.path.join', 'os.path.join', (['dirname', 'fixtures[self.url]'], {}), '(dirname, fixtures[self.url])\n', (1552, 1581), False, 'import os\n')]
|
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nsx_common as common
from cloudify import exceptions as cfy_exc
def get_tag(client_session, name):
return common.nsx_search(
client_session, 'body/securityTags/securityTag',
name, 'securityTag'
)
def add_tag(client_session, name, description):
security_group = {
'securityTag': {
'name': name
}
}
if description:
security_group['securityTag']['description'] = description
result_raw = client_session.create(
'securityTag',
request_body_dict=security_group
)
common.check_raw_result(result_raw)
return result_raw['objectId']
def delete_tag(client_session, resource_id):
result = client_session.delete(
'securityTagID',
uri_parameters={'tagId': resource_id}
)
common.check_raw_result(result)
def tag_vm_to_resource_id(tag_id, vm_id):
"""Generate resource_id from tag_id/vm_id"""
if not vm_id or not tag_id:
raise cfy_exc.NonRecoverableError(
"Please recheck tag_id/vm_id"
)
return "%s|%s" % (tag_id, vm_id)
def add_tag_vm(client_session, tag_id, vm_id):
resource_id = tag_vm_to_resource_id(tag_id, vm_id)
result_raw = client_session.update(
'securityTagVM',
uri_parameters={
'tagId': tag_id,
'vmMoid': vm_id
}
)
common.check_raw_result(result_raw)
return resource_id
def delete_tag_vm(client_session, resource_id):
ids = resource_id.split("|")
if len(ids) != 2:
raise cfy_exc.NonRecoverableError(
'Unexpected error retrieving resource ID'
)
# get list of attached
attached_vms_raw = common.nsx_read(
client_session, 'body',
'securityTagVMsList', uri_parameters={'tagId': ids[0]}
)
if not attached_vms_raw:
return
attached_vms = common.nsx_struct_get_list(
attached_vms_raw, 'basicinfolist/basicinfo'
)
# delete only attached
for vm in attached_vms:
if vm.get('objectId') == ids[1]:
result_raw = client_session.delete(
'securityTagVM',
uri_parameters={
'tagId': ids[0],
'vmMoid': ids[1]
}
)
common.check_raw_result(result_raw)
break
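# A rough call-order sketch (illustrative only; client_session is assumed to be an
# NSX API session object, which this module does not construct itself):
#
#     tag_id = add_tag(client_session, 'my-tag', 'example security tag')
#     resource_id = add_tag_vm(client_session, tag_id, 'vm-42')  # returns "tag_id|vm_id"
#     delete_tag_vm(client_session, resource_id)                 # detaches only that VM
#     delete_tag(client_session, tag_id)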
|
[
"nsx_common.nsx_search",
"cloudify.exceptions.NonRecoverableError",
"nsx_common.nsx_struct_get_list",
"nsx_common.nsx_read",
"nsx_common.check_raw_result"
] |
[((737, 828), 'nsx_common.nsx_search', 'common.nsx_search', (['client_session', '"""body/securityTags/securityTag"""', 'name', '"""securityTag"""'], {}), "(client_session, 'body/securityTags/securityTag', name,\n 'securityTag')\n", (754, 828), True, 'import nsx_common as common\n'), ((1191, 1226), 'nsx_common.check_raw_result', 'common.check_raw_result', (['result_raw'], {}), '(result_raw)\n', (1214, 1226), True, 'import nsx_common as common\n'), ((1426, 1457), 'nsx_common.check_raw_result', 'common.check_raw_result', (['result'], {}), '(result)\n', (1449, 1457), True, 'import nsx_common as common\n'), ((1989, 2024), 'nsx_common.check_raw_result', 'common.check_raw_result', (['result_raw'], {}), '(result_raw)\n', (2012, 2024), True, 'import nsx_common as common\n'), ((2313, 2412), 'nsx_common.nsx_read', 'common.nsx_read', (['client_session', '"""body"""', '"""securityTagVMsList"""'], {'uri_parameters': "{'tagId': ids[0]}"}), "(client_session, 'body', 'securityTagVMsList',\n uri_parameters={'tagId': ids[0]})\n", (2328, 2412), True, 'import nsx_common as common\n'), ((2496, 2567), 'nsx_common.nsx_struct_get_list', 'common.nsx_struct_get_list', (['attached_vms_raw', '"""basicinfolist/basicinfo"""'], {}), "(attached_vms_raw, 'basicinfolist/basicinfo')\n", (2522, 2567), True, 'import nsx_common as common\n'), ((1597, 1655), 'cloudify.exceptions.NonRecoverableError', 'cfy_exc.NonRecoverableError', (['"""Please recheck tag_id/vm_id"""'], {}), "('Please recheck tag_id/vm_id')\n", (1624, 1655), True, 'from cloudify import exceptions as cfy_exc\n'), ((2169, 2239), 'cloudify.exceptions.NonRecoverableError', 'cfy_exc.NonRecoverableError', (['"""Unexpected error retrieving resource ID"""'], {}), "('Unexpected error retrieving resource ID')\n", (2196, 2239), True, 'from cloudify import exceptions as cfy_exc\n'), ((2912, 2947), 'nsx_common.check_raw_result', 'common.check_raw_result', (['result_raw'], {}), '(result_raw)\n', (2935, 2947), True, 'import nsx_common as common\n')]
|
import os
from datasets.types.data_split import DataSplit
from datasets.SOT.constructor.base_interface import SingleObjectTrackingDatasetConstructor
import numpy as np
def construct_TrackingNet(constructor: SingleObjectTrackingDatasetConstructor, seed):
root_path = seed.root_path
data_type = seed.data_split
enable_set_ids = seed.enable_set_ids
sequence_name_class_map_file_path = seed.sequence_name_class_map_file_path
if data_type != DataSplit.Training and enable_set_ids is not None:
raise Exception("unsupported configuration")
sequence_name_class_map = {}
if sequence_name_class_map_file_path is None:
sequence_name_class_map_file_path = os.path.join(os.path.dirname(__file__), 'data_specs', 'trackingnet_sequence_classes_map.txt')
for line in open(sequence_name_class_map_file_path, 'r', encoding='utf-8'):
line = line.strip()
name, category = line.split('\t')
sequence_name_class_map[name] = category
categories = set(sequence_name_class_map.values())
category_id_name_map = {i: v for i, v in enumerate(categories)}
category_name_id_map = {v: i for i, v in enumerate(categories)}
if enable_set_ids is not None:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in enable_set_ids]
else:
trackingNetSubsets = []
if data_type & DataSplit.Training:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in range(12)]
if data_type & DataSplit.Testing:
trackingNetSubsets.append('TEST')
sequence_list = []
for subset in trackingNetSubsets:
subset_path = os.path.join(root_path, subset)
frames_path = os.path.join(subset_path, 'frames')
anno_path = os.path.join(subset_path, 'anno')
bounding_box_annotation_files = os.listdir(anno_path)
bounding_box_annotation_files = [bounding_box_annotation_file for bounding_box_annotation_file in
bounding_box_annotation_files if bounding_box_annotation_file.endswith('.txt')]
bounding_box_annotation_files.sort()
sequences = [sequence[:-4] for sequence in bounding_box_annotation_files]
for sequence, bounding_box_annotation_file in zip(sequences, bounding_box_annotation_files):
sequence_image_path = os.path.join(frames_path, sequence)
bounding_box_annotation_file_path = os.path.join(anno_path, bounding_box_annotation_file)
sequence_list.append((sequence, sequence_image_path, bounding_box_annotation_file_path))
constructor.set_category_id_name_map(category_id_name_map)
constructor.set_total_number_of_sequences(len(sequence_list))
for sequence, sequence_image_path, sequence_bounding_box_annotation_file_path in sequence_list:
with constructor.new_sequence(category_name_id_map[sequence_name_class_map[sequence]]) as sequence_constructor:
sequence_constructor.set_name(sequence)
bounding_boxes = np.loadtxt(sequence_bounding_box_annotation_file_path, dtype=np.float, delimiter=',')
images = os.listdir(sequence_image_path)
images = [image for image in images if image.endswith('.jpg')]
if bounding_boxes.ndim == 2:
is_testing_sequence = False
assert len(images) == len(bounding_boxes)
else:
is_testing_sequence = True
assert bounding_boxes.ndim == 1 and bounding_boxes.shape[0] == 4
for i in range(len(images)):
image_file_name = '{}.jpg'.format(i)
image_file_path = os.path.join(sequence_image_path, image_file_name)
with sequence_constructor.new_frame() as frame_constructor:
frame_constructor.set_path(image_file_path)
if is_testing_sequence:
if i == 0:
frame_constructor.set_bounding_box(bounding_boxes.tolist())
else:
frame_constructor.set_bounding_box(bounding_boxes[i].tolist())
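# Directory layout expected by the constructor above (inferred from the code, shown only
# as a reader aid); <root_path> is seed.root_path and TEST sequences carry a single
# annotation row:
#
#     <root_path>/TRAIN_0/frames/<sequence>/0.jpg, 1.jpg, ...
#     <root_path>/TRAIN_0/anno/<sequence>.txt   # one comma-separated 4-value line per frame
#     ... up to TRAIN_11 ...
#     <root_path>/TEST/frames/<sequence>/..., <root_path>/TEST/anno/<sequence>.txt  # one line total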
|
[
"os.path.dirname",
"numpy.loadtxt",
"os.path.join",
"os.listdir"
] |
[((1623, 1654), 'os.path.join', 'os.path.join', (['root_path', 'subset'], {}), '(root_path, subset)\n', (1635, 1654), False, 'import os\n'), ((1677, 1712), 'os.path.join', 'os.path.join', (['subset_path', '"""frames"""'], {}), "(subset_path, 'frames')\n", (1689, 1712), False, 'import os\n'), ((1733, 1766), 'os.path.join', 'os.path.join', (['subset_path', '"""anno"""'], {}), "(subset_path, 'anno')\n", (1745, 1766), False, 'import os\n'), ((1808, 1829), 'os.listdir', 'os.listdir', (['anno_path'], {}), '(anno_path)\n', (1818, 1829), False, 'import os\n'), ((705, 730), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (720, 730), False, 'import os\n'), ((2320, 2355), 'os.path.join', 'os.path.join', (['frames_path', 'sequence'], {}), '(frames_path, sequence)\n', (2332, 2355), False, 'import os\n'), ((2404, 2457), 'os.path.join', 'os.path.join', (['anno_path', 'bounding_box_annotation_file'], {}), '(anno_path, bounding_box_annotation_file)\n', (2416, 2457), False, 'import os\n'), ((2991, 3080), 'numpy.loadtxt', 'np.loadtxt', (['sequence_bounding_box_annotation_file_path'], {'dtype': 'np.float', 'delimiter': '""","""'}), "(sequence_bounding_box_annotation_file_path, dtype=np.float,\n delimiter=',')\n", (3001, 3080), True, 'import numpy as np\n'), ((3098, 3129), 'os.listdir', 'os.listdir', (['sequence_image_path'], {}), '(sequence_image_path)\n', (3108, 3129), False, 'import os\n'), ((3619, 3669), 'os.path.join', 'os.path.join', (['sequence_image_path', 'image_file_name'], {}), '(sequence_image_path, image_file_name)\n', (3631, 3669), False, 'import os\n')]
|
import logging
import os
import sys
import click
from functools import partial
from .config import load_config
from .models import Snapshot, Table, Base
from .operations import (
copy_database,
create_database,
database_exists,
remove_database,
rename_database,
terminate_database_connections,
list_of_databases,
)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import ProgrammingError
from psutil import pid_exists
__version__ = '0.4.1'
logger = logging.getLogger(__name__)
class Operations(object):
def __init__(self, raw_connection, config):
self.terminate_database_connections = partial(
terminate_database_connections, raw_connection
)
self.create_database = partial(create_database, raw_connection)
self.copy_database = partial(copy_database, raw_connection)
self.database_exists = partial(database_exists, raw_connection)
self.rename_database = partial(rename_database, raw_connection)
self.remove_database = partial(remove_database, raw_connection)
self.list_of_databases = partial(list_of_databases, raw_connection)
class Stellar(object):
def __init__(self):
logger.debug('Initialized Stellar()')
self.load_config()
self.init_database()
def load_config(self):
self.config = load_config()
logging.basicConfig(level=self.config['logging'])
def init_database(self):
self.raw_db = create_engine(self.config['url'], echo=False)
self.raw_conn = self.raw_db.connect()
self.operations = Operations(self.raw_conn, self.config)
try:
self.raw_conn.connection.set_isolation_level(0)
except AttributeError:
logger.info('Could not set isolation level to 0')
self.db = create_engine(self.config['stellar_url'], echo=False)
self.db.session = sessionmaker(bind=self.db)()
self.raw_db.session = sessionmaker(bind=self.raw_db)()
tables_missing = self.create_stellar_database()
self.create_stellar_tables()
# logger.getLogger('sqlalchemy.engine').setLevel(logger.WARN)
def create_stellar_database(self):
if not self.operations.database_exists('stellar_data'):
self.operations.create_database('stellar_data')
return True
else:
return False
def create_stellar_tables(self):
Base.metadata.create_all(self.db)
self.db.session.commit()
def get_snapshot(self, snapshot_name):
return self.db.session.query(Snapshot).filter(
Snapshot.snapshot_name == snapshot_name,
Snapshot.project_name == self.config['project_name']
).first()
def get_snapshots(self):
return self.db.session.query(Snapshot).filter(
Snapshot.project_name == self.config['project_name']
).order_by(
Snapshot.created_at.desc()
).all()
def get_latest_snapshot(self):
return self.db.session.query(Snapshot).filter(
Snapshot.project_name == self.config['project_name']
).order_by(Snapshot.created_at.desc()).first()
def create_snapshot(self, snapshot_name, before_copy=None):
snapshot = Snapshot(
snapshot_name=snapshot_name,
project_name=self.config['project_name']
)
self.db.session.add(snapshot)
self.db.session.flush()
for table_name in self.config['tracked_databases']:
if before_copy:
before_copy(table_name)
table = Table(
table_name=table_name,
snapshot=snapshot
)
logger.debug('Copying %s to %s' % (
table_name,
table.get_table_name('master')
))
self.operations.copy_database(
table_name,
table.get_table_name('master')
)
self.db.session.add(table)
self.db.session.commit()
self.start_background_slave_copy(snapshot)
def remove_snapshot(self, snapshot):
for table in snapshot.tables:
try:
self.operations.remove_database(
table.get_table_name('master')
)
except ProgrammingError:
pass
try:
self.operations.remove_database(
table.get_table_name('slave')
)
except ProgrammingError:
pass
self.db.session.delete(table)
self.db.session.delete(snapshot)
self.db.session.commit()
def rename_snapshot(self, snapshot, new_name):
snapshot.snapshot_name = new_name
self.db.session.commit()
def restore(self, snapshot):
for table in snapshot.tables:
click.echo("Restoring database %s" % table.table_name)
if not self.operations.database_exists(
table.get_table_name('slave')
):
click.echo(
"Database %s does not exist."
% table.get_table_name('slave')
)
sys.exit(1)
try:
self.operations.remove_database(table.table_name)
except ProgrammingError:
logger.warn('Database %s does not exist.' % table.table_name)
self.operations.rename_database(
table.get_table_name('slave'),
table.table_name
)
snapshot.worker_pid = 1
self.db.session.commit()
self.start_background_slave_copy(snapshot)
def start_background_slave_copy(self, snapshot):
logger.debug('Starting background slave copy')
snapshot_id = snapshot.id
self.raw_conn.close()
self.raw_db.session.close()
self.db.session.close()
pid = os.fork() if hasattr(os, 'fork') else None
if pid:
return
self.init_database()
self.operations = Operations(self.raw_conn, self.config)
snapshot = self.db.session.query(Snapshot).get(snapshot_id)
snapshot.worker_pid = os.getpid()
self.db.session.commit()
self.inline_slave_copy(snapshot)
sys.exit()
def inline_slave_copy(self, snapshot):
for table in snapshot.tables:
self.operations.copy_database(
table.get_table_name('master'),
table.get_table_name('slave')
)
snapshot.worker_pid = None
self.db.session.commit()
def is_copy_process_running(self, snapshot):
return pid_exists(snapshot.worker_pid)
def is_old_database(self):
for snapshot in self.db.session.query(Snapshot):
for table in snapshot.tables:
for postfix in ('master', 'slave'):
old_name = table.get_table_name(postfix=postfix, old=True)
if self.operations.database_exists(old_name):
return True
return False
def update_database_names_to_new_version(self, after_rename=None):
for snapshot in self.db.session.query(Snapshot):
for table in snapshot.tables:
for postfix in ('master', 'slave'):
old_name = table.get_table_name(postfix=postfix, old=True)
new_name = table.get_table_name(postfix=postfix, old=False)
if self.operations.database_exists(old_name):
self.operations.rename_database(old_name, new_name)
if after_rename:
after_rename(old_name, new_name)
def delete_orphan_snapshots(self, after_delete=None):
stellar_databases = set()
for snapshot in self.db.session.query(Snapshot):
for table in snapshot.tables:
stellar_databases.add(table.get_table_name('master'))
stellar_databases.add(table.get_table_name('slave'))
databases = set(self.operations.list_of_databases())
for database in filter(
lambda database: (
database.startswith('stellar_') and
database != 'stellar_data'
),
(databases-stellar_databases)
):
self.operations.remove_database(database)
if after_delete:
after_delete(database)
@property
def default_snapshot_name(self):
n = 1
while self.db.session.query(Snapshot).filter(
Snapshot.snapshot_name == 'snap%d' % n,
Snapshot.project_name == self.config['project_name']
).count():
n += 1
return 'snap%d' % n
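# Hedged usage sketch (comments only, not part of the original module): the typical
# snapshot cycle a caller is expected to drive through this class.
#
#   stellar = Stellar()
#   stellar.create_snapshot(stellar.default_snapshot_name)   # e.g. 'snap1'
#   snapshot = stellar.get_snapshot('snap1')
#   stellar.restore(snapshot)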
|
[
"functools.partial",
"os.getpid",
"logging.basicConfig",
"psutil.pid_exists",
"click.echo",
"os.fork",
"sqlalchemy.create_engine",
"sys.exit",
"sqlalchemy.orm.sessionmaker",
"logging.getLogger"
] |
[((528, 555), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (545, 555), False, 'import logging\n'), ((678, 733), 'functools.partial', 'partial', (['terminate_database_connections', 'raw_connection'], {}), '(terminate_database_connections, raw_connection)\n', (685, 733), False, 'from functools import partial\n'), ((787, 827), 'functools.partial', 'partial', (['create_database', 'raw_connection'], {}), '(create_database, raw_connection)\n', (794, 827), False, 'from functools import partial\n'), ((857, 895), 'functools.partial', 'partial', (['copy_database', 'raw_connection'], {}), '(copy_database, raw_connection)\n', (864, 895), False, 'from functools import partial\n'), ((927, 967), 'functools.partial', 'partial', (['database_exists', 'raw_connection'], {}), '(database_exists, raw_connection)\n', (934, 967), False, 'from functools import partial\n'), ((999, 1039), 'functools.partial', 'partial', (['rename_database', 'raw_connection'], {}), '(rename_database, raw_connection)\n', (1006, 1039), False, 'from functools import partial\n'), ((1071, 1111), 'functools.partial', 'partial', (['remove_database', 'raw_connection'], {}), '(remove_database, raw_connection)\n', (1078, 1111), False, 'from functools import partial\n'), ((1145, 1187), 'functools.partial', 'partial', (['list_of_databases', 'raw_connection'], {}), '(list_of_databases, raw_connection)\n', (1152, 1187), False, 'from functools import partial\n'), ((1411, 1460), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': "self.config['logging']"}), "(level=self.config['logging'])\n", (1430, 1460), False, 'import logging\n'), ((1513, 1558), 'sqlalchemy.create_engine', 'create_engine', (["self.config['url']"], {'echo': '(False)'}), "(self.config['url'], echo=False)\n", (1526, 1558), False, 'from sqlalchemy import create_engine\n'), ((1856, 1909), 'sqlalchemy.create_engine', 'create_engine', (["self.config['stellar_url']"], {'echo': '(False)'}), "(self.config['stellar_url'], echo=False)\n", (1869, 1909), False, 'from sqlalchemy import create_engine\n'), ((6228, 6239), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6237, 6239), False, 'import os\n'), ((6322, 6332), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6330, 6332), False, 'import sys\n'), ((6699, 6730), 'psutil.pid_exists', 'pid_exists', (['snapshot.worker_pid'], {}), '(snapshot.worker_pid)\n', (6709, 6730), False, 'from psutil import pid_exists\n'), ((1936, 1962), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self.db'}), '(bind=self.db)\n', (1948, 1962), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1995, 2025), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self.raw_db'}), '(bind=self.raw_db)\n', (2007, 2025), False, 'from sqlalchemy.orm import sessionmaker\n'), ((4901, 4955), 'click.echo', 'click.echo', (["('Restoring database %s' % table.table_name)"], {}), "('Restoring database %s' % table.table_name)\n", (4911, 4955), False, 'import click\n'), ((5956, 5965), 'os.fork', 'os.fork', ([], {}), '()\n', (5963, 5965), False, 'import os\n'), ((5233, 5244), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5241, 5244), False, 'import sys\n')]
|
# Crawl with gevent; there is no gevent release matching python3.8 yet, so we have to wait
from urllib import request  # used for the gevent-driven crawl; gevent must be installed
import gevent, time
from gevent import monkey
from http.cookiejar import CookieJar
from bs4 import BeautifulSoup
monkey.patch_all()  # tag every IO operation in this program; required, because gevent cannot handle urllib's IO operations on its own
def f(url):
    resp = request.urlopen(url)  # open a web page
    data = resp.read()  # read the whole page content
print('%d bytes received from %s.' % (len(data), url))
urls = ['https://www.python.org/', 'https://www.yahoo.com/', 'https://github.com/' ]
time_start = time.time()
for url in urls:  # crawl synchronously
f(url)
print("同步cost", time.time() - time_start)
async_time_start = time.time()
gevent.joinall([gevent.spawn(f, urls[0]), gevent.spawn(f, urls[1]), gevent.spawn(f, urls[2]), ])  # crawl asynchronously
print("async cost", time.time() - async_time_start)
request.urlretrieve("http address", "full local destination path")  # download the file straight to local disk
# One crawling approach: instead of using request directly, forge a Request object and add some request headers
fake1 = request.Request("url")
fake1.add_header("user_agent", "Mozilla/5.0")  # add a browser type (User-Agent)
resp1 = request.urlopen(fake1)
print(resp1.read())
# Another crawling approach: add special handlers
# 1. when cookies must be added: HTTPCookieProcessor
# 2. when a proxy is required: ProxyHandler
# 3. when https encryption is required: HTTPSHandler
# 4. when automatic redirects occur: HTTPRedirectHandler
# specialHandler = request.build_opener(HTTPSHandler())
# request.install_opener(specialHandler)
# request.urlopen("url")
cookie = CookieJar()
fake2 = request.build_opener(request.HTTPCookieProcessor(cookie))
request.install_opener(fake2)
resp2 = request.urlopen("url")
print(resp2.read())
# BeautifulSoup: parses html pages; requires beautifulsoup4 (and lxml) to be installed
soup = BeautifulSoup("html string", "html.parser", from_encoding="utf8")  # "html.parser" selects the parser to use
|
[
"urllib.request.Request",
"http.cookiejar.CookieJar",
"urllib.request.HTTPCookieProcessor",
"urllib.request.urlopen",
"gevent.monkey.patch_all",
"time.time",
"urllib.request.urlretrieve",
"bs4.BeautifulSoup",
"gevent.spawn",
"urllib.request.install_opener"
] |
[((209, 227), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (225, 227), False, 'from gevent import monkey\n'), ((534, 545), 'time.time', 'time.time', ([], {}), '()\n', (543, 545), False, 'import gevent, time\n'), ((647, 658), 'time.time', 'time.time', ([], {}), '()\n', (656, 658), False, 'import gevent, time\n'), ((816, 859), 'urllib.request.urlretrieve', 'request.urlretrieve', (['"""http地址"""', '"""存储到本地的完整地址"""'], {}), "('http地址', '存储到本地的完整地址')\n", (835, 859), False, 'from urllib import request\n'), ((930, 952), 'urllib.request.Request', 'request.Request', (['"""url"""'], {}), "('url')\n", (945, 952), False, 'from urllib import request\n'), ((1017, 1039), 'urllib.request.urlopen', 'request.urlopen', (['fake1'], {}), '(fake1)\n', (1032, 1039), False, 'from urllib import request\n'), ((1337, 1348), 'http.cookiejar.CookieJar', 'CookieJar', ([], {}), '()\n', (1346, 1348), False, 'from http.cookiejar import CookieJar\n'), ((1415, 1444), 'urllib.request.install_opener', 'request.install_opener', (['fake2'], {}), '(fake2)\n', (1437, 1444), False, 'from urllib import request\n'), ((1453, 1475), 'urllib.request.urlopen', 'request.urlopen', (['"""url"""'], {}), "('url')\n", (1468, 1475), False, 'from urllib import request\n'), ((1557, 1646), 'bs4.BeautifulSoup', 'BeautifulSoup', (['"""html字符串"""', '"""html.parser:指定解析器,html就是html.parser"""', '"""from_encoding=utf8"""'], {}), "('html字符串', 'html.parser:指定解析器,html就是html.parser',\n 'from_encoding=utf8')\n", (1570, 1646), False, 'from bs4 import BeautifulSoup\n'), ((307, 327), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (322, 327), False, 'from urllib import request\n'), ((1378, 1413), 'urllib.request.HTTPCookieProcessor', 'request.HTTPCookieProcessor', (['cookie'], {}), '(cookie)\n', (1405, 1413), False, 'from urllib import request\n'), ((606, 617), 'time.time', 'time.time', ([], {}), '()\n', (615, 617), False, 'import gevent, time\n'), ((675, 699), 'gevent.spawn', 'gevent.spawn', (['f', 'urls[0]'], {}), '(f, urls[0])\n', (687, 699), False, 'import gevent, time\n'), ((701, 725), 'gevent.spawn', 'gevent.spawn', (['f', 'urls[1]'], {}), '(f, urls[1])\n', (713, 725), False, 'import gevent, time\n'), ((727, 751), 'gevent.spawn', 'gevent.spawn', (['f', 'urls[2]'], {}), '(f, urls[2])\n', (739, 751), False, 'import gevent, time\n'), ((788, 799), 'time.time', 'time.time', ([], {}), '()\n', (797, 799), False, 'import gevent, time\n')]
|
import pytest
from ..width import nonparam_width, gauss_model, radial_profile
from .testing_utils import generate_filament_model
import numpy as np
import numpy.testing as npt
from scipy import ndimage as nd
def generate_gaussian_profile(pts, width=3.0, amplitude=2.0, background=0.5):
return amplitude * np.exp(- pts ** 2 / (2 * width ** 2)) + background
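# Hedged note (added for clarity): at pts = 0 the profile above evaluates to
# amplitude + background, so the default arguments peak at 2.0 + 0.5 = 2.5 -- the
# amplitude value asserted against the fits in the tests below.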
def test_nonparam():
pts = np.linspace(0, 10, 100)
profile = generate_gaussian_profile(pts)
params, errors, fail = \
nonparam_width(pts, profile, pts, profile, 1.0, 5, 99)
# This shouldn't be failing
assert fail is False
# Check the amplitude
npt.assert_allclose(params[0], 2.5, atol=0.01)
# Width
npt.assert_allclose(params[1], 3.0, atol=0.01)
# Background
npt.assert_allclose(params[2], 0.5, atol=0.02)
def test_gaussian():
pts = np.linspace(0, 10, 100)
profile = generate_gaussian_profile(pts)
params, errors, _, _, fail = \
gauss_model(pts, profile, np.ones_like(pts), 1.0)
# Check the amplitude
npt.assert_allclose(params[0], 2.5, atol=0.01)
# Width
npt.assert_allclose(params[1], 3.0, atol=0.01)
# Background
npt.assert_allclose(params[2], 0.5, atol=0.02)
@pytest.mark.parametrize(('theta'), [(0.0)])
def test_radial_profile_output(theta):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False, max_distance=20)
params, errors, _, _, fail = \
gauss_model(dist, radprof, np.ones_like(dist), 1.0)
npt.assert_allclose(params[:-1], [1.0, 10.0, 0.0], atol=1e-1)
@pytest.mark.parametrize(('cutoff'), [(10.0), (20.0), (30.0)])
def test_radial_profile_cutoff(cutoff):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False, max_distance=cutoff)
assert unbin_dist.max() == cutoff
assert dist.max() < cutoff
@pytest.mark.parametrize(('padding'), [(5.0), (10.0), (20.0)])
def test_radial_profile_padding(padding, max_distance=20.0):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False,
max_distance=max_distance, pad_to_distance=padding)
if padding <= max_distance:
assert unbin_dist.max() == max_distance
assert dist.max() < max_distance
else:
assert unbin_dist.max() == padding
assert dist.max() < padding
@pytest.mark.xfail(raises=ValueError)
def test_radial_profile_fail_pad(padding=30.0, max_distance=20.0):
'''
Cannot pad greater than max_distance
'''
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False,
max_distance=max_distance, pad_to_distance=padding)
def test_radial_profile_autocut():
'''
Test auto-cutting with a secondary offset peak.
'''
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
model += np.roll(model, -30, axis=0).copy()
model += np.roll(model, +30, axis=0).copy()
# all_skeleton += np.roll(skeleton, -30, axis=0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=True,
max_distance=50.0, auto_cut_kwargs={'smooth_size': 3.0,
'pad_cut': 0})
npt.assert_equal(dist.max(), 19.25)
def test_radial_profile_autocut_plateau():
'''
Test auto-cutting with a plateau and a second fall.
'''
model, skeleton = generate_filament_model(shape=160, width=10.0,
amplitude=10.0, background=5.0)
# Create a second drop-off profile 40 pixels from the center on each side.
for i, row in enumerate(model[120:].T):
model[120:, i] = generate_gaussian_profile(np.arange(row.size),
width=5.0,
amplitude=5.0,
background=0.0)
for i, row in enumerate(model[:40].T):
model[:40, i] = generate_gaussian_profile(np.arange(row.size),
width=5.0,
amplitude=5.0,
background=0.0)[::-1]
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=True,
max_distance=60.0, auto_cut_kwargs={'smooth_size': 3.0,
'pad_cut': 0,
'interp_factor': 1})
# By-eye, this should be 18-19
npt.assert_almost_equal(dist.max(), 38.201, decimal=3)
|
[
"numpy.ones_like",
"numpy.roll",
"numpy.testing.assert_allclose",
"numpy.arange",
"numpy.exp",
"numpy.linspace",
"pytest.mark.parametrize",
"pytest.mark.xfail"
] |
[((1239, 1278), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""theta"""', '[0.0]'], {}), "('theta', [0.0])\n", (1262, 1278), False, 'import pytest\n'), ((1969, 2022), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cutoff"""', '[10.0, 20.0, 30.0]'], {}), "('cutoff', [10.0, 20.0, 30.0])\n", (1992, 2022), False, 'import pytest\n'), ((2629, 2682), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""padding"""', '[5.0, 10.0, 20.0]'], {}), "('padding', [5.0, 10.0, 20.0])\n", (2652, 2682), False, 'import pytest\n'), ((3505, 3541), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), '(raises=ValueError)\n', (3522, 3541), False, 'import pytest\n'), ((399, 422), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (410, 422), True, 'import numpy as np\n'), ((651, 697), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[0]', '(2.5)'], {'atol': '(0.01)'}), '(params[0], 2.5, atol=0.01)\n', (670, 697), True, 'import numpy.testing as npt\n'), ((714, 760), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[1]', '(3.0)'], {'atol': '(0.01)'}), '(params[1], 3.0, atol=0.01)\n', (733, 760), True, 'import numpy.testing as npt\n'), ((782, 828), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[2]', '(0.5)'], {'atol': '(0.02)'}), '(params[2], 0.5, atol=0.02)\n', (801, 828), True, 'import numpy.testing as npt\n'), ((863, 886), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (874, 886), True, 'import numpy as np\n'), ((1058, 1104), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[0]', '(2.5)'], {'atol': '(0.01)'}), '(params[0], 2.5, atol=0.01)\n', (1077, 1104), True, 'import numpy.testing as npt\n'), ((1121, 1167), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[1]', '(3.0)'], {'atol': '(0.01)'}), '(params[1], 3.0, atol=0.01)\n', (1140, 1167), True, 'import numpy.testing as npt\n'), ((1189, 1235), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[2]', '(0.5)'], {'atol': '(0.02)'}), '(params[2], 0.5, atol=0.02)\n', (1208, 1235), True, 'import numpy.testing as npt\n'), ((1904, 1964), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[:-1]', '[1.0, 10.0, 0.0]'], {'atol': '(0.1)'}), '(params[:-1], [1.0, 10.0, 0.0], atol=0.1)\n', (1923, 1964), True, 'import numpy.testing as npt\n'), ((1003, 1020), 'numpy.ones_like', 'np.ones_like', (['pts'], {}), '(pts)\n', (1015, 1020), True, 'import numpy as np\n'), ((1874, 1892), 'numpy.ones_like', 'np.ones_like', (['dist'], {}), '(dist)\n', (1886, 1892), True, 'import numpy as np\n'), ((314, 350), 'numpy.exp', 'np.exp', (['(-pts ** 2 / (2 * width ** 2))'], {}), '(-pts ** 2 / (2 * width ** 2))\n', (320, 350), True, 'import numpy as np\n'), ((4459, 4486), 'numpy.roll', 'np.roll', (['model', '(-30)'], {'axis': '(0)'}), '(model, -30, axis=0)\n', (4466, 4486), True, 'import numpy as np\n'), ((4507, 4534), 'numpy.roll', 'np.roll', (['model', '(+30)'], {'axis': '(0)'}), '(model, +30, axis=0)\n', (4514, 4534), True, 'import numpy as np\n'), ((5559, 5578), 'numpy.arange', 'np.arange', (['row.size'], {}), '(row.size)\n', (5568, 5578), True, 'import numpy as np\n'), ((5870, 5889), 'numpy.arange', 'np.arange', (['row.size'], {}), '(row.size)\n', (5879, 5889), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/rlessard/packages/omtk/0.4.999/python/omtk/ui/widget_list_influences.ui'
#
# Created: Tue Feb 20 10:34:53 2018
# by: pyside2-uic running on Qt 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(316, 295)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lineEdit_search = QtWidgets.QLineEdit(Form)
self.lineEdit_search.setObjectName("lineEdit_search")
self.horizontalLayout.addWidget(self.lineEdit_search)
self.btn_update = QtWidgets.QPushButton(Form)
self.btn_update.setObjectName("btn_update")
self.horizontalLayout.addWidget(self.btn_update)
self.verticalLayout.addLayout(self.horizontalLayout)
self.checkBox_hideAssigned = QtWidgets.QCheckBox(Form)
self.checkBox_hideAssigned.setChecked(True)
self.checkBox_hideAssigned.setObjectName("checkBox_hideAssigned")
self.verticalLayout.addWidget(self.checkBox_hideAssigned)
self.treeWidget = QtWidgets.QTreeWidget(Form)
self.treeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.treeWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "1")
self.treeWidget.header().setVisible(False)
self.verticalLayout.addWidget(self.treeWidget)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtCompat.translate("Form", "Form", None, -1))
self.btn_update.setText(QtCompat.translate("Form", "Update", None, -1))
self.checkBox_hideAssigned.setText(QtCompat.translate("Form", "Hide Assigned", None, -1))
|
[
"omtk.vendor.Qt.QtWidgets.QVBoxLayout",
"omtk.vendor.Qt.QtCompat.translate",
"omtk.vendor.Qt.QtWidgets.QCheckBox",
"omtk.vendor.Qt.QtWidgets.QHBoxLayout",
"omtk.vendor.Qt.QtWidgets.QPushButton",
"omtk.vendor.Qt.QtWidgets.QTreeWidget",
"omtk.vendor.Qt.QtWidgets.QLineEdit",
"omtk.vendor.Qt.QtCore.QMetaObject.connectSlotsByName"
] |
[((515, 542), 'omtk.vendor.Qt.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['Form'], {}), '(Form)\n', (536, 542), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((694, 717), 'omtk.vendor.Qt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (715, 717), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((813, 838), 'omtk.vendor.Qt.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['Form'], {}), '(Form)\n', (832, 838), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((989, 1016), 'omtk.vendor.Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Form'], {}), '(Form)\n', (1010, 1016), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((1224, 1249), 'omtk.vendor.Qt.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['Form'], {}), '(Form)\n', (1243, 1249), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((1468, 1495), 'omtk.vendor.Qt.QtWidgets.QTreeWidget', 'QtWidgets.QTreeWidget', (['Form'], {}), '(Form)\n', (1489, 1495), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((1911, 1954), 'omtk.vendor.Qt.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (1948, 1954), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((2019, 2063), 'omtk.vendor.Qt.QtCompat.translate', 'QtCompat.translate', (['"""Form"""', '"""Form"""', 'None', '(-1)'], {}), "('Form', 'Form', None, -1)\n", (2037, 2063), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((2097, 2143), 'omtk.vendor.Qt.QtCompat.translate', 'QtCompat.translate', (['"""Form"""', '"""Update"""', 'None', '(-1)'], {}), "('Form', 'Update', None, -1)\n", (2115, 2143), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n'), ((2188, 2241), 'omtk.vendor.Qt.QtCompat.translate', 'QtCompat.translate', (['"""Form"""', '"""Hide Assigned"""', 'None', '(-1)'], {}), "('Form', 'Hide Assigned', None, -1)\n", (2206, 2241), False, 'from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat\n')]
|
import urllib
from contextlib import suppress
from importlib import import_module
from urllib.parse import quote
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db.models import CharField, Q
from django.db.models.functions import Lower
from django.http import Http404
from django.shortcuts import redirect
from django.utils.functional import cached_property
from i18nfield.forms import I18nModelForm
from rules.contrib.views import PermissionRequiredMixin
from pretalx.common.forms import SearchForm
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
class ActionFromUrl:
write_permission_required = None
@cached_property
def object(self):
return self.get_object()
@cached_property
def permission_object(self):
return self.object
@cached_property
def _action(self):
if not any(_id in self.kwargs for _id in ['pk', 'code']):
return 'create'
if self.request.user.has_perm(
self.write_permission_required, self.permission_object
):
return 'edit'
return 'view'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['action'] = self._action
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['read_only'] = self._action == 'view'
if hasattr(self.request, 'event') and issubclass(
self.form_class, I18nModelForm
):
kwargs['locales'] = self.request.event.locales
return kwargs
class Sortable:
"""
In the main class, you'll have to call sort_queryset() in get_queryset.
In the template, do this:
{% load url_replace %}
<th>
{% trans "Title" %}
<a href="?{% url_replace request 'sort' '-title' %}"><i class="fa fa-caret-down"></i></a>
<a href="?{% url_replace request 'sort' 'title' %}"><i class="fa fa-caret-up"></i></a>
</th>
"""
sortable_fields = []
def sort_queryset(self, qs):
sort_key = self.request.GET.get('sort') or getattr(
self, 'default_sort_field', ''
)
if sort_key:
plain_key = sort_key[1:] if sort_key.startswith('-') else sort_key
reverse = not (plain_key == sort_key)
if plain_key in self.sortable_fields:
is_text = False
if '__' not in plain_key:
with suppress(FieldDoesNotExist):
is_text = isinstance(
qs.model._meta.get_field(plain_key), CharField
)
else:
split_key = plain_key.split('__')
if len(split_key) == 2:
is_text = isinstance(
qs.model._meta.get_field(
split_key[0]
).related_model._meta.get_field(split_key[1]),
CharField,
)
if is_text:
# TODO: this only sorts direct lookups case insensitively
# A sorting field like 'speaker__name' will not be found
qs = qs.annotate(key=Lower(plain_key)).order_by(
'-key' if reverse else 'key'
)
else:
qs = qs.order_by(sort_key)
return qs
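# Hedged example (hypothetical model and field names): how a list view is expected to
# combine the docstring's template snippet with sort_queryset().
#
#   class SpeakerList(Sortable, ListView):
#       model = Speaker
#       sortable_fields = ('name', 'email')
#       default_sort_field = 'name'
#
#       def get_queryset(self):
#           return self.sort_queryset(super().get_queryset())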
class Filterable:
filter_fields = []
default_filters = []
def filter_queryset(self, qs):
if self.filter_fields:
qs = self._handle_filter(qs)
if 'q' in self.request.GET:
qs = self._handle_search(qs)
return qs
def _handle_filter(self, qs):
for key in self.request.GET: # Do NOT use items() to preserve multivalue fields
value = self.request.GET.getlist(key)
if len(value) == 1:
value = value[0]
elif len(value) > 1:
key = f'{key}__in' if not key.endswith('__in') else key
if value:
lookup_key = key.split('__')[0]
if lookup_key in self.filter_fields:
qs = qs.filter(**{key: value})
return qs
def _handle_search(self, qs):
query = urllib.parse.unquote(self.request.GET['q'])
_filters = [Q(**{field: query}) for field in self.default_filters]
if len(_filters) > 1:
_filter = _filters[0]
for additional_filter in _filters[1:]:
_filter = _filter | additional_filter
qs = qs.filter(_filter)
elif _filters:
qs = qs.filter(_filters[0])
return qs
def get_context_data(self, **kwargs):
from django import forms
context = super().get_context_data(**kwargs)
context['search_form'] = SearchForm(
self.request.GET if 'q' in self.request.GET else {}
)
if hasattr(self, 'filter_form_class'):
context['filter_form'] = self.filter_form_class(
self.request.event, self.request.GET
)
elif hasattr(self, 'get_filter_form'):
context['filter_form'] = self.get_filter_form()
elif self.filter_fields:
context['filter_form'] = forms.modelform_factory(
self.model, fields=self.filter_fields
)(self.request.GET)
for field in context['filter_form'].fields.values():
field.required = False
if hasattr(field, 'queryset'):
field.queryset = field.queryset.filter(event=self.request.event)
return context
class PermissionRequired(PermissionRequiredMixin):
def has_permission(self):
result = super().has_permission()
if not result:
request = getattr(self, 'request', None)
if request and hasattr(request, 'event'):
key = f'pretalx_event_access_{request.event.pk}'
if key in request.session:
sparent = SessionStore(request.session.get(key))
parentdata = []
with suppress(Exception):
parentdata = sparent.load()
return 'event_access' in parentdata
return result
def get_login_url(self):
"""We do this to avoid leaking data about existing pages."""
raise Http404()
def handle_no_permission(self):
request = getattr(self, 'request', None)
if (
request
and hasattr(request, 'event')
and request.user.is_anonymous
and 'cfp' in request.resolver_match.namespaces
):
params = '&' + request.GET.urlencode() if request.GET else ''
return redirect(
request.event.urls.login + f'?next={quote(request.path)}' + params
)
raise Http404()
class EventPermissionRequired(PermissionRequired):
def get_permission_object(self):
return self.request.event
|
[
"urllib.parse.unquote",
"pretalx.common.forms.SearchForm",
"importlib.import_module",
"django.db.models.functions.Lower",
"django.db.models.Q",
"contextlib.suppress",
"urllib.parse.quote",
"django.http.Http404",
"django.forms.modelform_factory"
] |
[((568, 606), 'importlib.import_module', 'import_module', (['settings.SESSION_ENGINE'], {}), '(settings.SESSION_ENGINE)\n', (581, 606), False, 'from importlib import import_module\n'), ((4412, 4455), 'urllib.parse.unquote', 'urllib.parse.unquote', (["self.request.GET['q']"], {}), "(self.request.GET['q'])\n", (4432, 4455), False, 'import urllib\n'), ((4980, 5043), 'pretalx.common.forms.SearchForm', 'SearchForm', (["(self.request.GET if 'q' in self.request.GET else {})"], {}), "(self.request.GET if 'q' in self.request.GET else {})\n", (4990, 5043), False, 'from pretalx.common.forms import SearchForm\n'), ((6545, 6554), 'django.http.Http404', 'Http404', ([], {}), '()\n', (6552, 6554), False, 'from django.http import Http404\n'), ((7042, 7051), 'django.http.Http404', 'Http404', ([], {}), '()\n', (7049, 7051), False, 'from django.http import Http404\n'), ((4476, 4495), 'django.db.models.Q', 'Q', ([], {}), '(**{field: query})\n', (4477, 4495), False, 'from django.db.models import CharField, Q\n'), ((2528, 2555), 'contextlib.suppress', 'suppress', (['FieldDoesNotExist'], {}), '(FieldDoesNotExist)\n', (2536, 2555), False, 'from contextlib import suppress\n'), ((5418, 5480), 'django.forms.modelform_factory', 'forms.modelform_factory', (['self.model'], {'fields': 'self.filter_fields'}), '(self.model, fields=self.filter_fields)\n', (5441, 5480), False, 'from django import forms\n'), ((6281, 6300), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (6289, 6300), False, 'from contextlib import suppress\n'), ((6983, 7002), 'urllib.parse.quote', 'quote', (['request.path'], {}), '(request.path)\n', (6988, 7002), False, 'from urllib.parse import quote\n'), ((3334, 3350), 'django.db.models.functions.Lower', 'Lower', (['plain_key'], {}), '(plain_key)\n', (3339, 3350), False, 'from django.db.models.functions import Lower\n')]
|
#!/usr/bin/env python
#
# overlaydisplaypanel.py - The OverlayDisplayPanel.
#
# Author: <NAME> <<EMAIL>>
"""This module provides the :class:`OverlayDisplayPanel` class, a *FSLeyes
control* panel which allows the user to change overlay display settings.
"""
import logging
import functools
import collections
import collections.abc as abc
import wx
import fsleyes_props as props
import fsleyes.views.canvaspanel as canvaspanel
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.strings as strings
import fsleyes.tooltips as fsltooltips
from . import overlaydisplaywidgets as odwidgets
log = logging.getLogger(__name__)
class OverlayDisplayPanel(ctrlpanel.SettingsPanel):
"""The ``OverlayDisplayPanel`` is a :class:`.SettingsPanel` which allows
the user to change the display settings of the currently selected
overlay (which is defined by the :attr:`.DisplayContext.selectedOverlay`
property). The display settings for an overlay are contained in the
:class:`.Display` and :class:`.DisplayOpts` instances associated with
that overlay. An ``OverlayDisplayPanel`` looks something like the
following:
.. image:: images/overlaydisplaypanel.png
:scale: 50%
:align: center
An ``OverlayDisplayPanel`` uses a :class:`.WidgetList` to organise the
settings into two main sections:
- Settings which are common across all overlays - these are defined
in the :class:`.Display` class.
- Settings which are specific to the current
:attr:`.Display.overlayType` - these are defined in the
:class:`.DisplayOpts` sub-classes.
The settings that are displayed on an ``OverlayDisplayPanel`` are
defined in the :attr:`_DISPLAY_PROPS` and :attr:`_DISPLAY_WIDGETS`
dictionaries.
"""
@staticmethod
def supportedViews():
"""Overrides :meth:`.ControlMixin.supportedViews`. The
``OverlayDisplayPanel`` is only intended to be added to
:class:`.OrthoPanel`, :class:`.LightBoxPanel`, or
:class:`.Scene3DPanel` views.
"""
return [canvaspanel.CanvasPanel]
@staticmethod
def defaultLayout():
"""Returns a dictionary containing layout settings to be passed to
:class:`.ViewPanel.togglePanel`.
"""
return {'location' : wx.LEFT}
def __init__(self, parent, overlayList, displayCtx, canvasPanel):
"""Create an ``OverlayDisplayPanel``.
:arg parent: The :mod:`wx` parent object.
:arg overlayList: The :class:`.OverlayList` instance.
:arg displayCtx: The :class:`.DisplayContext` instance.
:arg canvasPanel: The :class:`.CanvasPanel` instance.
"""
from fsleyes.views.scene3dpanel import Scene3DPanel
ctrlpanel.SettingsPanel.__init__(self,
parent,
overlayList,
displayCtx,
canvasPanel,
kbFocus=True)
displayCtx .addListener('selectedOverlay',
self.name,
self.__selectedOverlayChanged)
overlayList.addListener('overlays',
self.name,
self.__selectedOverlayChanged)
self.__threedee = isinstance(parent, Scene3DPanel)
self.__viewPanel = canvasPanel
self.__widgets = None
self.__currentOverlay = None
self.__selectedOverlayChanged()
def destroy(self):
"""Must be called when this ``OverlayDisplayPanel`` is no longer
needed. Removes property listeners, and calls the
:meth:`.SettingsPanel.destroy` method.
"""
self.displayCtx .removeListener('selectedOverlay', self.name)
self.overlayList.removeListener('overlays', self.name)
if self.__currentOverlay is not None and \
self.__currentOverlay in self.overlayList:
display = self.displayCtx.getDisplay(self.__currentOverlay)
display.removeListener('overlayType', self.name)
self.__viewPanel = None
self.__widgets = None
self.__currentOverlay = None
ctrlpanel.SettingsPanel.destroy(self)
def __selectedOverlayChanged(self, *a):
"""Called when the :class:`.OverlayList` or
:attr:`.DisplayContext.selectedOverlay` changes. Refreshes this
``OverlayDisplayPanel`` so that the display settings for the newly
selected overlay are shown.
"""
overlay = self.displayCtx.getSelectedOverlay()
lastOverlay = self.__currentOverlay
widgetList = self.getWidgetList()
if overlay is None:
self.__currentOverlay = None
self.__widgets = None
widgetList.Clear()
self.Layout()
return
if overlay is lastOverlay:
return
self.__currentOverlay = overlay
self.__widgets = collections.OrderedDict()
display = self.displayCtx.getDisplay(overlay)
opts = display.opts
if self.__threedee:
groups = ['display', 'opts', '3d']
targets = [ display, opts, opts]
labels = [strings.labels[self, display],
strings.labels[self, opts],
strings.labels[self, '3d']]
else:
groups = ['display', 'opts']
targets = [ display, opts]
labels = [strings.labels[self, display],
strings.labels[self, opts]]
keepExpanded = {g : True for g in groups}
if lastOverlay is not None and lastOverlay in self.overlayList:
lastDisplay = self.displayCtx.getDisplay(lastOverlay)
lastDisplay.removeListener('overlayType', self.name)
if lastOverlay is not None:
for g in groups:
keepExpanded[g] = widgetList.IsExpanded(g)
display.addListener('overlayType', self.name, self.__ovlTypeChanged)
widgetList.Clear()
for g, l, t in zip(groups, labels, targets):
widgetList.AddGroup(g, l)
self.__widgets[g] = self.__updateWidgets(t, g)
widgetList.Expand(g, keepExpanded[g])
self.setNavOrder()
self.Layout()
def setNavOrder(self):
        # gather the per-group widget lists and flatten them into one list for the
        # navigation order
        allWidgets = self.__widgets.values()
        allWidgets = functools.reduce(lambda a, b: a + b, allWidgets)
ctrlpanel.SettingsPanel.setNavOrder(self, allWidgets)
def __ovlTypeChanged(self, *a):
"""Called when the :attr:`.Display.overlayType` of the current overlay
changes. Refreshes the :class:`.DisplayOpts` settings which are shown,
as a new :class:`.DisplayOpts` instance will have been created for the
overlay.
"""
opts = self.displayCtx.getOpts(self.__currentOverlay)
widgetList = self.getWidgetList()
self.__widgets[opts] = self.__updateWidgets(opts, 'opts')
widgetList.RenameGroup('opts', strings.labels[self, opts])
if '3d' in self.__widgets:
self.__widgets['3d'] = self.__updateWidgets(opts, '3d')
self.setNavOrder()
self.Layout()
def updateWidgets(self, target, groupName):
"""Re-generates the widgets for the given target/group. """
self.__widgets[target] = self.__updateWidgets(target, groupName)
self.setNavOrder()
self.Layout()
def __updateWidgets(self, target, groupName):
"""Called by the :meth:`__selectedOverlayChanged` and
:meth:`__ovlTypeChanged` methods. Re-creates the controls on this
``OverlayDisplayPanel`` for the specified group.
:arg target: A :class:`.Display` or :class:`.DisplayOpts` instance,
which contains the properties that controls are to be
created for.
:arg groupName: Either ``'display'`` or ``'opts'``/``'3d'``,
corresponding to :class:`.Display` or
:class:`.DisplayOpts` properties.
:returns: A list containing all of the new widgets that
were created.
"""
widgetList = self.getWidgetList()
widgetList.ClearGroup(groupName)
if groupName == '3d':
dispProps = odwidgets.get3DPropertyList(target)
dispSpecs = odwidgets.get3DWidgetSpecs( target, self.displayCtx)
else:
dispProps = odwidgets.getPropertyList(target,
self.__threedee)
dispSpecs = odwidgets.getWidgetSpecs( target,
self.displayCtx,
self.__threedee)
allLabels = []
allTooltips = []
allWidgets = []
allContainers = []
for p in dispProps:
spec = dispSpecs[p]
specs = [spec]
labels = [strings .properties.get((target, p), None)]
tooltips = [fsltooltips.properties.get((target, p), None)]
if callable(spec):
                # Will either return a container
# widget/sizer and a list of widgets
# for setting the navigation order,
# or will return a list of specs
# (with an irrelevant second parameter)
container, widgets = spec(
target,
widgetList,
self,
self.overlayList,
self.displayCtx,
self.__threedee)
if isinstance(container, abc.Sequence):
specs = container
keys = [s.key for s in specs]
labels = [strings.properties.get((target, k), None)
for k in keys]
tooltips = [fsltooltips.properties.get((target, k), None)
for k in keys]
else:
allContainers.append(container)
allWidgets .extend(widgets)
specs = []
for s in specs:
widget = props.buildGUI(widgetList, target, s)
allWidgets .append(widget)
allContainers.append(widget)
allLabels .extend(labels)
allTooltips.extend(tooltips)
for widget, label, tooltip in zip(allContainers,
allLabels,
allTooltips):
if label is None:
label = ''
widgetList.AddWidget(
widget,
label,
tooltip=tooltip,
groupName=groupName)
return allWidgets
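# Hedged usage sketch (hypothetical wiring, following the docstrings above): a view
# panel would toggle this control via ViewPanel.togglePanel, passing defaultLayout().
#
#   ortho.togglePanel(OverlayDisplayPanel, **OverlayDisplayPanel.defaultLayout())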
|
[
"fsleyes_props.buildGUI",
"fsleyes.controls.controlpanel.SettingsPanel.destroy",
"fsleyes.controls.controlpanel.SettingsPanel.__init__",
"fsleyes.controls.controlpanel.SettingsPanel.setNavOrder",
"functools.reduce",
"fsleyes.tooltips.properties.get",
"fsleyes.strings.properties.get",
"collections.OrderedDict",
"logging.getLogger"
] |
[((666, 693), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (683, 693), False, 'import logging\n'), ((2825, 2927), 'fsleyes.controls.controlpanel.SettingsPanel.__init__', 'ctrlpanel.SettingsPanel.__init__', (['self', 'parent', 'overlayList', 'displayCtx', 'canvasPanel'], {'kbFocus': '(True)'}), '(self, parent, overlayList, displayCtx,\n canvasPanel, kbFocus=True)\n', (2857, 2927), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((4384, 4421), 'fsleyes.controls.controlpanel.SettingsPanel.destroy', 'ctrlpanel.SettingsPanel.destroy', (['self'], {}), '(self)\n', (4415, 4421), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((5177, 5202), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5200, 5202), False, 'import collections\n'), ((6608, 6656), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a + b)', 'allWidgets'], {}), '(lambda a, b: a + b, allWidgets)\n', (6624, 6656), False, 'import functools\n'), ((6666, 6719), 'fsleyes.controls.controlpanel.SettingsPanel.setNavOrder', 'ctrlpanel.SettingsPanel.setNavOrder', (['self', 'allWidgets'], {}), '(self, allWidgets)\n', (6701, 6719), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((9227, 9268), 'fsleyes.strings.properties.get', 'strings.properties.get', (['(target, p)', 'None'], {}), '((target, p), None)\n', (9249, 9268), True, 'import fsleyes.strings as strings\n'), ((9298, 9343), 'fsleyes.tooltips.properties.get', 'fsltooltips.properties.get', (['(target, p)', 'None'], {}), '((target, p), None)\n', (9324, 9343), True, 'import fsleyes.tooltips as fsltooltips\n'), ((10486, 10523), 'fsleyes_props.buildGUI', 'props.buildGUI', (['widgetList', 'target', 's'], {}), '(widgetList, target, s)\n', (10500, 10523), True, 'import fsleyes_props as props\n'), ((10062, 10103), 'fsleyes.strings.properties.get', 'strings.properties.get', (['(target, k)', 'None'], {}), '((target, k), None)\n', (10084, 10103), True, 'import fsleyes.strings as strings\n'), ((10183, 10228), 'fsleyes.tooltips.properties.get', 'fsltooltips.properties.get', (['(target, k)', 'None'], {}), '((target, k), None)\n', (10209, 10228), True, 'import fsleyes.tooltips as fsltooltips\n')]
|
# CASA Next Generation Infrastructure
# Copyright (C) 2021 AUI, Inc. Washington DC, USA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#################################
# Helper File
#
# Not exposed in API
#
#################################
import warnings, time, os, psutil, multiprocessing, logging, re
import numpy as np
# from casatools import table as tb
from casatools import ms
from casatools import image as ia
from casatools import quanta as qa
try:
import pandas as pd
import xarray, dask, dask.array, dask.delayed, dask.distributed
except:
print('#### ERROR - dask and/or xarray dependencies are missing ####')
try:
from casacore import tables
except:
print('#### ERROR - python-casacore not found, must be manually installed by user ####')
warnings.filterwarnings('ignore', category=FutureWarning)
# TODO: python-casacore dependency is needed here
# Problems with the table tool:
# - inflates data sizes by reading everything as 64-bit float / 128-bit complex,
# - segfaults when used in dask delayed objects with non-locking reads
# - row access not available, segfaults on column access for some test data
########################################################
# helper function to initialize the processing environment
def initialize_processing(cores=None, memory_limit=None):
# setup dask.distributed based multiprocessing environment
if cores is None: cores = multiprocessing.cpu_count()
if memory_limit is None: memory_limit = str(round(((psutil.virtual_memory().available / (1024 ** 2)) * 0.75) / cores)) + 'MB'
dask.config.set({"distributed.scheduler.allowed-failures": 10})
dask.config.set({"distributed.scheduler.work-stealing": False})
dask.config.set({"distributed.scheduler.unknown-task-duration": '99m'})
dask.config.set({"distributed.worker.memory.pause": False})
dask.config.set({"distributed.worker.memory.terminate": False})
dask.config.set({"distributed.worker.memory.recent-to-old-time": '999s'})
dask.config.set({"distributed.comm.timeouts.connect": '360s'})
dask.config.set({"distributed.comm.timeouts.tcp": '360s'})
dask.config.set({"distributed.nanny.environ.OMP_NUM_THREADS": 1})
dask.config.set({"distributed.nanny.environ.MKL_NUM_THREADS": 1})
cluster = dask.distributed.LocalCluster(n_workers=cores, threads_per_worker=1, processes=True, memory_limit=memory_limit, silence_logs=logging.ERROR)
client = dask.distributed.Client(cluster)
return client
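# Hedged usage sketch (comments only): obtaining and releasing a client with the
# helper above; dashboard_link is a standard dask.distributed.Client attribute.
#
#   client = initialize_processing(cores=4, memory_limit='2GB')
#   print(client.dashboard_link)
#   client.close()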
########################################################
# helper for reading time columns to datetime format
# pandas datetimes are referenced against a 0 of 1970-01-01
# CASA's modified julian day reference time is (of course) 1858-11-17
# this requires a correction of 3506716800 seconds which is hardcoded to save time
def convert_time(rawtimes):
correction = 3506716800.0
return pd.to_datetime(np.array(rawtimes) - correction, unit='s').values
# dt = pd.to_datetime(np.atleast_1d(rawtimes) - correction, unit='s').values
# if len(np.array(rawtimes).shape) == 0: dt = dt[0]
# return dt
def revert_time(datetimes):
return (datetimes.astype(float) / 10 ** 9) + 3506716800.0
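# Hedged round-trip check (comments only), using the 3506716800 s offset described above:
#
#   raw = 4914741782.5                      # seconds since the 1858-11-17 MJD epoch
#   ts = convert_time([raw])                # numpy datetime64[ns] values
#   assert abs(revert_time(ts)[0] - raw) < 1e-6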
#######################################################################################
# return a dictionary of table attributes created from keywords and column descriptions
def extract_table_attributes(infile):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
kwd = tb_tool.getkeywords()
attrs = dict([(kk, kwd[kk]) for kk in kwd if kk not in os.listdir(infile)])
cols = tb_tool.colnames()
column_descriptions = {}
for col in cols:
column_descriptions[col] = tb_tool.getcoldesc(col)
attrs['column_descriptions'] = column_descriptions
attrs['info'] = tb_tool.info()
tb_tool.close()
return attrs
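# Hedged usage sketch (hypothetical path): the returned dict carries the table keywords
# plus the 'column_descriptions' and 'info' entries built above.
#
#   attrs = extract_table_attributes('mydata.ms')
#   print(sorted(attrs['column_descriptions'].keys()))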
#################################################
# translate numpy dtypes to casacore type strings
def type_converter(npdtype):
cctype = 'bad'
if (npdtype == 'int64') or (npdtype == 'int32'):
cctype = 'int'
elif npdtype == 'bool':
cctype = 'bool'
elif npdtype == 'float32':
cctype = 'float'
elif (npdtype == 'float64') or (npdtype == 'datetime64[ns]'):
cctype = 'double'
elif npdtype == 'complex64':
cctype = 'complex'
elif npdtype == 'complex128':
cctype = 'dcomplex'
elif str(npdtype).startswith('<U'):
cctype = 'string'
return cctype
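# Hedged examples (comments only) of the dtype-to-casacore mapping implemented above:
#
#   type_converter(np.dtype('float32'))      # -> 'float'
#   type_converter(np.dtype('complex128'))   # -> 'dcomplex'
#   type_converter(np.dtype('<U16'))         # -> 'string'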
###############################################################################
# create and initialize new output table
def create_table(outfile, xds, max_rows, infile=None, cols=None, generic=False):
if os.path.isdir(outfile):
os.system('rm -fr %s' % outfile)
# create column descriptions for table description
if cols is None: cols = list(set(list(xds.data_vars) + list(xds.attrs['column_descriptions'].keys())) if 'column_descriptions' in xds.attrs else list(xds.data_vars))
tabledesc = {}
for col in cols:
if ('column_descriptions' in xds.attrs) and (col in xds.attrs['column_descriptions']):
coldesc = xds.attrs['column_descriptions'][col]
else:
coldesc = {'valueType': type_converter(xds[col].dtype)}
if generic or (col == 'UVW'): # will be statically shaped even if not originally
coldesc = {'shape': tuple(np.clip(xds[col].shape[1:], 1, None))}
elif xds[col].ndim > 1: # make variably shaped
coldesc = {'ndim': xds[col].ndim - 1}
coldesc['name'] = col
coldesc['desc'] = col
tabledesc[col] = coldesc
if generic:
tb_tool = tables.table(outfile, tabledesc=tabledesc, nrow=max_rows, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
else:
tb_tool = tables.default_ms(outfile, tabledesc)
tb_tool.addrows(max_rows)
if 'DATA_DESC_ID' in cols: tb_tool.putcol('DATA_DESC_ID', np.zeros((max_rows), dtype='int32') - 1, 0, max_rows)
# write xds attributes to table keywords, skipping certain reserved attributes
existing_keywords = tb_tool.getkeywords()
for attr in xds.attrs:
if attr in ['bad_cols', 'bad_types', 'column_descriptions', 'history', 'subtables', 'info'] + list(existing_keywords.keys()): continue
tb_tool.putkeyword(attr, xds.attrs[attr])
if 'info' in xds.attrs: tb_tool.putinfo(xds.attrs['info'])
# copy subtables and add to main table
if infile:
subtables = [ss.path for ss in os.scandir(infile) if ss.is_dir() and ('SORTED_TABLE' not in ss.path)]
os.system('cp -r %s %s' % (' '.join(subtables), outfile))
for subtable in subtables:
sub_tbl = tables.table(os.path.join(outfile, subtable[subtable.rindex('/') + 1:]), readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tb_tool.putkeyword(subtable[subtable.rindex('/') + 1:], sub_tbl, makesubrecord=True)
sub_tbl.close()
tb_tool.close()
##################################################################################################
##
## MeasurementSets
##
##################################################################################################
##################################################################
# takes a list of visibility xarray datasets and packages them as a dataset of datasets
# xds_list is a list of tuples (name, xds)
def vis_xds_packager(xds_list):
mxds = xarray.Dataset(attrs=dict(xds_list))
coords = {}
if 'ANTENNA' in mxds.attrs:
coords['antenna_ids'] = mxds.ANTENNA.row.values
coords['antennas'] = xarray.DataArray(mxds.ANTENNA.NAME.values, dims=['antenna_ids'])
if 'FIELD' in mxds.attrs:
coords['field_ids'] = mxds.FIELD.row.values
coords['fields'] = xarray.DataArray(mxds.FIELD.NAME.values, dims=['field_ids'])
if 'FEED' in mxds.attrs:
coords['feed_ids'] = mxds.FEED.FEED_ID.values
if 'OBSERVATION' in mxds.attrs:
coords['observation_ids'] = mxds.OBSERVATION.row.values
coords['observations'] = xarray.DataArray(mxds.OBSERVATION.PROJECT.values, dims=['observation_ids'])
if 'POLARIZATION' in mxds.attrs:
coords['polarization_ids'] = mxds.POLARIZATION.row.values
if 'SOURCE' in mxds.attrs:
coords['source_ids'] = mxds.SOURCE.SOURCE_ID.values
coords['sources'] = xarray.DataArray(mxds.SOURCE.NAME.values, dims=['source_ids'])
if 'SPECTRAL_WINDOW' in mxds.attrs:
coords['spw_ids'] = mxds.SPECTRAL_WINDOW.row.values
if 'STATE' in mxds.attrs:
coords['state_ids'] = mxds.STATE.row.values
mxds = mxds.assign_coords(coords)
return mxds
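# Hedged example (hypothetical variable names): packaging per-DDI visibility datasets
# together with their subtables; the subtable names drive the coordinates added above.
#
#   mxds = vis_xds_packager([('xds0', vis_xds), ('ANTENNA', ant_xds), ('FIELD', field_xds)])
#   mxds.attrs['xds0']     # main visibility data
#   mxds.antennas          # built from ANTENNA.NAME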
########################################################################################
# translates MS selection parameters into corresponding row indices and channel indices
def ms_selection(infile, outfile=None, verbose=False, spw=None, field=None, times=None, baseline=None, scan=None, scanintent=None, array=None, uvdist=None, observation=None, polarization=None):
"""
"""
infile = os.path.expanduser(infile)
mstool = ms()
mstool.open(infile)
# build the selection structure
selection = {}
if (spw is not None) and (len(spw) > 0): selection['spw'] = spw
if (field is not None) and (len(field) > 0): selection['field'] = field
if (scan is not None) and (len(scan) > 0): selection['scan'] = scan
if (baseline is not None) and (len(baseline) > 0): selection['baseline'] = baseline
if (times is not None) and (len(times) > 0): selection['time'] = times
if (scanintent is not None) and (len(scanintent) > 0): selection['scanintent'] = scanintent
if (uvdist is not None) and (len(uvdist) > 0): selection['uvdist'] = uvdist
if (polarization is not None) and (len(polarization) > 0): selection['polarization'] = polarization
if (array is not None) and (len(array) > 0): selection['array'] = array
if (observation is not None) and (len(observation) > 0): selection['observation'] = observation
# build structure of indices per DDI, intersected with selection criteria
ddis, total_rows = [], None
chanmap = {} # dict of ddis to channels
if len(selection) > 0:
if verbose: print('selecting data...')
mstool.msselect(selection)
total_rows = mstool.range('rows')['rows']
selectedindices = mstool.msselectedindices()
ddis, chanranges = selectedindices['dd'], selectedindices['channel']
for ci, cr in enumerate(chanranges):
if ddis[ci] not in chanmap: chanmap[ddis[ci]] = []
chanmap[ddis[ci]] = np.concatenate((chanmap[ddis[ci]], list(range(cr[1], cr[2] + 1, cr[3]))), axis=0).astype(int)
# copy the selected table to the outfile destination if given
if outfile is not None:
outfile = os.path.expanduser(outfile)
if verbose: print('copying selection to output...')
if len(selection) > 0:
mstool.split(outfile, whichcol='all')
else:
os.system('rm -fr %s' % outfile)
os.system('cp -r %s %s' % (infile, outfile))
mstool.reset()
if len(ddis) == 0: # selection didn't reduce ddi count, so get them all
ddis = list(mstool.range('data_desc_id')['data_desc_id'])
# figure out which selected rows are in which ddis
if verbose: print('intersecting DDI row ids...')
rowmap = {} # dict of ddis to (rows, channels)
for ddi in ddis:
mstool.selectinit(datadescid=ddi)
ddirowidxs = mstool.range('rows')['rows']
if total_rows is None:
rowmap[ddi] = (ddirowidxs, chanmap[ddi] if ddi in chanmap else None)
else:
rowmap[ddi] = (np.intersect1d(ddirowidxs, total_rows, assume_unique=True), chanmap[ddi] if ddi in chanmap else None)
mstool.reset()
mstool.close()
if verbose: print('selection complete')
return rowmap
##################################################################
## expand row dimension of xds to (time, baseline)
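# illustrative sketch (xds below is one partition from read_ms, e.g. mxds.attrs['xds0']):
#   txds = expand_xds(xds)     # row -> (time, baseline)
#   xds2 = flatten_xds(txds)   # (time, baseline) -> row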
def expand_xds(xds):
txds = xds.copy()
unique_baselines, baselines = np.unique([txds.ANTENNA1.values, txds.ANTENNA2.values], axis=1, return_inverse=True)
txds['baseline'] = xarray.DataArray(baselines.astype('int32'), dims=['row'])
txds['time'] = txds['TIME'].copy()
try:
txds = txds.set_index(row=['time', 'baseline']).unstack('row').transpose('time', 'baseline', ...)
# unstack makes everything a float, so we need to reset to the proper type
for dv in txds.data_vars:
txds[dv] = txds[dv].astype(xds[dv].dtype)
except:
print("WARNING: Cannot expand rows to (time, baseline), possibly duplicate values in (time, baseline)")
txds = xds.copy()
return txds
##################################################################
## flatten (time, baseline) dimensions of xds back to single row
def flatten_xds(xds):
nan_int = np.array([np.nan]).astype('int32')[0]
txds = xds.copy()
# flatten the time x baseline dimensions of main table
if ('time' in xds.dims) and ('baseline' in xds.dims):
txds = xds.stack({'row': ('time', 'baseline')}).transpose('row', ...)
txds = txds.where((txds.STATE_ID != nan_int) & (txds.FIELD_ID != nan_int), drop=True) #.unify_chunks()
for dv in list(xds.data_vars):
txds[dv] = txds[dv].astype(xds[dv].dtype)
return txds
##################################################################
# read casacore table format into memory
##################################################################
def read_generic_table(infile, subtables=False, timecols=None, ignore=None):
"""
read generic casacore table format to xarray dataset loaded in memory
Parameters
----------
infile : str
        Input table filename. To read a subtable, append the subtable folder name to the main table path (i.e. infile = '/path/mytable.tbl/mysubtable')
subtables : bool
Whether or not to include subtables underneath the specified table. If true, an attribute called subtables will be added to the returned xds.
Default False
timecols : list
list of column names to convert to numpy datetime format. Default None leaves times as their original casacore format.
ignore : list
list of column names to ignore and not try to read. Default None reads all columns
Returns
-------
xarray.core.dataset.Dataset
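    Examples
    --------
    Minimal usage sketch; the table path is hypothetical:
    >>> ant_xds = read_generic_table('/path/mytable.ms/ANTENNA')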
"""
if timecols is None: timecols = []
if ignore is None: ignore = []
infile = os.path.expanduser(infile)
assert os.path.isdir(infile), "invalid input filename to read_generic_table"
attrs = extract_table_attributes(infile)
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
if tb_tool.nrows() == 0:
tb_tool.close()
return xarray.Dataset(attrs=attrs)
dims = ['row'] + ['d%i' % ii for ii in range(1, 20)]
cols = tb_tool.colnames()
ctype = dict([(col, tb_tool.getcell(col, 0)) for col in cols if (col not in ignore) and (tb_tool.iscelldefined(col, 0))])
mvars, mcoords, xds = {}, {}, xarray.Dataset()
tr = tb_tool.row(ignore, exclude=True)[:]
# extract data for each col
for col in ctype.keys():
if tb_tool.coldatatype(col) == 'record': continue # not supported
try:
data = np.stack([rr[col] for rr in tr]) # .astype(ctype[col].dtype)
if isinstance(tr[0][col], dict):
data = np.stack([rr[col]['array'].reshape(rr[col]['shape']) if len(rr[col]['array']) > 0 else np.array(['']) for rr in tr])
except:
# sometimes the columns are variable, so we need to standardize to the largest sizes
if len(np.unique([isinstance(rr[col], dict) for rr in tr])) > 1: continue # can't deal with this case
mshape = np.array(max([np.array(rr[col]).shape for rr in tr]))
try:
data = np.stack([np.pad(rr[col] if len(rr[col]) > 0 else np.array(rr[col]).reshape(np.arange(len(mshape)) * 0),
[(0, ss) for ss in mshape - np.array(rr[col]).shape], 'constant', constant_values=np.array([np.nan]).astype(np.array(ctype[col]).dtype)[0]) for rr in tr])
except:
data = []
if len(data) == 0: continue
        if col in timecols: data = convert_time(data)
if col.endswith('_ID'):
mcoords[col] = xarray.DataArray(data, dims=['d%i_%i' % (di, ds) for di, ds in enumerate(np.array(data).shape)])
else:
mvars[col] = xarray.DataArray(data, dims=['d%i_%i' % (di, ds) for di, ds in enumerate(np.array(data).shape)])
xds = xarray.Dataset(mvars, coords=mcoords)
xds = xds.rename(dict([(dv, dims[di]) for di, dv in enumerate(xds.dims)]))
attrs['bad_cols'] = list(np.setdiff1d([dv for dv in tb_tool.colnames()], [dv for dv in list(xds.data_vars) + list(xds.coords)]))
# if this table has subtables, use a recursive call to store them in subtables attribute
if subtables:
stbl_list = sorted([tt for tt in os.listdir(infile) if os.path.isdir(os.path.join(infile, tt))])
attrs['subtables'] = []
for ii, subtable in enumerate(stbl_list):
sxds = read_generic_table(os.path.join(infile, subtable), subtables=subtables, timecols=timecols, ignore=ignore)
if len(sxds.dims) != 0: attrs['subtables'] += [(subtable, sxds)]
xds = xds.assign_attrs(attrs)
tb_tool.close()
return xds
##################################################################
# Summarize the contents of an MS directory in casacore table format
def describe_ms(infile):
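    """
    Summarize the main table of an MS by DATA_DESC_ID
    Returns a pandas DataFrame indexed by ddi with row, time, baseline, channel and polarization
    counts plus a rough size estimate in MB for each partition.
    """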
infile = os.path.expanduser(infile) # does nothing if $HOME is unknown
assert os.path.isdir(infile), "invalid input filename to describe_ms"
# figure out characteristics of main table from select subtables (must all be present)
spw_xds = read_generic_table(os.path.join(infile, 'SPECTRAL_WINDOW'))
pol_xds = read_generic_table(os.path.join(infile, 'POLARIZATION'))
ddi_xds = read_generic_table(os.path.join(infile, 'DATA_DESCRIPTION'))
ddis = list(ddi_xds.row.values)
summary = pd.DataFrame([])
spw_ids = ddi_xds.SPECTRAL_WINDOW_ID.values
pol_ids = ddi_xds.POLARIZATION_ID.values
chans = spw_xds.NUM_CHAN.values
pols = pol_xds.NUM_CORR.values
for ddi in ddis:
print('processing ddi %i of %i' % (ddi + 1, len(ddis)), end='\r')
sorted_table = tables.taql('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))
sdf = {'ddi': ddi, 'spw_id': spw_ids[ddi], 'pol_id': pol_ids[ddi], 'rows': sorted_table.nrows(),
'times': len(np.unique(sorted_table.getcol('TIME'))),
'baselines': len(np.unique(np.hstack([sorted_table.getcol(rr)[:, None] for rr in ['ANTENNA1', 'ANTENNA2']]), axis=0)),
'chans': chans[spw_ids[ddi]],
'pols': pols[pol_ids[ddi]]}
sdf['size_MB'] = np.ceil((sdf['times'] * sdf['baselines'] * sdf['chans'] * sdf['pols'] * 9) / 1024 ** 2).astype(int)
summary = pd.concat([summary, pd.DataFrame(sdf, index=[str(ddi)])], axis=0, sort=False)
sorted_table.close()
print(' ' * 50, end='\r')
return summary.set_index('ddi').sort_index()
#######################################################
# helper function to extract a data chunk for each column
# this is fed to dask.delayed
def read_flat_col_chunk(infile, col, cshape, ridxs, cstart, pstart):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
rgrps = [(rr[0], rr[-1]) for rr in np.split(ridxs, np.where(np.diff(ridxs) > 1)[0] + 1)]
try:
if (len(cshape) == 1) or (col == 'UVW'): # all the scalars and UVW
data = np.concatenate([tb_tool.getcol(col, rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
elif len(cshape) == 2: # WEIGHT, SIGMA
data = np.concatenate([tb_tool.getcolslice(col, pstart, pstart + cshape[1] - 1, [], rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
elif len(cshape) == 3: # DATA and FLAG
data = np.concatenate([tb_tool.getcolslice(col, (cstart, pstart), (cstart + cshape[1] - 1, pstart + cshape[2] - 1), [], rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
except:
print('ERROR reading chunk: ', col, cshape, cstart, pstart)
tb_tool.close()
return data
##############################################################
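# helper used by read_ms: reads a single DATA_DESC_ID of the main table into a lazily
# evaluated (dask-backed) xarray Dataset; chunks is interpreted as (row, channel, polarization)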
def read_flat_main_table(infile, ddi, rowidxs=None, chunks=(22000, 512, 2)):
# get row indices relative to full main table
if rowidxs is None:
tb_tool = tables.taql('select rowid() as ROWS from %s where DATA_DESC_ID = %i' % (infile, ddi))
rowidxs = tb_tool.getcol('ROWS')
tb_tool.close()
nrows = len(rowidxs)
if nrows == 0:
return xarray.Dataset()
tb_tool = tables.taql('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))
cols = tb_tool.colnames()
ignore = [col for col in cols if (not tb_tool.iscelldefined(col, 0)) or (tb_tool.coldatatype(col) == 'record')]
cdata = dict([(col, tb_tool.getcol(col, 0, 1)) for col in cols if col not in ignore])
chan_cnt, pol_cnt = [(cdata[cc].shape[1], cdata[cc].shape[2]) for cc in cdata if len(cdata[cc].shape) == 3][0]
mvars, mcoords, bvars, xds = {}, {}, {}, xarray.Dataset()
tb_tool.close()
# loop over row chunks
for rc in range(0, nrows, chunks[0]):
crlen = min(chunks[0], nrows - rc) # chunk row length
rcidxs = rowidxs[rc:rc + chunks[0]]
# loop over each column and create delayed dask arrays
for col in cdata.keys():
if col not in bvars: bvars[col] = []
if len(cdata[col].shape) == 1:
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen,), rcidxs, None, None)
bvars[col] += [dask.array.from_delayed(delayed_array, (crlen,), cdata[col].dtype)]
elif col == 'UVW':
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, 3), rcidxs, None, None)
bvars[col] += [dask.array.from_delayed(delayed_array, (crlen, 3), cdata[col].dtype)]
elif len(cdata[col].shape) == 2:
pol_list = []
dd = 1 if cdata[col].shape[1] == chan_cnt else 2
for pc in range(0, cdata[col].shape[1], chunks[dd]):
plen = min(chunks[dd], cdata[col].shape[1] - pc)
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, plen), rcidxs, None, pc)
pol_list += [dask.array.from_delayed(delayed_array, (crlen, plen), cdata[col].dtype)]
bvars[col] += [dask.array.concatenate(pol_list, axis=1)]
elif len(cdata[col].shape) == 3:
chan_list = []
for cc in range(0, chan_cnt, chunks[1]):
clen = min(chunks[1], chan_cnt - cc)
pol_list = []
for pc in range(0, cdata[col].shape[2], chunks[2]):
plen = min(chunks[2], cdata[col].shape[2] - pc)
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, clen, plen), rcidxs, cc, pc)
pol_list += [dask.array.from_delayed(delayed_array, (crlen, clen, plen), cdata[col].dtype)]
chan_list += [dask.array.concatenate(pol_list, axis=2)]
bvars[col] += [dask.array.concatenate(chan_list, axis=1)]
# now concat all the dask chunks from each time to make the xds
mvars = {}
for kk in bvars.keys():
if kk == 'UVW':
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'uvw_index'])
elif len(bvars[kk][0].shape) == 2 and (bvars[kk][0].shape[-1] == pol_cnt):
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'pol'])
elif len(bvars[kk][0].shape) == 2 and (bvars[kk][0].shape[-1] == chan_cnt):
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'chan'])
else:
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'chan', 'pol'][:len(bvars[kk][0].shape)])
mvars['TIME'] = xarray.DataArray(convert_time(mvars['TIME'].values), dims=['row']).chunk({'row': chunks[0]})
attrs = extract_table_attributes(infile)
attrs['bad_cols'] = ignore
xds = xarray.Dataset(mvars, coords=mcoords).assign_attrs(attrs)
return xds
#####################################################################
def read_ms(infile, rowmap=None, subtables=False, expand=False, chunks=(22000, 512, 2)):
"""
Read legacy format MS to xarray Visibility Dataset
The MS is partitioned by DDI, which guarantees a fixed data shape per partition. This results in separate xarray
dataset (xds) partitions contained within a main xds (mxds).
Parameters
----------
infile : str
Input MS filename
rowmap : dict
Dictionary of DDI to tuple of (row indices, channel indices). Returned by ms_selection function. Default None ignores selections
subtables : bool
Also read and include subtables along with main table selection. Default False will omit subtables (faster)
expand : bool
Whether or not to return the original flat row structure of the MS (False) or expand the rows to time x baseline dimensions (True).
Expanding the rows allows for easier indexing and parallelization across time and baseline dimensions, at the cost of some conversion
time. Default False
    chunks: 3-D tuple of ints
        Shape of desired chunking in the form of (row, channel, polarization). Larger values reduce the number of chunks and
        speed up the reads at the cost of more memory. Chunk size is the product of the three numbers. Default is (22000, 512, 2)
Returns
-------
xarray.core.dataset.Dataset
Main xarray dataset of datasets for this visibility set
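    Examples
    --------
    Minimal usage sketch; the filename and selection are hypothetical:
    >>> mxds = read_ms('uid_target.ms', expand=True)
    >>> rowmap = ms_selection('uid_target.ms', field='2')
    >>> sel_mxds = read_ms('uid_target.ms', rowmap=rowmap)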
"""
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# parse filename to use
infile = os.path.expanduser(infile)
assert os.path.isdir(infile), "invalid input filename to read_ms"
# we need the spectral window, polarization, and data description tables for processing the main table
spw_xds = read_generic_table(os.path.join(infile, 'SPECTRAL_WINDOW'))
pol_xds = read_generic_table(os.path.join(infile, 'POLARIZATION'))
ddi_xds = read_generic_table(os.path.join(infile, 'DATA_DESCRIPTION'))
# each DATA_DESC_ID (ddi) is a fixed shape that may differ from others
    # form a list of ddis to process, each will be placed in its own xarray dataset and partition
ddis = np.arange(ddi_xds.row.shape[0]) if rowmap is None else list(rowmap.keys())
xds_list = []
####################################################################
# process each selected DDI from the input MS, assume a fixed shape within the ddi (should always be true)
for ddi in ddis:
rowidxs = None if rowmap is None else rowmap[ddi][0]
chanidxs = None if rowmap is None else rowmap[ddi][1]
if ((rowidxs is not None) and (len(rowidxs) == 0)) or ((chanidxs is not None) and (len(chanidxs) == 0)): continue
xds = read_flat_main_table(infile, ddi, rowidxs=rowidxs, chunks=chunks)
if len(xds.dims) == 0: continue
# grab the channel frequency values from the spw table data and pol idxs from the polarization table, add spw and pol ids
chan = spw_xds.CHAN_FREQ.values[ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], :xds.chan.shape[0]]
pol = pol_xds.CORR_TYPE.values[ddi_xds.POLARIZATION_ID.values[ddi], :xds.pol.shape[0]]
coords = {'chan': chan, 'pol': pol, 'spw_id': [ddi_xds['SPECTRAL_WINDOW_ID'].values[ddi]], 'pol_id': [ddi_xds['POLARIZATION_ID'].values[ddi]]}
xds = xds.assign_coords(coords) # .assign_attrs(attrs)
# filter by channel selection
if (chanidxs is not None) and (len(chanidxs) < len(xds.chan)):
xds = xds.isel(chan=chanidxs)
spw_xds['CHAN_FREQ'][ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], :len(chanidxs)] = spw_xds.CHAN_FREQ[ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], chanidxs]
# expand the row dimension out to (time, baseline)
if expand:
xds = expand_xds(xds)
xds_list += [('xds' + str(ddi), xds)]
# read other subtables
xds_list += [('SPECTRAL_WINDOW', spw_xds), ('POLARIZATION', pol_xds), ('DATA_DESCRIPTION', ddi_xds)]
if subtables:
skip_tables = ['SORTED_TABLE', 'SPECTRAL_WINDOW', 'POLARIZATION', 'DATA_DESCRIPTION']
stbl_list = sorted([tt for tt in os.listdir(infile) if os.path.isdir(os.path.join(infile, tt)) and tt not in skip_tables])
for ii, subtable in enumerate(stbl_list):
sxds = read_generic_table(os.path.join(infile, subtable), subtables=True, timecols=['TIME'], ignore=[])
if len(sxds.dims) != 0: xds_list += [(subtable, sxds)]
# build the master xds to return
mxds = vis_xds_packager(xds_list)
return mxds
############################################################################################
## write functions
############################################################################################
###################################
def write_generic_table(xds, outfile, subtable='', cols=None, verbose=False):
"""
Write generic xds contents back to casacore table format on disk
Parameters
----------
xds : xarray.Dataset
Source xarray dataset data
outfile : str
Destination filename (or parent main table if writing subtable)
subtable : str
Name of the subtable being written, triggers special logic to add subtable to parent table. Default '' for normal generic writes
cols : str or list
        List of cols to write. Default None writes all columns
    verbose : bool
        Whether or not to print progress messages. Default False
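    Examples
    --------
    Normally called via write_ms; a direct-call sketch with hypothetical paths (the main table
    at the destination must already exist when writing a subtable):
    >>> write_generic_table(mxds.ANTENNA, 'output.ms', subtable='ANTENNA')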
"""
outfile = os.path.expanduser(outfile)
if verbose: print('writing %s...' % os.path.join(outfile, subtable))
if cols is None: cols = list(set(list(xds.data_vars) + [cc for cc in xds.coords if cc not in xds.dims] + (list(xds.attrs['column_descriptions'].keys() if 'column_descriptions' in xds.attrs else []))))
cols = list(np.atleast_1d(cols))
max_rows = xds.row.shape[0] if 'row' in xds.dims else 0
create_table(os.path.join(outfile, subtable), xds, max_rows, infile=None, cols=cols, generic=True)
tb_tool = tables.table(os.path.join(outfile, subtable), readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
try:
for dv in cols:
if (dv not in xds) or (np.prod(xds[dv].shape) == 0): continue
values = xds[dv].values if xds[dv].dtype != 'datetime64[ns]' else revert_time(xds[dv].values)
tb_tool.putcol(dv, values, 0, values.shape[0], 1)
except:
print("ERROR: exception in write generic table - %s, %s, %s, %s" % (os.path.join(outfile,subtable), dv, str(values.shape), tb_tool.nrows()))
# now we have to add this subtable to the main table keywords (assuming a main table already exists)
if len(subtable) > 0:
main_tbl = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
main_tbl.putkeyword(subtable, tb_tool, makesubrecord=True)
main_tbl.done()
tb_tool.close()
# if this table has its own subtables, they need to be written out recursively
if 'subtables' in xds.attrs:
for st in list(xds.attrs['subtables']):
write_generic_table(st[1], os.path.join(outfile, subtable, st[0]), subtable='', verbose=verbose)
###################################
def write_main_table_slice(xda, outfile, ddi, col, full_shape, starts):
"""
Write an xds row chunk to the corresponding main table slice
"""
# trigger the DAG for this chunk and return values while the table is unlocked
values = xda.compute().values
if xda.dtype == 'datetime64[ns]':
values = revert_time(values)
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tbs = tables.taql('select * from $tb_tool where DATA_DESC_ID = %i' % ddi)
if tbs.nrows() == 0: # this DDI has not been started yet
tbs = tables.taql('select * from $tb_tool where DATA_DESC_ID = -1')
#try:
if (values.ndim == 1) or (col == 'UVW'): # scalar columns
tbs.putcol(col, values, starts[0], len(values))
else:
if not tbs.iscelldefined(col, starts[0]): tbs.putcell(col, starts[0]+np.arange(len(values)), np.zeros((full_shape)))
tbs.putcolslice(col, values, starts[1:values.ndim], tuple(np.array(starts[1:values.ndim]) + np.array(values.shape[1:])-1), [], starts[0], len(values), 1)
#except:
# print("ERROR: write exception - %s, %s, %s" % (col, str(values.shape), str(starts)))
tbs.close()
tb_tool.close()
###################################
def write_ms(mxds, outfile, infile=None, subtables=False, modcols=None, verbose=False, execute=True):
"""
Write ms format xds contents back to casacore table format on disk
Parameters
----------
mxds : xarray.Dataset
Source multi-xarray dataset (originally created by read_ms)
outfile : str
Destination filename
infile : str
Source filename to copy subtables from. Generally faster than reading/writing through mxds via the subtables parameter. Default None
does not copy subtables to output.
subtables : bool
Also write subtables from mxds. Default of False only writes mxds attributes that begin with xdsN to the MS main table.
Setting to True will write all other mxds attributes to subtables of the main table. This is probably going to be SLOW!
Use infile instead whenever possible.
modcols : list
List of strings indicating what column(s) were modified (aka xds data_vars). Different logic can be applied to speed up processing when
a data_var has not been modified from the input. Default None assumes everything has been modified (SLOW)
verbose : bool
Whether or not to print output progress. Since writes will typically execute the DAG, if something is
going to go wrong, it will be here. Default False
execute : bool
Whether or not to actually execute the DAG, or just return it with write steps appended. Default True will execute it
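    Examples
    --------
    Minimal round-trip sketch; filenames and the modified column are hypothetical:
    >>> mxds = read_ms('uid_target.ms')
    >>> write_ms(mxds, 'modified.ms', infile='uid_target.ms', modcols=['DATA'])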
"""
outfile = os.path.expanduser(outfile)
if verbose: print('initializing output...')
start = time.time()
xds_list = [flatten_xds(mxds.attrs[kk]) for kk in mxds.attrs if kk.startswith('xds')]
cols = list(set([dv for dx in xds_list for dv in dx.data_vars]))
if modcols is None: modcols = cols
modcols = list(np.atleast_1d(modcols))
# create an empty main table with enough space for all desired xds partitions
# the first selected xds partition will be passed to create_table to provide a definition of columns and table keywords
# we first need to add in additional keywords for the selected subtables that will be written as well
max_rows = np.sum([dx.row.shape[0] for dx in xds_list])
create_table(outfile, xds_list[0], max_rows=max_rows, infile=infile, cols=cols, generic=False)
# start a list of dask delayed writes to disk (to be executed later)
# the SPECTRAL_WINDOW table is assumed to always be present and will always be written since it is needed for channel frequencies
delayed_writes = [dask.delayed(write_generic_table)(mxds.SPECTRAL_WINDOW, outfile, 'SPECTRAL_WINDOW', cols=None)]
if subtables: # also write the rest of the subtables
for subtable in list(mxds.attrs.keys()):
if subtable.startswith('xds') or (subtable == 'SPECTRAL_WINDOW'): continue
if verbose: print('writing subtable %s...' % subtable)
delayed_writes += [dask.delayed(write_generic_table)(mxds.attrs[subtable], outfile, subtable, cols=None, verbose=verbose)]
for xds in xds_list:
txds = xds.copy().unify_chunks()
ddi = txds.DATA_DESC_ID[:1].values[0]
# serial write entire DDI column first so subsequent delayed writes can find their spot
if verbose: print('setting up DDI %i...' % ddi)
write_main_table_slice(txds['DATA_DESC_ID'], outfile, ddi=-1, col='DATA_DESC_ID', full_shape=None, starts=(0,))
# write each chunk of each modified data_var, triggering the DAG along the way
for col in modcols:
chunks = txds[col].chunks
dims = txds[col].dims
for d0 in range(len(chunks[0])):
d0start = ([0] + list(np.cumsum(chunks[0][:-1])))[d0]
for d1 in range(len(chunks[1]) if len(chunks) > 1 else 1):
d1start = ([0] + list(np.cumsum(chunks[1][:-1])))[d1] if len(chunks) > 1 else 0
for d2 in range(len(chunks[2]) if len(chunks) > 2 else 1):
d2start = ([0] + list(np.cumsum(chunks[2][:-1])))[d2] if len(chunks) > 2 else 0
starts = [d0start, d1start, d2start]
lengths = [chunks[0][d0], (chunks[1][d1] if len(chunks) > 1 else 0), (chunks[2][d2] if len(chunks) > 2 else 0)]
slices = [slice(starts[0], starts[0]+lengths[0]), slice(starts[1], starts[1]+lengths[1]), slice(starts[2], starts[2]+lengths[2])]
txda = txds[col].isel(dict(zip(dims, slices)), missing_dims='ignore')
delayed_writes += [dask.delayed(write_main_table_slice)(txda, outfile, ddi=ddi, col=col, full_shape=txds[col].shape[1:], starts=starts)]
# now write remaining data_vars from the xds that weren't modified
# this can be done faster by collapsing the chunking to maximum size (minimum #) possible
max_chunk_size = np.prod([txds.chunks[kk][0] for kk in txds.chunks if kk in ['row', 'chan', 'pol']])
for col in list(np.setdiff1d(cols, modcols)):
col_chunk_size = np.prod([kk[0] for kk in txds[col].chunks])
col_rows = int(np.ceil(max_chunk_size / col_chunk_size)) * txds[col].chunks[0][0]
for rr in range(0, txds[col].row.shape[0], col_rows):
txda = txds[col].isel(row=slice(rr, rr + col_rows))
delayed_writes += [dask.delayed(write_main_table_slice)(txda, outfile, ddi=ddi, col=col, full_shape=txda.shape[1:], starts=(rr,)+(0,)*(len(txda.shape)-1))]
if execute:
if verbose: print('triggering DAG...')
zs = dask.compute(delayed_writes)
if verbose: print('execution time %0.2f sec' % (time.time() - start))
else:
if verbose: print('returning delayed task list')
return delayed_writes
###########################################################################################################
def visplot(xda, axis=None, overplot=False, drawplot=True, tsize=250):
"""
Plot a preview of Visibility xarray DataArray contents
Parameters
----------
xda : xarray.core.dataarray.DataArray
input DataArray to plot
axis : str or list or xarray.core.dataarray.DataArray
Coordinate(s) within the xarray DataArray, or a second xarray DataArray to plot against. Default None uses range.
All other coordinates will be maxed across dims
overplot : bool
Overlay new plot on to existing window. Default of False makes a new window for each plot
drawplot : bool
Display plot window. Should pretty much always be True unless you want to overlay things
in a Jupyter notebook.
tsize : int
target size of the preview plot (might be smaller). Default is 250 points per axis
Returns
-------
Open matplotlib window
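    Examples
    --------
    Illustrative sketch; assumes a partition named 'xds0' with a DATA column in an mxds from read_ms:
    >>> visplot(mxds.attrs['xds0'].DATA, axis='chan')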
"""
import matplotlib.pyplot as plt
import xarray
import numpy as np
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning) # suppress warnings about nan-slices
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
if overplot:
axes = None
else:
fig, axes = plt.subplots(1, 1)
# fast decimate to roughly the desired size
thinf = np.ceil(np.array(xda.shape) / tsize)
txda = xda.thin(dict([(xda.dims[ii], int(thinf[ii])) for ii in range(len(thinf))]))
# can't plot complex numbers, bools (sometimes), or strings
if (txda.dtype == 'complex128') or (txda.dtype == 'complex64'):
txda = (txda.real ** 2 + txda.imag ** 2) ** 0.5
elif txda.dtype == 'bool':
txda = txda.astype(int)
elif txda.dtype.type is np.int32:
        txda = txda.where(txda > np.array([np.nan]).astype(np.int32)[0])
elif txda.dtype.type is np.str_:
        txda = xarray.DataArray(np.unique(txda, return_inverse=True)[1].reshape(txda.shape), dims=txda.dims, coords=txda.coords, name=txda.name)
######################
# decisions based on supplied axis to plot against
# no axis - plot against range of data
# collapse all but first dimension
if axis is None:
collapse = [ii for ii in range(1, txda.ndim)]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda[txda.dims[0]] = np.arange(txda.shape[0])
txda.plot.line(ax=axes, marker='.', linewidth=0.0)
# another xarray DataArray as axis
elif type(axis) == xarray.core.dataarray.DataArray:
txda2 = axis.thin(dict([(xda.dims[ii], int(thinf[ii])) for ii in range(len(thinf))]))
        if txda2.dtype.type is np.int32: txda2 = txda2.where(txda2 > np.array([np.nan]).astype(np.int32)[0])
        xarray.Dataset({txda.name: txda, txda2.name: txda2}).plot.scatter(x=txda.name, y=txda2.name)
# single axis
elif len(np.atleast_1d(axis)) == 1:
axis = np.atleast_1d(axis)[0]
# coord ndim is 1
if txda[axis].ndim == 1:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in txda[axis].dims]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.line(ax=axes, x=axis, marker='.', linewidth=0.0)
# coord ndim is 2
elif txda[axis].ndim == 2:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in txda[axis].dims]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.pcolormesh(ax=axes, x=axis, y=txda.dims[0])
# two axes
elif len(axis) == 2:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in (txda[axis[0]].dims + txda[axis[1]].dims)]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.pcolormesh(ax=axes, x=axis[0], y=axis[1])
plt.title(txda.name)
if drawplot:
plt.show()
##################################################################################################
##
## Images
##
##################################################################################################
############################################
def read_image_chunk(infile, shapes, starts):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
data = tb_tool.getcellslice(tb_tool.colnames()[0], 0, starts, tuple(np.array(starts) + np.array(shapes) - 1))
tb_tool.close()
return data
############################################
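# helper used by read_image: wraps casacore getcellslice reads in dask.delayed calls and
# assembles the pieces into a single dask-backed DataArray in the given dimension order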
def read_image_array(infile, dimorder, chunks):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
cshape = eval(tb_tool.getcolshapestring(tb_tool.colnames()[0])[0])
cdata = tb_tool.getcellslice(tb_tool.colnames()[0], 0, tuple(np.repeat(0, len(cshape))), tuple(np.repeat(0, len(cshape))))
tb_tool.close()
# expand the actual data shape to the full 5 possible dims
full_shape = cshape + [1 for rr in range(5) if rr >= len(cshape)]
full_chunks = chunks[::-1] + [1 for rr in range(5) if rr >= len(chunks)]
d0slices = []
for d0 in range(0, full_shape[0], full_chunks[0]):
d0len = min(full_chunks[0], full_shape[0] - d0)
d1slices = []
for d1 in range(0, full_shape[1], full_chunks[1]):
d1len = min(full_chunks[1], full_shape[1] - d1)
d2slices = []
for d2 in range(0, full_shape[2], full_chunks[2]):
d2len = min(full_chunks[2], full_shape[2] - d2)
d3slices = []
for d3 in range(0, full_shape[3], full_chunks[3]):
d3len = min(full_chunks[3], full_shape[3] - d3)
d4slices = []
for d4 in range(0, full_shape[4], full_chunks[4]):
d4len = min(full_chunks[4], full_shape[4] - d4)
shapes = tuple([d0len, d1len, d2len, d3len, d4len][:len(cshape)])
starts = tuple([d0, d1, d2, d3, d4][:len(cshape)])
delayed_array = dask.delayed(read_image_chunk)(infile, shapes, starts)
d4slices += [dask.array.from_delayed(delayed_array, shapes, cdata.dtype)]
d3slices += [dask.array.concatenate(d4slices, axis=4)] if len(cshape) > 4 else d4slices
d2slices += [dask.array.concatenate(d3slices, axis=3)] if len(cshape) > 3 else d3slices
d1slices += [dask.array.concatenate(d2slices, axis=2)] if len(cshape) > 2 else d2slices
d0slices += [dask.array.concatenate(d1slices, axis=1)] if len(cshape) > 1 else d1slices
xda = xarray.DataArray(dask.array.concatenate(d0slices, axis=0), dims=dimorder[::-1]).transpose()
return xda
############################################
def read_image(infile, masks=True, history=True, chunks=(1000, 1000, 1, 4), verbose=False):
"""
Read casacore format Image to xarray Image Dataset format
Parameters
----------
infile : str
Input image filename (.image or .fits format)
masks : bool
Also read image masks as additional image data_vars. Default is True
history : bool
Also read history log table. Default is True
chunks: 4-D tuple of ints
Shape of desired chunking in the form of (l, m, chan, pol). Default is (1000, 1000, 1, 4)
Note: chunk size is the product of the four numbers (up to the actual size of the dimension)
Returns
-------
xarray.core.dataset.Dataset
new xarray Datasets of Image data contents
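    Examples
    --------
    Minimal usage sketch; the image name is hypothetical:
    >>> image_xds = read_image('my_image.image', chunks=(500, 500, 1, 4))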
"""
infile = os.path.expanduser(infile)
IA = ia()
QA = qa()
rc = IA.open(infile)
csys = IA.coordsys()
ims = IA.shape() # image shape
attrs = extract_table_attributes(infile)
if verbose: print('opening %s with shape %s' % (infile, str(ims)))
# construct a mapping of dimension names to image indices
dimmap = [(coord[:-1], attrs['coords']['pixelmap%s' % coord[-1]][0]) for coord in attrs['coords'] if coord[:-1] in ['direction', 'stokes', 'spectral', 'linear']]
dimmap = dict([(rr[0].replace('stokes','pol').replace('spectral','chan').replace('linear','component'), rr[1]) for rr in dimmap])
if 'direction' in dimmap: dimmap['l'] = dimmap.pop('direction')
if 'l' in dimmap: dimmap['m'] = dimmap['l'] + 1
# compute world coordinates for spherical dimensions
sphr_dims = [dimmap['l'], dimmap['m']] if 'l' in dimmap else []
coord_idxs = np.mgrid[[range(ims[dd]) if dd in sphr_dims else range(1) for dd in range(len(ims))]].reshape(len(ims), -1)
coord_world = csys.toworldmany(coord_idxs.astype(float))['numeric'][sphr_dims].reshape((-1,) + tuple(ims[sphr_dims]))
coords = dict([(['right_ascension','declination'][dd], (['l', 'm'], coord_world[di])) for di, dd in enumerate(sphr_dims)])
# compute world coordinates for cartesian dimensions
cart_names, cart_dims = list(zip(*[(kk, dimmap[kk]) for kk in dimmap if kk != 'direction']))
for cd in range(len(cart_dims)):
coord_idxs = np.mgrid[[range(ims[dd]) if dd == cart_dims[cd] else range(1) for dd in range(len(ims))]].reshape(len(ims), -1)
coord_world = csys.toworldmany(coord_idxs.astype(float))['numeric'][cart_dims[cd]].reshape(-1,)
coords.update({cart_names[cd]: coord_world})
# assign values to l, m coords based on incr and refpix in metadata
if len(sphr_dims) > 0:
sphr_coord = [coord for coord in attrs['coords'] if coord.startswith('direction')][0]
coords['l'] = np.arange(-attrs['coords'][sphr_coord]['crpix'][0], ims[0]-attrs['coords'][sphr_coord]['crpix'][0]) * attrs['coords'][sphr_coord]['cdelt'][0]
coords['m'] = np.arange(-attrs['coords'][sphr_coord]['crpix'][1], ims[1]-attrs['coords'][sphr_coord]['crpix'][1]) * attrs['coords'][sphr_coord]['cdelt'][1]
rc = csys.done()
rc = IA.close()
# chunks are in (l, m, chan, pol) order, rearrange to match the actual data order
dimorder = [dd for rr in range(5) for dd in dimmap if (dimmap[dd] is not None) and (dimmap[dd] == rr)]
chunks = list(np.array(chunks + (9999999,))[[['l', 'm', 'chan', 'pol', 'component'].index(rr) for rr in dimorder]])
# wrap the actual image data reads in dask delayed calls returned as an xarray dataarray
xds = xarray.Dataset(coords=coords)
xda = read_image_array(infile, dimorder, chunks)
xda = xda.rename('IMAGE')
xds[xda.name] = xda
# add mask(s) alongside image data
if masks and 'masks' in attrs:
for ii, mask in enumerate(list(attrs['masks'].keys())):
if not os.path.isdir(os.path.join(infile, mask)): continue
xda = read_image_array(os.path.join(infile, mask), dimorder, chunks)
xda = xda.rename('IMAGE_%s' % mask)
xds[xda.name] = xda
attrs[mask+'_column_descriptions'] = extract_table_attributes(os.path.join(infile, mask))['column_descriptions']
# if also loading history, put it as another xds in the attrs
if history and os.path.isdir(os.path.join(infile, 'logtable')):
attrs['history'] = read_generic_table(os.path.join(infile, 'logtable'))
if 'coords' in attrs: attrs['icoords'] = attrs.pop('coords') # rename coord table keyword to avoid confusion with xds coords
xds = xds.assign_attrs(attrs)
return xds
############################################
def write_image_slice(xda, outfile, col, starts):
"""
Write image xda chunk to the corresponding image table slice
"""
# trigger the DAG for this chunk and return values while the table is unlocked
values = xda.compute().values
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tb_tool.putcellslice(col, 0, values, starts, tuple(np.array(starts) + np.array(values.shape) - 1))
tb_tool.close()
############################################
def write_image(xds, outfile, portion='IMAGE', masks=True, history=True, verbose=False, execute=True):
"""
    Write an xarray Image Dataset back to casacore image table format on disk
Parameters
----------
xds : xarray.Dataset
Image xarray dataset to write
outfile : str
Output image filename (.image format)
portion : str
Name of the data_var in the xds that corresponds to the image data. Default 'IMAGE'
masks : bool
        Also write the masks to the output. Default True
    history : bool
        Also write the history log file to the output. Default True
verbose : bool
Whether or not to print output progress. Since writes will typically execute the DAG, if something is
going to go wrong, it will be here. Default False
execute : bool
Whether or not to actually execute the DAG, or just return it with write steps appended. Default True will execute it
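    Examples
    --------
    Minimal round-trip sketch; image names are hypothetical:
    >>> image_xds = read_image('my_image.image')
    >>> write_image(image_xds, 'my_image_copy.image')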
"""
outfile = os.path.expanduser(outfile)
start = time.time()
xds = xds.copy()
# initialize list of column names and xda's to be written. The column names are not the same as the data_var names
cols = [list(xds.attrs['column_descriptions'].keys())[0] if 'column_descriptions' in xds.attrs else list(xds.data_vars.keys())[0]]
xda_list = [xds[portion]]
subtable_list = ['']
if 'icoords' in xds.attrs: xds.attrs['coords'] = xds.attrs.pop('icoords') # rename back for proper table keyword creation
# initialize output table (must do it this way since create_table mysteriously throws image tool errors when subsequently opened)
IA = ia()
imtype = 'd' if xds[portion].dtype == 'float64' else 'c' if xds[portion].dtype == 'complex64' else 'cd' if xds[portion].dtype == 'complex128' else 'f'
IA.fromshape(outfile, list(xds[portion].shape), csys=xds.attrs['coords'], overwrite=True, log=False, type=imtype)
IA.close()
# write image history to logfile subtable (not delayed)
if history and ('history' in xds.attrs):
if verbose: print('writing history log...')
write_generic_table(xds.history, outfile, subtable='logtable')
# add masks to the list of xda's to be written
if masks and ('masks' in xds.attrs):
for mask in xds.masks:
if verbose: print('writing %s...' % mask)
mask_var = '%s_%s' % (portion, mask)
if (mask + '_column_descriptions' not in xds.attrs) or (mask_var not in xds): continue
cols += [list(xds.attrs[mask+'_column_descriptions'].keys())[0]]
xda_list += [xds[mask_var]]
subtable_list += [mask]
xds.attrs['masks'][mask]['mask'] = 'Table: %s' % os.path.abspath(os.path.join(outfile, mask))
xds.attrs[mask+'_column_descriptions'][cols[-1]]['shape'] = list(xds[mask_var].transpose().shape)
txds = xarray.Dataset({mask_var: xds[mask_var]}).assign_attrs({'column_descriptions': xds.attrs[mask+'_column_descriptions']})
create_table(os.path.join(outfile, mask), txds, max_rows=1, infile=None, cols=[cols[-1]], generic=True)
# write xds attribute to output table keywords
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
for attr in xds.attrs:
if (attr in ['bad_cols', 'bad_types', 'column_descriptions', 'history', 'subtables', 'info']) or attr.endswith('column_descriptions'): continue
tb_tool.putkeyword(attr, xds.attrs[attr])
if 'info' in xds.attrs: tb_tool.putinfo(xds.attrs['info'])
tb_tool.close()
# write each xda transposed to disk
chunks = [rr[0] for rr in xds[portion].chunks][::-1]
cshapes = xds[portion].shape[::-1]
dims = xds[portion].dims[::-1]
delayed_writes = []
for ii, xda in enumerate(xda_list):
for d0 in range(0, cshapes[0], chunks[0]):
d0len = min(chunks[0], cshapes[0] - d0)
for d1 in range(0, cshapes[1] if len(cshapes) > 1 else 1, chunks[1] if len(chunks) > 1 else 1):
d1len = min(chunks[1], cshapes[1] - d1) if len(cshapes) > 1 else 0
for d2 in range(0, cshapes[2] if len(cshapes) > 2 else 1, chunks[2] if len(chunks) > 2 else 1):
d2len = min(chunks[2], cshapes[2] - d2) if len(cshapes) > 2 else 0
for d3 in range(0, cshapes[3] if len(cshapes) > 3 else 1, chunks[3] if len(chunks) > 3 else 1):
d3len = min(chunks[3], cshapes[3] - d3) if len(cshapes) > 3 else 0
for d4 in range(0, cshapes[4] if len(cshapes) > 4 else 1, chunks[4] if len(chunks) > 4 else 1):
d4len = min(chunks[4], cshapes[4] - d4) if len(cshapes) > 4 else 0
starts = [d0, d1, d2, d3, d4][:len(cshapes)]
slices = [slice(d0, d0+d0len), slice(d1, d1+d1len), slice(d2, d2+d2len), slice(d3, d3+d3len), slice(d4, d4+d4len)]
txda = xda.transpose().isel(dict(zip(dims, slices)), missing_dims='ignore')
delayed_writes += [dask.delayed(write_image_slice)(txda, os.path.join(outfile, subtable_list[ii]), col=cols[ii], starts=starts)]
if execute:
if verbose: print('triggering DAG...')
zs = dask.compute(delayed_writes)
if verbose: print('execution time %0.2f sec' % (time.time() - start))
else:
if verbose: print('returning delayed task list')
return delayed_writes
/ col_chunk_size)\n', (39038, 39071), True, 'import numpy as np\n'), ((42648, 42667), 'numpy.atleast_1d', 'np.atleast_1d', (['axis'], {}), '(axis)\n', (42661, 42667), True, 'import numpy as np\n'), ((42690, 42709), 'numpy.atleast_1d', 'np.atleast_1d', (['axis'], {}), '(axis)\n', (42703, 42709), True, 'import numpy as np\n'), ((44115, 44131), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (44123, 44131), True, 'import numpy as np\n'), ((44134, 44150), 'numpy.array', 'np.array', (['shapes'], {}), '(shapes)\n', (44142, 44150), True, 'import numpy as np\n'), ((46181, 46221), 'dask.array.concatenate', 'dask.array.concatenate', (['d2slices'], {'axis': '(2)'}), '(d2slices, axis=2)\n', (46203, 46221), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((50319, 50345), 'os.path.join', 'os.path.join', (['infile', 'mask'], {}), '(infile, mask)\n', (50331, 50345), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((50592, 50618), 'os.path.join', 'os.path.join', (['infile', 'mask'], {}), '(infile, mask)\n', (50604, 50618), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((51495, 51511), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (51503, 51511), True, 'import numpy as np\n'), ((51514, 51536), 'numpy.array', 'np.array', (['values.shape'], {}), '(values.shape)\n', (51522, 51536), True, 'import numpy as np\n'), ((54380, 54407), 'os.path.join', 'os.path.join', (['outfile', 'mask'], {}), '(outfile, mask)\n', (54392, 54407), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((54538, 54579), 'xarray.Dataset', 'xarray.Dataset', (['{mask_var: xds[mask_var]}'], {}), '({mask_var: xds[mask_var]})\n', (54552, 54579), False, 'import xarray\n'), ((6042, 6078), 'numpy.clip', 'np.clip', (['xds[col].shape[1:]', '(1)', 'None'], {}), '(xds[col].shape[1:], 1, None)\n', (6049, 6078), True, 'import numpy as np\n'), ((17830, 17854), 'os.path.join', 'os.path.join', (['infile', 'tt'], {}), '(infile, tt)\n', (17842, 17854), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((22744, 22777), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (22756, 22777), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((22854, 22922), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen, 3)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen, 3), cdata[col].dtype)\n', (22877, 22922), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24817, 24858), 'dask.array.concatenate', 'dask.array.concatenate', (['bvars[kk]'], {'axis': '(0)'}), '(bvars[kk], axis=0)\n', (24839, 24858), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24937, 24978), 'dask.array.concatenate', 'dask.array.concatenate', (['bvars[kk]'], {'axis': '(0)'}), '(bvars[kk], axis=0)\n', (24959, 24978), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((31879, 31910), 'os.path.join', 'os.path.join', (['outfile', 'subtable'], {}), '(outfile, subtable)\n', (31891, 31910), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((33614, 33645), 'numpy.array', 'np.array', (['starts[1:values.ndim]'], {}), '(starts[1:values.ndim])\n', (33622, 33645), True, 'import numpy as np\n'), ((33648, 33674), 'numpy.array', 'np.array', (['values.shape[1:]'], {}), '(values.shape[1:])\n', (33656, 33674), True, 'import numpy as np\n'), ((39267, 
39303), 'dask.delayed', 'dask.delayed', (['write_main_table_slice'], {}), '(write_main_table_slice)\n', (39279, 39303), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((39566, 39577), 'time.time', 'time.time', ([], {}), '()\n', (39575, 39577), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((42527, 42579), 'xarray.Dataset', 'xarray.Dataset', (['{txda.name: txda, txda2.name: txda2}'], {}), '({txda.name: txda, txda2.name: txda2})\n', (42541, 42579), False, 'import xarray\n'), ((46081, 46121), 'dask.array.concatenate', 'dask.array.concatenate', (['d3slices'], {'axis': '(3)'}), '(d3slices, axis=3)\n', (46103, 46121), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((57037, 57048), 'time.time', 'time.time', ([], {}), '()\n', (57046, 57048), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((16285, 16299), 'numpy.array', 'np.array', (["['']"], {}), "([''])\n", (16293, 16299), True, 'import numpy as np\n'), ((23456, 23496), 'dask.array.concatenate', 'dask.array.concatenate', (['pol_list'], {'axis': '(1)'}), '(pol_list, axis=1)\n', (23478, 23496), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((29656, 29680), 'os.path.join', 'os.path.join', (['infile', 'tt'], {}), '(infile, tt)\n', (29668, 29680), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((37595, 37620), 'numpy.cumsum', 'np.cumsum', (['chunks[0][:-1]'], {}), '(chunks[0][:-1])\n', (37604, 37620), True, 'import numpy as np\n'), ((41602, 41636), 'numpy.full', 'np.full', (['(1)', 'np.nan'], {'dtype': 'np.int32'}), '(1, np.nan, dtype=np.int32)\n', (41609, 41636), True, 'import numpy as np\n'), ((41712, 41748), 'numpy.unique', 'np.unique', (['txda'], {'return_inverse': '(True)'}), '(txda, return_inverse=True)\n', (41721, 41748), True, 'import numpy as np\n'), ((42478, 42512), 'numpy.full', 'np.full', (['(1)', 'np.nan'], {'dtype': 'np.int32'}), '(1, np.nan, dtype=np.int32)\n', (42485, 42512), True, 'import numpy as np\n'), ((45791, 45821), 'dask.delayed', 'dask.delayed', (['read_image_chunk'], {}), '(read_image_chunk)\n', (45803, 45821), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((45883, 45942), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', 'shapes', 'cdata.dtype'], {}), '(delayed_array, shapes, cdata.dtype)\n', (45906, 45942), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((45977, 46017), 'dask.array.concatenate', 'dask.array.concatenate', (['d4slices'], {'axis': '(4)'}), '(d4slices, axis=4)\n', (45999, 46017), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((16578, 16595), 'numpy.array', 'np.array', (['rr[col]'], {}), '(rr[col])\n', (16586, 16595), True, 'import numpy as np\n'), ((20357, 20371), 'numpy.diff', 'np.diff', (['ridxs'], {}), '(ridxs)\n', (20364, 20371), True, 'import numpy as np\n'), ((23239, 23272), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (23251, 23272), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((23352, 23423), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen, plen)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen, plen), cdata[col].dtype)\n', (23375, 23423), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24214, 24255), 'dask.array.concatenate', 'dask.array.concatenate', (['chan_list'], 
{'axis': '(1)'}), '(chan_list, axis=1)\n', (24236, 24255), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((38476, 38512), 'dask.delayed', 'dask.delayed', (['write_main_table_slice'], {}), '(write_main_table_slice)\n', (38488, 38512), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2114, 2137), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2135, 2137), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((17220, 17234), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17228, 17234), True, 'import numpy as np\n'), ((17356, 17370), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17364, 17370), True, 'import numpy as np\n'), ((24141, 24181), 'dask.array.concatenate', 'dask.array.concatenate', (['pol_list'], {'axis': '(2)'}), '(pol_list, axis=2)\n', (24163, 24181), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((37745, 37770), 'numpy.cumsum', 'np.cumsum', (['chunks[1][:-1]'], {}), '(chunks[1][:-1])\n', (37754, 37770), True, 'import numpy as np\n'), ((56765, 56796), 'dask.delayed', 'dask.delayed', (['write_image_slice'], {}), '(write_image_slice)\n', (56777, 56796), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((56803, 56843), 'os.path.join', 'os.path.join', (['outfile', 'subtable_list[ii]'], {}), '(outfile, subtable_list[ii])\n', (56815, 56843), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((23907, 23940), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (23919, 23940), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24028, 24105), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen, clen, plen)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen, clen, plen), cdata[col].dtype)\n', (24051, 24105), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((37929, 37954), 'numpy.cumsum', 'np.cumsum', (['chunks[2][:-1]'], {}), '(chunks[2][:-1])\n', (37938, 37954), True, 'import numpy as np\n'), ((16708, 16725), 'numpy.array', 'np.array', (['rr[col]'], {}), '(rr[col])\n', (16716, 16725), True, 'import numpy as np\n'), ((16831, 16848), 'numpy.array', 'np.array', (['rr[col]'], {}), '(rr[col])\n', (16839, 16848), True, 'import numpy as np\n'), ((16885, 16903), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (16893, 16903), True, 'import numpy as np\n'), ((16911, 16931), 'numpy.array', 'np.array', (['ctype[col]'], {}), '(ctype[col])\n', (16919, 16931), True, 'import numpy as np\n')]
|
'''counts2table.py - wrap various differential expression tools
=============================================================
:Tags: Python
Purpose
-------
This script provides a convenience wrapper for differential expression
analysis for a variety of methods.
The aim of this script is to provide a common tabular output format
that is consistent between the different methods.
The script will call the selected method and output a variety of
diagnostic plots. Generally, the analysis aims to follow published
workflows for the individual method together with outputting diagnostic
plots to spot any problems. The script will also preprocess count
data to apply some common filtering methods.
The methods implemented are:
sleuth
Application of sleuth.
deseq2
Application of DESeq2
edger
Application of EdgeR
dexseq
Application of DEXSeq
ttest
Application of Welch's ttest to FPKM values
mock
A mock analysis. No differential analysis is performed,
but fold changes are computed and output.
Use --sleuth-genewise to test at gene rather than transcript level.
For genewise analysis, the --gene-biomart option is also required. Use the
following R code to identify the correct database (e.g.
hsapiens_gene_ensembl):
> library(biomaRt)
> listDatasets(useEnsembl(biomart="ensembl"))
Use the option --use-ihw to use the independent hypothesis weighting
method to calculate a weighted FDR. Note this will replace the
unweighted BH FDR in the final results table.
Usage
-----
Input
+++++
The input to this script is a table of measurements reflecting
expression levels. For the tag counting methods such as DESeq2 or
EdgeR, these should be the raw counts, while for other methods such as
ttest, these can be normalized values such as FPKM values. In
addition, sleuth does not use an expression table but rather the
directory of expression estimates from e.g. kallisto.
See the option --sleuth-counts-dir.
The script further requires a design table describing the tests to
be performed. The design table has four columns::
track        include   group   pair
CW-CD14-R1   0         CD14    1
CW-CD14-R2   0         CD14    1
CW-CD14-R3   1         CD14    1
CW-CD4-R1    1         CD4     1
FM-CD14-R1   1         CD14    2
FM-CD4-R2    0         CD4     2
FM-CD4-R3    0         CD4     2
FM-CD4-R4    0         CD4     2
These files should be tab separated as this is enforced in downstream
analyses and will cause the script to error.
track
name of track - should correspond to column header in the counts
table.
include
flag to indicate whether or not to include this data (0, 1)
group
group indicator - experimental group
pair
pair that sample belongs to (for paired tests) - set to 0 if the
design is not paired.
Note: additional columns included after pair can be used to specify
covariates (e.g. replicate number).
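As a minimal sketch (the file name is a placeholder; the script performs an
equivalent read internally), the design table can be loaded and inspected
with pandas before running the analysis::

   import pandas as pd
   design = pd.read_csv("design.tsv", sep="\t", index_col=0, comment="#")
   print(design.columns)   # expect: include, group, pair (+ optional covariates)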
Output
++++++
The script outputs a table with the following columns:
+------------------+------------------------------------------------------+
|*Column name* |*Content* |
+------------------+------------------------------------------------------+
|test_id |Name of the test (gene name, ... |
+------------------+------------------------------------------------------+
|treatment_name |Name of the treatment condition |
+------------------+------------------------------------------------------+
|treatment_mean |Estimated expression value for treatment |
+------------------+------------------------------------------------------+
|treatment_std |Standard deviation |
+------------------+------------------------------------------------------+
|control_name |Name of the control condition |
+------------------+------------------------------------------------------+
|control_mean |Estimated expression value for control |
+------------------+------------------------------------------------------+
|control_std |Standard deviation |
+------------------+------------------------------------------------------+
|pvalue |The p value for rejecting the null hypothesis |
+------------------+------------------------------------------------------+
|qvalue |Multiple testing correction |
+------------------+------------------------------------------------------+
|l2fold |log2 foldchange of treatment/control |
+------------------+------------------------------------------------------+
|transformed_l2fold|a transformed log2 foldchange value. |
+------------------+------------------------------------------------------+
|fold |foldchange of treatment/control |
+------------------+------------------------------------------------------+
|significant |Flag, 1 if test called significant according to FDR |
+------------------+------------------------------------------------------+
|status |test status (OK|FAIL) |
+------------------+------------------------------------------------------+
Additional plots and tables are generated and method specific.
Command line options
--------------------
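A typical invocation might look like this (a sketch only; file names and the
model/contrast values are placeholders to be adapted to your own design)::

   python counts2table.py --method=deseq2 --tag-tsv-file=counts.tsv --design-tsv-file=design.tsv --model=~group --contrast=group --fdr=0.05 > results.tsv

Run the script with --help to see the full list of options.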
To do:
-- add more E.info logging
-- improve documentation
'''
import sys
import pandas as pd
import cgatcore.experiment as E
import cgatpipelines.tasks.expression as expression
import cgatcore.iotools as iotools
import cgatpipelines.tasks.counts as Counts
import cgatpipelines.tasks.R as R
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-t", "--tag-tsv-file", dest="input_filename_tags",
type="string",
help="input file with tag counts [default=%default].")
parser.add_option("-d", "--design-tsv-file", dest="input_filename_design",
type="string",
help="input file with experimental design "
"[default=%default].")
parser.add_option("-m", "--method", dest="method", type="choice",
choices=("ttest", "sleuth", "edger", "deseq2", "mock",
"dexseq"),
help="differential expression method to apply "
"[default=%default].")
parser.add_option("--deseq2-dispersion-method",
dest="deseq2_dispersion_method",
type="choice",
choices=("pooled", "per-condition", "blind"),
help="dispersion method for deseq2 [default=%default].")
parser.add_option("--deseq2-fit-type", dest="deseq2_fit_type",
type="choice",
choices=("parametric", "local"),
help="fit type for deseq2 [default=%default].")
parser.add_option("--edger-dispersion",
dest="edger_dispersion", type="float",
help="dispersion value for edgeR if there are no "
"replicates [default=%default].")
parser.add_option("-f", "--fdr", dest="fdr", type="float",
help="fdr to apply [default=%default].")
# currently not implemented
# parser.add_option("-R", "--output-R-code", dest="save_r_environment",
# type="string",
# help="save R environment to loc [default=%default]")
parser.add_option("-r", "--reference-group", dest="ref_group",
type="string",
help="Group to use as reference to compute "
"fold changes against [default=%default]")
parser.add_option("--filter-min-counts-per-row",
dest="filter_min_counts_per_row",
type="int",
help="remove rows with less than this "
"number of counts in total [default=%default].")
parser.add_option("--filter-min-counts-per-sample",
dest="filter_min_counts_per_sample",
type="int",
help="remove samples with a maximum count per sample of "
"less than this number [default=%default].")
parser.add_option("--filter-percentile-rowsums",
dest="filter_percentile_rowsums",
type="int",
help="remove percent of rows with "
"lowest total counts [default=%default].")
parser.add_option("--model",
dest="model",
type="string",
help=("model for GLM"))
parser.add_option("--reduced-model",
dest="reduced_model",
type="string",
help=("reduced model for LRT"))
parser.add_option("--contrast",
dest="contrast",
type="string",
help=("contrast for differential expression testing"))
parser.add_option("--sleuth-counts-dir",
dest="sleuth_counts_dir",
type="string",
help=("directory containing expression estimates"
"from sleuth. Sleuth expects counts"
"files to be called abundance.h5"))
parser.add_option("--dexseq-counts-dir",
dest="dexseq_counts_dir",
type="string",
help=("directory containing counts for dexseq. DEXSeq "
"expects counts files to be called .txt and"
"to be generated by the DEXSeq_counts.py script"))
parser.add_option("--dexseq-flattened-file",
dest="dexseq_flattened_file",
type="string",
help=("directory containing flat gtf for dexseq. DEXSeq "
"expects this to be generated by the"
"DEXSeq_prepare_annotations.py script"))
parser.add_option("--outfile-sleuth-count",
dest="outfile_sleuth_count",
type="string",
help=("outfile for full count table generated by sleuth"))
parser.add_option("--outfile-sleuth-tpm",
dest="outfile_sleuth_tpm",
type="string",
help=("outfile for full tpm table generated by sleuth"))
parser.add_option("--use-ihw",
dest="use_ihw",
action="store_true",
help=("use the independent hypothesis weighting method "
"to obtain weighted FDR"))
parser.add_option("--sleuth-genewise",
dest="sleuth_genewise",
action="store_true",
help=("run genewise, rather than transcript level testing"))
parser.add_option("--gene-biomart",
dest="gene_biomart",
type="string",
help=("name of ensemble gene biomart"))
parser.add_option("--de-test",
dest="DEtest",
type="choice",
choices=("wald", "lrt"),
help=("Differential expression test"))
parser.add_option("--Rhistory",
dest="Rhistory",
type="string",
help=("Outfile for R history"))
parser.add_option("--Rimage",
dest="Rimage",
type="string",
help=("Outfile for R image"))
parser.set_defaults(
input_filename_tags="-",
input_filename_design=None,
output_filename=sys.stdout,
method="deseq2",
fdr=0.1,
deseq2_dispersion_method="pooled",
deseq2_fit_type="parametric",
edger_dispersion=0.4,
ref_group=False,
filter_min_counts_per_row=None,
filter_min_counts_per_sample=None,
filter_percentile_rowsums=None,
spike_foldchange_max=4.0,
spike_expression_max=5.0,
spike_expression_bin_width=0.5,
spike_foldchange_bin_width=0.5,
spike_max_counts_per_bin=50,
model=None,
contrast=None,
output_filename_pattern=None,
sleuth_counts_dir=None,
dexseq_counts_dir=None,
dexseq_flattened_file=None,
outfile_sleuth_count=None,
outfile_sleuth_tpm=None,
use_ihw=False,
sleuth_genewise=False,
gene_biomart=None,
DEtest="wald",
reduced_model=None,
Rhistory=None,
Rimage=None)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.start(parser, argv=argv, add_output_options=True)
RH = None
if options.Rhistory or options.Rimage:
RH = R.R_with_History()
outfile_prefix = options.output_filename_pattern
# Expression.py currently expects a reference group for edgeR and
# sleuth, regardless of which test is used
if not options.ref_group and (
options.method == "edger" or options.method == "sleuth"):
raise ValueError("Must provide a reference group ('--reference-group')")
# create Design object
design = expression.ExperimentalDesign(
pd.read_csv(iotools.open_file(options.input_filename_design, "r"),
sep="\t", index_col=0, comment="#"))
if len(set(design.table[options.contrast])) > 2:
if options.method == "deseq2" or options.method == "sleuth":
if options.DEtest == "wald":
raise ValueError(
"Factor must have exactly two levels for Wald Test. "
"If you have more than two levels in your factor, "
"consider LRT")
else:
E.info('''There are more than 2 levels for the contrast
specified (%s:%s). The log2fold changes in the results table
and MA plots will be for the first two levels in the
contrast. The p-value will be the p-value for the overall
significance of the contrast. Hence, some genes will have a
significant p-value but 0-fold change between the first two
levels''' % (options.contrast, set(design.table[options.contrast])))
# Sleuth reads in data itself so we don't need to create a counts object
if options.method == "sleuth":
assert options.sleuth_counts_dir, (
"need to specify the location of the abundance.h5 counts files "
" (--sleuth-counts-dir)")
# validate design against counts and model
design.validate(model=options.model)
experiment = expression.DEExperiment_Sleuth()
results = experiment.run(design,
base_dir=options.sleuth_counts_dir,
model=options.model,
contrast=options.contrast,
outfile_prefix=outfile_prefix,
counts=options.outfile_sleuth_count,
tpm=options.outfile_sleuth_tpm,
fdr=options.fdr,
genewise=options.sleuth_genewise,
gene_biomart=options.gene_biomart,
DE_test=options.DEtest,
ref_group=options.ref_group,
reduced_model=options.reduced_model)
# DEXSeq reads in data itself
elif options.method == "dexseq":
assert options.dexseq_counts_dir, (
"need to specify the location of the .txt counts files")
# create Design object
design = expression.ExperimentalDesign(
pd.read_csv(iotools.open_file(options.input_filename_design, "r"),
sep="\t", index_col=0, comment="#"))
# validate design against counts and model
# design.validate(model=options.model)
experiment = expression.DEExperiment_DEXSeq()
results = experiment.run(design,
base_dir=options.dexseq_counts_dir,
model=options.model,
contrast=options.contrast,
ref_group=options.ref_group,
outfile_prefix=outfile_prefix,
flattenedfile=options.dexseq_flattened_file,
fdr=options.fdr)
else:
# create Counts object
if options.input_filename_tags == "-":
counts = Counts.Counts(pd.io.parsers.read_csv(
sys.stdin, sep="\t", index_col=0, comment="#"))
else:
counts = Counts.Counts(pd.io.parsers.read_csv(
iotools.open_file(options.input_filename_tags, "r"),
sep="\t", index_col=0, comment="#"))
# validate design against counts and model
design.validate(counts, options.model)
# restrict counts to samples in design table
counts.restrict(design)
# remove sample with low counts
if options.filter_min_counts_per_sample:
counts.removeSamples(
min_counts_per_sample=options.filter_min_counts_per_sample)
# remove observations with low counts
if options.filter_min_counts_per_row:
counts.removeObservationsFreq(
min_counts_per_row=options.filter_min_counts_per_row)
# remove bottom percentile of observations
if options.filter_percentile_rowsums:
counts.removeObservationsPerc(
percentile_rowsums=options.filter_percentile_rowsums)
# check samples are the same in counts and design following counts
# filtering and, if not, restrict design table and re-validate
design.revalidate(counts, options.model)
# set up experiment and run tests
if options.method == "ttest":
experiment = expression.DEExperiment_TTest()
results = experiment.run(counts, design)
elif options.method == "edger":
experiment = expression.DEExperiment_edgeR()
results = experiment.run(counts,
design,
model=options.model,
contrast=options.contrast,
outfile_prefix=outfile_prefix,
ref_group=options.ref_group,
fdr=options.fdr,
dispersion=options.edger_dispersion)
elif options.method == "deseq2":
experiment = expression.DEExperiment_DESeq2()
results = experiment.run(counts,
design,
model=options.model,
contrast=options.contrast,
outfile_prefix=outfile_prefix,
fdr=options.fdr,
fit_type=options.deseq2_fit_type,
ref_group=options.ref_group,
DEtest=options.DEtest,
R=RH)
results.getResults(fdr=options.fdr)
if options.use_ihw:
results.calculateIHW(alpha=options.fdr)
for contrast in set(results.table['contrast']):
results.plotVolcano(contrast, outfile_prefix=outfile_prefix, R=RH)
results.plotMA(contrast, outfile_prefix=outfile_prefix, R=RH)
results.plotPvalueHist(contrast, outfile_prefix=outfile_prefix, R=RH)
results.plotPvalueQQ(contrast, outfile_prefix=outfile_prefix, R=RH)
results.table.to_csv(sys.stdout, sep="\t", na_rep="NA", index=False)
results.summariseDEResults()
# write out summary tables for each comparison/contrast
for test_group in list(results.Summary.keys()):
outf = iotools.open_file("_".join(
[outfile_prefix, test_group, "summary.tsv"]), "w")
outf.write("category\tcounts\n%s\n"
% results.Summary[test_group].asTable())
outf.close()
if options.Rhistory:
RH.saveHistory(options.Rhistory)
if options.Rimage:
RH.saveImage(options.Rimage)
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"cgatpipelines.tasks.expression.DEExperiment_Sleuth",
"pandas.io.parsers.read_csv",
"cgatpipelines.tasks.R.R_with_History",
"cgatpipelines.tasks.expression.DEExperiment_DEXSeq",
"cgatpipelines.tasks.expression.DEExperiment_edgeR",
"cgatcore.experiment.stop",
"cgatpipelines.tasks.expression.DEExperiment_DESeq2",
"cgatcore.experiment.start",
"cgatpipelines.tasks.expression.DEExperiment_TTest",
"cgatcore.iotools.open_file"
] |
[((13280, 13331), 'cgatcore.experiment.start', 'E.start', (['parser'], {'argv': 'argv', 'add_output_options': '(True)'}), '(parser, argv=argv, add_output_options=True)\n', (13287, 13331), True, 'import cgatcore.experiment as E\n'), ((21025, 21033), 'cgatcore.experiment.stop', 'E.stop', ([], {}), '()\n', (21031, 21033), True, 'import cgatcore.experiment as E\n'), ((13403, 13421), 'cgatpipelines.tasks.R.R_with_History', 'R.R_with_History', ([], {}), '()\n', (13419, 13421), True, 'import cgatpipelines.tasks.R as R\n'), ((15265, 15297), 'cgatpipelines.tasks.expression.DEExperiment_Sleuth', 'expression.DEExperiment_Sleuth', ([], {}), '()\n', (15295, 15297), True, 'import cgatpipelines.tasks.expression as expression\n'), ((13871, 13924), 'cgatcore.iotools.open_file', 'iotools.open_file', (['options.input_filename_design', '"""r"""'], {}), "(options.input_filename_design, 'r')\n", (13888, 13924), True, 'import cgatcore.iotools as iotools\n'), ((16621, 16653), 'cgatpipelines.tasks.expression.DEExperiment_DEXSeq', 'expression.DEExperiment_DEXSeq', ([], {}), '()\n', (16651, 16653), True, 'import cgatpipelines.tasks.expression as expression\n'), ((18643, 18674), 'cgatpipelines.tasks.expression.DEExperiment_TTest', 'expression.DEExperiment_TTest', ([], {}), '()\n', (18672, 18674), True, 'import cgatpipelines.tasks.expression as expression\n'), ((16384, 16437), 'cgatcore.iotools.open_file', 'iotools.open_file', (['options.input_filename_design', '"""r"""'], {}), "(options.input_filename_design, 'r')\n", (16401, 16437), True, 'import cgatcore.iotools as iotools\n'), ((17256, 17325), 'pandas.io.parsers.read_csv', 'pd.io.parsers.read_csv', (['sys.stdin'], {'sep': '"""\t"""', 'index_col': '(0)', 'comment': '"""#"""'}), "(sys.stdin, sep='\\t', index_col=0, comment='#')\n", (17278, 17325), True, 'import pandas as pd\n'), ((18794, 18825), 'cgatpipelines.tasks.expression.DEExperiment_edgeR', 'expression.DEExperiment_edgeR', ([], {}), '()\n', (18823, 18825), True, 'import cgatpipelines.tasks.expression as expression\n'), ((17433, 17484), 'cgatcore.iotools.open_file', 'iotools.open_file', (['options.input_filename_tags', '"""r"""'], {}), "(options.input_filename_tags, 'r')\n", (17450, 17484), True, 'import cgatcore.iotools as iotools\n'), ((19368, 19400), 'cgatpipelines.tasks.expression.DEExperiment_DESeq2', 'expression.DEExperiment_DESeq2', ([], {}), '()\n', (19398, 19400), True, 'import cgatpipelines.tasks.expression as expression\n')]
|
# Copyright (c) 2016-2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import os
#from urllib.parse import quote, unquote
import json
import gzip
import copy
from .error import reportError
from .utils import *
#-------------------------------------------------------------
# Accessor base class
#-------------------------------------------------------------
class Accessor:
def __init__(self, fileref):
self.fileref = fileref
self.caller = None
self.rna = None
def getRna(self, context):
return self.rna
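# NOTE: the unconditional return above makes the lookup below unreachable;
# it is kept as legacy/debug code.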
global theRnas
if self.rna is None:
if self.name in theRnas.keys():
return theRnas[self.name]
else:
print("Did not find RNA", self.name)
return self.rna
def storeRna(self, rna):
global theRnas
theRnas[self.name] = rna
return
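# NOTE: the statements below are debug output left unreachable by the return above.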
if hasattr(rna, "type"):
print("Store", rna.type, self.name, rna)
else:
print("Store RNA", self.name, rna)
def getAsset(self, id, strict=True):
global theAssets, theOtherAssets
if isinstance(id, Asset):
return id
id = normalizeRef(id)
if "?" in id:
# Attribute. Return None
return None
ref = getRef(id, self.fileref)
try:
return theAssets[ref]
except KeyError:
pass
if id[0] == "#":
if self.caller:
ref = getRef(id, self.caller.fileref)
try:
return theAssets[ref]
except KeyError:
pass
ref = getRef(id, self.fileref)
try:
return theAssets[ref]
except KeyError:
pass
try:
return theOtherAssets[ref]
except KeyError:
pass
msg = ("Missing local asset:\n '%s'\n" % ref)
if self.caller:
msg += ("in file:\n '%s'\n" % self.caller.fileref)
if not strict:
return None
reportError(msg)
return None
else:
return self.getNewAsset(id, ref, strict)
def getNewAsset(self, id, ref, strict=True):
from .files import parseAssetFile
from .load_json import loadJson
fileref = id.split("#")[0]
filepath = getDazPath(fileref)
file = None
if filepath:
struct = loadJson(filepath)
file = parseAssetFile(struct, fileref=fileref)
try:
return theAssets[ref]
except KeyError:
pass
else:
msg = ("Cannot open file:\n '%s' " % normalizePath(fileref))
reportError(msg, warnPaths=True, trigger=(3,4))
return None
LS.missingAssets[ref] = True
if strict and LS.useStrict:
msg =("Missing asset:\n '%s'\n" % ref +
"Fileref\n %s\n" % fileref +
"Filepath:\n '%s'\n" % filepath +
"File asset:\n %s\n" % file )
reportError(msg, warnPaths=True, trigger=(3,4))
return None
def getOldAsset(self, id):
global theAssets
ref = getRef(id, self.fileref)
try:
return theAssets[ref]
except KeyError:
pass
return self.getNewAsset(id, ref)
def getTypedAsset(self, id, type):
asset = self.getAsset(id)
if (asset is None or
type is None or
isinstance(asset,type)):
return asset
msg = (
"Asset of type %s not found:\n %s\n" % (type, id) +
"File ref:\n '%s'\n" % self.fileref
)
return reportError(msg, warnPaths=True)
def parseUrlAsset(self, struct, type=None):
if "url" not in struct.keys():
msg = ("URL asset failure: No URL.\n" +
"Type: %s\n" % type +
"File ref:\n '%s'\n" % self.fileref +
"Id: '%s'\n" % struct["id"] +
"Keys:\n %s\n" % list(struct.keys()))
reportError(msg, warnPaths=True, trigger=(2,3))
return None
asset = self.getTypedAsset(struct["url"], type)
if isinstance(asset, Asset):
asset.caller = self
asset.update(struct)
self.saveAsset(struct, asset)
return asset
elif asset is not None:
msg = ("Empty asset:\n %s " % struct["url"])
return reportError(msg, warnPaths=True)
else:
asset = self.getAsset(struct["url"])
msg = ("URL asset failure:\n" +
"URL: '%s'\n" % struct["url"] +
"Type: %s\n" % type +
"File ref:\n '%s'\n" % self.fileref +
"Found asset:\n %s\n" % asset)
return reportError(msg, warnPaths=True, trigger=(3,4))
return None
def saveAsset(self, struct, asset):
global theAssets
ref = ref2 = normalizeRef(asset.id)
if self.caller:
if "id" in struct.keys():
ref = getId(struct["id"], self.caller.fileref)
else:
print("No id", struct.keys())
try:
asset2 = theAssets[ref]
except KeyError:
asset2 = None
if asset2 and asset2 != asset:
msg = ("Duplicate asset definition\n" +
" Asset 1: %s\n" % asset +
" Asset 2: %s\n" % asset2 +
" Ref: %s\n" % ref)
return reportError(msg, trigger=(3,4))
theAssets[ref] = theAssets[ref2] = asset
return
if asset.caller:
ref2 = lowerPath(asset.caller.id) + "#" + struct["id"]
ref2 = normalizeRef(ref2)
if ref2 in theAssets.keys():
asset2 = theAssets[ref2]
if asset != asset2 and GS.verbosity > 1:
msg = ("Duplicate asset definition\n" +
" Asset 1: %s\n" % asset +
" Asset 2: %s\n" % asset2 +
" Caller: %s\n" % asset.caller +
" Ref 1: %s\n" % ref +
" Ref 2: %s\n" % ref2)
return reportError(msg)
else:
print("REF2", ref2)
print(" ", asset)
theAssets[ref2] = asset
#-------------------------------------------------------------
# Asset base class
#-------------------------------------------------------------
class Asset(Accessor):
def __init__(self, fileref):
Accessor.__init__(self, fileref)
self.id = None
self.url = None
self.name = None
self.label = None
self.type = None
self.parent = None
self.children = []
self.source = None
self.drivable = True
self.isSourced = False
def __repr__(self):
return ("<Asset %s t: %s r: %s>" % (self.id, self.type, self.rna))
def selfref(self):
return ("#" + self.id.rsplit("#", 2)[-1])
def getLabel(self, inst=None):
if inst and inst.label:
return inst.label
elif self.label:
return self.label
else:
return self.name
def getName(self):
if self.id is None:
return "None"
words = os.path.splitext(os.path.basename(self.id))
if len(words) == 2:
base,ext = words
else:
base,ext = words[0],None
string = base
if ext:
words = ext.split("#")
if len(words) > 1:
string = words[-1]
return getName(string)
def copySource(self, asset):
for key in dir(asset):
if hasattr(self, key) and key[0] != "_":
attr = getattr(self, key)
try:
setattr(asset, key, attr)
except RuntimeError:
pass
def copySourceFile(self, source):
global theAssets, theSources
file = source.rsplit("#", 1)[0]
asset = self.parseUrlAsset({"url": source})
if asset is None:
return None
old = asset.id.rsplit("#", 1)[0]
new = self.id.rsplit("#", 1)[0]
self.copySourceAssets(old, new)
if old not in theSources.keys():
theSources[old] = []
for other in theSources[old]:
self.copySourceAssets(other, new)
theSources[old].append(new)
return asset
def copySourceAssets(self, old, new):
nold = len(old)
nnew = len(new)
adds = []
assets = []
for key,asset in theAssets.items():
if key[0:nold] == old:
adds.append((new + key[nold:], asset))
for key,asset in adds:
if key not in theOtherAssets.keys():
theOtherAssets[key] = asset
assets.append(asset)
def parse(self, struct):
self.source = struct
if "id" in struct.keys():
self.id = getId(struct["id"], self.fileref)
else:
self.id = "?"
msg = ("Asset without id\nin file \"%s\":\n%s " % (self.fileref, struct))
reportError(msg, trigger=(1,2))
if "url" in struct.keys():
self.url = struct["url"]
elif "id" in struct.keys():
self.url = struct["id"]
if "type" in struct.keys():
self.type = struct["type"]
if "name" in struct.keys():
self.name = struct["name"]
elif "id" in struct.keys():
self.name = struct["id"]
elif self.url:
self.name = self.url
else:
self.name = "Noname"
if "label" in struct.keys():
self.label = struct["label"]
if "parent" in struct.keys():
self.parent = self.getAsset(struct["parent"])
if self.parent:
self.parent.children.append(self)
if "source" in struct.keys():
asset = self.copySourceFile(struct["source"])
if asset and not asset.isSourced:
self.copySource(asset)
asset.isSourced = True
return self
def update(self, struct):
for key,value in struct.items():
if key == "type":
self.type = value
elif key == "name":
self.name = value
elif key == "url":
self.url = value
elif key == "label":
self.label = value
elif key == "parent":
if self.parent is None and self.caller:
self.parent = self.caller.getAsset(struct["parent"])
elif key == "channel":
self.value = getCurrentValue(value)
return self
def build(self, context, inst=None):
return
raise NotImplementedError("Cannot build %s yet" % self.type)
def buildData(self, context, inst, cscale, center):
print("BDATA", self)
if self.rna is None:
self.build(context)
def postprocess(self, context, inst):
return
def connect(self, struct):
pass
def getAssetFromStruct(struct, fileref):
id = getId(struct["id"], fileref)
try:
return theAssets[id]
except KeyError:
return None
def getExistingFile(fileref):
global theAssets
ref = normalizeRef(fileref)
if ref in theAssets.keys():
#print("Reread", fileref, ref)
return theAssets[ref]
else:
return None
#-------------------------------------------------------------
#
#-------------------------------------------------------------
def storeAsset(asset, fileref):
global theAssets
theAssets[fileref] = asset
def getId(id, fileref):
id = normalizeRef(id)
if id[0] == "/":
return id
else:
return fileref + "#" + id
def getRef(id, fileref):
id = normalizeRef(id)
if id[0] == "#":
return fileref + id
else:
return id
def lowerPath(path):
#return path
if len(path) > 0 and path[0] == "/":
words = path.split("#",1)
if len(words) == 1:
return tolower(words[0])
else:
return tolower(words[0]) + "#" + words[1]
else:
return path
def normalizeRef(id):
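# Percent-encode the id, undo the DAZ-specific escapes, lower-case the file
# part of the path and collapse any double slashes.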
from urllib.parse import quote
ref= lowerPath(undoQuote(quote(id)))
return ref.replace("//", "/")
def undoQuote(ref):
ref = ref.replace("%23","#").replace("%25","%").replace("%2D", "-").replace("%2E", ".").replace("%2F", "/").replace("%3F", "?")
return ref.replace("%5C", "/").replace("%5F", "_").replace("%7C", "|")
def clearAssets():
global theAssets, theOtherAssets, theSources, theRnas
theAssets = {}
theOtherAssets = {}
theSources = {}
theRnas = {}
clearAssets()
#-------------------------------------------------------------
# Paths
#-------------------------------------------------------------
def setDazPaths(scn):
from .error import DazError
global theDazPaths
filepaths = []
for path in GS.getDazPaths():
if path:
if not os.path.exists(path):
msg = ("The DAZ library path\n" +
"%s \n" % path +
"does not exist. Check and correct the\n" +
"Paths to DAZ library section in the Settings panel." +
"For more details see\n" +
"http://diffeomorphic.blogspot.se/p/settings-panel_17.html. ")
print(msg)
raise DazError(msg)
else:
filepaths.append(path)
if os.path.isdir(path):
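# Also add immediate subfolders whose names consist only of digits and
# underscores as additional library search paths.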
for fname in os.listdir(path):
if "." not in fname:
numname = "".join(fname.split("_"))
if numname.isdigit():
subpath = path + "/" + fname
filepaths.append(subpath)
theDazPaths = filepaths
def fixBrokenPath(path):
"""
Many asset file paths assume a case-insensitive file system; try to fix that here.
:param path: path that may not exist exactly as given on disk
:return: the path with each component corrected to an existing, case-matched name where possible
"""
path_components = []
head = path
while True:
head, tail = os.path.split(head)
if tail != "":
path_components.append(tail)
else:
if head != "":
path_components.append(head)
path_components.reverse()
break
check = path_components[0]
for pc in path_components[1:]:
if not os.path.exists(check):
return check
cand = os.path.join(check, pc)
if not os.path.exists(cand):
corrected = [f for f in os.listdir(check) if f.lower() == pc.lower()]
if len(corrected) > 0:
cand = os.path.join(check, corrected[0])
else:
msg = ("Broken path: '%s'\n" % path +
" Folder: '%s'\n" % check +
" File: '%s'\n" % pc +
" Files: %s" % os.listdir(check))
reportError(msg, trigger=(3,4))
check = cand
return check
def normalizePath(ref):
from urllib.parse import unquote
return unquote(ref)
def getRelativeRef(ref):
global theDazPaths
path = normalizePath(ref)
for dazpath in theDazPaths:
n = len(dazpath)
if path[0:n].lower() == dazpath.lower():
return ref[n:]
print("Not a relative path:\n '%s'" % path)
return ref
def getDazPath(ref):
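# Resolve a DAZ file reference to an absolute path: refs like "/C:/..." are
# treated as absolute (Windows-style) paths, while refs starting with "/" are
# searched under the configured DAZ library paths.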
global theDazPaths
path = normalizePath(ref)
if path[2] == ":":
filepath = path[1:]
if GS.verbosity > 2:
print("Load", filepath)
elif path[0] == "/":
for folder in theDazPaths:
filepath = folder + path
if os.path.exists(filepath):
return filepath
elif GS.caseSensitivePaths:
filepath = fixBrokenPath(filepath)
if os.path.exists(filepath):
return filepath
else:
filepath = path
if os.path.exists(filepath):
if GS.verbosity > 2:
print("Found", filepath)
return filepath
LS.missingAssets[ref] = True
msg = ("Did not find path:\n\"%s\"\nRef:\"%s\"" % (filepath, ref))
reportError(msg, trigger=(3,4))
return None
|
[
"urllib.parse.unquote",
"os.path.basename",
"os.path.isdir",
"os.path.exists",
"urllib.parse.quote",
"os.path.split",
"os.path.join",
"os.listdir"
] |
[((17011, 17023), 'urllib.parse.unquote', 'unquote', (['ref'], {}), '(ref)\n', (17018, 17023), False, 'from urllib.parse import unquote\n'), ((17879, 17903), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (17893, 17903), False, 'import os\n'), ((16015, 16034), 'os.path.split', 'os.path.split', (['head'], {}), '(head)\n', (16028, 16034), False, 'import os\n'), ((16386, 16409), 'os.path.join', 'os.path.join', (['check', 'pc'], {}), '(check, pc)\n', (16398, 16409), False, 'import os\n'), ((9032, 9057), 'os.path.basename', 'os.path.basename', (['self.id'], {}), '(self.id)\n', (9048, 9057), False, 'import os\n'), ((14105, 14114), 'urllib.parse.quote', 'quote', (['id'], {}), '(id)\n', (14110, 14114), False, 'from urllib.parse import quote\n'), ((16323, 16344), 'os.path.exists', 'os.path.exists', (['check'], {}), '(check)\n', (16337, 16344), False, 'import os\n'), ((16425, 16445), 'os.path.exists', 'os.path.exists', (['cand'], {}), '(cand)\n', (16439, 16445), False, 'import os\n'), ((14857, 14877), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (14871, 14877), False, 'import os\n'), ((15405, 15424), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (15418, 15424), False, 'import os\n'), ((16587, 16620), 'os.path.join', 'os.path.join', (['check', 'corrected[0]'], {}), '(check, corrected[0])\n', (16599, 16620), False, 'import os\n'), ((17607, 17631), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (17621, 17631), False, 'import os\n'), ((15459, 15475), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (15469, 15475), False, 'import os\n'), ((16483, 16500), 'os.listdir', 'os.listdir', (['check'], {}), '(check)\n', (16493, 16500), False, 'import os\n'), ((16831, 16848), 'os.listdir', 'os.listdir', (['check'], {}), '(check)\n', (16841, 16848), False, 'import os\n'), ((17775, 17799), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (17789, 17799), False, 'import os\n')]
|
#!/usr/bin/python
import requests
import requests_cache
import time
import os
from itertools import chain
from sys import exit
from userExceptions import InvalidType, NotCoinSelected, FiatInvalidType, FiatNotValid
class coinMarket:
def __init__(self,fiat=""):
""" fiat: optional fiat currency code (empty string by default) used to
check the value of our crypto against it.
"""
self.fiat={"AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK",\
"EUR","GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN",\
"MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB",\
"TRY", "TWD", "ZAR"}
self.response=""
self.coinNames={}
self.wholeContent=""
self._setCache()
#requests_cache.install_cache(cache_name='coinMarket_cache', backend='sqlite', expire_after=120)
## function to fetch all the coins listed on CoinMarketCap
now = time.ctime(int(time.time()))
self.getCoinNames(fiat)
def _setCache(self):
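# Cache CoinMarketCap responses on disk (sqlite backend) under
# <filesystem root>/tmp/CryptoToolCache, expiring entries after 120 seconds.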
folderName="/CryptoToolCache"
cacheFileName="coinMarket_cache"
root_os= os.path.abspath(os.sep)
cache_dir= os.path.join(root_os,"tmp"+folderName)
if(not os.path.exists(cache_dir)):
os.makedirs(cache_dir, exist_ok=True)
requests_cache.install_cache(
cache_name=os.path.join(cache_dir, cacheFileName),backend='sqlite',expire_after=120
)
def _checkValidFiat(self,fiat):
""" Check whether the requested fiat currency is valid and build the convert query parameter."""
currencyFiat=""
try:
if(fiat!=""):
if(type(fiat)!=str):
print("Fiat invalid type")
raise FiatInvalidType
else:
if(fiat not in self.fiat):
raise FiatNotValid
else:
currencyFiat="convert="+fiat
except FiatInvalidType:
print("Fiat type must be a string")
exit(0)
except FiatNotValid:
print("Fiat type not available")
exit(0)
else:
return(currencyFiat)
def _checkValidCoin(self,coin):
isValidCoin=False
try:
if not coin:
raise NotCoinSelected
else:
if(type(coin) is not (str)):
raise InvalidType
except NotCoinSelected:
print("You need to input a coin")
exit(0)
except InvalidType:
print("Coin value must be a string")
exit(0)
else:
isValidCoin=True
return(isValidCoin)
def getCoinNames(self,fiat=""):
allData=self.getAllCoins(fiat=fiat)
with open("currencyData.json",'w') as jsonFile:
jsonFile.write(str(allData))
if(allData):
for data in allData:
self.coinNames[data["name"]]=(data["symbol"],data["id"])
self.wholeContent=allData
def getAllCoins(self,fiat=""):
currencyFiat=self._checkValidFiat(fiat)
if(currencyFiat):
URL="https://api.coinmarketcap.com/v1/ticker/?"+str(currencyFiat)+"&limit=0"
else:
URL="https://api.coinmarketcap.com/v1/ticker/?limit=0"
try:
now = time.ctime(int(time.time()))
self.response=requests.get(URL)
print ("Time: {0} / Used Cache: {1}".format(now, self.response.from_cache))
if(self.response.status_code != requests.codes.ok):
self.response.raise_for_status()
else:
return(self.response.json())
except Exception as e:
print(e)
exit(0)
def getCoin(self,coin,fiat=""):
""" Get the data for a specific coin that you want to explore.
coin: string value which represents the coin to look up, either the
coin abbreviation (symbol) or the coin name.
"""
isValidCoin = self._checkValidCoin(coin)
currencyFiat = self._checkValidFiat(fiat)
if(isValidCoin):
if coin in self.coinNames.keys():
(_,coinId)=self.coinNames[str(coin)]
else:
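# The key is not a known coin name: scan the stored (symbol, id) tuples for
# one containing the requested coin and take the matching id.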
results =list(chain.from_iterable( (coinList[1], coin in coinList )
for coinList in self.coinNames.values() if coin in coinList ))
if(len(results)!=0):
coinId=results[0]
else:
coinId=None
print("The coin does not exist")
if(currencyFiat):
URL="https://api.coinmarketcap.com/v1/ticker/"+str(coinId)+"/?"+currencyFiat
else:
URL="https://api.coinmarketcap.com/v1/ticker/"+str(coinId)+"/"
try:
self.response=requests.get(URL)
if(self.response.status_code != requests.codes.ok):
self.response.raise_for_status()
else:
data=self.response.json()
self.parseData(data[0],fiat)
except Exception as e:
print(e)
def getListCoins(self,coins,fiat=""):
arrayValidCoins=[]
for coin in coins:
if coin in self.coinNames.keys():
arrayValidCoins.append((self.coinNames[coin][1],True))
else:
results =list(chain.from_iterable( (coinList[1], coin in coinList )
for coinList in self.coinNames.values() if coin in coinList ))
if(len(results)==0):
arrayValidCoins.append((coin,False))
else:
arrayValidCoins.append(tuple(results))
currencyFiat=self._checkValidFiat(fiat)
coinInformation=[]
for tupleCoin in arrayValidCoins:
if(tupleCoin[1]==True):
for item in self.wholeContent:
if(item["id"]==tupleCoin[0]):
coinInformation.append(item)
else:
#coinInformation.append("coin: "+ tupleCoin[0]+" is not a valid one")
print("coin: "+ tupleCoin[0]+" is not a valid one")
for coin in coinInformation:
self.parseData(coin,fiat)
def getGlobalData(self,fiat=""):
currencyFiat=self._checkValidFiat(fiat)
if(currencyFiat):
URL="https://api.coinmarketcap.com/v1/global/"+"?"+currencyFiat
else:
URL="https://api.coinmarketcap.com/v1/global/"
try:
self.response = requests.get(URL)
if(self.response.status_code != requests.codes.ok):
self.response.raise_for_status()
else:
data = self.response.json()
with open("global.json",'w') as jsonFile:
jsonFile.write(str(data))
print("\n")
print("Total Market Cap USD: "+str(data["total_market_cap_usd"]))
if(fiat!=""):
market_cap="total_market_cap_" + fiat.lower()
print("Total Market Cap "+str(fiat)+": "+ str(data[market_cap]))
print("Active currencies: "+str(data["active_currencies"]))
print("Active assets: "+str(data["active_assets"]))
print("Active Markets: "+str(data["active_markets"]))
print("\n")
except Exception as e:
print(e)
exit(0)
def parseData(self,coin,fiat=""):
print("\n")
print("ID: "+str(coin["id"]))
print("Name: "+ str(coin["name"]))
print("Symbol: " +str(coin["symbol"]))
print("Rank: "+str(coin["rank"]))
print("Available Supply: "+str(coin["available_supply"]))
print("Total Supply: "+str(coin["total_supply"]))
print("Price USD: " +str(coin["price_usd"]))
print("Price BTC: "+str(coin["price_btc"]))
print("Market Cap USD: "+str(coin["market_cap_usd"]))
print("Percent Change for 1 hour : "+str(coin["percent_change_1h"]))
print("Percent Change for 24 hour : "+str(coin["percent_change_24h"]))
print("Percent Change for 7 days : "+str(coin["percent_change_7d"]))
if(fiat!=""):
price_string="price_"
market_string="market_cap_"
lowerFiat=fiat.lower()
price_string=price_string+lowerFiat
market_string=market_string+lowerFiat
print("Price "+str(fiat)+": "+str(coin[price_string]))
print("Market Cap "+str(fiat)+": "+str(coin[market_string]))
print("\n")
|
[
"os.path.abspath",
"os.makedirs",
"os.path.exists",
"time.time",
"requests.get",
"os.path.join",
"sys.exit"
] |
[((1152, 1175), 'os.path.abspath', 'os.path.abspath', (['os.sep'], {}), '(os.sep)\n', (1167, 1175), False, 'import os\n'), ((1195, 1236), 'os.path.join', 'os.path.join', (['root_os', "('tmp' + folderName)"], {}), "(root_os, 'tmp' + folderName)\n", (1207, 1236), False, 'import os\n'), ((1250, 1275), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (1264, 1275), False, 'import os\n'), ((1290, 1327), 'os.makedirs', 'os.makedirs', (['cache_dir'], {'exist_ok': '(True)'}), '(cache_dir, exist_ok=True)\n', (1301, 1327), False, 'import os\n'), ((3432, 3449), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (3444, 3449), False, 'import requests\n'), ((6685, 6702), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (6697, 6702), False, 'import requests\n'), ((982, 993), 'time.time', 'time.time', ([], {}), '()\n', (991, 993), False, 'import time\n'), ((1390, 1428), 'os.path.join', 'os.path.join', (['cache_dir', 'cacheFileName'], {}), '(cache_dir, cacheFileName)\n', (1402, 1428), False, 'import os\n'), ((2050, 2057), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2054, 2057), False, 'from sys import exit\n'), ((2145, 2152), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2149, 2152), False, 'from sys import exit\n'), ((2535, 2542), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2539, 2542), False, 'from sys import exit\n'), ((2633, 2640), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2637, 2640), False, 'from sys import exit\n'), ((3784, 3791), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (3788, 3791), False, 'from sys import exit\n'), ((4908, 4925), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (4920, 4925), False, 'import requests\n'), ((7591, 7598), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (7595, 7598), False, 'from sys import exit\n'), ((3392, 3403), 'time.time', 'time.time', ([], {}), '()\n', (3401, 3403), False, 'import time\n')]
|