code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from typing import Optional
import peewee
import pendulum
from workplanner.utils import normalize_datetime, strftime_utc
class DateTimeUTCField(peewee.DateTimeField):
def python_value(self, value: str) -> Optional[pendulum.DateTime]:
if value is not None:
return pendulum.parse(value, tz=pendulum.timezone("UTC"))
return None
def db_value(self, value: Optional[pendulum.DateTime]) -> Optional[str]:
if value is not None:
value = normalize_datetime(value)
if value.tzinfo is None:
raise ValueError(f"{value} timezone not set.")
value = strftime_utc(value)
return value
| [
"pendulum.timezone",
"workplanner.utils.normalize_datetime",
"workplanner.utils.strftime_utc"
] | [((491, 516), 'workplanner.utils.normalize_datetime', 'normalize_datetime', (['value'], {}), '(value)\n', (509, 516), False, 'from workplanner.utils import normalize_datetime, strftime_utc\n'), ((639, 658), 'workplanner.utils.strftime_utc', 'strftime_utc', (['value'], {}), '(value)\n', (651, 658), False, 'from workplanner.utils import normalize_datetime, strftime_utc\n'), ((316, 340), 'pendulum.timezone', 'pendulum.timezone', (['"""UTC"""'], {}), "('UTC')\n", (333, 340), False, 'import pendulum\n')] |
from collections import namedtuple
from django import template
from wagtail_to_ion.models import get_ion_content_type_description_model
register = template.Library()
@register.simple_tag
def content_type_description(app_label, model_name, verbose_name):
ContentTypeDescription = get_ion_content_type_description_model()
content_type_description = namedtuple("content_type_description", ["description", "image_url"])
image_path = None
try:
description_object = ContentTypeDescription.objects.get(content_type__app_label=app_label, content_type__model=model_name)
except ContentTypeDescription.DoesNotExist:
description_object = None
if description_object:
if description_object.example_image:
image_path = description_object.example_image.url
return content_type_description(description_object.description, image_path)
else:
return content_type_description('Page of type "{}"'.format(verbose_name), None)
| [
"wagtail_to_ion.models.get_ion_content_type_description_model",
"collections.namedtuple",
"django.template.Library"
] | [((151, 169), 'django.template.Library', 'template.Library', ([], {}), '()\n', (167, 169), False, 'from django import template\n'), ((289, 329), 'wagtail_to_ion.models.get_ion_content_type_description_model', 'get_ion_content_type_description_model', ([], {}), '()\n', (327, 329), False, 'from wagtail_to_ion.models import get_ion_content_type_description_model\n'), ((361, 429), 'collections.namedtuple', 'namedtuple', (['"""content_type_description"""', "['description', 'image_url']"], {}), "('content_type_description', ['description', 'image_url'])\n", (371, 429), False, 'from collections import namedtuple\n')] |
#Youtube channels
#
#
#
#
#
#<NAME>
import re
import os
import sys
import urllib2
import buggalo
import xbmcgui
import xbmcaddon
import xbmcplugin
BASE_URL = 'http://www.husham.com/?p=11081'
PLAY_VIDEO_PATH = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s'
PLAYLIST_PATH = 'plugin://plugin.video.youtube/user/stampylonghead/'
PLAYLIST_PATH2 = 'plugin://plugin.video.youtube/user/stampylongnose/'
PLAYLIST_PATH3 = 'plugin://plugin.video.youtube/user/HushamMemarGames/'
PLAYLIST_PATH4 = 'plugin://plugin.video.youtube/user/PewDiePie/'
PLAYLIST_PATH5 = 'plugin://plugin.video.youtube/user/TheMasterOv/'
PLAYLIST_PATH6 = 'plugin://plugin.video.youtube/user/SSundee/'
PLAYLIST_PATH7 = 'plugin://plugin.video.youtube/user/Vikkstar123HD/'
PLAYLIST_PATH8 = 'plugin://plugin.video.youtube/user/Vikkstar123/'
PLAYLIST_PATH9 = 'plugin://plugin.video.youtube/user/TBNRfrags/'
PLAYLIST_PATH10 = 'plugin://plugin.video.youtube/user/TBNRkenworth/'
PLAYLIST_PATH11 = 'plugin://plugin.video.youtube/channel/UCxNMYToYIBPYV829BJcmUQg/'
PLAYLIST_PATH12 = 'plugin://plugin.video.youtube/channel/UCHRTfR2r0Ss3UjFyw7gSA-A/'
PLAYLIST_PATH13 = 'plugin://plugin.video.youtube/channel/UC7p-adWThwCeIQiQ6Hw505g/'
PLAYLIST_PATH14 = 'plugin://plugin.video.youtube/user/JeromeASF/'
PLAYLIST_PATH15 = 'plugin://plugin.video.youtube/user/MoreAliA/'
PLAYLIST_PATH16 = 'plugin://plugin.video.youtube/channel/UCpGdL9Sn3Q5YWUH2DVUW1Ug/'
PLAYLIST_PATH17 = 'plugin://plugin.video.youtube/user/GamingWithJen/'
PLAYLIST_PATH18 = 'plugin://plugin.video.youtube/user/MrWoofless/'
PLAYLIST_PATH19 = 'plugin://plugin.video.youtube/channel/UC9GXn5Y56nNDUNigTS2Ib4Q/'
PLAYLIST_PATH20 = 'plugin://plugin.video.youtube/user/TheBajanCanadian/'
PLAYLIST_PATH21 = 'plugin://plugin.video.youtube/user/BrenyBeast/'
PLAYLIST_PATH22 = 'plugin://plugin.video.youtube/user/HuskyMUDKIPZ/'
PLAYLIST_PATH23 = 'plugin://plugin.video.youtube/user/TheAtlanticCraft/'
PLAYLIST_PATH24 = 'plugin://plugin.video.youtube/user/TheDiamondMinecart/'
PLAYLIST_PATH25 = 'plugin://plugin.video.youtube/user/CreepersEdge/'
PLAYLIST_PATH26 = 'plugin://plugin.video.youtube/user/CraftBattleDuty/'
PLAYLIST_PATH27 = 'plugin://plugin.video.youtube/user/FearADubh/'
PLAYLIST_PATH28 = 'plugin://plugin.video.youtube/user/iBallisticSquid/'
PLAYLIST_PATH29 = 'plugin://plugin.video.youtube/user/DeadloxMC/'
PLAYLIST_PATH30 = 'plugin://plugin.video.youtube/user/LittleLizardGaming/'
PLAYLIST_PATH31 = 'plugin://plugin.video.youtube/user/prestonplayz/'
PLAYLIST_PATH32 = 'plugin://plugin.video.youtube/user/MisterCrainer/'
if __name__ == '__main__':
ADDON = xbmcaddon.Addon()
HANDLE = int(sys.argv[1])
try:
u = urllib2.urlopen(BASE_URL)
html = u.read()
u.close()
m = re.search('//www.youtube.com/embed/([^"]+)"', html, re.DOTALL)
if m:
item = xbmcgui.ListItem('stampylonghead',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/stampycat%20pic.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH, item, True)
item = xbmcgui.ListItem('stampylongnose',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/stampycat%20pic.PNG.png')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH2, item, True)
item = xbmcgui.ListItem('Memar Games Channel',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/memar%20games.JPG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH3, item, True)
item = xbmcgui.ListItem('PewDiePie',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/pewdiepie.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH4, item, True)
item = xbmcgui.ListItem('TheMasterOv',
ADDON.getLocalizedString(30001),
iconImage='https://github.com/hmemar/husham.com/blob/master/repo/Haydar%20Games/Images/themasterov.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH5, item, True)
item = xbmcgui.ListItem('SSundee',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/ssundee.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH6, item, True)
item = xbmcgui.ListItem('Vikkstar123HD',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/vikkstarhd.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH7, item, True)
item = xbmcgui.ListItem('Vikkstar123',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/vikkstar.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH8, item, True)
item = xbmcgui.ListItem('TBNRfrags/',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/tbnrfrags.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH9, item, True)
item = xbmcgui.ListItem('TBNRkenworth',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/tbnrkenworth.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH10, item, True)
item = xbmcgui.ListItem('PlayClashOfClans',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/playclashofclans.JPG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH11, item, True)
item = xbmcgui.ListItem('ryguyrocky',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/ryguyrocky.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH12, item, True)
item = xbmcgui.ListItem('Bayanidood',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/Bayanidood.JPG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH13, item, True)
item = xbmcgui.ListItem('JeromeASF',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/Jerome.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH14, item, True)
item = xbmcgui.ListItem('MoreAliA',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/more%20alia.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH15, item, True)
item = xbmcgui.ListItem('PopularMMOs',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/pop.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH16, item, True)
item = xbmcgui.ListItem('GamingWithJen',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/gamingwithjen.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH17, item, True)
item = xbmcgui.ListItem('MrWoofless',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/mrwoofless.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH18, item, True)
item = xbmcgui.ListItem('WillMcHD',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/mrwilliamo.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH19, item, True)
item = xbmcgui.ListItem('TheBajanCanadian',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/banjancanadian.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH20, item, True)
item = xbmcgui.ListItem('BrenyBeast',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/brenybeast.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH21, item, True)
item = xbmcgui.ListItem('HuskyMUDKIPZ',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/huskymudkipz.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH22, item, True)
item = xbmcgui.ListItem('TheAtlanticCraft',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/theatlanticcraft.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH23, item, True)
item = xbmcgui.ListItem('TheDiamondMinecart',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/dantdm.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH24, item, True)
item = xbmcgui.ListItem('CreepersEdge',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/creepersedge.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH25, item, True)
item = xbmcgui.ListItem('CraftBattleDuty',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/lachlan.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH26, item, True)
item = xbmcgui.ListItem('FearADubh',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/ashduhb.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH27, item, True)
item = xbmcgui.ListItem('iBallisticSquid',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/ibillisticsquid.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH28, item, True)
item = xbmcgui.ListItem('DeadloxMC',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/deadlox.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH29, item, True)
item = xbmcgui.ListItem('LittleLizardGaming',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/littlelizardgaming.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH30, item, True)
item = xbmcgui.ListItem('prestonplayz',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/preston.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH31, item, True)
item = xbmcgui.ListItem('MisterCrainer',
ADDON.getLocalizedString(30001),
iconImage='https://raw.githubusercontent.com/hmemar/husham.com/master/repo/Haydar%20Games/Images/mistercrainer.PNG')
xbmcplugin.addDirectoryItem(HANDLE, PLAYLIST_PATH32, item, True)
xbmcplugin.endOfDirectory(HANDLE)
except:
buggalo.onExceptionRaised()
| [
"re.search",
"urllib2.urlopen",
"xbmcaddon.Addon",
"buggalo.onExceptionRaised",
"xbmcplugin.endOfDirectory",
"xbmcplugin.addDirectoryItem"
] | [((2592, 2609), 'xbmcaddon.Addon', 'xbmcaddon.Addon', ([], {}), '()\n', (2607, 2609), False, 'import xbmcaddon\n'), ((2662, 2687), 'urllib2.urlopen', 'urllib2.urlopen', (['BASE_URL'], {}), '(BASE_URL)\n', (2677, 2687), False, 'import urllib2\n'), ((2744, 2806), 're.search', 're.search', (['"""//www.youtube.com/embed/([^"]+)\\""""', 'html', 're.DOTALL'], {}), '(\'//www.youtube.com/embed/([^"]+)"\', html, re.DOTALL)\n', (2753, 2806), False, 'import re\n'), ((13994, 14027), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['HANDLE'], {}), '(HANDLE)\n', (14019, 14027), False, 'import xbmcplugin\n'), ((3111, 3173), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH, item, True)\n', (3138, 3173), False, 'import xbmcplugin\n'), ((3468, 3531), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH2', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH2, item, True)\n', (3495, 3531), False, 'import xbmcplugin\n'), ((3825, 3888), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH3', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH3, item, True)\n', (3852, 3888), False, 'import xbmcplugin\n'), ((4168, 4231), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH4', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH4, item, True)\n', (4195, 4231), False, 'import xbmcplugin\n'), ((4505, 4568), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH5', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH5, item, True)\n', (4532, 4568), False, 'import xbmcplugin\n'), ((4844, 4907), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH6', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH6, item, True)\n', (4871, 4907), False, 'import xbmcplugin\n'), ((5192, 5255), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH7', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH7, item, True)\n', (5219, 5255), False, 'import xbmcplugin\n'), ((5536, 5599), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH8', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH8, item, True)\n', (5563, 5599), False, 'import xbmcplugin\n'), ((5880, 5943), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH9', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH9, item, True)\n', (5907, 5943), False, 'import xbmcplugin\n'), ((6229, 6293), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH10', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH10, item, True)\n', (6256, 6293), False, 'import xbmcplugin\n'), ((6587, 6651), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH11', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH11, item, True)\n', (6614, 6651), False, 'import xbmcplugin\n'), ((6933, 6997), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH12', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH12, item, True)\n', (6960, 6997), False, 'import xbmcplugin\n'), ((7279, 7343), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH13', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH13, item, True)\n', (7306, 7343), False, 'import xbmcplugin\n'), ((7620, 7684), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH14', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH14, item, True)\n', (7647, 7684), False, 'import xbmcplugin\n'), ((7965, 8029), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH15', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH15, item, True)\n', (7992, 8029), False, 'import xbmcplugin\n'), ((8305, 8369), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH16', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH16, item, True)\n', (8332, 8369), False, 'import xbmcplugin\n'), ((8657, 8721), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH17', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH17, item, True)\n', (8684, 8721), False, 'import xbmcplugin\n'), ((9003, 9067), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH18', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH18, item, True)\n', (9030, 9067), False, 'import xbmcplugin\n'), ((9347, 9411), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH19', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH19, item, True)\n', (9374, 9411), False, 'import xbmcplugin\n'), ((9703, 9767), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH20', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH20, item, True)\n', (9730, 9767), False, 'import xbmcplugin\n'), ((10049, 10113), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH21', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH21, item, True)\n', (10076, 10113), False, 'import xbmcplugin\n'), ((10399, 10463), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH22', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH22, item, True)\n', (10426, 10463), False, 'import xbmcplugin\n'), ((10757, 10821), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH23', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH23, item, True)\n', (10784, 10821), False, 'import xbmcplugin\n'), ((11107, 11171), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH24', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH24, item, True)\n', (11134, 11171), False, 'import xbmcplugin\n'), ((11457, 11521), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH25', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH25, item, True)\n', (11484, 11521), False, 'import xbmcplugin\n'), ((11805, 11869), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH26', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH26, item, True)\n', (11832, 11869), False, 'import xbmcplugin\n'), ((12147, 12211), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH27', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH27, item, True)\n', (12174, 12211), False, 'import xbmcplugin\n'), ((12503, 12567), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH28', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH28, item, True)\n', (12530, 12567), False, 'import xbmcplugin\n'), ((12845, 12909), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH29', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH29, item, True)\n', (12872, 12909), False, 'import xbmcplugin\n'), ((13207, 13271), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH30', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH30, item, True)\n', (13234, 13271), False, 'import xbmcplugin\n'), ((13552, 13616), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH31', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH31, item, True)\n', (13579, 13616), False, 'import xbmcplugin\n'), ((13904, 13968), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['HANDLE', 'PLAYLIST_PATH32', 'item', '(True)'], {}), '(HANDLE, PLAYLIST_PATH32, item, True)\n', (13931, 13968), False, 'import xbmcplugin\n'), ((14048, 14075), 'buggalo.onExceptionRaised', 'buggalo.onExceptionRaised', ([], {}), '()\n', (14073, 14075), False, 'import buggalo\n')] |
# -*- coding: utf-8 -*-
""" weibo login """
import json
import os
import logging
import requests
from weibospider.WeiboAccounts import accounts
logger = logging.getLogger(__name__)
def getCookies(username, password):
""" 单个账号登录 """
def getAllAccountsCookies(rconn):
""" 将所有账号登录后把cookies存入reids """
for account in accounts:
key, username, password = "Spider:Cookies:%s--%s" % (account[0], account[1]), account[0], account[1]
if rconn.get(key) is None:
rconn.set(key), getCookies(username, password)
if "".join(rconn.keys()).count("Spider:Cookies") == 0:
logger.warning('None cookies, Stopping...')
os.system("pause")
def resetCookies(rconn, accounttext):
""" 重新获取cookies """
username = accounttext.split("--")[0]
password = accounttext.split("--")[1]
rconn.set("Spider:Cookies:%s" % accounttext, getCookies(username, password))
logger.warning("The cookie of %s has been updated successfully !" % accounttext)
def removeCookies(rconn, accounttext):
""" 删除cookies """
rconn.delete("Spider:Cookies:%s" % accounttext)
logger.warning("The cookie of %s has been deleted successfully !" % accounttext)
| [
"logging.getLogger",
"os.system"
] | [((157, 184), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (174, 184), False, 'import logging\n'), ((666, 684), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (675, 684), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 20 15:10:09 2019
@author: cooneyg
"""
import pandas as pd
ca_tech_mix = pd.read_csv('data/canadian_imports.csv')
| [
"pandas.read_csv"
] | [((121, 161), 'pandas.read_csv', 'pd.read_csv', (['"""data/canadian_imports.csv"""'], {}), "('data/canadian_imports.csv')\n", (132, 161), True, 'import pandas as pd\n')] |
import json
import pytest
import xmljson
from lxml.etree import fromstring
schema_1 = "<schema-template><fields><field><name>account_name</name><type>varchar</type><is-null>false</is-null><table>unification_lookup</table></field><field><name>Heartrate</name><type>int</type><is-null>true</is-null><table>data_1</table></field><field><name>GeoLocation</name><type>int</type><is-null>true</is-null><table>data_1</table></field><field><name>TimeStamp</name><type>int</type><is-null>true</is-null><table>data_1</table></field><field><name>Pulse</name><type>int</type><is-null>true</is-null><table>data_1</table></field></fields></schema-template>" # noqa
schema_2 = "<schema-template><fields><field><name>account_name</name><type>varchar</type><is-null>false</is-null><table>unification_lookup</table></field><field><name>DataBlob</name><type>binarydata</type><is-null>true</is-null><table>data_1</table></field><field><name>BlobSize</name><type>int</type><is-null>true</is-null><table>data_1</table></field></fields></schema-template>" # noqa
schema_3 = "<schema-template><fields><field><name>account_name</name><type>varchar</type><is-null>false</is-null><table>unification_lookup</table></field><field><name>Image</name><type>base64_mime_image</type><is-null>true</is-null><table>data_1</table></field></fields></schema-template>" # noqa
@pytest.mark.parametrize("xml_str", [schema_1, schema_2, schema_3])
def test_xml_to_json(xml_str):
xml = fromstring(xml_str)
json_str = json.dumps(xmljson.gdata.data(xml))
d = json.loads(json_str)
print(json_str)
print(d)
| [
"pytest.mark.parametrize",
"json.loads",
"xmljson.gdata.data",
"lxml.etree.fromstring"
] | [((1346, 1412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""xml_str"""', '[schema_1, schema_2, schema_3]'], {}), "('xml_str', [schema_1, schema_2, schema_3])\n", (1369, 1412), False, 'import pytest\n'), ((1454, 1473), 'lxml.etree.fromstring', 'fromstring', (['xml_str'], {}), '(xml_str)\n', (1464, 1473), False, 'from lxml.etree import fromstring\n'), ((1533, 1553), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (1543, 1553), False, 'import json\n'), ((1500, 1523), 'xmljson.gdata.data', 'xmljson.gdata.data', (['xml'], {}), '(xml)\n', (1518, 1523), False, 'import xmljson\n')] |
import pandas as pd
import numpy as np
from sklearn.feature_selection import (SelectKBest, SequentialFeatureSelector,
f_regression, mutual_info_regression,
mutual_info_classif, f_classif)
from sklearn.svm import LinearSVC, LinearSVR
def pearson_correlation_fs(_df, cls, threshold_corr=0.75):
"""
function to check correlation of each pair of features a
and discard the one from the pair with corr > 'threshold_corr'
among the pair, the one with lower corr with the 'cls' is dropped
parameters-
@_df: train dataset
@cls: name of the class/output column
@threshold_corr: correlation threshold for feature selection
returns-
@df: train dataset with the selected features
@cols_to_drop: columns to drop from the train dataset
"""
df = _df.copy()
corr_matrix = df.corr()
cols_to_drop = set() # keep only unique features
# get the class column index
for idx in range(len(corr_matrix.columns)):
if corr_matrix.columns[idx]==cls :
cls_col_idx = idx
break
# find the features to drop
for col1_idx in range(len(corr_matrix.columns)):
for col2_idx in range(col1_idx):
col1 = corr_matrix.columns[col1_idx]
col2 = corr_matrix.columns[col2_idx]
# do nothing if any column is the output class column
# or any of the columns is already dropped
if col1 == cls or col2 == cls or col1 in cols_to_drop or col2 in cols_to_drop:
continue
if abs(corr_matrix.iloc[col1_idx, col2_idx]) > threshold_corr:
if abs(corr_matrix.iloc[col1_idx, cls_col_idx]) < abs(corr_matrix.iloc[col2_idx, cls_col_idx]):
col_to_drop = col1
else:
col_to_drop = col2
print(f'dropping {col_to_drop} from ({col1}, {col2})')
cols_to_drop.add(col_to_drop)
cols_to_drop = list(cols_to_drop)
df.drop(columns=cols_to_drop, inplace=True)
return df, cols_to_drop
def seleckKBest_fs(_df, cls, is_regression,
fixed_cols=['Station_Barisal', 'Station_Gazipur', 'Station_Rangpur', 'Station_Habiganj'],
num_features=7,
fs_method=mutual_info_regression):
"""
function to select 'k' features with statistical evaluation
parameters-
@_df: train dataset
@cls: name of the class/output column
@fixed_cols: (do not pass anything)
@num_features: number of features to be selected
@fs_methods: (check sklearn 'SelectKBest' documentation or don't pass anything)
returns-
@df: train dataset with the selected features
@cols_to_drop: columns to drop from the train dataset
"""
df = _df.copy()
fixed_cols.append(cls)
X = df.drop(columns=fixed_cols)
y = df[cls]
if is_regression:
fs_method = mutual_info_regression
else:
fs_method = mutual_info_classif
# select top 'num_features' features based on mutual info regression
# total features would be 'num_features' + 1(station column)
selector = SelectKBest(fs_method, k=num_features)
selector.fit(X, y)
selected_cols = list(X.columns[selector.get_support()])
cols_to_drop = []
for col in df.columns:
if col in [cls, 'Station_Barisal', 'Station_Gazipur', 'Station_Rangpur', 'Station_Habiganj']:
continue
elif col not in selected_cols:
cols_to_drop.append(col)
df.drop(columns=cols_to_drop, inplace=True)
return df, cols_to_drop
def selectSequential_fs(_df, cls, is_regression,
fixed_cols=['Station_Barisal', 'Station_Gazipur', 'Station_Rangpur', 'Station_Habiganj'],
num_features=7,
fs_method='forward'):
"""
function to select 'k' features by evaluating performance on Linear SVM
parameters-
@_df: train dataset
@cls: name of the class/output column
@fixed_cols: (do not pass anything)
@num_features: number of features to be selected
@fs_methods: (check sklearn 'SequentialFeatureSelector' documentation or don't pass anything)
returns-
@df: train dataset with the selected features
@cols_to_drop: columns to drop from the train dataset
"""
df = _df.copy()
fixed_cols.append(cls)
X = df.drop(columns=fixed_cols)
y = df[cls]
if is_regression:
estimator = LinearSVR(C=0.01, random_state=42)
scoring='r2'
else:
estimator = LinearSVC(C=0.01, penalty="l1", dual=False, random_state=42)
scoring = 'accuracy'
# select top 'num_features' features based on mutual info regression
# total features would be 'num_features' + 1(station column)
selector = SequentialFeatureSelector(estimator=estimator, n_features_to_select=num_features, cv=10, direction=fs_method, scoring=scoring)
selector.fit(X, y)
selected_cols = list(X.columns[selector.get_support()])
cols_to_drop = []
for col in df.columns:
if col in [cls, 'Station_Barisal', 'Station_Gazipur', 'Station_Rangpur', 'Station_Habiganj']:
continue
elif col not in selected_cols:
cols_to_drop.append(col)
df.drop(columns=cols_to_drop, inplace=True)
return df, cols_to_drop
def foo():
print('hello from featureSelection_util foo') | [
"sklearn.feature_selection.SelectKBest",
"sklearn.svm.LinearSVC",
"sklearn.feature_selection.SequentialFeatureSelector",
"sklearn.svm.LinearSVR"
] | [((3285, 3323), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['fs_method'], {'k': 'num_features'}), '(fs_method, k=num_features)\n', (3296, 3323), False, 'from sklearn.feature_selection import SelectKBest, SequentialFeatureSelector, f_regression, mutual_info_regression, mutual_info_classif, f_classif\n'), ((4963, 5094), 'sklearn.feature_selection.SequentialFeatureSelector', 'SequentialFeatureSelector', ([], {'estimator': 'estimator', 'n_features_to_select': 'num_features', 'cv': '(10)', 'direction': 'fs_method', 'scoring': 'scoring'}), '(estimator=estimator, n_features_to_select=\n num_features, cv=10, direction=fs_method, scoring=scoring)\n', (4988, 5094), False, 'from sklearn.feature_selection import SelectKBest, SequentialFeatureSelector, f_regression, mutual_info_regression, mutual_info_classif, f_classif\n'), ((4634, 4668), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {'C': '(0.01)', 'random_state': '(42)'}), '(C=0.01, random_state=42)\n', (4643, 4668), False, 'from sklearn.svm import LinearSVC, LinearSVR\n'), ((4716, 4776), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(0.01)', 'penalty': '"""l1"""', 'dual': '(False)', 'random_state': '(42)'}), "(C=0.01, penalty='l1', dual=False, random_state=42)\n", (4725, 4776), False, 'from sklearn.svm import LinearSVC, LinearSVR\n')] |
from unittest import mock
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from website.models import PracticalInfo
class PracticalInfoViewTests(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_user('bobby', '<EMAIL>', 'tables')
def test_not_logged_in(self):
response = self.client.get('/info')
url = "{}?{}".format(settings.LOGIN_URL, urlencode({'next': '/info'}))
self.assertRedirects(response, url)
@mock.patch('website.views.logger')
def test_no_info(self, mock_logger):
self.client.login(username='bobby', password='<PASSWORD>')
response = self.client.get('/info')
self.assertEqual(404, response.status_code)
mock_logger.warning.assert_called_with('No practical info added')
def test_practical_info_added(self):
practical_info_content = b'The moose is on the loose'
PracticalInfo.objects.create(content=practical_info_content)
self.client.login(username='bobby', password='<PASSWORD>')
response = self.client.get('/info')
self.assertEqual(200, response.status_code)
self.assertIn(practical_info_content, response.content)
| [
"urllib.parse.urlencode",
"django.contrib.auth.get_user_model",
"website.models.PracticalInfo.objects.create",
"unittest.mock.patch"
] | [((594, 628), 'unittest.mock.patch', 'mock.patch', (['"""website.views.logger"""'], {}), "('website.views.logger')\n", (604, 628), False, 'from unittest import mock\n'), ((294, 310), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (308, 310), False, 'from django.contrib.auth import get_user_model\n'), ((1020, 1080), 'website.models.PracticalInfo.objects.create', 'PracticalInfo.objects.create', ([], {'content': 'practical_info_content'}), '(content=practical_info_content)\n', (1048, 1080), False, 'from website.models import PracticalInfo\n'), ((514, 542), 'urllib.parse.urlencode', 'urlencode', (["{'next': '/info'}"], {}), "({'next': '/info'})\n", (523, 542), False, 'from urllib.parse import urlencode\n')] |
import numpy as np
class DOM:
"""
Object representing a discretized observation model. Comprised primarily by the
DOM.edges and DOM.chi vectors, which represent the discrete mask and state-dependent
emission probabilities, respectively.
"""
def __init__(self):
self.k = None
self.n_bins = None
self.edges = None
self.classes = None
self.chi = None
self.type = 'DOM'
self.n_params = None
def set_params(self, config):
"""
Set relevant parameters for DOM object.
Args:
config (dict): Parameters to set.
"""
params = {'n_bins', 'edges', 'classes', 'chi', 'n_params'}
self.__dict__.update((param, np.array(value)) for param, value in config.items() if param in params)
def initialize(self, k, stats):
"""
Initialize DOM parameters according to dataset properties.
Args:
k (int): Number of components to use
stats (dict): Dictionary of dataset sets, generated by Dataset.compute_stats()
"""
k = k + 5
qbin_sizes = 0.5 / k # Quantile sizes
qbin_edges = 0.25 + qbin_sizes*np.arange(0, k+1) # Edge locations (in quantile terms)
bin_edges = np.interp(qbin_edges, stats['quantile_basis'], stats['quantiles'])
self.k = k
self.n_bins = k + 2
self.classes = list(range(1, self.n_bins + 2))
self.edges = [-np.Inf] + [edge for edge in bin_edges] + [np.Inf]
self.chi = np.zeros((2, self.n_bins + 1))
dist = np.linspace(2, 1, self.n_bins) # Bins captured by observations
scaled_dist = 0.9 * dist / dist.sum() # Scaling by 0.9 to allow for 0.1 emission prob of NaN
self.chi[1, :-1] = scaled_dist # Paired emission dist
self.chi[0, :-1] = np.flip(scaled_dist) # Unpaired emission dist
self.chi[1, -1] = 0.1 # NaN observations
self.chi[0, -1] = 0.1 # NaN observations
self.n_params = 2*(self.n_bins-2)
def discretize(self, transcript):
"""
Compute the DOM class for all nucleotides in an RNA and save the resulting vector
to Transcript.obs_dom.
"""
# np.searchsorted is identical to the digitize call here, but marginally faster (especially
# for a large number of bins and/or a large number of RNAs).
# transcript.obs_dom = np.digitize(transcript.obs, bins=self.edges)
transcript.obs_dom = np.searchsorted(self.edges, transcript.obs, side='left')
def compute_emissions(self, transcript, reference=False):
"""
Compute emission probabilities according to the discretized observation model.
This amounts to simply accessing the correct indices of the DOM pdf matrix, chi.
Args:
transcript (src.patteRNA.Transcript.Transcript): Transcript to process
reference (bool): Whether or not it's a reference transcript
"""
if reference:
pass
transcript.B = self.chi[:, transcript.obs_dom-1]
@staticmethod
def post_process(transcript):
pass # No post-processing needed for DOM model
def m_step(self, transcript):
"""
Compute pseudo-counts en route to updating model parameters according to maximium-likelihood approach.
Args:
transcript (Transcript): Transcript to process
Returns:
params (dict): Partial pseudo-counts
"""
chi_0 = np.fromiter((transcript.gamma[0, transcript.obs_dom == dom_class].sum()
for dom_class in self.classes), float)
chi_1 = np.fromiter((transcript.gamma[1, transcript.obs_dom == dom_class].sum()
for dom_class in self.classes), float)
params = {'chi': np.vstack((chi_0, chi_1)),
'chi_norm': np.sum(transcript.gamma, axis=1)}
return params
def update_from_pseudocounts(self, pseudocounts, nan=False):
"""
Scheme model parameters from transcript-level pseudo-counts.
Args:
pseudocounts (dict): Dictionary of total pseudo-counts
nan (bool): Whether or not to treat NaNs as informative
"""
self.chi = pseudocounts['chi'] / pseudocounts['chi_norm'][:, None]
self.scale_chi(nan=nan)
def scale_chi(self, nan=False):
"""
Scale chi vector to a probability distribution.
Args:
nan (bool): Whether or not to treat NaNs as informative
"""
if nan:
self.chi[:, :] = self.chi[:, :] / np.sum(self.chi[:, :], axis=1)[:, np.newaxis]
else:
self.chi[:, :-1] = 0.9 * self.chi[:, :-1] / np.sum(self.chi[:, :-1], axis=1)[:, np.newaxis]
self.chi[:, -1] = 0.1 # NaN observations
def snapshot(self):
"""
Returns a text summary of model parameters.
"""
text = ""
text += "{}:\n{}\n".format('chi', np.array2string(self.chi))
return text
def serialize(self):
"""
Return a dictionary containing all of the parameters needed to describe the emission model.
"""
return {'type': self.type,
'n_bins': self.n_bins,
'classes': self.classes,
'edges': self.edges,
'chi': self.chi.tolist(),
'n_params': self.n_params}
def reset(self):
"""
Reset DOM object to un-initialized state.
"""
self.edges = None
self.chi = None
self.k = None
self.n_bins = None
self.classes = None
self.n_params = None
| [
"numpy.flip",
"numpy.searchsorted",
"numpy.array2string",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.array",
"numpy.vstack",
"numpy.interp",
"numpy.arange"
] | [((1282, 1348), 'numpy.interp', 'np.interp', (['qbin_edges', "stats['quantile_basis']", "stats['quantiles']"], {}), "(qbin_edges, stats['quantile_basis'], stats['quantiles'])\n", (1291, 1348), True, 'import numpy as np\n'), ((1544, 1574), 'numpy.zeros', 'np.zeros', (['(2, self.n_bins + 1)'], {}), '((2, self.n_bins + 1))\n', (1552, 1574), True, 'import numpy as np\n'), ((1591, 1621), 'numpy.linspace', 'np.linspace', (['(2)', '(1)', 'self.n_bins'], {}), '(2, 1, self.n_bins)\n', (1602, 1621), True, 'import numpy as np\n'), ((1847, 1867), 'numpy.flip', 'np.flip', (['scaled_dist'], {}), '(scaled_dist)\n', (1854, 1867), True, 'import numpy as np\n'), ((2497, 2553), 'numpy.searchsorted', 'np.searchsorted', (['self.edges', 'transcript.obs'], {'side': '"""left"""'}), "(self.edges, transcript.obs, side='left')\n", (2512, 2553), True, 'import numpy as np\n'), ((3846, 3871), 'numpy.vstack', 'np.vstack', (['(chi_0, chi_1)'], {}), '((chi_0, chi_1))\n', (3855, 3871), True, 'import numpy as np\n'), ((3903, 3935), 'numpy.sum', 'np.sum', (['transcript.gamma'], {'axis': '(1)'}), '(transcript.gamma, axis=1)\n', (3909, 3935), True, 'import numpy as np\n'), ((5020, 5045), 'numpy.array2string', 'np.array2string', (['self.chi'], {}), '(self.chi)\n', (5035, 5045), True, 'import numpy as np\n'), ((1205, 1224), 'numpy.arange', 'np.arange', (['(0)', '(k + 1)'], {}), '(0, k + 1)\n', (1214, 1224), True, 'import numpy as np\n'), ((743, 758), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (751, 758), True, 'import numpy as np\n'), ((4640, 4670), 'numpy.sum', 'np.sum', (['self.chi[:, :]'], {'axis': '(1)'}), '(self.chi[:, :], axis=1)\n', (4646, 4670), True, 'import numpy as np\n'), ((4756, 4788), 'numpy.sum', 'np.sum', (['self.chi[:, :-1]'], {'axis': '(1)'}), '(self.chi[:, :-1], axis=1)\n', (4762, 4788), True, 'import numpy as np\n')] |
import re
code = open("qchecker.rb").read()
code = code.replace(" ", "").replace("\n", "")
A = re.findall(r'b\[".*?"\]', code)[1]
def decode36base(x):
n = 0
for c in x:
c = ord(c)
if ord("0")<=c<=ord("9"):
c -= ord("0")
else:
c -= ord("a")
c += 10
n = n*36+c
return n
A = [decode36base(a) for a in A[3:-2].split("+")]
A = A[2:]
M = [2**64+13, 2**64+37, 2**64+51, 2**64+81]
for a, m in zip(A, M):
print(f"flag % {hex(m)} = {hex(a)}")
def extgcd(a, b):
if b==0:
return 1, 0, a
else:
x, y, g = extgcd(b, a%b)
return y, x-a//b*y, g
def CRT(A, M):
sa = 0
sm = 1
for a, m in zip(A, M):
x, y, g = extgcd(m, sm)
sa = (sa*x*m+a*y*sm)//g
sm *= m//g
return sa%sm, sm
f, _mod = CRT(A, M)
print(f"flag = {hex(f)}")
flag = ""
while f>0:
flag += chr(f%256)
f //= 256
print(flag)
| [
"re.findall"
] | [((97, 129), 're.findall', 're.findall', (['"""b\\\\[".*?"\\\\]"""', 'code'], {}), '(\'b\\\\[".*?"\\\\]\', code)\n', (107, 129), False, 'import re\n')] |
"""Calculate the partial derivatives of the source coordinates
Description:
------------
Calculate the partial derivatives of the source coordinates.
This is done according to equations (2.47) - (2.50) in Teke [2]_.
References:
-----------
.. [1] <NAME>. and <NAME>. (eds.), IERS Conventions (2010), IERS Technical Note No. 36, BKG (2010).
http://www.iers.org/IERS/EN/Publications/TechnicalNotes/tn36.html
.. [2] <NAME>, Sub-daily parameter estimation in VLBI data analysis.
https://geo.tuwien.ac.at/fileadmin/editors/GM/GM87_teke.pdf
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where import apriori
from where.lib import log
# Name of parameter
PARAMETER = __name__.split(".")[-1]
@plugins.register
def src_dir(dset):
"""Calculate the partial derivative of the source coordinates
Args:
dset: A Dataset containing model data.
Returns:
Tuple: Array of partial derivatives, list of their names, and their unit
"""
column_names = ["ra", "dec"]
sources = np.asarray(dset.unique("source"))
icrf = apriori.get("crf", time=dset.time)
# Remove sources that should be fixed
fix_idx = np.zeros(len(sources))
for group in config.tech[PARAMETER].fix_sources.list:
fix_idx = np.logical_or(
[icrf[src].meta[group] if group in icrf[src].meta else src == group for src in sources], fix_idx
)
for group in config.tech[PARAMETER].except_sources.list:
except_idx = np.array([icrf[src].meta[group] if group in icrf[src].meta else src == group for src in sources])
fix_idx = np.logical_and(np.logical_not(except_idx), fix_idx)
sources = sources[np.logical_not(fix_idx)]
# Calculate partials
partials = np.zeros((dset.num_obs, len(sources) * 2))
baseline = (dset.site_pos_2.gcrs.pos - dset.site_pos_1.gcrs.pos).mat
dK_dra = dset.src_dir.dsrc_dra[:, None, :]
dK_ddec = dset.src_dir.dsrc_ddec[:, None, :]
all_partials = np.hstack((-dK_dra @ baseline, -dK_ddec @ baseline))[:, :, 0]
for idx, src in enumerate(sources):
src_idx = dset.filter(source=src)
partials[src_idx, idx * 2 : idx * 2 + 2] = all_partials[src_idx]
column_names = [s + "_" + name for s in sources for name in column_names]
return partials, column_names, "meter"
| [
"numpy.hstack",
"numpy.logical_not",
"where.apriori.get",
"numpy.logical_or",
"numpy.array"
] | [((1179, 1213), 'where.apriori.get', 'apriori.get', (['"""crf"""'], {'time': 'dset.time'}), "('crf', time=dset.time)\n", (1190, 1213), False, 'from where import apriori\n'), ((1371, 1488), 'numpy.logical_or', 'np.logical_or', (['[(icrf[src].meta[group] if group in icrf[src].meta else src == group) for\n src in sources]', 'fix_idx'], {}), '([(icrf[src].meta[group] if group in icrf[src].meta else src ==\n group) for src in sources], fix_idx)\n', (1384, 1488), True, 'import numpy as np\n'), ((1588, 1691), 'numpy.array', 'np.array', (['[(icrf[src].meta[group] if group in icrf[src].meta else src == group) for\n src in sources]'], {}), '([(icrf[src].meta[group] if group in icrf[src].meta else src ==\n group) for src in sources])\n', (1596, 1691), True, 'import numpy as np\n'), ((1779, 1802), 'numpy.logical_not', 'np.logical_not', (['fix_idx'], {}), '(fix_idx)\n', (1793, 1802), True, 'import numpy as np\n'), ((2076, 2128), 'numpy.hstack', 'np.hstack', (['(-dK_dra @ baseline, -dK_ddec @ baseline)'], {}), '((-dK_dra @ baseline, -dK_ddec @ baseline))\n', (2085, 2128), True, 'import numpy as np\n'), ((1719, 1745), 'numpy.logical_not', 'np.logical_not', (['except_idx'], {}), '(except_idx)\n', (1733, 1745), True, 'import numpy as np\n')] |
# SPDX-FileCopyrightText: 2022 <NAME> for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
Used with ble_packet_buffer_test.py. Transmits "echo" to
PacketBufferService and receives it back.
"""
import time
from ble_packet_buffer_service import PacketBufferService
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
ble = BLERadio()
buf = bytearray(512)
while True:
while ble.connected and any(
PacketBufferService in connection for connection in ble.connections
):
for connection in ble.connections:
if PacketBufferService not in connection:
continue
print("echo")
pb = connection[PacketBufferService]
pb.write(b"echo")
# Returns 0 if nothing was read.
packet_len = pb.readinto(buf)
if packet_len > 0:
print(buf[:packet_len])
print()
time.sleep(1)
print("disconnected, scanning")
for advertisement in ble.start_scan(ProvideServicesAdvertisement, timeout=1):
if PacketBufferService not in advertisement.services:
continue
ble.connect(advertisement)
print("connected")
break
ble.stop_scan()
| [
"adafruit_ble.BLERadio",
"time.sleep"
] | [((390, 400), 'adafruit_ble.BLERadio', 'BLERadio', ([], {}), '()\n', (398, 400), False, 'from adafruit_ble import BLERadio\n'), ((963, 976), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (973, 976), False, 'import time\n')] |
#!/usr/bin/env python
#
# fsl_ents.py - Extract ICA component time courses from a MELODIC directory.
#
# Author: <NAME> <<EMAIL>>
#
"""This module defines the ``fsl_ents`` script, for extracting component
time series from a MELODIC ``.ica`` directory.
"""
import os.path as op
import sys
import argparse
import warnings
import numpy as np
# See atlasq.py for explanation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import fsl.data.fixlabels as fixlabels
import fsl.data.melodicanalysis as melanalysis
DTYPE = np.float64
name = "fsl_ents"
desc = 'Extract component time series from a MELODIC .ica directory'
usage = """
{name}: {desc}
Usage:
{name} <.ica directory> [-o outfile] <fixfile>
{name} <.ica directory> [-o outfile] <component> [<component> ...]
{name} <.ica directory> [-o outfile] [-c conffile] [-c conffile] <fixfile>
{name} <.ica directory> [-o outfile] [-c conffile] [-c conffile] <component> [<component> ...]
""".format(name=name, desc=desc).strip() # noqa
helps = {
'outfile' :
'File to save time series to',
'overwrite' :
'Overwrite output file if it exists',
'icadir' :
'.ica directory to extract time series from.',
'component' :
'Component number or FIX/AROMA file specifying components to extract.',
'confound' :
'Extra files to append to output file.',
}
def parseArgs(args):
"""Parses command line arguments.
:arg args: Sequence of command line arguments.
:returns: An ``argparse.Namespace`` object containing parsed arguments.
"""
if len(args) == 0:
print(usage)
sys.exit(0)
parser = argparse.ArgumentParser(prog=name,
usage=usage,
description=desc)
parser.add_argument('-o', '--outfile',
help=helps['outfile'],
default='confound_timeseries.txt')
parser.add_argument('-ow', '--overwrite',
action='store_true',
help=helps['overwrite'])
parser.add_argument('-c', '--conffile',
action='append',
help=helps['confound'])
parser.add_argument('icadir',
help=helps['icadir'])
parser.add_argument('components',
nargs='+',
help=helps['component'])
args = parser.parse_args(args)
# Error if ica directory does not exist
if not op.exists(args.icadir):
print('ICA directory {} does not exist'.format(args.icadir))
sys.exit(1)
# Error if output exists, but overwrite not specified
if op.exists(args.outfile) and not args.overwrite:
print('Output file {} already exists and --overwrite not '
'specified'.format(args.outfile))
sys.exit(1)
# Convert components into integers,
# or absolute file paths, and error
# if any are not one of these.
for i, c in enumerate(args.components):
if op.exists(c):
args.components[i] = op.abspath(c)
else:
try:
args.components[i] = int(c)
except ValueError:
print('Bad component: {}. Components must either be component '
'indices (starting from 1), or paths to FIX/AROMA '
'files.')
sys.exit(1)
# Convert confound files to absolute
# paths, error if any do not exist.
if args.conffile is None:
args.conffile = []
for i, cf in enumerate(args.conffile):
if not op.exists(cf):
print('Confound file does not exist: {}'.format(cf))
sys.exit(1)
args.conffile[i] = op.abspath(cf)
args.outfile = op.abspath(args.outfile)
args.icadir = op.abspath(args.icadir)
return args
def genComponentIndexList(comps, ncomps):
"""Turns the given sequence of integers and file paths into a list
of 0-based component indices.
:arg comps: Sequence containing 1-based component indices, and/or paths
to FIX/AROMA label text files.
:arg ncomps: Number of components in the input data - indices larger than
this will be ignored.
:returns: List of 0-based component indices.
"""
allcomps = []
for c in comps:
if isinstance(c, int):
ccomps = [c]
else:
ccomps = fixlabels.loadLabelFile(c, returnIndices=True)[2]
allcomps.extend([c - 1 for c in ccomps])
if any([c < 0 or c >= ncomps for c in allcomps]):
raise ValueError('Invalid component indices: {}'.format(allcomps))
return list(sorted(set(allcomps)))
def loadConfoundFiles(conffiles, npts):
"""Loads the given confound files, and copies them all into a single 2D
``(npoints, nconfounds)`` matrix.
:arg conffiles: Sequence of paths to files containing confound time series
(where each row corresponds to a time point, and each
column corresponds to a single confound).
:arg npts: Expected number of time points
:returns: A ``(npoints, nconfounds)`` ``numpy`` matrix.
"""
matrices = []
for cfile in conffiles:
mat = np.loadtxt(cfile, dtype=DTYPE)
if len(mat.shape) == 1:
mat = np.atleast_2d(mat).T
if mat.shape[0] != npts:
raise ValueError('Confound file {} does not have correct number '
'of points (expected {}, has {})'.format(
cfile, npts, mat.shape[0]))
matrices.append(mat)
ncols = sum([m.shape[1] for m in matrices])
confounds = np.zeros((npts, ncols), dtype=DTYPE)
coli = 0
for mat in matrices:
matcols = mat.shape[1]
confounds[:, coli:coli + matcols] = mat
coli = coli + matcols
return confounds
def main(argv=None):
"""Entry point for the ``fsl_ents`` script.
Identifies component time series to extract, extracts them, loads extra
confound files, and saves them out to a file.
"""
if argv is None:
argv = sys.argv[1:]
args = parseArgs(argv)
try:
ts = melanalysis.getComponentTimeSeries(args.icadir)
npts, ncomps = ts.shape
confs = loadConfoundFiles(args.conffile, npts)
comps = genComponentIndexList(args.components, ncomps)
ts = ts[:, comps]
except Exception as e:
print(e)
sys.exit(1)
ts = np.hstack((ts, confs))
np.savetxt(args.outfile, ts, fmt='%10.5f')
if __name__ == '__main__':
sys.exit(main())
| [
"os.path.exists",
"numpy.atleast_2d",
"argparse.ArgumentParser",
"numpy.hstack",
"warnings.catch_warnings",
"numpy.zeros",
"fsl.data.fixlabels.loadLabelFile",
"numpy.savetxt",
"sys.exit",
"os.path.abspath",
"numpy.loadtxt",
"warnings.filterwarnings",
"fsl.data.melodicanalysis.getComponentTim... | [((415, 440), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (438, 440), False, 'import warnings\n'), ((446, 503), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (469, 503), False, 'import warnings\n'), ((1718, 1783), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': 'name', 'usage': 'usage', 'description': 'desc'}), '(prog=name, usage=usage, description=desc)\n', (1741, 1783), False, 'import argparse\n'), ((3852, 3876), 'os.path.abspath', 'op.abspath', (['args.outfile'], {}), '(args.outfile)\n', (3862, 3876), True, 'import os.path as op\n'), ((3896, 3919), 'os.path.abspath', 'op.abspath', (['args.icadir'], {}), '(args.icadir)\n', (3906, 3919), True, 'import os.path as op\n'), ((5800, 5836), 'numpy.zeros', 'np.zeros', (['(npts, ncols)'], {'dtype': 'DTYPE'}), '((npts, ncols), dtype=DTYPE)\n', (5808, 5836), True, 'import numpy as np\n'), ((6703, 6725), 'numpy.hstack', 'np.hstack', (['(ts, confs)'], {}), '((ts, confs))\n', (6712, 6725), True, 'import numpy as np\n'), ((6730, 6772), 'numpy.savetxt', 'np.savetxt', (['args.outfile', 'ts'], {'fmt': '"""%10.5f"""'}), "(args.outfile, ts, fmt='%10.5f')\n", (6740, 6772), True, 'import numpy as np\n'), ((1692, 1703), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1700, 1703), False, 'import sys\n'), ((2575, 2597), 'os.path.exists', 'op.exists', (['args.icadir'], {}), '(args.icadir)\n', (2584, 2597), True, 'import os.path as op\n'), ((2676, 2687), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2684, 2687), False, 'import sys\n'), ((2754, 2777), 'os.path.exists', 'op.exists', (['args.outfile'], {}), '(args.outfile)\n', (2763, 2777), True, 'import os.path as op\n'), ((2925, 2936), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2933, 2936), False, 'import sys\n'), ((3108, 3120), 'os.path.exists', 'op.exists', (['c'], {}), '(c)\n', (3117, 3120), True, 'import os.path as op\n'), ((3817, 3831), 'os.path.abspath', 'op.abspath', (['cf'], {}), '(cf)\n', (3827, 3831), True, 'import os.path as op\n'), ((5354, 5384), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {'dtype': 'DTYPE'}), '(cfile, dtype=DTYPE)\n', (5364, 5384), True, 'import numpy as np\n'), ((6380, 6427), 'fsl.data.melodicanalysis.getComponentTimeSeries', 'melanalysis.getComponentTimeSeries', (['args.icadir'], {}), '(args.icadir)\n', (6414, 6427), True, 'import fsl.data.melodicanalysis as melanalysis\n'), ((3155, 3168), 'os.path.abspath', 'op.abspath', (['c'], {}), '(c)\n', (3165, 3168), True, 'import os.path as op\n'), ((3686, 3699), 'os.path.exists', 'op.exists', (['cf'], {}), '(cf)\n', (3695, 3699), True, 'import os.path as op\n'), ((3778, 3789), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3786, 3789), False, 'import sys\n'), ((6681, 6692), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6689, 6692), False, 'import sys\n'), ((4522, 4568), 'fsl.data.fixlabels.loadLabelFile', 'fixlabels.loadLabelFile', (['c'], {'returnIndices': '(True)'}), '(c, returnIndices=True)\n', (4545, 4568), True, 'import fsl.data.fixlabels as fixlabels\n'), ((5436, 5454), 'numpy.atleast_2d', 'np.atleast_2d', (['mat'], {}), '(mat)\n', (5449, 5454), True, 'import numpy as np\n'), ((3477, 3488), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3485, 3488), False, 'import sys\n')] |
# (CWD: executables/amd64 folder)
# TODO: finish this
import os
packages = []
total_executables_packages = len(os.listdir("."))
for package_name in os.listdir("."):
packages.append(os.path.join(".", package_name))
without_diffs = []
expected_diffs = 0
for package_path in packages:
match_found = os.listdir(package_path)
if len(match_found) >= 2:
expected_diffs += len(match_found) - 1
continue
without_diffs.append(package_path)
print("total amount of packages: {}".format(total_executables_packages))
print("total packages with only 1 version: {}".format(len(without_diffs)))
print("total amount of expected diffs: {}".format(expected_diffs)) | [
"os.listdir",
"os.path.join"
] | [((148, 163), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (158, 163), False, 'import os\n'), ((111, 126), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (121, 126), False, 'import os\n'), ((304, 328), 'os.listdir', 'os.listdir', (['package_path'], {}), '(package_path)\n', (314, 328), False, 'import os\n'), ((185, 216), 'os.path.join', 'os.path.join', (['"""."""', 'package_name'], {}), "('.', package_name)\n", (197, 216), False, 'import os\n')] |
from __future__ import print_function
from itertools import product
import torch
import torch.nn as nn
import torch_mlu
from torch.nn import Parameter
import torch.nn.functional as F
import numpy as np
import sys
import os
import copy
import random
import time
import unittest
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir+"/../../")
from common_utils import testinfo, TestCase
import logging
logging.basicConfig(level=logging.DEBUG)
torch.set_grad_enabled(False)
class TestSizeModel(nn.Module):
def __init__(self, dim):
super(TestSizeModel, self).__init__()
self.dim = dim
def forward(self, x, y):
z = x.size(self.dim)
# TODO(wangyan): test when mm fixed
return z + y
class TestSizeOp(TestCase):
@testinfo()
def test_size(self):
dim_l = [0, 3]
for dim in dim_l:
for element_type in [torch.half, torch.float, torch.int, torch.short, \
torch.long, torch.uint8, torch.int8, torch.bool]:
model = TestSizeModel(dim)
input_x = torch.rand((3,6,8,12)).to(dtype=element_type)
input_y = torch.randn((3,6,8,12))
traced_model = torch.jit.trace(model, (input_x, input_y), check_trace=False)
out_cpu = model(input_x, input_y)
input_x = input_x.to(dtype=element_type)
input_x_mlu = input_x.to('mlu')
input_y_mlu = input_y.to('mlu')
out_mlu = traced_model(input_x_mlu, input_y_mlu)
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.003, use_MSE = True)
if __name__ == '__main__':
unittest.main() | [
"logging.basicConfig",
"torch.jit.trace",
"unittest.main",
"common_utils.testinfo",
"os.path.abspath",
"torch.set_grad_enabled",
"sys.path.append",
"torch.randn",
"torch.rand"
] | [((331, 367), 'sys.path.append', 'sys.path.append', (["(cur_dir + '/../../')"], {}), "(cur_dir + '/../../')\n", (346, 367), False, 'import sys\n'), ((425, 465), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (444, 465), False, 'import logging\n'), ((466, 495), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (488, 495), False, 'import torch\n'), ((304, 329), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (319, 329), False, 'import os\n'), ((785, 795), 'common_utils.testinfo', 'testinfo', ([], {}), '()\n', (793, 795), False, 'from common_utils import testinfo, TestCase\n'), ((1701, 1716), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1714, 1716), False, 'import unittest\n'), ((1178, 1204), 'torch.randn', 'torch.randn', (['(3, 6, 8, 12)'], {}), '((3, 6, 8, 12))\n', (1189, 1204), False, 'import torch\n'), ((1233, 1294), 'torch.jit.trace', 'torch.jit.trace', (['model', '(input_x, input_y)'], {'check_trace': '(False)'}), '(model, (input_x, input_y), check_trace=False)\n', (1248, 1294), False, 'import torch\n'), ((1106, 1131), 'torch.rand', 'torch.rand', (['(3, 6, 8, 12)'], {}), '((3, 6, 8, 12))\n', (1116, 1131), False, 'import torch\n')] |
"""
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2005/10/12 22:27:39 $"
"""
"""
Simple sizer for PythonCard
Uses a simple method for sizing:
- each component type is defined to be fixed or stretchable
- uses GetBestSize() to get a min size for each component
- each component is placed by its centre point as laid out
- centre locations are scaled by "current window size / min size"
This will adjust component sizes for differences between OS's, but will
not move components to make space.
"""
import wx
DEBUG = False
DEBUG1 = False
#----------------------------------------------------------------------
class simpleSizer(wx.PySizer):
def __init__(self, minsize, border=0):
wx.PySizer.__init__(self)
self.minsize = minsize
self.border = border
#--------------------------------------------------
def Add(self, item, option=0, flag=0, border=0,
pos=None, size=None,
growX = False, growY = False
):
if DEBUG: print("adding", item.name, pos, size, growX, growY)
wx.PySizer.Add(self, item, option, flag, border,
userData=(pos, size, growX, growY))
#--------------------------------------------------
def CalcMin( self ):
x,y = self.minsize
return wx.Size(x, y)
#--------------------------------------------------
def RecalcSizes( self ):
# save current dimensions, etc.
curWidth, curHeight = self.GetSize()
px, py = self.GetPosition()
minWidth, minHeight = self.CalcMin()
if DEBUG: print(minWidth, minHeight, curWidth, curHeight)
if minWidth == 0 or minHeight == 0: return
scaleX = 100 * curWidth / minWidth
scaleY = 100 * curHeight / minHeight
# iterate children and set dimensions...
for item in self.GetChildren():
pos, size, growX, growY = item.GetUserData()
if DEBUG: print("in recalc", pos, size, growX, growY)
cx,cy = pos
sx,sy = size
cx = (cx * scaleX + sx*scaleX/2) / 100
cy = (cy * scaleY + sy*scaleY/2) / 100
if growX:
sx = sx * scaleX / 100
if growY:
sy = sy * scaleY / 100
self.SetItemBounds( item, cx-sx/2, cy-sy/2, sx, sy )
#--------------------------------------------------
def SetItemBounds(self, item, x, y, w, h):
# calculate the item's actual size and position within
# its grid cell
ipt = wx.Point(x, y)
isz = wx.Size(w,h)
if DEBUG: print("in itembounds", x,y,w,h)
item.SetDimension(ipt, isz)
#--------------------------------------------------
# AGT fill this list
heightGrowableTypes = ["BitmapCanvas", "CodeEditor", "HtmlWindow", \
"Image", "List", "MultiColumnList",
"Notebook", \
"RadioGroup", "StaticBox", "TextArea", \
"Tree"]
widthGrowableTypes = ["BitmapCanvas", "CheckBox", "Choice", \
"CodeEditor", "ComboBox", "HtmlWindow", \
"Image", "List", "MultiColumnList", \
"Notebook", \
"PasswordField", "RadioGroup", "Spinner", \
"StaticBox", "StaticText", "TextArea", \
"TextField", "Tree"]
growableTypes = ["Gauge", "Slider", "StaticLine"]
def autoSizer(aBg):
winX, winY = aBg.size
# make list of all components, make a simpleSizer to hold them
complist = []
for compName in aBg.components.keys():
comp = aBg.components[compName]
complist.append( comp )
sizer = simpleSizer(aBg.panel.size)
# add the components to the grid
for comp in complist:
tx, ty = comp.position
dx, dy = comp.size
# AGT Must be an easier way to get a component's type ??
compType = comp._resource.__dict__['type']
dx1, dy1 = comp.GetBestSize()
if dx1 > dx: dx = dx1
if dy1 > dy:
# hack to deal with the fact that GetBestSize() comes up with too
# large heights for textareas.
if compType != "TextArea": dy = dy1
# AGT FUTURE this checks contents of the component's userdata
# extend resourceEditor to allow a way to set this
if "HEIGHT_GROWABLE" in comp.userdata or \
compType in heightGrowableTypes or \
(compType in growableTypes and comp.layout == "vertical"):
compGrowableY = True
else: compGrowableY = False
if "WIDTH_GROWABLE" in comp.userdata or \
compType in widthGrowableTypes or \
(compType in growableTypes and comp.layout == "horizontal"):
compGrowableX = True
else: compGrowableX = False
sizer.Add(comp, pos=(tx,ty), size=(dx,dy), growX = compGrowableX, growY = compGrowableY )
if DEBUG1: print("adding ", comp.name, (tx, ty), (dx, dy), compGrowableX, compGrowableY)
sizer.SetSizeHints(aBg)
aBg.panel.SetSizer(sizer)
aBg.panel.SetAutoLayout(1)
aBg.panel.Layout()
#--------------------------------------------------
| [
"wx.Point",
"wx.PySizer.__init__",
"wx.PySizer.Add",
"wx.Size"
] | [((724, 749), 'wx.PySizer.__init__', 'wx.PySizer.__init__', (['self'], {}), '(self)\n', (743, 749), False, 'import wx\n'), ((1098, 1186), 'wx.PySizer.Add', 'wx.PySizer.Add', (['self', 'item', 'option', 'flag', 'border'], {'userData': '(pos, size, growX, growY)'}), '(self, item, option, flag, border, userData=(pos, size, growX,\n growY))\n', (1112, 1186), False, 'import wx\n'), ((1330, 1343), 'wx.Size', 'wx.Size', (['x', 'y'], {}), '(x, y)\n', (1337, 1343), False, 'import wx\n'), ((2622, 2636), 'wx.Point', 'wx.Point', (['x', 'y'], {}), '(x, y)\n', (2630, 2636), False, 'import wx\n'), ((2651, 2664), 'wx.Size', 'wx.Size', (['w', 'h'], {}), '(w, h)\n', (2658, 2664), False, 'import wx\n')] |
import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.db import transaction, IntegrityError
from django.db.models import Q
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from constents import UserTypeChoices, DocAvailableChoices
from modules.account.serializers import UserInfoSerializer
from modules.cel.tasks import export_all_docs, send_apply_result
from modules.doc.models import Doc, PinDoc
from modules.doc.serializers import DocListSerializer, DocPinSerializer
from modules.repo.models import Repo, RepoUser
from modules.repo.permissions import RepoAdminPermission
from modules.repo.serializers import (
RepoSerializer,
RepoApplyDealSerializer,
RepoListSerializer,
RepoCommonSerializer,
RepoUserSerializer,
)
from utils.exceptions import (
OperationError,
UserNotExist,
Error404,
ThrottledError,
ParamsNotFound,
)
from utils.paginations import NumPagination, RepoListNumPagination
USER_MODEL = get_user_model()
class RepoView(ModelViewSet):
"""仓库管理入口"""
queryset = Repo.objects.filter(is_deleted=False)
serializer_class = RepoSerializer
permission_classes = [
RepoAdminPermission,
]
def create(self, request, *args, **kwargs):
"""创建仓库"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
with transaction.atomic():
instance = serializer.save(creator=request.user.uid)
instance.set_owner(request.user.uid)
return Response(serializer.data)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response()
@action(detail=True, methods=["GET"])
def list_apply(self, request, *args, **kwargs):
"""申请人"""
instance = self.get_object()
applicant_ids = RepoUser.objects.filter(
repo_id=instance.id, u_type=UserTypeChoices.VISITOR
).values_list("uid", flat=True)
applicants = USER_MODEL.objects.filter(uid__in=applicant_ids).order_by(
"username"
)
search_key = request.GET.get("searchKey")
if search_key:
applicants = applicants.filter(username=search_key)
self.pagination_class = NumPagination
page = self.paginate_queryset(applicants)
serializer = UserInfoSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
@action(detail=True, methods=["POST"])
def deal_apply(self, request, *args, **kwargs):
"""处理申请"""
instance = self.get_object()
try:
repo_user = RepoUser.objects.get(
repo_id=instance.id,
uid=request.data.get("uid"),
u_type=UserTypeChoices.VISITOR,
)
except RepoUser.DoesNotExist:
raise UserNotExist(_("该申请不存在"))
if not request.data.get("status", True):
send_apply_result.delay(
request.user.uid, repo_user.repo_id, repo_user.uid, False
)
repo_user.delete()
return Response()
serializer = RepoApplyDealSerializer(instance=repo_user, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(
u_type=UserTypeChoices.MEMBER,
operator=request.user.uid,
join_at=datetime.datetime.now(),
)
send_apply_result.delay(
request.user.uid, repo_user.repo_id, repo_user.uid, True
)
return Response()
@action(detail=True, methods=["POST"])
def export_docs(self, request, *args, **kwargs):
"""导出文章"""
instance = self.get_object()
# 检验是否有执行中任务
cache_key = "ExportAllDocs:{}:{}".format(instance.id, request.user.uid)
running = cache.get(cache_key)
if running:
raise ThrottledError()
cache.set(cache_key, request.user.uid, 3600)
# 导出
export_all_docs.delay(instance.id, request.user.uid)
return Response()
@action(detail=False, methods=["GET"])
def load_repo(self, request, *args, **kwargs):
"""显示有权限库"""
# 超管显示全部
if request.user.is_superuser:
repos = Repo.objects.filter(is_deleted=False)
# 显示管理的库
else:
repo_ids = RepoUser.objects.filter(
Q(uid=request.user.uid)
& Q(u_type__in=[UserTypeChoices.ADMIN, UserTypeChoices.OWNER])
).values("repo_id")
repos = Repo.objects.filter(id__in=repo_ids, is_deleted=False)
serializer = self.get_serializer(repos, many=True)
return Response(serializer.data)
@action(detail=True, methods=["GET"])
def load_user(self, request, *args, **kwargs):
"""库内所有用户"""
instance = self.get_object()
sql = (
"SELECT au.username, ru.* FROM `auth_user` au "
"JOIN `repo_user` ru ON au.uid = ru.uid "
"WHERE ru.repo_id = {} AND au.username like %s "
"ORDER BY FIELD(ru.u_type, '{}', '{}', '{}', '{}') "
).format(
instance.id,
UserTypeChoices.VISITOR,
UserTypeChoices.OWNER,
UserTypeChoices.ADMIN,
UserTypeChoices.MEMBER,
)
search_key = request.GET.get("searchKey")
search_key = f"%%{search_key}%%" if search_key else "%%"
repo_users = RepoUser.objects.raw(sql, [search_key])
queryset = self.paginate_queryset(repo_users)
serializer = RepoUserSerializer(queryset, many=True)
return self.get_paginated_response(serializer.data)
@action(detail=True, methods=["POST"])
def remove_user(self, request, *args, **kwargs):
"""移除用户"""
instance = self.get_object()
uid = request.data.get("uid")
RepoUser.objects.filter(
Q(repo_id=instance.id) & Q(uid=uid) & ~Q(u_type=UserTypeChoices.OWNER)
).delete()
return Response()
@action(detail=True, methods=["POST"])
def change_u_type(self, request, *args, **kwargs):
"""切换用户类型"""
instance = self.get_object()
uid = request.data.get("uid")
u_type = request.data.get("uType")
if u_type == UserTypeChoices.OWNER:
raise OperationError()
RepoUser.objects.filter(
Q(repo_id=instance.id) & Q(uid=uid) & ~Q(u_type=UserTypeChoices.OWNER)
).update(u_type=u_type, operator=request.user.uid)
return Response()
@action(detail=True, methods=["GET"])
def load_doc(self, request, *args, **kwargs):
"""展示文章"""
instance = self.get_object()
sql = (
"SELECT dd.*, au.username 'creator_name', IFNULL(dp.in_use, FALSE) 'pin_status' "
"FROM `auth_user` au "
"JOIN `doc_doc` dd ON dd.creator=au.uid "
"LEFT JOIN `doc_pin` dp ON dp.doc_id=dd.id AND dp.in_use "
"WHERE NOT dd.is_deleted AND dd.is_publish AND dd.available='{}' "
"AND dd.repo_id = {} "
"AND dd.title like %s "
"ORDER BY dd.id DESC ;"
).format(DocAvailableChoices.PUBLIC, instance.id)
search_key = request.GET.get("searchKey")
search_key = f"%%{search_key}%%" if search_key else "%%"
docs = Doc.objects.raw(sql, [search_key])
queryset = self.paginate_queryset(docs)
serializer = DocListSerializer(queryset, many=True)
return self.get_paginated_response(serializer.data)
@action(detail=True, methods=["DELETE"])
def delete_doc(self, request, *args, **kwargs):
"""删除文章"""
instance = self.get_object()
doc_id = request.data.get("docID", "")
Doc.objects.filter(id=doc_id, repo_id=instance.id).update(
is_deleted=True, update_by=request.user.uid
)
return Response()
@action(detail=True, methods=["GET"])
def is_owner(self, request, *args, **kwargs):
"""检测是否为仓库拥有者"""
if request.user.is_superuser:
return Response(True)
instance = self.get_object()
return Response(instance.creator == request.user.uid)
@action(detail=True, methods=["POST"])
def pin_doc(self, request, *args, **kwargs):
"""置顶文章"""
data = request.data
data["operator"] = request.user.uid
serializer = DocPinSerializer(data=data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
instance = self.get_object()
try:
Doc.objects.get(
id=data["doc_id"],
repo_id=instance.id,
is_deleted=False,
is_publish=True,
available=DocAvailableChoices.PUBLIC,
)
except Doc.DoesNotExist:
raise OperationError()
try:
pin = PinDoc.objects.get(doc_id=data["doc_id"], in_use=True)
pin.pin_to = data["pin_to"]
pin.operator = request.user.uid
pin.save()
except PinDoc.DoesNotExist:
serializer.save()
return Response()
@action(detail=True, methods=["POST"])
def unpin_doc(self, request, *args, **kwargs):
"""取消置顶"""
doc_id = request.data.get("doc_id")
if not doc_id:
raise ParamsNotFound()
instance = self.get_object()
try:
Doc.objects.get(
id=doc_id,
repo_id=instance.id,
is_deleted=False,
is_publish=True,
available=DocAvailableChoices.PUBLIC,
)
except Doc.DoesNotExist:
raise OperationError()
PinDoc.objects.filter(doc_id=doc_id, in_use=True).update(
in_use=False, operator=request.user.uid
)
return Response()
class RepoCommonView(ListModelMixin, RetrieveModelMixin, GenericViewSet):
"""仓库常规入口"""
queryset = Repo.objects.filter(is_deleted=False)
serializer_class = RepoSerializer
pagination_class = None
def get_users(self, repo_id: int, u_types: list):
"""获取成员"""
repo_users = RepoUser.objects.filter(
repo_id=repo_id, u_type__in=u_types
).values_list("uid", flat=True)
users = USER_MODEL.objects.filter(uid__in=repo_users).order_by(
"-active_index", "username"
)
user_serializer = UserInfoSerializer(users, many=True)
return user_serializer.data
def retrieve(self, request, *args, **kwargs):
# 库信息
instance = self.get_object()
serializer = self.get_serializer(instance)
data = serializer.data
# 成员信息
data.update(
{
"admins": self.get_users(
instance.id, [UserTypeChoices.ADMIN, UserTypeChoices.OWNER]
),
"members": self.get_users(instance.id, [UserTypeChoices.MEMBER]),
}
)
return Response(data)
def list(self, request, *args, **kwargs):
"""获取自己的库"""
repo_ids = RepoUser.objects.filter(
Q(uid=request.user.uid) & ~Q(u_type=UserTypeChoices.VISITOR)
).values_list("repo_id", flat=True)
search_key = request.GET.get("searchKey", "")
self.queryset = self.queryset.filter(
id__in=repo_ids, name__icontains=search_key
)
self.serializer_class = RepoCommonSerializer
if request.GET.get("page", None) is not None:
self.pagination_class = NumPagination
return super().list(request, *args, **kwargs)
@action(detail=False, methods=["GET"])
def with_user(self, request, *args, **kwargs):
"""获取包含成员状态的库列表"""
search_key = request.GET.get("searchKey")
search_key = f"%%{search_key}%%" if search_key else "%%"
sql = (
"SELECT rr.*, au.username creator_name, ru.u_type member_type "
"FROM `repo_repo` rr "
"LEFT JOIN `auth_user` au ON au.uid = rr.creator "
"LEFT JOIN `repo_user` ru ON ru.repo_id = rr.id AND ru.uid = %s "
"WHERE NOT rr.is_deleted "
"AND rr.name like %s "
"ORDER BY rr.id;"
)
repos = Repo.objects.raw(sql, [request.user.uid, search_key])
page = RepoListNumPagination()
queryset = page.paginate_queryset(repos, request, self)
serializer = RepoListSerializer(queryset, many=True)
return page.get_paginated_response(serializer.data)
def apply_for_repo(self, repo: Repo, uid: str):
"""申请库"""
try:
RepoUser.objects.create(
repo_id=repo.id, uid=uid, u_type=UserTypeChoices.VISITOR
)
except IntegrityError:
raise OperationError(
ngettext("已申请或加入%(name)s", "已申请或加入%(name)s", 1) % {"name": repo.name}
)
@action(detail=True, methods=["POST"])
def apply(self, request, *args, **kwargs):
"""申请库"""
instance = self.get_object()
self.apply_for_repo(instance, request.user.uid)
return Response()
@action(detail=False, methods=["POST"])
def apply_by_doc(self, request, *args, **kwargs):
"""通过文章申请库"""
try:
doc = Doc.objects.get(id=request.data.get("doc_id", ""))
repo = Repo.objects.get(id=doc.repo_id)
self.apply_for_repo(repo, request.user.uid)
except (Doc.DoesNotExist, Repo.DoesNotExist):
raise Error404()
return Response()
@action(detail=True, methods=["POST"])
def exit(self, request, *args, **kwargs):
"""退出库"""
instance = self.get_object()
if instance.name == settings.DEFAULT_REPO_NAME:
raise OperationError()
try:
RepoUser.objects.filter(
Q(repo_id=instance.id)
& Q(uid=request.user.uid)
& ~Q(u_type=UserTypeChoices.OWNER)
).delete()
return Response()
except RepoUser.DoesNotExist:
raise OperationError()
| [
"modules.doc.models.PinDoc.objects.filter",
"modules.cel.tasks.export_all_docs.delay",
"modules.repo.models.Repo.objects.raw",
"modules.doc.serializers.DocListSerializer",
"modules.doc.serializers.DocPinSerializer",
"rest_framework.decorators.action",
"utils.paginations.RepoListNumPagination",
"django... | [((1307, 1323), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1321, 1323), False, 'from django.contrib.auth import get_user_model\n'), ((1389, 1426), 'modules.repo.models.Repo.objects.filter', 'Repo.objects.filter', ([], {'is_deleted': '(False)'}), '(is_deleted=False)\n', (1408, 1426), False, 'from modules.repo.models import Repo, RepoUser\n'), ((2053, 2089), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['GET']"}), "(detail=True, methods=['GET'])\n", (2059, 2089), False, 'from rest_framework.decorators import action\n'), ((2819, 2856), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (2825, 2856), False, 'from rest_framework.decorators import action\n'), ((3925, 3962), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (3931, 3962), False, 'from rest_framework.decorators import action\n'), ((4426, 4463), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['GET']"}), "(detail=False, methods=['GET'])\n", (4432, 4463), False, 'from rest_framework.decorators import action\n'), ((5060, 5096), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['GET']"}), "(detail=True, methods=['GET'])\n", (5066, 5096), False, 'from rest_framework.decorators import action\n'), ((6015, 6052), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (6021, 6052), False, 'from rest_framework.decorators import action\n'), ((6367, 6404), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (6373, 6404), False, 'from rest_framework.decorators import action\n'), ((6885, 6921), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['GET']"}), "(detail=True, methods=['GET'])\n", (6891, 6921), False, 'from rest_framework.decorators import action\n'), ((7881, 7920), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['DELETE']"}), "(detail=True, methods=['DELETE'])\n", (7887, 7920), False, 'from rest_framework.decorators import action\n'), ((8241, 8277), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['GET']"}), "(detail=True, methods=['GET'])\n", (8247, 8277), False, 'from rest_framework.decorators import action\n'), ((8530, 8567), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (8536, 8567), False, 'from rest_framework.decorators import action\n'), ((9493, 9530), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (9499, 9530), False, 'from rest_framework.decorators import action\n'), ((10312, 10349), 'modules.repo.models.Repo.objects.filter', 'Repo.objects.filter', ([], {'is_deleted': '(False)'}), '(is_deleted=False)\n', (10331, 10349), False, 'from modules.repo.models import Repo, RepoUser\n'), ((11968, 12005), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['GET']"}), "(detail=False, methods=['GET'])\n", (11974, 12005), False, 'from rest_framework.decorators import action\n'), ((13254, 13291), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (13260, 13291), False, 'from rest_framework.decorators import action\n'), ((13482, 13520), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)', 'methods': "['POST']"}), "(detail=False, methods=['POST'])\n", (13488, 13520), False, 'from rest_framework.decorators import action\n'), ((13902, 13939), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (13908, 13939), False, 'from rest_framework.decorators import action\n'), ((1869, 1894), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1877, 1894), False, 'from rest_framework.response import Response\n'), ((2036, 2046), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (2044, 2046), False, 'from rest_framework.response import Response\n'), ((2717, 2752), 'modules.account.serializers.UserInfoSerializer', 'UserInfoSerializer', (['page'], {'many': '(True)'}), '(page, many=True)\n', (2735, 2752), False, 'from modules.account.serializers import UserInfoSerializer\n'), ((3506, 3568), 'modules.repo.serializers.RepoApplyDealSerializer', 'RepoApplyDealSerializer', ([], {'instance': 'repo_user', 'data': 'request.data'}), '(instance=repo_user, data=request.data)\n', (3529, 3568), False, 'from modules.repo.serializers import RepoSerializer, RepoApplyDealSerializer, RepoListSerializer, RepoCommonSerializer, RepoUserSerializer\n'), ((3789, 3874), 'modules.cel.tasks.send_apply_result.delay', 'send_apply_result.delay', (['request.user.uid', 'repo_user.repo_id', 'repo_user.uid', '(True)'], {}), '(request.user.uid, repo_user.repo_id, repo_user.uid,\n True)\n', (3812, 3874), False, 'from modules.cel.tasks import export_all_docs, send_apply_result\n'), ((3908, 3918), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (3916, 3918), False, 'from rest_framework.response import Response\n'), ((4191, 4211), 'django.core.cache.cache.get', 'cache.get', (['cache_key'], {}), '(cache_key)\n', (4200, 4211), False, 'from django.core.cache import cache\n'), ((4275, 4319), 'django.core.cache.cache.set', 'cache.set', (['cache_key', 'request.user.uid', '(3600)'], {}), '(cache_key, request.user.uid, 3600)\n', (4284, 4319), False, 'from django.core.cache import cache\n'), ((4341, 4393), 'modules.cel.tasks.export_all_docs.delay', 'export_all_docs.delay', (['instance.id', 'request.user.uid'], {}), '(instance.id, request.user.uid)\n', (4362, 4393), False, 'from modules.cel.tasks import export_all_docs, send_apply_result\n'), ((4409, 4419), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (4417, 4419), False, 'from rest_framework.response import Response\n'), ((5028, 5053), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (5036, 5053), False, 'from rest_framework.response import Response\n'), ((5794, 5833), 'modules.repo.models.RepoUser.objects.raw', 'RepoUser.objects.raw', (['sql', '[search_key]'], {}), '(sql, [search_key])\n', (5814, 5833), False, 'from modules.repo.models import Repo, RepoUser\n'), ((5909, 5948), 'modules.repo.serializers.RepoUserSerializer', 'RepoUserSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (5927, 5948), False, 'from modules.repo.serializers import RepoSerializer, RepoApplyDealSerializer, RepoListSerializer, RepoCommonSerializer, RepoUserSerializer\n'), ((6350, 6360), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (6358, 6360), False, 'from rest_framework.response import Response\n'), ((6868, 6878), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (6876, 6878), False, 'from rest_framework.response import Response\n'), ((7672, 7706), 'modules.doc.models.Doc.objects.raw', 'Doc.objects.raw', (['sql', '[search_key]'], {}), '(sql, [search_key])\n', (7687, 7706), False, 'from modules.doc.models import Doc, PinDoc\n'), ((7776, 7814), 'modules.doc.serializers.DocListSerializer', 'DocListSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (7793, 7814), False, 'from modules.doc.serializers import DocListSerializer, DocPinSerializer\n'), ((8224, 8234), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (8232, 8234), False, 'from rest_framework.response import Response\n'), ((8477, 8523), 'rest_framework.response.Response', 'Response', (['(instance.creator == request.user.uid)'], {}), '(instance.creator == request.user.uid)\n', (8485, 8523), False, 'from rest_framework.response import Response\n'), ((8729, 8756), 'modules.doc.serializers.DocPinSerializer', 'DocPinSerializer', ([], {'data': 'data'}), '(data=data)\n', (8745, 8756), False, 'from modules.doc.serializers import DocListSerializer, DocPinSerializer\n'), ((9476, 9486), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (9484, 9486), False, 'from rest_framework.response import Response\n'), ((10192, 10202), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (10200, 10202), False, 'from rest_framework.response import Response\n'), ((10772, 10808), 'modules.account.serializers.UserInfoSerializer', 'UserInfoSerializer', (['users'], {'many': '(True)'}), '(users, many=True)\n', (10790, 10808), False, 'from modules.account.serializers import UserInfoSerializer\n'), ((11341, 11355), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (11349, 11355), False, 'from rest_framework.response import Response\n'), ((12597, 12650), 'modules.repo.models.Repo.objects.raw', 'Repo.objects.raw', (['sql', '[request.user.uid, search_key]'], {}), '(sql, [request.user.uid, search_key])\n', (12613, 12650), False, 'from modules.repo.models import Repo, RepoUser\n'), ((12666, 12689), 'utils.paginations.RepoListNumPagination', 'RepoListNumPagination', ([], {}), '()\n', (12687, 12689), False, 'from utils.paginations import NumPagination, RepoListNumPagination\n'), ((12775, 12814), 'modules.repo.serializers.RepoListSerializer', 'RepoListSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (12793, 12814), False, 'from modules.repo.serializers import RepoSerializer, RepoApplyDealSerializer, RepoListSerializer, RepoCommonSerializer, RepoUserSerializer\n'), ((13465, 13475), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (13473, 13475), False, 'from rest_framework.response import Response\n'), ((13885, 13895), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (13893, 13895), False, 'from rest_framework.response import Response\n'), ((1718, 1738), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1736, 1738), False, 'from django.db import transaction, IntegrityError\n'), ((3311, 3397), 'modules.cel.tasks.send_apply_result.delay', 'send_apply_result.delay', (['request.user.uid', 'repo_user.repo_id', 'repo_user.uid', '(False)'], {}), '(request.user.uid, repo_user.repo_id, repo_user.uid,\n False)\n', (3334, 3397), False, 'from modules.cel.tasks import export_all_docs, send_apply_result\n'), ((3474, 3484), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (3482, 3484), False, 'from rest_framework.response import Response\n'), ((4250, 4266), 'utils.exceptions.ThrottledError', 'ThrottledError', ([], {}), '()\n', (4264, 4266), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((4611, 4648), 'modules.repo.models.Repo.objects.filter', 'Repo.objects.filter', ([], {'is_deleted': '(False)'}), '(is_deleted=False)\n', (4630, 4648), False, 'from modules.repo.models import Repo, RepoUser\n'), ((4899, 4953), 'modules.repo.models.Repo.objects.filter', 'Repo.objects.filter', ([], {'id__in': 'repo_ids', 'is_deleted': '(False)'}), '(id__in=repo_ids, is_deleted=False)\n', (4918, 4953), False, 'from modules.repo.models import Repo, RepoUser\n'), ((6661, 6677), 'utils.exceptions.OperationError', 'OperationError', ([], {}), '()\n', (6675, 6677), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((8410, 8424), 'rest_framework.response.Response', 'Response', (['(True)'], {}), '(True)\n', (8418, 8424), False, 'from rest_framework.response import Response\n'), ((8910, 9042), 'modules.doc.models.Doc.objects.get', 'Doc.objects.get', ([], {'id': "data['doc_id']", 'repo_id': 'instance.id', 'is_deleted': '(False)', 'is_publish': '(True)', 'available': 'DocAvailableChoices.PUBLIC'}), "(id=data['doc_id'], repo_id=instance.id, is_deleted=False,\n is_publish=True, available=DocAvailableChoices.PUBLIC)\n", (8925, 9042), False, 'from modules.doc.models import Doc, PinDoc\n'), ((9233, 9287), 'modules.doc.models.PinDoc.objects.get', 'PinDoc.objects.get', ([], {'doc_id': "data['doc_id']", 'in_use': '(True)'}), "(doc_id=data['doc_id'], in_use=True)\n", (9251, 9287), False, 'from modules.doc.models import Doc, PinDoc\n'), ((9686, 9702), 'utils.exceptions.ParamsNotFound', 'ParamsNotFound', ([], {}), '()\n', (9700, 9702), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((9765, 9889), 'modules.doc.models.Doc.objects.get', 'Doc.objects.get', ([], {'id': 'doc_id', 'repo_id': 'instance.id', 'is_deleted': '(False)', 'is_publish': '(True)', 'available': 'DocAvailableChoices.PUBLIC'}), '(id=doc_id, repo_id=instance.id, is_deleted=False,\n is_publish=True, available=DocAvailableChoices.PUBLIC)\n', (9780, 9889), False, 'from modules.doc.models import Doc, PinDoc\n'), ((12971, 13057), 'modules.repo.models.RepoUser.objects.create', 'RepoUser.objects.create', ([], {'repo_id': 'repo.id', 'uid': 'uid', 'u_type': 'UserTypeChoices.VISITOR'}), '(repo_id=repo.id, uid=uid, u_type=UserTypeChoices.\n VISITOR)\n', (12994, 13057), False, 'from modules.repo.models import Repo, RepoUser\n'), ((13698, 13730), 'modules.repo.models.Repo.objects.get', 'Repo.objects.get', ([], {'id': 'doc.repo_id'}), '(id=doc.repo_id)\n', (13714, 13730), False, 'from modules.repo.models import Repo, RepoUser\n'), ((14115, 14131), 'utils.exceptions.OperationError', 'OperationError', ([], {}), '()\n', (14129, 14131), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((14356, 14366), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (14364, 14366), False, 'from rest_framework.response import Response\n'), ((2221, 2297), 'modules.repo.models.RepoUser.objects.filter', 'RepoUser.objects.filter', ([], {'repo_id': 'instance.id', 'u_type': 'UserTypeChoices.VISITOR'}), '(repo_id=instance.id, u_type=UserTypeChoices.VISITOR)\n', (2244, 2297), False, 'from modules.repo.models import Repo, RepoUser\n'), ((3746, 3769), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3767, 3769), False, 'import datetime\n'), ((8084, 8134), 'modules.doc.models.Doc.objects.filter', 'Doc.objects.filter', ([], {'id': 'doc_id', 'repo_id': 'instance.id'}), '(id=doc_id, repo_id=instance.id)\n', (8102, 8134), False, 'from modules.doc.models import Doc, PinDoc\n'), ((9185, 9201), 'utils.exceptions.OperationError', 'OperationError', ([], {}), '()\n', (9199, 9201), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((10032, 10048), 'utils.exceptions.OperationError', 'OperationError', ([], {}), '()\n', (10046, 10048), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((10057, 10106), 'modules.doc.models.PinDoc.objects.filter', 'PinDoc.objects.filter', ([], {'doc_id': 'doc_id', 'in_use': '(True)'}), '(doc_id=doc_id, in_use=True)\n', (10078, 10106), False, 'from modules.doc.models import Doc, PinDoc\n'), ((10511, 10571), 'modules.repo.models.RepoUser.objects.filter', 'RepoUser.objects.filter', ([], {'repo_id': 'repo_id', 'u_type__in': 'u_types'}), '(repo_id=repo_id, u_type__in=u_types)\n', (10534, 10571), False, 'from modules.repo.models import Repo, RepoUser\n'), ((13859, 13869), 'utils.exceptions.Error404', 'Error404', ([], {}), '()\n', (13867, 13869), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((14423, 14439), 'utils.exceptions.OperationError', 'OperationError', ([], {}), '()\n', (14437, 14439), False, 'from utils.exceptions import OperationError, UserNotExist, Error404, ThrottledError, ParamsNotFound\n'), ((3237, 3248), 'django.utils.translation.gettext', '_', (['"""该申请不存在"""'], {}), "('该申请不存在')\n", (3238, 3248), True, 'from django.utils.translation import gettext as _\n'), ((11480, 11503), 'django.db.models.Q', 'Q', ([], {'uid': 'request.user.uid'}), '(uid=request.user.uid)\n', (11481, 11503), False, 'from django.db.models import Q\n'), ((13164, 13211), 'django.utils.translation.ngettext', 'ngettext', (['"""已申请或加入%(name)s"""', '"""已申请或加入%(name)s"""', '(1)'], {}), "('已申请或加入%(name)s', '已申请或加入%(name)s', 1)\n", (13172, 13211), False, 'from django.utils.translation import ngettext\n'), ((4744, 4767), 'django.db.models.Q', 'Q', ([], {'uid': 'request.user.uid'}), '(uid=request.user.uid)\n', (4745, 4767), False, 'from django.db.models import Q\n'), ((4786, 4846), 'django.db.models.Q', 'Q', ([], {'u_type__in': '[UserTypeChoices.ADMIN, UserTypeChoices.OWNER]'}), '(u_type__in=[UserTypeChoices.ADMIN, UserTypeChoices.OWNER])\n', (4787, 4846), False, 'from django.db.models import Q\n'), ((6245, 6267), 'django.db.models.Q', 'Q', ([], {'repo_id': 'instance.id'}), '(repo_id=instance.id)\n', (6246, 6267), False, 'from django.db.models import Q\n'), ((6270, 6280), 'django.db.models.Q', 'Q', ([], {'uid': 'uid'}), '(uid=uid)\n', (6271, 6280), False, 'from django.db.models import Q\n'), ((6284, 6315), 'django.db.models.Q', 'Q', ([], {'u_type': 'UserTypeChoices.OWNER'}), '(u_type=UserTypeChoices.OWNER)\n', (6285, 6315), False, 'from django.db.models import Q\n'), ((6723, 6745), 'django.db.models.Q', 'Q', ([], {'repo_id': 'instance.id'}), '(repo_id=instance.id)\n', (6724, 6745), False, 'from django.db.models import Q\n'), ((6748, 6758), 'django.db.models.Q', 'Q', ([], {'uid': 'uid'}), '(uid=uid)\n', (6749, 6758), False, 'from django.db.models import Q\n'), ((6762, 6793), 'django.db.models.Q', 'Q', ([], {'u_type': 'UserTypeChoices.OWNER'}), '(u_type=UserTypeChoices.OWNER)\n', (6763, 6793), False, 'from django.db.models import Q\n'), ((11507, 11540), 'django.db.models.Q', 'Q', ([], {'u_type': 'UserTypeChoices.VISITOR'}), '(u_type=UserTypeChoices.VISITOR)\n', (11508, 11540), False, 'from django.db.models import Q\n'), ((14198, 14220), 'django.db.models.Q', 'Q', ([], {'repo_id': 'instance.id'}), '(repo_id=instance.id)\n', (14199, 14220), False, 'from django.db.models import Q\n'), ((14239, 14262), 'django.db.models.Q', 'Q', ([], {'uid': 'request.user.uid'}), '(uid=request.user.uid)\n', (14240, 14262), False, 'from django.db.models import Q\n'), ((14282, 14313), 'django.db.models.Q', 'Q', ([], {'u_type': 'UserTypeChoices.OWNER'}), '(u_type=UserTypeChoices.OWNER)\n', (14283, 14313), False, 'from django.db.models import Q\n')] |
import os,sys,glob,shutil
lists = glob.glob('./*/*.log')
lists += glob.glob('./*/*.tar')
lists += glob.glob('./*/*.pk')
lists += glob.glob('./*/*weights.json')
for f in lists:
os.remove(f)
| [
"glob.glob",
"os.remove"
] | [((35, 57), 'glob.glob', 'glob.glob', (['"""./*/*.log"""'], {}), "('./*/*.log')\n", (44, 57), False, 'import os, sys, glob, shutil\n'), ((67, 89), 'glob.glob', 'glob.glob', (['"""./*/*.tar"""'], {}), "('./*/*.tar')\n", (76, 89), False, 'import os, sys, glob, shutil\n'), ((99, 120), 'glob.glob', 'glob.glob', (['"""./*/*.pk"""'], {}), "('./*/*.pk')\n", (108, 120), False, 'import os, sys, glob, shutil\n'), ((130, 160), 'glob.glob', 'glob.glob', (['"""./*/*weights.json"""'], {}), "('./*/*weights.json')\n", (139, 160), False, 'import os, sys, glob, shutil\n'), ((182, 194), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (191, 194), False, 'import os, sys, glob, shutil\n')] |
from model.contact import Contact
import random
import allure
def test_delete_some_contact(app, db, check_ui):
if len(db.get_contacts_list()) == 0:
app.contact.add_new_contact(Contact(firstname="Testik", middlename="Midtest", lastname="Lasttest", nickname="Nickname test", title="Mrs", company="Test Company", street="5th Avenue",
homephone="15", mobilephone="111999333", workphone="12123342", fax="2345645", email="<EMAIL>", birthday_day="11",
birthday_month="July", birthday_year="1991", anniversary_day="8", anniversary_month="November", anniversary_year="1991", address2="Sec address", phone2="163434", note="testtesttest note"))
with allure.step("Given a contacts list"):
old_contacts = db.get_contacts_list()
with allure.step("Given a contact from contacts list"):
contact = random.choice(old_contacts)
with allure.step("When I delete contact %s from the list" % contact):
app.contact.delete_contact_by_id(contact.id)
with allure.step("Then the new list is equal to the old list with the removed contact"):
assert len(old_contacts) - 1 == app.contact.count()
new_contacts = db.get_contacts_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
def clean(contact):
return Contact(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip())
new_contacts = map(clean, db.get_contacts_list())
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contacts_list(), key=Contact.id_or_max)
| [
"model.contact.Contact",
"allure.step",
"random.choice"
] | [((744, 780), 'allure.step', 'allure.step', (['"""Given a contacts list"""'], {}), "('Given a contacts list')\n", (755, 780), False, 'import allure\n'), ((837, 886), 'allure.step', 'allure.step', (['"""Given a contact from contacts list"""'], {}), "('Given a contact from contacts list')\n", (848, 886), False, 'import allure\n'), ((906, 933), 'random.choice', 'random.choice', (['old_contacts'], {}), '(old_contacts)\n', (919, 933), False, 'import random\n'), ((943, 1006), 'allure.step', 'allure.step', (["('When I delete contact %s from the list' % contact)"], {}), "('When I delete contact %s from the list' % contact)\n", (954, 1006), False, 'import allure\n'), ((1070, 1157), 'allure.step', 'allure.step', (['"""Then the new list is equal to the old list with the removed contact"""'], {}), "(\n 'Then the new list is equal to the old list with the removed contact')\n", (1081, 1157), False, 'import allure\n'), ((189, 672), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""Testik"""', 'middlename': '"""Midtest"""', 'lastname': '"""Lasttest"""', 'nickname': '"""Nickname test"""', 'title': '"""Mrs"""', 'company': '"""Test Company"""', 'street': '"""5th Avenue"""', 'homephone': '"""15"""', 'mobilephone': '"""111999333"""', 'workphone': '"""12123342"""', 'fax': '"""2345645"""', 'email': '"""<EMAIL>"""', 'birthday_day': '"""11"""', 'birthday_month': '"""July"""', 'birthday_year': '"""1991"""', 'anniversary_day': '"""8"""', 'anniversary_month': '"""November"""', 'anniversary_year': '"""1991"""', 'address2': '"""Sec address"""', 'phone2': '"""163434"""', 'note': '"""testtesttest note"""'}), "(firstname='Testik', middlename='Midtest', lastname='Lasttest',\n nickname='Nickname test', title='Mrs', company='Test Company', street=\n '5th Avenue', homephone='15', mobilephone='111999333', workphone=\n '12123342', fax='2345645', email='<EMAIL>', birthday_day='11',\n birthday_month='July', birthday_year='1991', anniversary_day='8',\n anniversary_month='November', anniversary_year='1991', address2=\n 'Sec address', phone2='163434', note='testtesttest note')\n", (196, 672), False, 'from model.contact import Contact\n')] |
import os
from PIL import Image
file_deliminator = "//"
valid_image_file_formats = {'png', 'jpg'}
class SWData:
data_img = {}
data_class = {}
base_path = ""
data_classes = set()
data_images_equal_size = False
def __int__(self):
pass
def load_img_datafiles(self, folder_location):
"""
Creates a dictionary of filenames, classification and location
"""
self.base_path = folder_location
self.data_classes = os.listdir(folder_location)
for data_class in self.data_classes:
for file in os.listdir(self.base_path + file_deliminator + data_class):
for file_type in valid_image_file_formats:
if file.endswith(file_type):
self.data_class[
self.base_path + file_deliminator + data_class + file_deliminator + file] = data_class
def load_image_data(self):
"""
Load the image data
"""
for data_class in self.data_classes:
for file in os.listdir(self.base_path + file_deliminator + data_class):
self.data[self.base_path + file_deliminator + data_class + file_deliminator + file] = Image.open()
def is_datafiles_all_same_dim(self):
img_size = (-1, -1)
for file in self.data_class:
img = Image.open(file)
if img_size == (-1, -1):
img_size = img.size
img.close()
elif img_size != img.size:
return False
else:
img.close()
return True
def clear(self):
self.data_class = {}
self.base_path = ""
self.data_classes = []
def getClass(self, class_name):
returned_data = []
for img in self.data_img:
if self.data_classes(img) == class_name:
returned_data.append(self.data_classes(img))
return returned_data
| [
"os.listdir",
"PIL.Image.open"
] | [((487, 514), 'os.listdir', 'os.listdir', (['folder_location'], {}), '(folder_location)\n', (497, 514), False, 'import os\n'), ((584, 642), 'os.listdir', 'os.listdir', (['(self.base_path + file_deliminator + data_class)'], {}), '(self.base_path + file_deliminator + data_class)\n', (594, 642), False, 'import os\n'), ((1061, 1119), 'os.listdir', 'os.listdir', (['(self.base_path + file_deliminator + data_class)'], {}), '(self.base_path + file_deliminator + data_class)\n', (1071, 1119), False, 'import os\n'), ((1361, 1377), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (1371, 1377), False, 'from PIL import Image\n'), ((1223, 1235), 'PIL.Image.open', 'Image.open', ([], {}), '()\n', (1233, 1235), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-20 05:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("jobs", "0013_add_job_id_field_in_submission_model")]
operations = [
migrations.RenameField(
model_name="submission", old_name="job_id", new_name="job_name"
)
]
| [
"django.db.migrations.RenameField"
] | [((293, 385), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""submission"""', 'old_name': '"""job_id"""', 'new_name': '"""job_name"""'}), "(model_name='submission', old_name='job_id', new_name\n ='job_name')\n", (315, 385), False, 'from django.db import migrations\n')] |
import collections
import io
import numpy as np
import tensorflow as tf
hidden_dim = 1000
input_size = 28 * 28
output_size = 10
train_data_file = "/home/harper/dataset/mnist/train-images.idx3-ubyte"
train_label_file = "/home/harper/dataset/mnist/train-labels.idx1-ubyte"
test_data_file = "/home/harper/dataset/mnist/t10k-images.idx3-ubyte"
test_label_file = "/home/harper/dataset/mnist/t10k-labels.idx1-ubyte"
Datasets = collections.namedtuple("Datasets", ['train', 'test'])
class Dataset(object):
def __init__(self, data, label):
self.data = data
self.label = label
self.size = self.data.shape[0]
perm = np.random.permutation(self.size)
self.data = self.data[perm]
self.label = self.label[perm]
self.start = 0
def next_batch(self, batch_size):
if self.start == self.size:
perm = np.random.permutation(self.size)
self.data = self.data[perm]
self.label = self.label[perm]
self.start = 0
start = self.start
end = min(start + batch_size, self.size)
self.start = end
return [self.data[start:end], self.label[start:end]]
def read_data(file):
with io.open(file, 'rb') as stream:
magic = stream.read(4)
num_record = np.frombuffer(stream.read(4), np.dtype(np.uint32).newbyteorder(">"))[0]
raw = stream.read(input_size * num_record)
flat = np.frombuffer(raw, np.uint8).astype(np.float32) / 255
result = flat.reshape([-1, input_size])
return result
def read_label(file):
with io.open(file, 'rb') as stream:
magic = stream.read(4)
num_record = np.frombuffer(stream.read(4), np.dtype(np.uint32).newbyteorder(">"))[0]
raw = stream.read(num_record)
return np.frombuffer(raw, np.uint8).astype(np.int32)
def read_datasets():
train_data = read_data(train_data_file)
train_label = read_label(train_label_file)
test_data = read_data(test_data_file)
test_label = read_label(test_label_file)
return Datasets(train=Dataset(train_data, train_label),
test=Dataset(test_data, test_label))
mnist = read_datasets()
x = tf.placeholder(tf.float32, [None, input_size], name="x")
label = tf.placeholder(tf.int64, [None], name="label")
with tf.name_scope("layer1"):
w1 = tf.Variable(tf.truncated_normal([input_size, hidden_dim], stddev=0.01), name="w1")
b1 = tf.Variable(tf.zeros([hidden_dim]), name="b1")
layer1_out = tf.nn.relu(tf.matmul(x, w1) + b1, "l1o")
with tf.name_scope("layer2"):
w2 = tf.Variable(tf.truncated_normal([hidden_dim, output_size], stddev=0.01), name="w2")
b2 = tf.Variable(tf.zeros([output_size]), name="b2")
layer2_out = tf.matmul(layer1_out, w2) + b2
with tf.name_scope("loss"):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=layer2_out,
name="cross_entropy")
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope("sgd"):
train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
with tf.name_scope("accuracy"):
prediction = tf.argmax(layer2_out, 1, name="prediction")
correct_prediction = tf.equal(prediction, label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.name_scope("summary"):
tf.summary.histogram('b1', b1)
tf.summary.histogram('w1', w1)
tf.summary.histogram('w2', w2)
tf.summary.histogram('b2', b2)
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter("/home/harper/tftemp")
train_writer.add_graph(tf.get_default_graph())
builder = tf.saved_model.builder.SavedModelBuilder("/home/harper/mnistmodel")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(5000):
batch = mnist.train.next_batch(50)
if batch is None:
break
if i % 100 == 0:
train_accuracy, merged_summary = sess.run([accuracy, merged], feed_dict={x: batch[0], label: batch[1]})
train_writer.add_summary(merged_summary, i)
print('step %d, training accuracy %g' % (i, train_accuracy))
print(
'test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.data, label: mnist.test.label}))
_, merged_summary = sess.run([train_step, merged], feed_dict={x: batch[0], label: batch[1]})
train_writer.add_summary(merged_summary, i)
print(
'test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.data, label: mnist.test.label}))
# Build Signature to save to model
signature = tf.saved_model.signature_def_utils.build_signature_def(
inputs={
'input': tf.saved_model.utils.build_tensor_info(x)
},
outputs={
'output': tf.saved_model.utils.build_tensor_info(prediction)
},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
)
builder.add_meta_graph_and_variables(sess,
[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature})
builder.save()
train_writer.close()
| [
"tensorflow.equal",
"io.open",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.matmul",
"numpy.frombuffer",
"tensorflow.summary.scalar",
"tensorflow.train.AdamOptimizer",
"numpy.d... | [((425, 478), 'collections.namedtuple', 'collections.namedtuple', (['"""Datasets"""', "['train', 'test']"], {}), "('Datasets', ['train', 'test'])\n", (447, 478), False, 'import collections\n'), ((2192, 2248), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, input_size]'], {'name': '"""x"""'}), "(tf.float32, [None, input_size], name='x')\n", (2206, 2248), True, 'import tensorflow as tf\n'), ((2257, 2303), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {'name': '"""label"""'}), "(tf.int64, [None], name='label')\n", (2271, 2303), True, 'import tensorflow as tf\n'), ((3004, 3033), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (3018, 3033), True, 'import tensorflow as tf\n'), ((3576, 3598), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3596, 3598), True, 'import tensorflow as tf\n'), ((3615, 3659), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""/home/harper/tftemp"""'], {}), "('/home/harper/tftemp')\n", (3636, 3659), True, 'import tensorflow as tf\n'), ((3717, 3784), 'tensorflow.saved_model.builder.SavedModelBuilder', 'tf.saved_model.builder.SavedModelBuilder', (['"""/home/harper/mnistmodel"""'], {}), "('/home/harper/mnistmodel')\n", (3757, 3784), True, 'import tensorflow as tf\n'), ((2310, 2333), 'tensorflow.name_scope', 'tf.name_scope', (['"""layer1"""'], {}), "('layer1')\n", (2323, 2333), True, 'import tensorflow as tf\n'), ((2547, 2570), 'tensorflow.name_scope', 'tf.name_scope', (['"""layer2"""'], {}), "('layer2')\n", (2560, 2570), True, 'import tensorflow as tf\n'), ((2775, 2796), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (2788, 2796), True, 'import tensorflow as tf\n'), ((2818, 2924), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'label', 'logits': 'layer2_out', 'name': '"""cross_entropy"""'}), "(labels=label, logits=\n layer2_out, name='cross_entropy')\n", (2864, 2924), True, 'import tensorflow as tf\n'), ((3040, 3060), 'tensorflow.name_scope', 'tf.name_scope', (['"""sgd"""'], {}), "('sgd')\n", (3053, 3060), True, 'import tensorflow as tf\n'), ((3138, 3163), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (3151, 3163), True, 'import tensorflow as tf\n'), ((3182, 3225), 'tensorflow.argmax', 'tf.argmax', (['layer2_out', '(1)'], {'name': '"""prediction"""'}), "(layer2_out, 1, name='prediction')\n", (3191, 3225), True, 'import tensorflow as tf\n'), ((3251, 3278), 'tensorflow.equal', 'tf.equal', (['prediction', 'label'], {}), '(prediction, label)\n', (3259, 3278), True, 'import tensorflow as tf\n'), ((3356, 3380), 'tensorflow.name_scope', 'tf.name_scope', (['"""summary"""'], {}), "('summary')\n", (3369, 3380), True, 'import tensorflow as tf\n'), ((3386, 3416), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b1"""', 'b1'], {}), "('b1', b1)\n", (3406, 3416), True, 'import tensorflow as tf\n'), ((3421, 3451), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""w1"""', 'w1'], {}), "('w1', w1)\n", (3441, 3451), True, 'import tensorflow as tf\n'), ((3456, 3486), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""w2"""', 'w2'], {}), "('w2', w2)\n", (3476, 3486), True, 'import tensorflow as tf\n'), ((3491, 3521), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b2"""', 'b2'], {}), "('b2', b2)\n", (3511, 3521), True, 'import tensorflow as tf\n'), ((3526, 3565), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (3543, 3565), True, 'import tensorflow as tf\n'), ((3683, 3705), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3703, 3705), True, 'import tensorflow as tf\n'), ((3791, 3803), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3801, 3803), True, 'import tensorflow as tf\n'), ((647, 679), 'numpy.random.permutation', 'np.random.permutation', (['self.size'], {}), '(self.size)\n', (668, 679), True, 'import numpy as np\n'), ((1208, 1227), 'io.open', 'io.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (1215, 1227), False, 'import io\n'), ((1588, 1607), 'io.open', 'io.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (1595, 1607), False, 'import io\n'), ((2356, 2414), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[input_size, hidden_dim]'], {'stddev': '(0.01)'}), '([input_size, hidden_dim], stddev=0.01)\n', (2375, 2414), True, 'import tensorflow as tf\n'), ((2448, 2470), 'tensorflow.zeros', 'tf.zeros', (['[hidden_dim]'], {}), '([hidden_dim])\n', (2456, 2470), True, 'import tensorflow as tf\n'), ((2593, 2652), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_dim, output_size]'], {'stddev': '(0.01)'}), '([hidden_dim, output_size], stddev=0.01)\n', (2612, 2652), True, 'import tensorflow as tf\n'), ((2686, 2709), 'tensorflow.zeros', 'tf.zeros', (['[output_size]'], {}), '([output_size])\n', (2694, 2709), True, 'import tensorflow as tf\n'), ((2739, 2764), 'tensorflow.matmul', 'tf.matmul', (['layer1_out', 'w2'], {}), '(layer1_out, w2)\n', (2748, 2764), True, 'import tensorflow as tf\n'), ((3309, 3348), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3316, 3348), True, 'import tensorflow as tf\n'), ((3826, 3859), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3857, 3859), True, 'import tensorflow as tf\n'), ((872, 904), 'numpy.random.permutation', 'np.random.permutation', (['self.size'], {}), '(self.size)\n', (893, 904), True, 'import numpy as np\n'), ((2511, 2527), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (2520, 2527), True, 'import tensorflow as tf\n'), ((3079, 3108), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (3101, 3108), True, 'import tensorflow as tf\n'), ((1796, 1824), 'numpy.frombuffer', 'np.frombuffer', (['raw', 'np.uint8'], {}), '(raw, np.uint8)\n', (1809, 1824), True, 'import numpy as np\n'), ((4788, 4829), 'tensorflow.saved_model.utils.build_tensor_info', 'tf.saved_model.utils.build_tensor_info', (['x'], {}), '(x)\n', (4826, 4829), True, 'import tensorflow as tf\n'), ((4881, 4931), 'tensorflow.saved_model.utils.build_tensor_info', 'tf.saved_model.utils.build_tensor_info', (['prediction'], {}), '(prediction)\n', (4919, 4931), True, 'import tensorflow as tf\n'), ((1431, 1459), 'numpy.frombuffer', 'np.frombuffer', (['raw', 'np.uint8'], {}), '(raw, np.uint8)\n', (1444, 1459), True, 'import numpy as np\n'), ((1322, 1341), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (1330, 1341), True, 'import numpy as np\n'), ((1701, 1720), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (1709, 1720), True, 'import numpy as np\n')] |
"""
Metrics extensions for routes.
"""
try:
from microcosm_metrics.classifier import Classifier
except ImportError:
raise Exception("Route metrics require 'microcosm-metrics'")
from microcosm_flask.audit import parse_response
from microcosm_flask.errors import extract_status_code
class StatusCodeClassifier(Classifier):
"""
Label route result/error with its status code.
"""
def label_result(self, result):
_, status_code, _ = parse_response(result)
return str(status_code)
def label_error(self, error):
status_code = extract_status_code(error)
return str(status_code)
| [
"microcosm_flask.errors.extract_status_code",
"microcosm_flask.audit.parse_response"
] | [((466, 488), 'microcosm_flask.audit.parse_response', 'parse_response', (['result'], {}), '(result)\n', (480, 488), False, 'from microcosm_flask.audit import parse_response\n'), ((578, 604), 'microcosm_flask.errors.extract_status_code', 'extract_status_code', (['error'], {}), '(error)\n', (597, 604), False, 'from microcosm_flask.errors import extract_status_code\n')] |
import torch
import torch.nn as nn
__all__ = [
'ConcatEmbeddings',
'PassThrough',
'MeanOfEmbeddings',
]
class ConcatEmbeddings(nn.Module):
def __init__(self, fields):
super().__init__()
self.output_dim = sum([field.output_dim for field in fields.values()])
self.embedders = nn.ModuleList([field.build_embedder() for field in fields.values()])
def forward(self, x):
res = [embedder(values) for embedder, values in zip(self.embedders, x)]
return torch.cat(res, dim=1)
class PassThrough(nn.Module):
def forward(self, x):
return x
class MeanOfEmbeddings(nn.Module):
def __init__(self, vocab_size, emb_dim):
super().__init__()
self.emb = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
def forward(self, x):
mask = (x != 0).float()[:, :, None]
emb = self.emb(x) * mask.float()
s = mask.squeeze(2).sum(1).clamp_min(1.)[:, None].float()
return emb.sum(dim=1) / s
| [
"torch.nn.Embedding",
"torch.cat"
] | [((508, 529), 'torch.cat', 'torch.cat', (['res'], {'dim': '(1)'}), '(res, dim=1)\n', (517, 529), False, 'import torch\n'), ((733, 781), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'emb_dim'], {'padding_idx': '(0)'}), '(vocab_size, emb_dim, padding_idx=0)\n', (745, 781), True, 'import torch.nn as nn\n')] |
import json
import tempfile
from collections import OrderedDict
import os
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from utils import BoxList
#from utils.pycocotools_rotation import Rotation_COCOeval
def evaluate(dataset, predictions, result_file, score_threshold=None, epoch=0):
coco_results = {}
coco_results['bbox'] = make_coco_detection(predictions, dataset, score_threshold)
results = COCOResult('bbox')
path = os.path.join(result_file, str(epoch)+'_result.json')
res = evaluate_predictions_on_coco(dataset.coco, coco_results['bbox'], path, 'bbox')
results.update(res)
# with tempfile.NamedTemporaryFile() as f:
# path = f.name
# res = evaluate_predictions_on_coco(
# dataset.coco, coco_results['bbox'], path, 'bbox'
# )
# results.update(res)
print(results)
return results.results
def evaluate_predictions_on_coco(coco_gt, results, result_file, iou_type):
with open(result_file, 'w') as f:
json.dump(results, f)
coco_dt = coco_gt.loadRes(str(result_file)) if results else COCO()
coco_eval = Rotation_COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.params.iouThrs = np.linspace(.25, 0.95, int(np.round((0.95 - .25) / .05)) + 1, endpoint=True)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
score_threshold = compute_thresholds_for_classes(coco_eval)
return coco_eval
def compute_thresholds_for_classes(coco_eval):
precision = coco_eval.eval['precision']
precision = precision[0, :, :, 0, -1]
scores = coco_eval.eval['scores']
scores = scores[0, :, :, 0, -1]
recall = np.linspace(0, 1, num=precision.shape[0])
recall = recall[:, None]
f1 = (2 * precision * recall) / (np.maximum(precision + recall, 1e-6))
max_f1 = f1.max(0)
max_f1_id = f1.argmax(0)
scores = scores[max_f1_id, range(len(max_f1_id))]
print('Maximum f1 for classes:')
print(list(max_f1))
print('Score thresholds for classes')
print(list(scores))
print('')
return scores
def make_coco_detection(predictions, dataset, score_threshold=None):
coco_results = []
for id, pred in enumerate(predictions):
orig_id = dataset.id2img[id]
if len(pred) == 0:
continue
img_meta = dataset.get_image_meta(id)
pred_resize = map_to_origin_image(img_meta, pred, flipmode='no', resize_mode='letterbox')
boxes = pred_resize.bbox.tolist()
scores = pred_resize.get_field('scores').tolist()
labels = pred_resize.get_field('labels').tolist()
labels = [dataset.id2category[i] for i in labels]
if score_threshold is None:
score_threshold = [0]*len(dataset.id2category)
coco_results.extend(
[
{
'image_id': orig_id,
'category_id': labels[k],
'bbox': box,
'score': scores[k],
}
for k, box in enumerate(boxes)
if scores[k] > score_threshold[labels[k] - 1]
]
)
return coco_results
class COCOResult:
METRICS = {
'bbox': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
'segm': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
'box_proposal': [
'AR@100',
'ARs@100',
'ARm@100',
'ARl@100',
'AR@1000',
'ARs@1000',
'ARm@1000',
'ARl@1000',
],
'keypoints': ['AP', 'AP50', 'AP75', 'APm', 'APl'],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResult.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResult.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
return repr(self.results)
def map_to_origin_image(img_meta, pred, flipmode='no', resize_mode='letterbox'):
'''
img_meta: "id": int, "width": int, "height": int,"file_name": str,
pred: boxlist object
flipmode:'h':Horizontal flip,'v':vertical flip 'no': no flip
resize_mode: 'letterbox' , 'wrap'
'''
assert pred.mode == 'xyxyxyxy'
if flipmode == 'h':
pred = pred.transpose(0)
elif flipmode == 'v':
pred = pred.transpose(1)
elif flipmode == 'no':
pass
else:
raise Exception("unspported flip mode, 'h', 'v' or 'no' ")
width = img_meta['width']
height = img_meta['height']
resized_width, resized_height = pred.size
if resize_mode == 'letterbox':
if width > height:
scale = resized_width / width
size = (resized_width, int(scale * height))
else:
scale = resized_height / height
size = (int(width * scale), resized_height)
pred_resize = BoxList(pred.bbox, size, mode='xyxyxyxy')
pred_resize._copy_extra_fields(pred)
pred_resize = pred_resize.clip_to_image(remove_empty=True)
pred_resize = pred_resize.resize((width, height))
pred_resize = pred_resize.clip_to_image(remove_empty=True)
#pred_resize = pred_resize.convert('xywh')
elif resize_mode == 'wrap':
pred_resize = pred.resize((width, height))
pred_resize = pred_resize.convert('xyxyxyxy')
pred_resize = pred_resize.clip_to_image(remove_empty=True)
else:
raise Exception("unspported reisze mode, either 'letterbox' or 'wrap' ")
return pred_resize
| [
"collections.OrderedDict",
"numpy.round",
"utils.BoxList",
"pycocotools.coco.COCO",
"numpy.linspace",
"numpy.maximum",
"json.dump"
] | [((1758, 1799), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'precision.shape[0]'}), '(0, 1, num=precision.shape[0])\n', (1769, 1799), True, 'import numpy as np\n'), ((1082, 1103), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (1091, 1103), False, 'import json\n'), ((1171, 1177), 'pycocotools.coco.COCO', 'COCO', ([], {}), '()\n', (1175, 1177), False, 'from pycocotools.coco import COCO\n'), ((1870, 1907), 'numpy.maximum', 'np.maximum', (['(precision + recall)', '(1e-06)'], {}), '(precision + recall, 1e-06)\n', (1880, 1907), True, 'import numpy as np\n'), ((3970, 3983), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3981, 3983), False, 'from collections import OrderedDict\n'), ((5638, 5679), 'utils.BoxList', 'BoxList', (['pred.bbox', 'size'], {'mode': '"""xyxyxyxy"""'}), "(pred.bbox, size, mode='xyxyxyxy')\n", (5645, 5679), False, 'from utils import BoxList\n'), ((4053, 4123), 'collections.OrderedDict', 'OrderedDict', (['[(metric, -1) for metric in COCOResult.METRICS[iou_type]]'], {}), '([(metric, -1) for metric in COCOResult.METRICS[iou_type]])\n', (4064, 4123), False, 'from collections import OrderedDict\n'), ((1302, 1332), 'numpy.round', 'np.round', (['((0.95 - 0.25) / 0.05)'], {}), '((0.95 - 0.25) / 0.05)\n', (1310, 1332), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Example of creating hierarchical clusters from Scanette traces.
Some helpful documentation about SciPy hierarchical clustering::
* SciPy docs: https://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html
* https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial
@author: <EMAIL>
"""
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as hierarchy
from pathlib import Path
import sys
import agilkia
# %% Load Scanette traces
def cluster_hierarchical(path: Path, maxclust: int = None) -> agilkia.TraceSet:
"""
Load an Agilkia TraceSet and cluster it hierarchically.
Parameters
----------
path : Path
Agilkia TraceSet file to load.
maxclust : Optional[int]
If given, then the 'maxclust' criterion is used to extact up to this number of cluster.
If None, then we try to use the same default cutoff as the SciPy dendrogram graphing.
Returns
-------
traces : TraceSet
Returns the clustered TraceSet.
"""
traces = agilkia.TraceSet.load_from_json(path)
print(f"Loaded {len(traces)} traces from {path}")
data = traces.get_trace_data() # using default "action_counts" (bag-of-words)
# standardize the data
data_scaled = (data - data.mean()) / data.std()
# for columns with a single value, std() == 0, so we get NaN. Replace by 0.
data_scaled.fillna(value=0, inplace=True)
# %% Use SciPy to create a hierarchical clustering.
Z = hierarchy.linkage(data_scaled, method='ward')
print("Top 20 nodes of the tree are [left, right, distance, total_traces]:")
print(Z[-20:])
# %% Try to choose the 'best' cut to get flat clusters. (Better done by eye).
if maxclust is None:
# we try to do the same as the dendrogram default color algorithm.
blue = 0.7 * Z[:, 2].max()
colored = [d for d in Z[:, 2] if d < blue]
cutoff = max(colored)
why = f"distance <= {cutoff:.8f}"
flat = hierarchy.fcluster(Z, cutoff, criterion="distance")
else:
why = f"maxclust <= {maxclust}"
flat = hierarchy.fcluster(Z, maxclust, criterion="maxclust")
flat = flat - flat.min() # make it zero-based
num_clusters = flat.max() + 1
print(f"Criterion '{why}' chose {num_clusters} clusters.")
print(sorted(flat))
# %% Set clusters in TraceSet
traces.set_clusters(flat, linkage=Z)
# %% Get all the traces in the leftmost tree (root.left)
# root = hierarchy.to_tree(Z)
# t0 = root.left.pre_order()
# print(sorted(t0))
return traces
# %%
def main(args):
sys.setrecursionlimit(10000) # to handle larger trees
if len(args) >= 2:
maxclust = None
graph = False
for name in args[1:]:
if name == "--graph":
graph = True
elif name.startswith("--maxclust="):
maxclust = int(name.split("=")[1])
else:
traces = cluster_hierarchical(Path(name), maxclust)
if graph:
# %% Plot a basic Dendrogram.
# plt.figure(figsize=(10, 7))
plt.title(f"Dendrogram for {name}")
hierarchy.dendrogram(traces.cluster_linkage, p=20,
truncate_mode='level', show_leaf_counts=True)
plt.show()
traces.save_to_json(Path(name).with_suffix(".hier.json"))
else:
script = args[0] or "cluster_hierarchical.py"
print(f"This script reads traces in Agilkia *.json trace format, and")
print("clusters each TraceSet using the scipy.cluster.hierarchy.linkage() algorithm.")
print(f"It saves the clustered traces into <input>.hier.json")
print("")
print(f"If --maxclust=NUM is given, then up to NUM clusters will be chosen.")
print(f"Otherwise, the number of clusters will be chosen automatically using a")
print(f"cophenetic distance heuristic that should match coloring in the dendrogram tree.")
print("")
print(f"If '--graph' is specified, then each clustering is graphed as a Dendrogram .")
print(f"Setup: conda install -c mark.utting agilkia")
print("")
print(f"Usage: python {script} [--graph] [--maxclust=NUM] input.json ...")
# %%
if __name__ == "__main__":
main(sys.argv)
| [
"sys.setrecursionlimit",
"scipy.cluster.hierarchy.dendrogram",
"pathlib.Path",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.title",
"agilkia.TraceSet.load_from_json",
"scipy.cluster.hierarchy.fcluster",
"matplotlib.pyplot.show"
] | [((1102, 1139), 'agilkia.TraceSet.load_from_json', 'agilkia.TraceSet.load_from_json', (['path'], {}), '(path)\n', (1133, 1139), False, 'import agilkia\n'), ((1550, 1595), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['data_scaled'], {'method': '"""ward"""'}), "(data_scaled, method='ward')\n", (1567, 1595), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((2681, 2709), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (2702, 2709), False, 'import sys\n'), ((2053, 2104), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['Z', 'cutoff'], {'criterion': '"""distance"""'}), "(Z, cutoff, criterion='distance')\n", (2071, 2104), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((2170, 2223), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['Z', 'maxclust'], {'criterion': '"""maxclust"""'}), "(Z, maxclust, criterion='maxclust')\n", (2188, 2223), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((3062, 3072), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (3066, 3072), False, 'from pathlib import Path\n'), ((3230, 3265), 'matplotlib.pyplot.title', 'plt.title', (['f"""Dendrogram for {name}"""'], {}), "(f'Dendrogram for {name}')\n", (3239, 3265), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3386), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['traces.cluster_linkage'], {'p': '(20)', 'truncate_mode': '"""level"""', 'show_leaf_counts': '(True)'}), "(traces.cluster_linkage, p=20, truncate_mode='level',\n show_leaf_counts=True)\n", (3306, 3386), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((3444, 3454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3452, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3501), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (3495, 3501), False, 'from pathlib import Path\n')] |
import pytest
import yaml
from django.conf.urls import url
from rest_framework.test import APIClient
from drf_spectacular.validation import validate_schema
from drf_spectacular.views import SpectacularAPIView
urlpatterns = [url(r'^api/schema$', SpectacularAPIView.as_view(), name='schema')]
@pytest.mark.urls(__name__)
def test_spectacular_view(no_warnings):
response = APIClient().get('/api/schema')
assert response.status_code == 200
assert response.content.startswith(b'openapi: 3.0.3\n')
assert response.accepted_media_type == 'application/vnd.oai.openapi'
schema = yaml.load(response.content, Loader=yaml.SafeLoader)
validate_schema(schema)
| [
"drf_spectacular.validation.validate_schema",
"drf_spectacular.views.SpectacularAPIView.as_view",
"yaml.load",
"rest_framework.test.APIClient",
"pytest.mark.urls"
] | [((296, 322), 'pytest.mark.urls', 'pytest.mark.urls', (['__name__'], {}), '(__name__)\n', (312, 322), False, 'import pytest\n'), ((594, 645), 'yaml.load', 'yaml.load', (['response.content'], {'Loader': 'yaml.SafeLoader'}), '(response.content, Loader=yaml.SafeLoader)\n', (603, 645), False, 'import yaml\n'), ((650, 673), 'drf_spectacular.validation.validate_schema', 'validate_schema', (['schema'], {}), '(schema)\n', (665, 673), False, 'from drf_spectacular.validation import validate_schema\n'), ((247, 275), 'drf_spectacular.views.SpectacularAPIView.as_view', 'SpectacularAPIView.as_view', ([], {}), '()\n', (273, 275), False, 'from drf_spectacular.views import SpectacularAPIView\n'), ((378, 389), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (387, 389), False, 'from rest_framework.test import APIClient\n')] |
# -*- coding: utf-8 -*-
"""Electrical billing for small consumers in Spain using PVPC. Bill dataclasses."""
from datetime import datetime
from typing import Iterator, List
import attr
import pandas as pd
from pvpcbill.base import Base
from pvpcbill.official import (
MARGEN_COMERC_EUR_KW_YEAR_MCF,
round_money,
round_sum_money,
split_in_tariff_periods,
TaxZone,
TERM_ENER_PEAJE_ACC_EUR_KWH_TEA,
TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA,
TipoPeaje,
)
DEFAULT_CUPS = "ES00XXXXXXXXXXXXXXDB"
DEFAULT_POTENCIA_CONTRATADA_KW = 3.45
DEFAULT_BONO_SOCIAL = False
DEFAULT_IMPUESTO_ELECTRICO = 0.0511269632 # 4,864% por 1,05113
DEFAULT_ALQUILER_CONT_ANUAL = 0.81 * 12 # € / año para monofásico
@attr.s(auto_attribs=True)
class FacturaConfig(Base):
"""Dataclass to store information related to the electric contract."""
tipo_peaje: TipoPeaje = attr.ib(default=TipoPeaje.GEN)
potencia_contratada: float = attr.ib(default=DEFAULT_POTENCIA_CONTRATADA_KW)
con_bono_social: bool = attr.ib(default=DEFAULT_BONO_SOCIAL)
zona_impuestos: TaxZone = attr.ib(default=TaxZone.PENINSULA_BALEARES)
alquiler_anual: float = attr.ib(default=DEFAULT_ALQUILER_CONT_ANUAL)
impuesto_electrico: float = attr.ib(default=DEFAULT_IMPUESTO_ELECTRICO)
cups: str = attr.ib(default=DEFAULT_CUPS)
@attr.s(auto_attribs=True)
class EnergykWhTariffPeriod(Base):
"""Dataclass to store info related to the energy in 1 tariff period."""
name: str = attr.ib()
coste_peaje_acceso_tea: float = attr.ib(default=0.0)
coste_energia_tcu: float = attr.ib(default=0.0)
energia_total: float = attr.ib(default=0.0)
@attr.s(auto_attribs=True)
class FacturaBilledPeriod(Base):
"""Dataclass to store info related to 1 billed period inside a bill."""
billed_days: int = attr.ib() # nº de días del periodo facturado
# TODO cambiar distinción tarifaria de 'año' a periodo reglamentario
year: int = attr.ib()
# filled when process hourly consumption
termino_fijo_peaje_acceso: float = attr.ib(default=0.0)
termino_fijo_comercializacion: float = attr.ib(default=0.0)
termino_fijo_total: float = attr.ib(default=0.0)
energy_periods: List[EnergykWhTariffPeriod] = attr.ib(factory=list)
@classmethod
def from_hourly_data(
cls,
consumo: pd.Series,
pvpc_tcu: pd.Series,
tipo_peaje: TipoPeaje,
potencia_contratada: float,
):
"""
TODO redo doc
"""
year = consumo.index[0].year
billed_days = (consumo.index[-1] - consumo.index[0]).days + 1
energy_periods = [
EnergykWhTariffPeriod(
name=f"P{i+1}",
coste_peaje_acceso_tea=round_money(cons_period.sum() * coef_tea),
coste_energia_tcu=round_money(
(cons_period * pvpc_tcu.loc[cons_period.index]).sum()
),
energia_total=round_money(cons_period.sum()),
)
for i, (coef_tea, cons_period) in enumerate(
zip(
TERM_ENER_PEAJE_ACC_EUR_KWH_TEA[year][tipo_peaje.value],
split_in_tariff_periods(consumo, tipo_peaje),
)
)
]
billed_period = cls(
billed_days=billed_days, year=year, energy_periods=energy_periods,
)
# Término fijo por peaje de acceso
billed_period.termino_fijo_peaje_acceso = round_money(
potencia_contratada * billed_days * billed_period.coef_peaje_acceso_potencia
)
# Término fijo por comercialización
billed_period.termino_fijo_comercializacion = round_money(
potencia_contratada * billed_days * billed_period.coef_comercializacion
)
billed_period.termino_fijo_total = round_money(
billed_period.termino_fijo_peaje_acceso
+ billed_period.termino_fijo_comercializacion
)
return billed_period
@property
def total_year_days(self):
"""Total number of days in the billed period's year."""
return (datetime(self.year + 1, 1, 1) - datetime(self.year, 1, 1)).days
@property
def coef_peaje_acceso_potencia(self) -> float:
coef_y = TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA[self.year]
return round(coef_y / self.total_year_days, 6)
@property
def coef_comercializacion(self) -> float:
coef_y = MARGEN_COMERC_EUR_KW_YEAR_MCF[self.year]
return round(coef_y / self.total_year_days, 6)
@attr.s(auto_attribs=True)
class FacturaData(Base):
"""Dataclass to store information related to the billed period."""
config: FacturaConfig = attr.ib()
num_dias_factura: int = attr.ib()
start: datetime = attr.ib()
end: datetime = attr.ib()
periodos_fact: List[FacturaBilledPeriod] = attr.ib(factory=list)
descuento_bono_social: float = attr.ib(default=0.0)
termino_impuesto_electrico: float = attr.ib(default=0.0)
termino_equipo_medida: float = attr.ib(default=0.0)
termino_iva_gen: float = attr.ib(default=0.0)
termino_iva_medida: float = attr.ib(default=0.0)
termino_iva_total: float = attr.ib(default=0.0)
total: float = attr.ib(default=0.0)
def __attrs_post_init__(self):
"""Fill calculated terms of the bill when instantiating."""
self._calc_taxes_and_total()
def _calc_taxes_and_total(self):
"""
Añade los términos finales a la factura eléctrica.
- Aplica el bono social
- Calcula el impuesto eléctrico
- Calcula el coste del alquiler del equipo de medida
- Añade el IVA y obtiene el total
"""
subt_fijo_var = self.termino_fijo_total + self.termino_variable_total
# Cálculo de la bonificación (bono social):
if self.config.con_bono_social:
self.descuento_bono_social = round_money(-0.25 * round_money(subt_fijo_var))
subt_fijo_var += self.descuento_bono_social
# Cálculo del impuesto eléctrico:
self.termino_impuesto_electrico = round_money(
self.config.impuesto_electrico * subt_fijo_var
)
subt_fijo_var += self.termino_impuesto_electrico
# Cálculo del alquiler del equipo de medida:
frac_year = sum(p.billed_days / p.total_year_days for p in self.periodos_fact)
self.termino_equipo_medida = round_money(frac_year * self.config.alquiler_anual)
# Cálculo del IVA y TOTAL:
self.termino_iva_gen = round_money(
subt_fijo_var * self.config.zona_impuestos.tax_rate
)
self.termino_iva_medida = round_money(
self.termino_equipo_medida * self.config.zona_impuestos.measurement_tax_rate
)
self.termino_iva_total = round_money(
self.termino_iva_gen + self.termino_iva_medida
)
# TOTAL FACTURA:
subt_fijo_var += self.termino_equipo_medida + self.termino_iva_total
self.total = round_money(subt_fijo_var)
def iter_energy_periods(self) -> Iterator[EnergykWhTariffPeriod]:
"""Itera sobre cada periodo tarifario de cada periodo de facturación."""
for billed_period in self.periodos_fact:
for ener_period in billed_period.energy_periods:
yield ener_period
@property
def coste_total_peaje_acceso_tea(self) -> float:
return round_sum_money(
ener_p.coste_peaje_acceso_tea for ener_p in self.iter_energy_periods()
)
@property
def coste_total_energia_tcu(self) -> float:
return round_sum_money(
ener_p.coste_energia_tcu for ener_p in self.iter_energy_periods()
)
@property
def consumo_total(self) -> float:
"""Calcula la energía total facturada, sumando cada periodo de facturación."""
return sum(ener_p.energia_total for ener_p in self.iter_energy_periods())
@property
def termino_variable_total(self) -> float:
"""Calcula el coste por energía total, sumando cada periodo de facturación."""
return round_money(
self.coste_total_peaje_acceso_tea + self.coste_total_energia_tcu
)
@property
def termino_fijo_total(self) -> float:
"""Calcula el coste por potencia total, sumando cada periodo de facturación."""
return round_sum_money(
billed_period.termino_fijo_total for billed_period in self.periodos_fact
)
@property
def identifier(self) -> str:
"""Text identifier to be used as filename for exported data."""
str_ident = f"elecbill_data_{self.start:%Y_%m_%d}_to_{self.end:%Y_%m_%d}_"
str_ident += f"{self.config.tipo_peaje.value}_"
str_ident += f"{self.config.potencia_contratada:g}_".replace(".", "_")
str_ident += f"{self.config.zona_impuestos.value}"
if self.config.con_bono_social:
str_ident += f"_discount"
return str_ident
| [
"datetime.datetime",
"pvpcbill.official.split_in_tariff_periods",
"attr.s",
"pvpcbill.official.round_money",
"pvpcbill.official.round_sum_money",
"attr.ib"
] | [((717, 742), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (723, 742), False, 'import attr\n'), ((1323, 1348), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (1329, 1348), False, 'import attr\n'), ((1647, 1672), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (1653, 1672), False, 'import attr\n'), ((4529, 4554), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (4535, 4554), False, 'import attr\n'), ((874, 904), 'attr.ib', 'attr.ib', ([], {'default': 'TipoPeaje.GEN'}), '(default=TipoPeaje.GEN)\n', (881, 904), False, 'import attr\n'), ((938, 985), 'attr.ib', 'attr.ib', ([], {'default': 'DEFAULT_POTENCIA_CONTRATADA_KW'}), '(default=DEFAULT_POTENCIA_CONTRATADA_KW)\n', (945, 985), False, 'import attr\n'), ((1014, 1050), 'attr.ib', 'attr.ib', ([], {'default': 'DEFAULT_BONO_SOCIAL'}), '(default=DEFAULT_BONO_SOCIAL)\n', (1021, 1050), False, 'import attr\n'), ((1081, 1124), 'attr.ib', 'attr.ib', ([], {'default': 'TaxZone.PENINSULA_BALEARES'}), '(default=TaxZone.PENINSULA_BALEARES)\n', (1088, 1124), False, 'import attr\n'), ((1153, 1197), 'attr.ib', 'attr.ib', ([], {'default': 'DEFAULT_ALQUILER_CONT_ANUAL'}), '(default=DEFAULT_ALQUILER_CONT_ANUAL)\n', (1160, 1197), False, 'import attr\n'), ((1230, 1273), 'attr.ib', 'attr.ib', ([], {'default': 'DEFAULT_IMPUESTO_ELECTRICO'}), '(default=DEFAULT_IMPUESTO_ELECTRICO)\n', (1237, 1273), False, 'import attr\n'), ((1290, 1319), 'attr.ib', 'attr.ib', ([], {'default': 'DEFAULT_CUPS'}), '(default=DEFAULT_CUPS)\n', (1297, 1319), False, 'import attr\n'), ((1477, 1486), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1484, 1486), False, 'import attr\n'), ((1523, 1543), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1530, 1543), False, 'import attr\n'), ((1575, 1595), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1582, 1595), False, 'import attr\n'), ((1623, 1643), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1630, 1643), False, 'import attr\n'), ((1806, 1815), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1813, 1815), False, 'import attr\n'), ((1941, 1950), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1948, 1950), False, 'import attr\n'), ((2036, 2056), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (2043, 2056), False, 'import attr\n'), ((2100, 2120), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (2107, 2120), False, 'import attr\n'), ((2153, 2173), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (2160, 2173), False, 'import attr\n'), ((2224, 2245), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (2231, 2245), False, 'import attr\n'), ((4680, 4689), 'attr.ib', 'attr.ib', ([], {}), '()\n', (4687, 4689), False, 'import attr\n'), ((4718, 4727), 'attr.ib', 'attr.ib', ([], {}), '()\n', (4725, 4727), False, 'import attr\n'), ((4750, 4759), 'attr.ib', 'attr.ib', ([], {}), '()\n', (4757, 4759), False, 'import attr\n'), ((4780, 4789), 'attr.ib', 'attr.ib', ([], {}), '()\n', (4787, 4789), False, 'import attr\n'), ((4837, 4858), 'attr.ib', 'attr.ib', ([], {'factory': 'list'}), '(factory=list)\n', (4844, 4858), False, 'import attr\n'), ((4895, 4915), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (4902, 4915), False, 'import attr\n'), ((4956, 4976), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (4963, 4976), False, 'import attr\n'), ((5012, 5032), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (5019, 5032), False, 'import attr\n'), ((5062, 5082), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (5069, 5082), False, 'import attr\n'), ((5115, 5135), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (5122, 5135), False, 'import attr\n'), ((5167, 5187), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (5174, 5187), False, 'import attr\n'), ((5207, 5227), 'attr.ib', 'attr.ib', ([], {'default': '(0.0)'}), '(default=0.0)\n', (5214, 5227), False, 'import attr\n'), ((3455, 3549), 'pvpcbill.official.round_money', 'round_money', (['(potencia_contratada * billed_days * billed_period.coef_peaje_acceso_potencia)'], {}), '(potencia_contratada * billed_days * billed_period.\n coef_peaje_acceso_potencia)\n', (3466, 3549), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((3665, 3754), 'pvpcbill.official.round_money', 'round_money', (['(potencia_contratada * billed_days * billed_period.coef_comercializacion)'], {}), '(potencia_contratada * billed_days * billed_period.\n coef_comercializacion)\n', (3676, 3754), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((3815, 3918), 'pvpcbill.official.round_money', 'round_money', (['(billed_period.termino_fijo_peaje_acceso + billed_period.\n termino_fijo_comercializacion)'], {}), '(billed_period.termino_fijo_peaje_acceso + billed_period.\n termino_fijo_comercializacion)\n', (3826, 3918), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((6067, 6126), 'pvpcbill.official.round_money', 'round_money', (['(self.config.impuesto_electrico * subt_fijo_var)'], {}), '(self.config.impuesto_electrico * subt_fijo_var)\n', (6078, 6126), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((6384, 6435), 'pvpcbill.official.round_money', 'round_money', (['(frac_year * self.config.alquiler_anual)'], {}), '(frac_year * self.config.alquiler_anual)\n', (6395, 6435), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((6503, 6567), 'pvpcbill.official.round_money', 'round_money', (['(subt_fijo_var * self.config.zona_impuestos.tax_rate)'], {}), '(subt_fijo_var * self.config.zona_impuestos.tax_rate)\n', (6514, 6567), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((6624, 6718), 'pvpcbill.official.round_money', 'round_money', (['(self.termino_equipo_medida * self.config.zona_impuestos.measurement_tax_rate)'], {}), '(self.termino_equipo_medida * self.config.zona_impuestos.\n measurement_tax_rate)\n', (6635, 6718), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((6769, 6828), 'pvpcbill.official.round_money', 'round_money', (['(self.termino_iva_gen + self.termino_iva_medida)'], {}), '(self.termino_iva_gen + self.termino_iva_medida)\n', (6780, 6828), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((6975, 7001), 'pvpcbill.official.round_money', 'round_money', (['subt_fijo_var'], {}), '(subt_fijo_var)\n', (6986, 7001), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((8060, 8137), 'pvpcbill.official.round_money', 'round_money', (['(self.coste_total_peaje_acceso_tea + self.coste_total_energia_tcu)'], {}), '(self.coste_total_peaje_acceso_tea + self.coste_total_energia_tcu)\n', (8071, 8137), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((8321, 8415), 'pvpcbill.official.round_sum_money', 'round_sum_money', (['(billed_period.termino_fijo_total for billed_period in self.periodos_fact)'], {}), '(billed_period.termino_fijo_total for billed_period in self.\n periodos_fact)\n', (8336, 8415), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((4104, 4133), 'datetime.datetime', 'datetime', (['(self.year + 1)', '(1)', '(1)'], {}), '(self.year + 1, 1, 1)\n', (4112, 4133), False, 'from datetime import datetime\n'), ((4136, 4161), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (4144, 4161), False, 'from datetime import datetime\n'), ((5898, 5924), 'pvpcbill.official.round_money', 'round_money', (['subt_fijo_var'], {}), '(subt_fijo_var)\n', (5909, 5924), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n'), ((3154, 3198), 'pvpcbill.official.split_in_tariff_periods', 'split_in_tariff_periods', (['consumo', 'tipo_peaje'], {}), '(consumo, tipo_peaje)\n', (3177, 3198), False, 'from pvpcbill.official import MARGEN_COMERC_EUR_KW_YEAR_MCF, round_money, round_sum_money, split_in_tariff_periods, TaxZone, TERM_ENER_PEAJE_ACC_EUR_KWH_TEA, TERM_POT_PEAJE_ACC_EUR_KW_YEAR_TPA, TipoPeaje\n')] |
from django.contrib import admin
from . import models
@admin.register(models.Footsize)
class FootsizeAdmin(admin.ModelAdmin):
""" アドミンに足サイズテーブルを定義する """
pass
@admin.register(models.FootImage)
class FootImageAdmin(admin.ModelAdmin):
""" アドミンに足イメージテーブルを定義する """
pass
@admin.register(models.ProcessedFootImage)
class ProcessedFootImageAdmin(admin.ModelAdmin):
""" アドミンに処理済み足イメージテーブルを定義する """
pass
| [
"django.contrib.admin.register"
] | [((57, 88), 'django.contrib.admin.register', 'admin.register', (['models.Footsize'], {}), '(models.Footsize)\n', (71, 88), False, 'from django.contrib import admin\n'), ((173, 205), 'django.contrib.admin.register', 'admin.register', (['models.FootImage'], {}), '(models.FootImage)\n', (187, 205), False, 'from django.contrib import admin\n'), ((292, 333), 'django.contrib.admin.register', 'admin.register', (['models.ProcessedFootImage'], {}), '(models.ProcessedFootImage)\n', (306, 333), False, 'from django.contrib import admin\n')] |
"""
Continuously scroll randomly generated After Dark style toasters.
Designed for an ItsyBitsy M4 Express and a 1.3" 240x240 TFT
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by <NAME> for Adafruit Industries
Copyright (c) 2019 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
Requires CircuitPython 5.0 or later.
"""
import time
from random import seed, randint
import board
import displayio
from adafruit_st7789 import ST7789
import adafruit_imageload
# Sprite cell values
EMPTY = 0
CELL_1 = EMPTY + 1
CELL_2 = CELL_1 + 1
CELL_3 = CELL_2 + 1
CELL_4 = CELL_3 + 1
TOAST = CELL_4 + 1
NUMBER_OF_SPRITES = TOAST + 1
# Animation support
FIRST_CELL = CELL_1
LAST_CELL = CELL_4
NUMBER_OF_CELLS = (LAST_CELL - FIRST_CELL) + 1
# A boolean array corresponding to the sprites, True if it's part of the animation sequence.
ANIMATED = [_sprite >= FIRST_CELL and _sprite <= LAST_CELL for _sprite in range(NUMBER_OF_SPRITES)]
# The chance (out of 10) that toast will enter
CHANCE_OF_NEW_TOAST = 2
# How many sprites to styart with
INITIAL_NUMBER_OF_SPRITES = 4
# Global variables
display = None
tilegrid = None
seed(int(time.monotonic()))
def make_display():
"""Set up the display support.
Return the Display object.
"""
spi = board.SPI()
while not spi.try_lock():
pass
spi.configure(baudrate=24000000) # Configure SPI for 24MHz
spi.unlock()
displayio.release_displays()
display_bus = displayio.FourWire(spi, command=board.D7, chip_select=board.D10, reset=board.D9)
return ST7789(display_bus, width=240, height=240, rowstart=80, auto_refresh=True)
def make_tilegrid():
"""Construct and return the tilegrid."""
group = displayio.Group(max_size=10)
sprite_sheet, palette = adafruit_imageload.load("/spritesheet-2x.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
grid = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width=5, height=5,
tile_height=64, tile_width=64,
x=0, y=-64,
default_tile=EMPTY)
group.append(grid)
display.show(group)
return grid
def random_cell():
return randint(FIRST_CELL, LAST_CELL)
def evaluate_position(row, col):
"""Return whether how long of aa toaster is placable at the given location.
:param row: the tile row (0-9)
:param col: the tile column (0-9)
"""
return tilegrid[col, row] == EMPTY
def seed_toasters(number_of_toasters):
"""Create the initial toasters so it doesn't start empty"""
for _ in range(number_of_toasters):
while True:
row = randint(0, 4)
col = randint(0, 4)
if evaluate_position(row, col):
break
tilegrid[col, row] = random_cell()
def next_sprite(sprite):
if ANIMATED[sprite]:
return (((sprite - FIRST_CELL) + 1) % NUMBER_OF_CELLS) + FIRST_CELL
return sprite
def advance_animation():
"""Cycle through animation cells each time."""
for tile_number in range(25):
tilegrid[tile_number] = next_sprite(tilegrid[tile_number])
def slide_tiles():
"""Move the tilegrid one pixel to the bottom-left."""
tilegrid.x -= 1
tilegrid.y += 1
def shift_tiles():
"""Move tiles one spot to the left, and reset the tilegrid's position"""
for row in range(4, 0, -1):
for col in range(4):
tilegrid[col, row] = tilegrid[col + 1, row - 1]
tilegrid[4, row] = EMPTY
for col in range(5):
tilegrid[col, 0] = EMPTY
tilegrid.x = 0
tilegrid.y = -64
def get_entry_row():
while True:
row = randint(0, 4)
if tilegrid[4, row] == EMPTY and tilegrid[3, row] == EMPTY:
return row
def get_entry_column():
while True:
col = randint(0, 3)
if tilegrid[col, 0] == EMPTY and tilegrid[col, 1] == EMPTY:
return col
def add_toaster_or_toast():
"""Maybe add a new toaster or toast on the right and/or top at a randon open location"""
if randint(1, 10) <= CHANCE_OF_NEW_TOAST:
tile = TOAST
else:
tile = random_cell()
tilegrid[4, get_entry_row()] = tile
if randint(1, 10) <= CHANCE_OF_NEW_TOAST:
tile = TOAST
else:
tile = random_cell()
tilegrid[get_entry_column(), 0] = tile
display = make_display()
tilegrid = make_tilegrid()
seed_toasters(INITIAL_NUMBER_OF_SPRITES)
display.refresh()
while True:
for _ in range(64):
display.refresh(target_frames_per_second=80)
advance_animation()
slide_tiles()
shift_tiles()
add_toaster_or_toast()
display.refresh(target_frames_per_second=120)
| [
"adafruit_st7789.ST7789",
"displayio.release_displays",
"time.monotonic",
"displayio.Group",
"adafruit_imageload.load",
"displayio.FourWire",
"board.SPI",
"displayio.TileGrid",
"random.randint"
] | [((1424, 1435), 'board.SPI', 'board.SPI', ([], {}), '()\n', (1433, 1435), False, 'import board\n'), ((1563, 1591), 'displayio.release_displays', 'displayio.release_displays', ([], {}), '()\n', (1589, 1591), False, 'import displayio\n'), ((1610, 1695), 'displayio.FourWire', 'displayio.FourWire', (['spi'], {'command': 'board.D7', 'chip_select': 'board.D10', 'reset': 'board.D9'}), '(spi, command=board.D7, chip_select=board.D10, reset=board.D9\n )\n', (1628, 1695), False, 'import displayio\n'), ((1703, 1777), 'adafruit_st7789.ST7789', 'ST7789', (['display_bus'], {'width': '(240)', 'height': '(240)', 'rowstart': '(80)', 'auto_refresh': '(True)'}), '(display_bus, width=240, height=240, rowstart=80, auto_refresh=True)\n', (1709, 1777), False, 'from adafruit_st7789 import ST7789\n'), ((1857, 1885), 'displayio.Group', 'displayio.Group', ([], {'max_size': '(10)'}), '(max_size=10)\n', (1872, 1885), False, 'import displayio\n'), ((1915, 2017), 'adafruit_imageload.load', 'adafruit_imageload.load', (['"""/spritesheet-2x.bmp"""'], {'bitmap': 'displayio.Bitmap', 'palette': 'displayio.Palette'}), "('/spritesheet-2x.bmp', bitmap=displayio.Bitmap,\n palette=displayio.Palette)\n", (1938, 2017), False, 'import adafruit_imageload\n'), ((2129, 2269), 'displayio.TileGrid', 'displayio.TileGrid', (['sprite_sheet'], {'pixel_shader': 'palette', 'width': '(5)', 'height': '(5)', 'tile_height': '(64)', 'tile_width': '(64)', 'x': '(0)', 'y': '(-64)', 'default_tile': 'EMPTY'}), '(sprite_sheet, pixel_shader=palette, width=5, height=5,\n tile_height=64, tile_width=64, x=0, y=-64, default_tile=EMPTY)\n', (2147, 2269), False, 'import displayio\n'), ((2480, 2510), 'random.randint', 'randint', (['FIRST_CELL', 'LAST_CELL'], {}), '(FIRST_CELL, LAST_CELL)\n', (2487, 2510), False, 'from random import seed, randint\n'), ((1300, 1316), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1314, 1316), False, 'import time\n'), ((3924, 3937), 'random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (3931, 3937), False, 'from random import seed, randint\n'), ((4084, 4097), 'random.randint', 'randint', (['(0)', '(3)'], {}), '(0, 3)\n', (4091, 4097), False, 'from random import seed, randint\n'), ((4318, 4332), 'random.randint', 'randint', (['(1)', '(10)'], {}), '(1, 10)\n', (4325, 4332), False, 'from random import seed, randint\n'), ((4465, 4479), 'random.randint', 'randint', (['(1)', '(10)'], {}), '(1, 10)\n', (4472, 4479), False, 'from random import seed, randint\n'), ((2927, 2940), 'random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2934, 2940), False, 'from random import seed, randint\n'), ((2959, 2972), 'random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2966, 2972), False, 'from random import seed, randint\n')] |
import unittest
import update_logs
class UpdateLogsTest(unittest.TestCase):
def test_get_new_logs_with_more_next_logs(self):
self.assertEqual(
"56789",
update_logs.get_new_logs(prev_logs="01234", next_logs="0123456789"))
def test_get_new_logs_with_more_prev_logs(self):
self.assertEqual(
"",
update_logs.get_new_logs(prev_logs="0123456789", next_logs="01234"))
def test_get_new_logs_with_no_common_logs(self):
self.assertEqual(
"56789",
update_logs.get_new_logs(prev_logs="01234", next_logs="56789"))
def test_get_new_logs_with_no_prev_logs(self):
self.assertEqual(
"0123456789",
update_logs.get_new_logs(prev_logs="", next_logs="0123456789"))
def test_get_new_logs_with_no_next_logs(self):
self.assertEqual(
"", update_logs.get_new_logs(prev_logs="01234", next_logs=""))
| [
"update_logs.get_new_logs"
] | [((192, 259), 'update_logs.get_new_logs', 'update_logs.get_new_logs', ([], {'prev_logs': '"""01234"""', 'next_logs': '"""0123456789"""'}), "(prev_logs='01234', next_logs='0123456789')\n", (216, 259), False, 'import update_logs\n'), ((369, 436), 'update_logs.get_new_logs', 'update_logs.get_new_logs', ([], {'prev_logs': '"""0123456789"""', 'next_logs': '"""01234"""'}), "(prev_logs='0123456789', next_logs='01234')\n", (393, 436), False, 'import update_logs\n'), ((551, 613), 'update_logs.get_new_logs', 'update_logs.get_new_logs', ([], {'prev_logs': '"""01234"""', 'next_logs': '"""56789"""'}), "(prev_logs='01234', next_logs='56789')\n", (575, 613), False, 'import update_logs\n'), ((731, 793), 'update_logs.get_new_logs', 'update_logs.get_new_logs', ([], {'prev_logs': '""""""', 'next_logs': '"""0123456789"""'}), "(prev_logs='', next_logs='0123456789')\n", (755, 793), False, 'import update_logs\n'), ((889, 946), 'update_logs.get_new_logs', 'update_logs.get_new_logs', ([], {'prev_logs': '"""01234"""', 'next_logs': '""""""'}), "(prev_logs='01234', next_logs='')\n", (913, 946), False, 'import update_logs\n')] |
# coding=utf-8
from flask import Flask, render_template
import json
app = Flask(__name__)
@app.route('/data')
def data():
with open('./cache.json') as fd:
return json.load(fd)
@app.route('/')
def index():
_data = data()
return render_template('index.html', data=_data)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
| [
"flask.render_template",
"json.load",
"flask.Flask"
] | [((76, 91), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (81, 91), False, 'from flask import Flask, render_template\n'), ((252, 293), 'flask.render_template', 'render_template', (['"""index.html"""'], {'data': '_data'}), "('index.html', data=_data)\n", (267, 293), False, 'from flask import Flask, render_template\n'), ((177, 190), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (186, 190), False, 'import json\n')] |
#!/usr/bin/env python
# Copyright (c) 2011, <NAME>, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import Signal, Slot
from python_qt_binding.QtGui import QIcon
from python_qt_binding.QtWidgets import QAction
from .publisher_tree_model import PublisherTreeModel
from rqt_py_common.message_tree_widget import MessageTreeWidget
from rqt_py_common.item_delegates import SpinBoxDelegate
class PublisherTreeWidget(MessageTreeWidget):
remove_publisher = Signal(int)
publish_once = Signal(int)
def __init__(self, parent=None):
super(PublisherTreeWidget, self).__init__(parent)
self.setModel(PublisherTreeModel(self))
self._action_remove_publisher = QAction(QIcon.fromTheme('list-remove'), 'Remove Selected', self)
self._action_remove_publisher.triggered[bool].connect(self._handle_action_remove_publisher)
self._action_publish_once = QAction(QIcon.fromTheme('media-playback-start'), 'Publish Selected Once', self)
self._action_publish_once.triggered[bool].connect(self._handle_action_publish_once)
self.setItemDelegateForColumn(self.model()._column_index['rate'], SpinBoxDelegate(min_value=0, max_value=1000000, decimals=2))
@Slot()
def remove_selected_publishers(self):
publisher_ids = self.model().get_publisher_ids(self.selectedIndexes())
for publisher_id in publisher_ids:
self.remove_publisher.emit(publisher_id)
self.model().remove_items_with_parents(self.selectedIndexes())
def _context_menu_add_actions(self, menu, pos):
if self.selectionModel().hasSelection():
menu.addAction(self._action_remove_publisher)
menu.addAction(self._action_publish_once)
# let super class add actions
super(PublisherTreeWidget, self)._context_menu_add_actions(menu, pos)
def _handle_action_remove_publisher(self, checked):
self.remove_selected_publishers()
def _handle_action_publish_once(self, checked):
for publisher_id in self.model().get_publisher_ids(self.selectedIndexes()):
self.publish_once.emit(publisher_id)
| [
"python_qt_binding.QtCore.Signal",
"python_qt_binding.QtGui.QIcon.fromTheme",
"rqt_py_common.item_delegates.SpinBoxDelegate",
"python_qt_binding.QtCore.Slot"
] | [((1977, 1988), 'python_qt_binding.QtCore.Signal', 'Signal', (['int'], {}), '(int)\n', (1983, 1988), False, 'from python_qt_binding.QtCore import Signal, Slot\n'), ((2008, 2019), 'python_qt_binding.QtCore.Signal', 'Signal', (['int'], {}), '(int)\n', (2014, 2019), False, 'from python_qt_binding.QtCore import Signal, Slot\n'), ((2718, 2724), 'python_qt_binding.QtCore.Slot', 'Slot', ([], {}), '()\n', (2722, 2724), False, 'from python_qt_binding.QtCore import Signal, Slot\n'), ((2212, 2242), 'python_qt_binding.QtGui.QIcon.fromTheme', 'QIcon.fromTheme', (['"""list-remove"""'], {}), "('list-remove')\n", (2227, 2242), False, 'from python_qt_binding.QtGui import QIcon\n'), ((2413, 2452), 'python_qt_binding.QtGui.QIcon.fromTheme', 'QIcon.fromTheme', (['"""media-playback-start"""'], {}), "('media-playback-start')\n", (2428, 2452), False, 'from python_qt_binding.QtGui import QIcon\n'), ((2651, 2710), 'rqt_py_common.item_delegates.SpinBoxDelegate', 'SpinBoxDelegate', ([], {'min_value': '(0)', 'max_value': '(1000000)', 'decimals': '(2)'}), '(min_value=0, max_value=1000000, decimals=2)\n', (2666, 2710), False, 'from rqt_py_common.item_delegates import SpinBoxDelegate\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, os, yaml
from pyspark import SparkContext
from pyspark.sql import HiveContext
class TestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Load config file
fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
with open(fpath + '/conf/config.yml', 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
cls.cfg = cfg
sc = SparkContext().getOrCreate()
sc.setLogLevel('warn')
cls.hive_context = HiveContext(sc)
| [
"os.path.dirname",
"yaml.safe_load",
"pyspark.sql.HiveContext",
"pyspark.SparkContext"
] | [((1305, 1320), 'pyspark.sql.HiveContext', 'HiveContext', (['sc'], {}), '(sc)\n', (1316, 1320), False, 'from pyspark.sql import HiveContext\n'), ((1159, 1182), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (1173, 1182), False, 'import unittest, os, yaml\n'), ((1045, 1070), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1060, 1070), False, 'import unittest, os, yaml\n'), ((1218, 1232), 'pyspark.SparkContext', 'SparkContext', ([], {}), '()\n', (1230, 1232), False, 'from pyspark import SparkContext\n')] |
#! /usr/env/bin python
import os
import linecache
import numpy as np
from collections import OrderedDict
from CP2K_kit.tools import call
from CP2K_kit.tools import data_op
from CP2K_kit.tools import file_tools
from CP2K_kit.tools import read_input
from CP2K_kit.tools import traj_info
from CP2K_kit.deepff import load_data
from CP2K_kit.deepff import gen_lammps_task
def get_sys_num(exe_dir):
'''
get_sys_num: get the number of systems
Args:
exe_dir: string
exe_dir is the directory where shell script will be excuted.
Returns:
sys_num: int
sys_num is the number of systems.
'''
cmd = "ls | grep %s" % ('sys_')
sys_num = len(call.call_returns_shell(exe_dir, cmd))
return sys_num
def get_data_num(exe_dir):
'''
get_data_num: get the number of data
Args:
exe_dir: string
exe_dir is the directory where shell script will be excuted.
Returns:
data_num: int
data_num is the number of data.
'''
cmd = "ls | grep %s" % ('data_')
data_num = len(call.call_returns_shell(exe_dir, cmd))
return data_num
def get_task_num(exe_dir, get_task_dir=False):
'''
get_task_num: get the number of tasks in a system
Args:
exe_dir: string
exe_dir is the directory where shell script will be excuted.
Returns:
task_num: int
task_num is the number of tasks in a system.
'''
cmd = "ls | grep %s" % ('task_')
task_dir = call.call_returns_shell(exe_dir, cmd)
task_num = len(task_dir)
if get_task_dir:
return task_num, task_dir
else:
return task_num
def get_lmp_model_num(exe_dir):
'''
get_lmp_model_num: get the number of models in lammps directory.
Args:
exe_dir: string
exe_dir is the directory where shell script will be excuted.
Returns:
model_num: int
model_num is the number of models in lammps directory.
'''
cmd = "ls | grep %s" % ("'model_[0-9]'")
model_num = len(call.call_returns_shell(exe_dir, cmd))
return model_num
def get_deepmd_model_num(exe_dir):
'''
get_deepmd_model_num: get the number of models in deepmd directory.
Args:
exe_dir: string
exe_dir is the directory where shell script will be excuted.
Returns:
model_num: int
model_num is the number of models in deepmd directory.
'''
model_num = len(call.call_returns_shell(exe_dir, "ls -ll |awk '/^d/ {print $NF}'"))
return model_num
def get_traj_num(exe_dir):
'''
get_traj_num: get the number of frames
Args:
exe_dir: string
exe_dir is the directory where shell script will be excuted.
Returns:
traj_num: int
traj_num is the number of frames.
'''
cmd = "ls | grep %s" % ('traj_')
traj_num = len(call.call_returns_shell(exe_dir, cmd))
return traj_num
def dump_input(work_dir, inp_file, f_key):
'''
dump_input: dump deepff input file, it will call read_input module.
Args:
work_dir: string
work_dir is the working directory of CP2K_kit.
inp_file: string
inp_file is the deepff input file
f_key: 1-d string list
f_key is fixed to: ['deepmd', 'lammps', 'cp2k', 'model_devi', 'environ']
Returns :
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
lammps_dic: dictionary
lammpd_dic contains keywords used in lammps.
cp2k_dic: dictionary
cp2k_dic contains keywords used in cp2k.
model_devi_dic: dictionary
model_devi contains keywords used in model_devi.
environ_dic: dictionary
environ_dic contains keywords used in environment.
'''
job_type_param = read_input.dump_info(work_dir, inp_file, f_key)
deepmd_dic = job_type_param[0]
lammps_dic = job_type_param[1]
cp2k_dic = job_type_param[2]
active_learn_dic = job_type_param[3]
environ_dic = job_type_param[4]
return deepmd_dic, lammps_dic, cp2k_dic, active_learn_dic, environ_dic
def get_atoms_type(deepmd_dic):
'''
get_atoms_type: get atoms type for total systems
Args:
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
Returns:
final_atoms_type: list
final_atoms_type is the atoms type for all systems.
Example: ['O', 'H']
'''
import linecache
atoms_type = []
train_dic = deepmd_dic['training']
for key in train_dic:
if ( 'system' in key ):
traj_coord_file = train_dic[key]['traj_coord_file']
line_num = file_tools.grep_line_num("'PDB file'", traj_coord_file, os.getcwd())
if ( line_num == 0 ):
coord_file_type = 'coord_xyz'
else:
coord_file_type = 'coord_pdb'
atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_id, end_id, time_step = \
traj_info.get_traj_info(traj_coord_file, coord_file_type)
atoms = []
for i in range(atoms_num):
line_i = linecache.getline(traj_coord_file, pre_base+pre_base_block+i+1)
line_i_split = data_op.split_str(line_i, ' ', '\n')
if ( coord_file_type == 'coord_xyz' ):
atoms.append(line_i_split[0])
elif ( coord_file_type == 'coord_pdb' ):
atoms.append(line_i_split[len(line_i_split)-1])
linecache.clearcache()
atoms_type.append(data_op.list_replicate(atoms))
tot_atoms_type = data_op.list_reshape(atoms_type)
final_atoms_type = data_op.list_replicate(tot_atoms_type)
return final_atoms_type
def dump_init_data(work_dir, deepmd_dic, train_stress, tot_atoms_type_dic):
'''
dump_init_data: load initial training data.
Args:
work_dir: string
work_dir is working directory of CP2K_kit.
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
train_stress: bool
train_stress is whether we need to dump stress.
tot_atoms_type_dic: dictionary
tot_atoms_type_dic is the atoms type dictionary.
Returns:
init_train_data: 1-d string list
init_train_data contains initial training data directories.
init_data_num : int
init_data_num is the number of data for initial training.
'''
init_train_data_dir = ''.join((work_dir, '/init_train_data'))
if ( not os.path.exists(init_train_data_dir) ):
cmd = "mkdir %s" % ('init_train_data')
call.call_simple_shell(work_dir, cmd)
i = 0
init_train_data = []
init_data_num = 0
train_dic = deepmd_dic['training']
shuffle_data = train_dic['shuffle_data']
for key in train_dic:
if ( 'system' in key):
save_dir = ''.join((work_dir, '/init_train_data/data_', str(i)))
if ( not os.path.exists(save_dir) ):
cmd = "mkdir %s" % (save_dir)
call.call_simple_shell(work_dir, cmd)
init_train_data.append(save_dir)
cmd = "ls | grep %s" %("'set.'")
set_dir_name = call.call_returns_shell(save_dir, cmd)
choosed_num = train_dic[key]['choosed_frame_num']
data_num = []
if ( len(set_dir_name) > 0 ):
for set_dir in set_dir_name:
data_num_part = []
set_dir_abs = ''.join((save_dir, '/', set_dir))
coord_npy_file = ''.join((set_dir_abs, '/coord.npy'))
force_npy_file = ''.join((set_dir_abs, '/force.npy'))
box_npy_file = ''.join((set_dir_abs, '/box.npy'))
energy_npy_file = ''.join((set_dir_abs, '/energy.npy'))
if ( all(os.path.exists(npy_file) for npy_file in [coord_npy_file, force_npy_file, box_npy_file, energy_npy_file]) ):
for npy_file in [coord_npy_file, force_npy_file, box_npy_file, energy_npy_file]:
data_num_part.append(len(np.load(npy_file)))
else:
data_num_part = [0,0,0,0]
virial_npy_file = ''.join((set_dir_abs, '/virial.npy'))
if ( os.path.exists(virial_npy_file) ):
data_num_part.append(len(np.load(virial_npy_file)))
data_num.append(data_num_part)
else:
data_num = [[0,0,0,0]]
data_num = data_op.add_2d_list(data_num)
if ( all(j == choosed_num for j in data_num) ):
if ( len(set_dir_name) == 1 ):
init_data_num_part = choosed_num
else:
final_set_dir_abs = ''.join((save_dir, '/', set_dir_name[len(set_dir_name)-1]))
final_energy_npy_file = ''.join((final_set_dir_abs, '/energy.npy'))
init_data_num_part = choosed_num-len(np.load(final_energy_npy_file))
else:
traj_type = train_dic[key]['traj_type']
start = train_dic[key]['start_frame']
end = train_dic[key]['end_frame']
parts = train_dic[key]['set_parts']
if ( traj_type == 'md' ):
traj_coord_file = train_dic[key]['traj_coord_file']
traj_frc_file = train_dic[key]['traj_frc_file']
traj_cell_file = train_dic[key]['traj_cell_file']
traj_stress_file = train_dic[key]['traj_stress_file']
load_data.load_data_from_dir(traj_coord_file, traj_frc_file, traj_cell_file, traj_stress_file, \
train_stress, work_dir, save_dir, start, end, choosed_num, tot_atoms_type_dic)
elif ( traj_type == 'mtd' ):
data_dir = train_dic[key]['data_dir']
task_dir_prefix = train_dic[key]['task_dir_prefix']
proj_name = train_dic[key]['proj_name']
out_file_name = train_dic[key]['out_file_name']
choosed_index = data_op.gen_list(start, end, 1)
choosed_index_array = np.array(choosed_index)
np.random.shuffle(choosed_index_array)
choosed_index = list(choosed_index_array[0:choosed_num])
load_data.load_data_from_sepfile(data_dir, save_dir, task_dir_prefix, proj_name, tot_atoms_type_dic, \
sorted(choosed_index), out_file_name)
energy_array, coord_array, frc_array, box_array, virial_array = load_data.read_raw_data(save_dir)
init_data_num_part, init_test_data_num_part = load_data.raw_data_to_set(parts, shuffle_data, save_dir, energy_array, \
coord_array, frc_array, box_array, virial_array)
init_data_num = init_data_num+init_data_num_part
i = i+1
if ( 'set_data_dir' in train_dic.keys() ):
init_train_data.append(os.path.abspath(train_dic['set_data_dir']))
energy_npy_file = ''.join((os.path.abspath(train_dic['set_data_dir']), '/set.000/energy.npy'))
set_data_num = len(np.load(energy_npy_file))
init_data_num = init_data_num+set_data_num
return init_train_data, init_data_num
def check_deepff_run(work_dir, iter_id):
'''
check_deepff_run: check the running state of deepff
Args:
work_dir: string
work_dir is workding directory.
iter_id: int
iter_id is current iteration number.
Returns:
failure_model: 1-d int list
failure_model is the id of failure models.
'''
train_dir = ''.join((work_dir, '/iter_', str(iter_id), '/01.train'))
model_num = get_deepmd_model_num(train_dir)
failure_model = []
for i in range(model_num):
model_dir = ''.join((train_dir, '/', str(i)))
lcurve_file = ''.join((model_dir, '/lcurve.out'))
whole_line_num = len(open(lcurve_file).readlines())
choosed_line_num = int(0.1*whole_line_num)
start_line = whole_line_num-choosed_line_num
force_trn = []
for j in range(choosed_line_num):
line = linecache.getline(lcurve_file, start_line+j+1)
line_split = data_op.split_str(line, ' ')
if ( data_op.eval_str(line_split[0]) == 1 and len(line_split) >= 8 ):
force_trn.append(float(line_split[6]))
linecache.clearcache()
force_max = max(force_trn)
force_min = min(force_trn)
force_avg = np.mean(np.array(force_trn))
if ( ((force_max-force_min) >= 0.04 and force_max >= 0.08) or force_avg >= 0.08 ):
failure_model.append(i)
return failure_model
def get_md_sys_info(lmp_dic, tot_atoms_type_dic):
'''
get_md_sys_info: get the system information for lammps md.
Args:
lmp_dic: dictionary
lmp_dic contains parameters for lammps.
tot_atoms_type_dic: dictionary
tot_atoms_type_dic is the atoms type dictionary.
Returns:
sys_num: int
sys_num is the number of systems.
atoms_type_multi_sys: 2-d dictionary, dim = (num of lammps systems) * (num of atom types)
atoms_type_multi_sys is the atoms type for multi-systems.
example: {0:{'O':1,'H':2,'N':3},1:{'O':1,'S':2,'N':3}}
atoms_num_tot: dictionary
atoms_num_tot contains number of atoms for different systems.
Example: {0:3, 1:3}
use_mtd_tot: bool
use_mtd_tot is whethet using metadynamics for whole systems.
'''
atoms_type_multi_sys = []
atoms_num_tot = []
use_mtd_tot = []
sys_num = 0
for key in lmp_dic:
if 'system' in key:
sys_num = sys_num + 1
for i in range(sys_num):
sys = 'system' + str(i)
box_file_name = lmp_dic[sys]['box']
coord_file_name = lmp_dic[sys]['coord']
use_mtd = lmp_dic[sys]['use_mtd']
tri_cell_vec, atoms, x, y, z = gen_lammps_task.get_box_coord(box_file_name, coord_file_name)
atoms_type = data_op.list_replicate(atoms)
atoms_type_dic = OrderedDict()
for j in atoms_type:
if j in tot_atoms_type_dic.keys():
atoms_type_dic[j] = tot_atoms_type_dic[j]+1
else:
log_info.log_error('Input error: %s atom type in system %d is not trained, please check deepff/lammps/system' %(j, i))
exit()
atoms_type_multi_sys.append(atoms_type_dic)
atoms_num_tot.append(len(atoms))
use_mtd_tot.append(use_mtd)
return sys_num, atoms_type_multi_sys, atoms_num_tot, use_mtd_tot
| [
"CP2K_kit.deepff.gen_lammps_task.get_box_coord",
"CP2K_kit.tools.data_op.list_replicate",
"numpy.array",
"CP2K_kit.tools.data_op.gen_list",
"CP2K_kit.tools.data_op.add_2d_list",
"os.path.exists",
"CP2K_kit.deepff.load_data.load_data_from_dir",
"CP2K_kit.tools.data_op.eval_str",
"CP2K_kit.tools.data_... | [((1413, 1450), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (1436, 1450), False, 'from CP2K_kit.tools import call\n'), ((3556, 3603), 'CP2K_kit.tools.read_input.dump_info', 'read_input.dump_info', (['work_dir', 'inp_file', 'f_key'], {}), '(work_dir, inp_file, f_key)\n', (3576, 3603), False, 'from CP2K_kit.tools import read_input\n'), ((5204, 5236), 'CP2K_kit.tools.data_op.list_reshape', 'data_op.list_reshape', (['atoms_type'], {}), '(atoms_type)\n', (5224, 5236), False, 'from CP2K_kit.tools import data_op\n'), ((5258, 5296), 'CP2K_kit.tools.data_op.list_replicate', 'data_op.list_replicate', (['tot_atoms_type'], {}), '(tot_atoms_type)\n', (5280, 5296), False, 'from CP2K_kit.tools import data_op\n'), ((664, 701), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (687, 701), False, 'from CP2K_kit.tools import call\n'), ((1017, 1054), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (1040, 1054), False, 'from CP2K_kit.tools import call\n'), ((1918, 1955), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (1941, 1955), False, 'from CP2K_kit.tools import call\n'), ((2302, 2368), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', '"""ls -ll |awk \'/^d/ {print $NF}\'"""'], {}), '(exe_dir, "ls -ll |awk \'/^d/ {print $NF}\'")\n', (2325, 2368), False, 'from CP2K_kit.tools import call\n'), ((2690, 2727), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (2713, 2727), False, 'from CP2K_kit.tools import call\n'), ((6062, 6097), 'os.path.exists', 'os.path.exists', (['init_train_data_dir'], {}), '(init_train_data_dir)\n', (6076, 6097), False, 'import os\n'), ((6148, 6185), 'CP2K_kit.tools.call.call_simple_shell', 'call.call_simple_shell', (['work_dir', 'cmd'], {}), '(work_dir, cmd)\n', (6170, 6185), False, 'from CP2K_kit.tools import call\n'), ((11439, 11461), 'linecache.clearcache', 'linecache.clearcache', ([], {}), '()\n', (11459, 11461), False, 'import linecache\n'), ((12874, 12935), 'CP2K_kit.deepff.gen_lammps_task.get_box_coord', 'gen_lammps_task.get_box_coord', (['box_file_name', 'coord_file_name'], {}), '(box_file_name, coord_file_name)\n', (12903, 12935), False, 'from CP2K_kit.deepff import gen_lammps_task\n'), ((12953, 12982), 'CP2K_kit.tools.data_op.list_replicate', 'data_op.list_replicate', (['atoms'], {}), '(atoms)\n', (12975, 12982), False, 'from CP2K_kit.tools import data_op\n'), ((13004, 13017), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13015, 13017), False, 'from collections import OrderedDict\n'), ((4657, 4714), 'CP2K_kit.tools.traj_info.get_traj_info', 'traj_info.get_traj_info', (['traj_coord_file', 'coord_file_type'], {}), '(traj_coord_file, coord_file_type)\n', (4680, 4714), False, 'from CP2K_kit.tools import traj_info\n'), ((5106, 5128), 'linecache.clearcache', 'linecache.clearcache', ([], {}), '()\n', (5126, 5128), False, 'import linecache\n'), ((6666, 6704), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['save_dir', 'cmd'], {}), '(save_dir, cmd)\n', (6689, 6704), False, 'from CP2K_kit.tools import call\n'), ((7810, 7839), 'CP2K_kit.tools.data_op.add_2d_list', 'data_op.add_2d_list', (['data_num'], {}), '(data_num)\n', (7829, 7839), False, 'from CP2K_kit.tools import data_op\n'), ((10115, 10157), 'os.path.abspath', 'os.path.abspath', (["train_dic['set_data_dir']"], {}), "(train_dic['set_data_dir'])\n", (10130, 10157), False, 'import os\n'), ((10281, 10305), 'numpy.load', 'np.load', (['energy_npy_file'], {}), '(energy_npy_file)\n', (10288, 10305), True, 'import numpy as np\n'), ((11217, 11267), 'linecache.getline', 'linecache.getline', (['lcurve_file', '(start_line + j + 1)'], {}), '(lcurve_file, start_line + j + 1)\n', (11234, 11267), False, 'import linecache\n'), ((11283, 11311), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['line', '""" """'], {}), "(line, ' ')\n", (11300, 11311), False, 'from CP2K_kit.tools import data_op\n'), ((11548, 11567), 'numpy.array', 'np.array', (['force_trn'], {}), '(force_trn)\n', (11556, 11567), True, 'import numpy as np\n'), ((4413, 4424), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4422, 4424), False, 'import os\n'), ((4782, 4851), 'linecache.getline', 'linecache.getline', (['traj_coord_file', '(pre_base + pre_base_block + i + 1)'], {}), '(traj_coord_file, pre_base + pre_base_block + i + 1)\n', (4799, 4851), False, 'import linecache\n'), ((4869, 4905), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['line_i', '""" """', '"""\n"""'], {}), "(line_i, ' ', '\\n')\n", (4886, 4905), False, 'from CP2K_kit.tools import data_op\n'), ((5153, 5182), 'CP2K_kit.tools.data_op.list_replicate', 'data_op.list_replicate', (['atoms'], {}), '(atoms)\n', (5175, 5182), False, 'from CP2K_kit.tools import data_op\n'), ((6455, 6479), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (6469, 6479), False, 'import os\n'), ((6529, 6566), 'CP2K_kit.tools.call.call_simple_shell', 'call.call_simple_shell', (['work_dir', 'cmd'], {}), '(work_dir, cmd)\n', (6551, 6566), False, 'from CP2K_kit.tools import call\n'), ((9683, 9716), 'CP2K_kit.deepff.load_data.read_raw_data', 'load_data.read_raw_data', (['save_dir'], {}), '(save_dir)\n', (9706, 9716), False, 'from CP2K_kit.deepff import load_data\n'), ((9771, 9894), 'CP2K_kit.deepff.load_data.raw_data_to_set', 'load_data.raw_data_to_set', (['parts', 'shuffle_data', 'save_dir', 'energy_array', 'coord_array', 'frc_array', 'box_array', 'virial_array'], {}), '(parts, shuffle_data, save_dir, energy_array,\n coord_array, frc_array, box_array, virial_array)\n', (9796, 9894), False, 'from CP2K_kit.deepff import load_data\n'), ((10190, 10232), 'os.path.abspath', 'os.path.abspath', (["train_dic['set_data_dir']"], {}), "(train_dic['set_data_dir'])\n", (10205, 10232), False, 'import os\n'), ((7610, 7641), 'os.path.exists', 'os.path.exists', (['virial_npy_file'], {}), '(virial_npy_file)\n', (7624, 7641), False, 'import os\n'), ((8717, 8898), 'CP2K_kit.deepff.load_data.load_data_from_dir', 'load_data.load_data_from_dir', (['traj_coord_file', 'traj_frc_file', 'traj_cell_file', 'traj_stress_file', 'train_stress', 'work_dir', 'save_dir', 'start', 'end', 'choosed_num', 'tot_atoms_type_dic'], {}), '(traj_coord_file, traj_frc_file, traj_cell_file,\n traj_stress_file, train_stress, work_dir, save_dir, start, end,\n choosed_num, tot_atoms_type_dic)\n', (8745, 8898), False, 'from CP2K_kit.deepff import load_data\n'), ((11323, 11354), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['line_split[0]'], {}), '(line_split[0])\n', (11339, 11354), False, 'from CP2K_kit.tools import data_op\n'), ((9213, 9244), 'CP2K_kit.tools.data_op.gen_list', 'data_op.gen_list', (['start', 'end', '(1)'], {}), '(start, end, 1)\n', (9229, 9244), False, 'from CP2K_kit.tools import data_op\n'), ((9277, 9300), 'numpy.array', 'np.array', (['choosed_index'], {}), '(choosed_index)\n', (9285, 9300), True, 'import numpy as np\n'), ((9311, 9349), 'numpy.random.shuffle', 'np.random.shuffle', (['choosed_index_array'], {}), '(choosed_index_array)\n', (9328, 9349), True, 'import numpy as np\n'), ((7214, 7238), 'os.path.exists', 'os.path.exists', (['npy_file'], {}), '(npy_file)\n', (7228, 7238), False, 'import os\n'), ((8205, 8235), 'numpy.load', 'np.load', (['final_energy_npy_file'], {}), '(final_energy_npy_file)\n', (8212, 8235), True, 'import numpy as np\n'), ((7682, 7706), 'numpy.load', 'np.load', (['virial_npy_file'], {}), '(virial_npy_file)\n', (7689, 7706), True, 'import numpy as np\n'), ((7455, 7472), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (7462, 7472), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Author: <NAME> <<EMAIL>>
License: LGPL
Note: I've licensed this code as LGPL because it was a complete translation of the code found here...
https://github.com/mojocorp/QProgressIndicator
Adapted to spectrochempy_gui
"""
import sys
from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets
class QProgressIndicator(QtGui.QWidget):
m_angle = None
m_timerId = None
m_delay = None
m_displayedWhenStopped = None
m_color = None
def __init__(self, parent):
# Call parent class constructor first
super().__init__(parent)
# Initialize Qt Properties
self.setProperties()
# Intialize instance variables
self.m_angle = 0
self.m_timerId = -1
self.m_delay = 40
self.m_displayedWhenStopped = False
self.m_color = QtCore.Qt.black
# Set size and focus policy
self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
self.setFocusPolicy(QtCore.Qt.NoFocus)
# Show the widget
self.show()
def animationDelay(self):
return self.delay
def isAnimated(self):
return (self.m_timerId != -1)
def isDisplayedWhenStopped(self):
return self.displayedWhenStopped
def getColor(self):
return self.color
def sizeHint(self):
return QtCore.QSize(20, 20)
def startAnimation(self):
self.m_angle = 0
if self.m_timerId == -1:
self.m_timerId = self.startTimer(self.m_delay)
def stopAnimation(self):
if self.m_timerId != -1:
self.killTimer(self.m_timerId)
self.m_timerId = -1
self.update()
def setAnimationDelay(self, delay):
if self.m_timerId != -1:
self.killTimer(self.m_timerId)
self.m_delay = delay
if self.m_timerId != -1:
self.m_timerId = self.startTimer(self.m_delay)
def setDisplayedWhenStopped(self, state):
self.displayedWhenStopped = state
self.update()
def setColor(self, color):
self.m_color = color
self.update()
def timerEvent(self, event):
self.m_angle = (self.m_angle + 30) % 360
self.update()
def paintEvent(self, event):
if (not self.m_displayedWhenStopped) and (not self.isAnimated()):
return
width = min(self.width(), self.height())
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
outerRadius = (width - 1) * 0.5
innerRadius = (width - 1) * 0.5 * 0.38
capsuleHeight = outerRadius - innerRadius
capsuleWidth = capsuleHeight * .23 if (width > 32) else capsuleHeight * .35
capsuleRadius = capsuleWidth / 2
for i in range(0, 12):
color = QtGui.QColor(self.m_color)
if self.isAnimated():
color.setAlphaF(1.0 - (i / 12.0))
else:
color.setAlphaF(0.2)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(color)
painter.save()
painter.translate(self.rect().center())
painter.rotate(self.m_angle - (i * 30.0))
painter.drawRoundedRect(capsuleWidth * -0.5, (innerRadius + capsuleHeight) * -1, capsuleWidth,
capsuleHeight, capsuleRadius, capsuleRadius)
painter.restore()
def setProperties(self):
self.delay = QtCore.pyqtProperty(int, self.animationDelay, self.setAnimationDelay)
self.displayedWhenStopped = QtCore.pyqtProperty(bool, self.isDisplayedWhenStopped, self.setDisplayedWhenStopped)
self.color = QtCore.pyqtProperty(QtGui.QColor, self.getColor, self.setColor)
def TestProgressIndicator():
app = QtGui.QApplication(sys.argv)
progress = QProgressIndicator(None)
progress.setAnimationDelay(70)
progress.startAnimation()
# Execute the application
sys.exit(app.exec_())
if __name__ == "__main__":
TestProgressIndicator()
| [
"spectrochempy_gui.pyqtgraph.Qt.QtGui.QColor",
"spectrochempy_gui.pyqtgraph.Qt.QtCore.QSize",
"spectrochempy_gui.pyqtgraph.Qt.QtCore.pyqtProperty",
"spectrochempy_gui.pyqtgraph.Qt.QtGui.QApplication",
"spectrochempy_gui.pyqtgraph.Qt.QtGui.QPainter"
] | [((3809, 3837), 'spectrochempy_gui.pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3827, 3837), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((1384, 1404), 'spectrochempy_gui.pyqtgraph.Qt.QtCore.QSize', 'QtCore.QSize', (['(20)', '(20)'], {}), '(20, 20)\n', (1396, 1404), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((2446, 2466), 'spectrochempy_gui.pyqtgraph.Qt.QtGui.QPainter', 'QtGui.QPainter', (['self'], {}), '(self)\n', (2460, 2466), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((3492, 3561), 'spectrochempy_gui.pyqtgraph.Qt.QtCore.pyqtProperty', 'QtCore.pyqtProperty', (['int', 'self.animationDelay', 'self.setAnimationDelay'], {}), '(int, self.animationDelay, self.setAnimationDelay)\n', (3511, 3561), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((3598, 3687), 'spectrochempy_gui.pyqtgraph.Qt.QtCore.pyqtProperty', 'QtCore.pyqtProperty', (['bool', 'self.isDisplayedWhenStopped', 'self.setDisplayedWhenStopped'], {}), '(bool, self.isDisplayedWhenStopped, self.\n setDisplayedWhenStopped)\n', (3617, 3687), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((3704, 3767), 'spectrochempy_gui.pyqtgraph.Qt.QtCore.pyqtProperty', 'QtCore.pyqtProperty', (['QtGui.QColor', 'self.getColor', 'self.setColor'], {}), '(QtGui.QColor, self.getColor, self.setColor)\n', (3723, 3767), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n'), ((2842, 2868), 'spectrochempy_gui.pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['self.m_color'], {}), '(self.m_color)\n', (2854, 2868), False, 'from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n')] |
import random
from Lecture_6_Local_Search.Exercise4.Exercise import random_selction_methods
p_mutation = 0.2
num_of_generations = 30
def genetic_algorithm(population, fitness_fn, minimal_fitness):
for generation in range(num_of_generations):
print("Generation {}:".format(generation))
print_population(population, fitness_fn)
new_population = set()
for i in range(len(population)):
mother, father = random_selection(population, fitness_fn)
child = reproduce(mother, father)
if random.uniform(0, 1) < p_mutation:
child = mutate(child)
new_population.add(child)
# Add new population to population, use union to disregard
# duplicate individuals
population = population.union(new_population)
fittest_individual = get_fittest_individual(population, fitness_fn)
if minimal_fitness <= fitness_fn(fittest_individual):
break
print("Final generation {}:".format(generation))
print_population(population, fitness_fn)
return fittest_individual
def print_population(population, fitness_fn):
for individual in population:
fitness = fitness_fn(individual)
print("{} - fitness: {}".format(individual, fitness))
def reproduce(mother, father):
'''
Reproduce two individuals with single-point crossover
Return the child individual
'''
mother_list = list(mother)
father_list = list(father)
random_pointer = int(round(random.uniform(0, 2))) # random pointer
i = random_pointer
while i < len(mother_list):
mother_list[i], father_list[i] = father_list[i], mother_list[i] # swap
i += 1
child1 = tuple(father_list)
child2 = mother_list
return child1
def mutate(individual):
'''
Mutate an individual by randomly assigning one of its bits
Return the mutated individual
'''
mutation = [1 if random.random() > 0.3 else 0 for bit in
individual]
return tuple(mutation)
def random_selection(population, fitness_fn):
"""
Compute fitness of each in population according to fitness_fn and add up
the total. Then choose 2 from sequence based on percentage contribution to
total fitness of population
Return selected variable which holds two individuals that were chosen as
the mother and the father
"""
# Python sets are randomly ordered. Since we traverse the set twice, we
# want to do it in the same order. So let's convert it temporarily to a
# list.
ordered_population = list(population)
fitness_of_each = {}
sum_of_fitness = 0
for member in ordered_population:
fitness_of_each[member] = (fitness_fn(member))
sum_of_fitness += fitness_of_each[member]
percentage_of_each = {}
parents = []
for key, value in sorted(fitness_of_each.items(), key=lambda x: x[1], reverse=True):
percentage_of_each[
key] = value / sum_of_fitness * 100
seen = []
min_percent = 0
max_percent = sum(percentage_of_each.values())
while not len(parents) >= 2:
if len(percentage_of_each) == 2:
for key in percentage_of_each.keys():
parents.append(key)
return parents
else:
for key, value in percentage_of_each.items():
pick_parents = random.uniform(min_percent, max_percent)
if pick_parents >= value and not (len(parents) == 2) and key not in seen:
parents.append(key)
seen.append(key)
max_percent -= value
return parents
def fitness_function(individual):
'''
Computes the decimal value of the individual
Return the fitness level of the individual
Explanation:
enumerate(list) returns a list of pairs (position, element):
enumerate((4, 6, 2, 8)) -> [(0, 4), (1, 6), (2, 2), (3, 8)]
enumerate(reversed((1, 1, 0))) -> [(0, 0), (1, 1), (2, 1)]
'''
liste = list(individual)
fitness = int("".join(str(x) for x in liste), 2)
return fitness
def get_fittest_individual(iterable, func):
return max(iterable, key=func)
def get_initial_population(n, count):
'''
Randomly generate count individuals of length n
Note since its a set it disregards duplicate elements.
'''
return set([
tuple(random.randint(0, 1) for _ in range(n))
for _ in range(count)
])
def main():
minimal_fitness = 7
# Curly brackets also creates a set, if there isn't a colon to indicate a dictionary
initial_population = {
(1, 1, 0),
(0, 0, 0),
(0, 1, 0),
(1, 0, 0)
}
initial_population = get_initial_population(3, 4)
fittest = genetic_algorithm(initial_population, fitness_function, minimal_fitness)
print('Fittest Individual: ' + str(fittest) + ' Fitness: ' + str(fitness_function(fittest)))
if __name__ == '__main__':
main()
| [
"random.random",
"random.uniform",
"random.randint"
] | [((1526, 1546), 'random.uniform', 'random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (1540, 1546), False, 'import random\n'), ((555, 575), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (569, 575), False, 'import random\n'), ((1953, 1968), 'random.random', 'random.random', ([], {}), '()\n', (1966, 1968), False, 'import random\n'), ((3395, 3435), 'random.uniform', 'random.uniform', (['min_percent', 'max_percent'], {}), '(min_percent, max_percent)\n', (3409, 3435), False, 'import random\n'), ((4405, 4425), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4419, 4425), False, 'import random\n')] |
import time
import numpy as np
import utils.measurement_subs as measurement_subs
import utils.socket_subs as socket_subs
from .do_fridge_sweep import do_fridge_sweep
from .do_device_sweep import do_device_sweep
def device_fridge_2d(
graph_proc, rpg, data_file,
read_inst, sweep_inst=[], set_inst=[],
set_value=[], pre_value=[], finish_value=[],
fridge_sweep="B", fridge_set=0.0,
device_start=0.0, device_stop=1.0, device_step=0.1, device_finish=0.0,
device_mid=[],
fridge_start=0.0, fridge_stop=1.0, fridge_rate=0.1,
delay=0, sample=1,
timeout=-1, wait=0.0,
comment="No comment!", network_dir="Z:\\DATA",
persist=True, x_custom=[]
):
"""2D data acquisition either by sweeping a device parameter
or by sweepng a fridge parameter
The program decides which of these to do depending on if the
the variable "sweep_inst" is assigned.
i.e. if "sweep_inst" is assigned the device is swept and the
fridge parameter is stepped.
If the device is being swept the variable "fridge_rate" is the size
of successive steps of either T or B.
If the fridge is being swept the first set_inst is stepped by the
"device_step"
For the case of successive B sweeps the fridge will be swept
forwards and backwards
e.g. Vg = -60 V B = -9 --> +9 T
Vg = -50 V B = +9 --> -9 T
etc ...
Note that in this case the first "set_value" will be overwritten
therefore a dummy e.g. 0.0 should be written in the case that there
are additional set_inst
"""
if sweep_inst:
sweep_device = True
else:
sweep_device = False
if fridge_sweep == "B":
b_sweep = True
else:
b_sweep = False
if not finish_value:
finish_value = list(set_value)
# We step over the x variable and sweep over the y
if sweep_device:
x_vec = np.hstack((np.arange(fridge_start, fridge_stop, fridge_rate), fridge_stop))
y_start = device_start
y_stop = device_stop
y_step = device_step
else:
x_vec = np.hstack((np.arange(device_start, device_stop, device_step), device_stop))
y_start = fridge_start
y_stop = fridge_stop
y_step = fridge_rate
if not not x_custom:
x_vec = x_custom
if sweep_device:
y_len = len(measurement_subs.generate_device_sweep(
device_start, device_stop, device_step, mid=device_mid))
else:
y_len = abs(y_start - y_stop) / y_step + 1
num_of_inst = len(read_inst)
plot_2d_window = [None] * num_of_inst
view_box = [None] * num_of_inst
image_view = [None] * num_of_inst
z_array = [np.zeros((len(x_vec), y_len)) for i in range(num_of_inst)]
if sweep_device:
for i in range(num_of_inst):
plot_2d_window[i] = rpg.QtGui.QMainWindow()
plot_2d_window[i].resize(500, 500)
view_box[i] = rpg.ViewBox(invertY=True)
image_view[i] = rpg.ImageView(view=rpg.PlotItem(viewBox=view_box[i]))
plot_2d_window[i].setCentralWidget(image_view[i])
plot_2d_window[i].setWindowTitle("read_inst %d" % i)
plot_2d_window[i].show()
view_box[i].setAspectLocked(False)
y_scale = y_step
x_scale = (x_vec[-2] - x_vec[0]) / np.float(len(x_vec) - 1)
for j in range(num_of_inst):
image_view[j].setImage(z_array[j], scale=(x_scale, y_scale), pos=(x_vec[0], y_start))
for i, v in enumerate(x_vec):
if sweep_device:
# sweep the device and fix T or B
if b_sweep:
data_list = do_device_sweep(
graph_proc, rpg, data_file,
sweep_inst, read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value, b_set=v, persist=False,
sweep_start=device_start, sweep_stop=device_stop, sweep_step=device_step,
sweep_finish=device_finish, sweep_mid=device_mid,
delay=delay, sample=sample, t_set=fridge_set,
timeout=timeout, wait=wait, return_data=True, make_plot=False,
comment=comment, network_dir=network_dir
)
else:
data_list = do_device_sweep(
graph_proc, rpg, data_file,
sweep_inst, read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value, b_set=fridge_set, persist=True,
sweep_start=device_start, sweep_stop=device_stop, sweep_step=device_step,
sweep_mid=device_mid,
delay=delay, sample=sample, t_set=v,
timeout=timeout, wait=wait, return_data=True, make_plot=False,
comment=comment, network_dir=network_dir
)
else:
set_value[0] = v
if i == len(x_vec) - 1:
finish_value[0] = 0.0
else:
finish_value[0] = x_vec[i + 1]
# Fix the device and sweep T or B
if b_sweep:
data_list = do_fridge_sweep(
graph_proc, rpg, data_file,
read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value,
fridge_sweep="B", fridge_set=fridge_set,
sweep_start=fridge_start, sweep_stop=fridge_stop,
sweep_rate=fridge_rate, sweep_finish=fridge_stop,
persist=False,
delay=delay, sample=sample,
timeout=timeout, wait=wait,
return_data=True,
comment=comment, network_dir=network_dir)
tmp_sweep = [fridge_start, fridge_stop]
fridge_start = tmp_sweep[1]
fridge_stop = tmp_sweep[0]
else:
data_list = do_fridge_sweep(
graph_proc, rpg, data_file,
read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value,
fridge_sweep="T", fridge_set=fridge_set,
sweep_start=fridge_start, sweep_stop=fridge_stop,
sweep_rate=fridge_rate, sweep_finish=fridge_stop,
persist=True,
delay=delay, sample=sample,
timeout=timeout, wait=wait,
return_data=True,
comment=comment, network_dir=network_dir)
if sweep_device:
for j in range(num_of_inst):
z_array[j][i, :] = data_list[j + 1]
image_view[j].setImage(z_array[j], pos=(x_vec[0], y_start), scale=(x_scale, y_scale))
m_client = socket_subs.SockClient('localhost', 18861)
time.sleep(2)
measurement_subs.socket_write(m_client, "SET 0.0 0")
time.sleep(2)
m_client.close()
time.sleep(2)
return
| [
"numpy.arange",
"time.sleep",
"utils.socket_subs.SockClient",
"utils.measurement_subs.generate_device_sweep",
"utils.measurement_subs.socket_write"
] | [((7004, 7046), 'utils.socket_subs.SockClient', 'socket_subs.SockClient', (['"""localhost"""', '(18861)'], {}), "('localhost', 18861)\n", (7026, 7046), True, 'import utils.socket_subs as socket_subs\n'), ((7051, 7064), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7061, 7064), False, 'import time\n'), ((7069, 7121), 'utils.measurement_subs.socket_write', 'measurement_subs.socket_write', (['m_client', '"""SET 0.0 0"""'], {}), "(m_client, 'SET 0.0 0')\n", (7098, 7121), True, 'import utils.measurement_subs as measurement_subs\n'), ((7126, 7139), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7136, 7139), False, 'import time\n'), ((7166, 7179), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7176, 7179), False, 'import time\n'), ((1944, 1993), 'numpy.arange', 'np.arange', (['fridge_start', 'fridge_stop', 'fridge_rate'], {}), '(fridge_start, fridge_stop, fridge_rate)\n', (1953, 1993), True, 'import numpy as np\n'), ((2135, 2184), 'numpy.arange', 'np.arange', (['device_start', 'device_stop', 'device_step'], {}), '(device_start, device_stop, device_step)\n', (2144, 2184), True, 'import numpy as np\n'), ((2390, 2488), 'utils.measurement_subs.generate_device_sweep', 'measurement_subs.generate_device_sweep', (['device_start', 'device_stop', 'device_step'], {'mid': 'device_mid'}), '(device_start, device_stop,\n device_step, mid=device_mid)\n', (2428, 2488), True, 'import utils.measurement_subs as measurement_subs\n')] |
"""Module extensions for custom properties of HBPBaseModule."""
from backpack.core.derivatives.scale_module import ScaleModuleDerivatives
from backpack.core.derivatives.sum_module import SumModuleDerivatives
from backpack.extensions.secondorder.hbp.hbpbase import HBPBaseModule
class HBPScaleModule(HBPBaseModule):
"""HBP extension for ScaleModule."""
def __init__(self):
"""Initialization."""
super().__init__(derivatives=ScaleModuleDerivatives())
class HBPSumModule(HBPBaseModule):
"""HBP extension for SumModule."""
def __init__(self):
"""Initialization."""
super().__init__(derivatives=SumModuleDerivatives())
| [
"backpack.core.derivatives.sum_module.SumModuleDerivatives",
"backpack.core.derivatives.scale_module.ScaleModuleDerivatives"
] | [((450, 474), 'backpack.core.derivatives.scale_module.ScaleModuleDerivatives', 'ScaleModuleDerivatives', ([], {}), '()\n', (472, 474), False, 'from backpack.core.derivatives.scale_module import ScaleModuleDerivatives\n'), ((644, 666), 'backpack.core.derivatives.sum_module.SumModuleDerivatives', 'SumModuleDerivatives', ([], {}), '()\n', (664, 666), False, 'from backpack.core.derivatives.sum_module import SumModuleDerivatives\n')] |
import sirf.STIR as pet
from scipy.spatial.transform import Rotation as R
import numpy as np
def printGeoInfo(image : pet.ImageData) -> None:
"""Print geometrical data for image object.
Args:
image (pet.ImageData): Input image.
"""
print(image.get_geometrical_info().get_info())
def printAffineMatrixAsEulerAngles(affineMatrix : np.array, round = 4, seq = 'zxy', degrees = True) -> None:
"""prints an affine transformation matrix as euler angles. The affine matrix
can be 4x4 or 3x3.
Args:
affineMatrix (np.array): 4x4 or 3x3 affine transformation matrix
round (int, optional): Round to a certain number of decimals. Defaults to 4.
seq (str, optional): Output axis sequence. Defaults to 'zxy'.
degrees (bool, optional): Output degrees or radians. Defaults to True.
"""
if affineMatrix.shape == (4,4):
r = R.from_matrix(affineMatrix[:3, :3])
elif affineMatrix.shape == (3,3):
r = R.from_matrix(affineMatrix)
print(r.as_euler(seq=seq, degrees=degrees).round(round))
| [
"scipy.spatial.transform.Rotation.from_matrix"
] | [((894, 929), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['affineMatrix[:3, :3]'], {}), '(affineMatrix[:3, :3])\n', (907, 929), True, 'from scipy.spatial.transform import Rotation as R\n'), ((980, 1007), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['affineMatrix'], {}), '(affineMatrix)\n', (993, 1007), True, 'from scipy.spatial.transform import Rotation as R\n')] |
import re
import numpy as np
import sympy as sp
import random as rd
from functools import reduce
NORMAL_VECTOR_ID = 'hyperplane_normal_vector_%s_%i'
NUM_NORMAL_VECS_ID = 'num_normal_vectors_%s'
CHAMBER_ID = 'chamber_%s_%s'
FVECTOR_ID = 'feature_vector_%s'
FVEC_ID_EX = re.compile(r'feature_vector_([\S]*)')
class HyperplaneHasher():
def __init__(self, kvstore, name, normal_vectors=None):
"""'name' is a string used for cribbing names of things to be stored
in the KeyValueStore instance 'kvstore'. 'normal_vectors' is
either a list of 1-rankal numpy arrays, all of the same rank,
or else of type None. In the latter case, normal vectors are assumed to
exist in 'kvstore', and are named NORMAL_VECTOR_ID % ('name', i),
where i is an integer."""
self.kvstore = kvstore
self.name = name
if normal_vectors is None:
self.num_normal_vectors = kvstore.get_int(
NUM_NORMAL_VECS_ID % name)
self.normal_vectors = [kvstore.get_vector(NORMAL_VECTOR_ID % (name, i))
for i in range(self.num_normal_vectors)]
else:
self.normal_vectors = normal_vectors
self.num_normal_vectors = len(normal_vectors)
self.rank = len(self.normal_vectors[0])
def _compute_num_chambers(self):
"""Computes the number of chambers defined by the hyperplanes
corresponding to the normal vectors."""
d = self.rank
n = self.num_normal_vectors
raw_cfs = sp.binomial_coefficients_list(n)
cfs = np.array([(-1)**i * raw_cfs[i] for i in range(n + 1)])
powers = np.array([max(entry, 0)
for entry in [d - k for k in range(n + 1)]])
ys = np.array([-1] * len(powers))
return (-1)**d * sum(cfs * (ys**powers))
@classmethod
def _flip_digit(cls, binary_string, i):
"""Given a string 'binary_string' of length n, each letter of
which is either '0' or '1', and an integer 0 <= i <= n-1, returns
the binary_string in which the i-th letter is flipped."""
for letter in binary_string:
if letter not in ['0', '1']:
raise ValueError(
"""Input string contains characters other than '0' and '1'.""")
if i > len(binary_string) - 1 or i < 0:
raise ValueError(
"""Argument 'i' outside range 0 <= i <= len(binary_string) - 1.""")
else:
flip_dict = {'0': '1', '1': '0'}
letters = [letter for letter in binary_string]
letters[i] = flip_dict[binary_string[i]]
return ''.join(letters)
@classmethod
def _hamming_distance(cls, bstring_1, bstring_2):
"""Given two strings of equal length, composed of only 0s and 1s, computes the
Hamming Distance between them: the number of places at which they differ."""
for pair in zip(bstring_1, bstring_2):
if not set(pair).issubset(set(['0', '1'])):
raise ValueError(
"""Input strings contain characters other than '0' and '1'.""")
if len(bstring_1) != len(bstring_2):
raise ValueError("""Lengths of input strings disagree.""")
else:
total = 0
for i in range(len(bstring_1)):
if bstring_1[i] != bstring_2[i]:
total += 1
return total
def _hamming_distance_i(self, chamber_id, i):
"""Given a chamber_id 'chamber_id' and an integer 0 <= i <= self.rank - 1,
returns the alphabetically sorted list of all chamber_ids having Hamming Distance
equal to i from 'chamber_id'."""
for letter in chamber_id:
if letter not in ['0', '1']:
raise ValueError(
"""Input string contains characters other than '0' and '1'.""")
if i < 0 or i > self.num_normal_vectors - 1:
raise ValueError(
"""Argument 'i' outside range 0 <= i <= len(binary_string) - 1.""")
if len(chamber_id) != self.num_normal_vectors:
raise ValueError("""len(chamber_id) != self.num_normal_vectors.""")
else:
result = []
cids = self._all_binary_strings()
for cid in cids:
if self._hamming_distance(chamber_id, cid) == i:
result.append(cid)
return result
def _all_binary_strings(self):
"""Returns a list of all binary strings of length
self.num_normal_vectors."""
n = self.num_normal_vectors
strings = [np.binary_repr(i) for i in range(2**n)]
return ['0' * (n - len(entry)) + entry for entry in strings]
@classmethod
def _random_vectors(cls, num, rank):
"""This class method return a list of length 'num' or
vectors (numpy arrays) of rank 'rank'. Both arguments
are assumed to be positive integers."""
vec_list = [
np.array([rd.random() - 0.5 for i in range(rank)]) for j in range(num)]
return vec_list
def label_chamber(self, chamber_id, label):
"""Appends the string 'label' to the set with key
'chamber_id' in self.kvstore, if such exists. If not, then
a new singleton set {'label'} is created in self.kvstore
with key 'chamber_id'. The method is idempotent."""
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
full_label_id = FVECTOR_ID % label
self.kvstore.add_to_set(full_chamber_id, full_label_id)
def bulk_label_chamber(self, chamber_ids, labels):
"""The arguments 'chamber_ids' and 'labels' must be lists of strings
of equal length, else ValueError is raised. This method produces the same result
as calling self.label_chamber(ch_id, label) for all pairs (ch_id, label) in
chamber_ids x labels, but may be faster if self.kvstore is an instance of
class DynamoDBAdapter."""
chamber_ids = [CHAMBER_ID %
(self.name, chamber_id) for chamber_id in chamber_ids]
labels = [FVECTOR_ID % label for label in labels]
self.kvstore.bulk_add_to_set(chamber_ids, labels)
def unlabel_chamber(self, chamber_id, label):
"""Removes 'label' from the set corresponding to 'chamber_id'.
Raises KeyError if 'label' is not an element of the
corresponding set."""
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
full_label_id = FVECTOR_ID % label
self.kvstore.remove_from_set(full_chamber_id, full_label_id)
def chamber_labels(self, chamber_id):
"""Returns the set of labels corresponding
to key chamber_id. Returns empty set if
chamber_id is unknown."""
try:
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
result = set([FVEC_ID_EX.findall(entry)[0] for entry in self.kvstore.get_set(
full_chamber_id) if len(FVEC_ID_EX.findall(entry)) > 0])
return result
except KeyError:
return set()
def get_chamber_id(self, vector):
"""Returns the chamber_id of the chamber to which
vector belongs. Throws a ValueError if rank(vector) differs
from the ranks of the normal vectors. The binary digits
of the chamber_id for vectors are computed in the order
given by the output of the get_normal_vectors() method."""
if len(vector) != self.rank:
raise ValueError("""len(vector) != self.rank""")
else:
PMZO = {1: 1, -1: 0}
signs = [int(np.sign(np.dot(vector, nvec)))
for nvec in self.normal_vectors]
chamber_id = ''.join([str(PMZO[entry]) for entry in signs])
return chamber_id
def get_chamber_ids(self):
"""Returns the set of all chamber ids."""
chamber_id_prefix = 'chamber_%s' % self.name
chamber_id_ex = re.compile(r'%s_([\S]*)' % chamber_id_prefix)
chamber_ids = [''.join(chamber_id_ex.findall(entry))
for entry in self.kvstore.get_set_ids()]
return set([entry for entry in chamber_ids if len(entry) > 0])
def adjacent_chamber_ids(self, chamber_id):
"""Returns the set of ids of all chambers directly adjacent
to the chamber corresponding to 'chamber_id'."""
results = set([chamber_id])
for i in range(len(chamber_id)):
results.add(self._flip_digit(chamber_id, i))
results = sorted(results)
return results
def proximal_chamber_ids(self, chamber_id, num_labels):
"""This method returns the smallest list of chamber ids proximal to
the string 'chamber_id', such that the union of the corresponding chambers
contains at least 'num_labels' labels, assumed to be a positive integer.
The list is sorted by ascending distance.
NOTE: A set S of chambers is _proximal_ to a given chamber C if
(i) C is in S, and (ii) D in S implies all chambers nearer to
C than D are also in S. Here, the distance between two chambers
is given by the alphabetical distance of their ids."""
total = 0
pids = []
for i in range(self.num_normal_vectors):
if total >= num_labels:
break
hdi = self._hamming_distance_i(chamber_id, i)
for j in range(len(hdi)):
if total >= num_labels:
break
next_id = hdi[j]
total += len(self.chamber_labels(next_id))
pids.append(next_id)
if total >= num_labels:
break
return pids
def proximal_chamber_labels(self, chamber_id, num_labels):
"""Finds the smallest set of proximal chambers containing
at least 'num_labels' labels, assumed to be a positive integer,
and returns the set of all labels from this."""
pcids = self.proximal_chamber_ids(chamber_id, num_labels)
labels_list = [self.chamber_labels(cid) for cid in pcids]
labels = reduce(lambda x, y: x.union(y), labels_list)
return labels
def get_normal_vectors(self):
"""Returns the list of normal vectors."""
return self.normal_vectors
| [
"re.compile",
"numpy.binary_repr",
"numpy.dot",
"sympy.binomial_coefficients_list",
"random.random"
] | [((270, 307), 're.compile', 're.compile', (['"""feature_vector_([\\\\S]*)"""'], {}), "('feature_vector_([\\\\S]*)')\n", (280, 307), False, 'import re\n'), ((1551, 1583), 'sympy.binomial_coefficients_list', 'sp.binomial_coefficients_list', (['n'], {}), '(n)\n', (1580, 1583), True, 'import sympy as sp\n'), ((7996, 8041), 're.compile', 're.compile', (["('%s_([\\\\S]*)' % chamber_id_prefix)"], {}), "('%s_([\\\\S]*)' % chamber_id_prefix)\n", (8006, 8041), False, 'import re\n'), ((4648, 4665), 'numpy.binary_repr', 'np.binary_repr', (['i'], {}), '(i)\n', (4662, 4665), True, 'import numpy as np\n'), ((5031, 5042), 'random.random', 'rd.random', ([], {}), '()\n', (5040, 5042), True, 'import random as rd\n'), ((7658, 7678), 'numpy.dot', 'np.dot', (['vector', 'nvec'], {}), '(vector, nvec)\n', (7664, 7678), True, 'import numpy as np\n')] |
from Walkline import Walkline, WalklineButton, WalklineSwitch
from WalklineUtility import WifiHandler
from utime import sleep
from config import *
from machine import Pin
led = Pin(2, Pin.OUT, value=0)
relay = Pin(14, Pin.OUT, value=1)
def main():
Walkline.setup(UID, DEVICE_ID, DEVICE_KEY)
button = WalklineButton(13, button_pressed)
switch = WalklineSwitch(2, button_pressed)
Walkline.run()
def connect_to_internet():
result_code = WifiHandler.set_sta_mode(WIFI_SSID, WIFI_PASS)
WifiHandler.set_ap_status(False)
return result_code
def button_pressed(status=None):
if status is None:
led.value(not led.value())
relay.value(not led.value())
else:
if status == 1:
led.on()
relay.off()
elif status == 0:
led.off()
relay.on()
elif status == 2:
led.value(not led.value())
relay.value(not led.value())
else:
raise ValueError("Wrong status command received")
if __name__ == "__main__":
try:
if WifiHandler.STATION_CONNECTED == connect_to_internet():
# pass
main()
while True:
sleep(1)
except KeyboardInterrupt:
print("\nPRESS CTRL+D TO RESET DEVICE")
| [
"Walkline.Walkline.setup",
"Walkline.WalklineButton",
"utime.sleep",
"Walkline.WalklineSwitch",
"machine.Pin",
"Walkline.Walkline.run",
"WalklineUtility.WifiHandler.set_ap_status",
"WalklineUtility.WifiHandler.set_sta_mode"
] | [((178, 202), 'machine.Pin', 'Pin', (['(2)', 'Pin.OUT'], {'value': '(0)'}), '(2, Pin.OUT, value=0)\n', (181, 202), False, 'from machine import Pin\n'), ((211, 236), 'machine.Pin', 'Pin', (['(14)', 'Pin.OUT'], {'value': '(1)'}), '(14, Pin.OUT, value=1)\n', (214, 236), False, 'from machine import Pin\n'), ((252, 294), 'Walkline.Walkline.setup', 'Walkline.setup', (['UID', 'DEVICE_ID', 'DEVICE_KEY'], {}), '(UID, DEVICE_ID, DEVICE_KEY)\n', (266, 294), False, 'from Walkline import Walkline, WalklineButton, WalklineSwitch\n'), ((306, 340), 'Walkline.WalklineButton', 'WalklineButton', (['(13)', 'button_pressed'], {}), '(13, button_pressed)\n', (320, 340), False, 'from Walkline import Walkline, WalklineButton, WalklineSwitch\n'), ((351, 384), 'Walkline.WalklineSwitch', 'WalklineSwitch', (['(2)', 'button_pressed'], {}), '(2, button_pressed)\n', (365, 384), False, 'from Walkline import Walkline, WalklineButton, WalklineSwitch\n'), ((387, 401), 'Walkline.Walkline.run', 'Walkline.run', ([], {}), '()\n', (399, 401), False, 'from Walkline import Walkline, WalklineButton, WalklineSwitch\n'), ((446, 492), 'WalklineUtility.WifiHandler.set_sta_mode', 'WifiHandler.set_sta_mode', (['WIFI_SSID', 'WIFI_PASS'], {}), '(WIFI_SSID, WIFI_PASS)\n', (470, 492), False, 'from WalklineUtility import WifiHandler\n'), ((494, 526), 'WalklineUtility.WifiHandler.set_ap_status', 'WifiHandler.set_ap_status', (['(False)'], {}), '(False)\n', (519, 526), False, 'from WalklineUtility import WifiHandler\n'), ((1041, 1049), 'utime.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1046, 1049), False, 'from utime import sleep\n')] |
import os
import sys
from decimal import *
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.db.models import Sum
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext_lazy as _
from django.core.management import call_command
from smegurus import constants
from foundation_tenant.models.base.me import Me
from foundation_tenant.models.base.countryoption import CountryOption
from foundation_tenant.models.base.provinceoption import ProvinceOption
from foundation_tenant.models.base.cityoption import CityOption
from smegurus.settings import env_var
# NOTE:
# To run this from console, enter:
# python manage.py tenant_command populate_tenant
class Command(BaseCommand):
help = _('Populates various data for the tenant.')
def handle(self, *args, **options):
# The filename of all the objects to be imported.
ordered_file_names = [
'countryoption.json',
'provinceoption.json',
'cityoption.json',
'governmentbenefitoption.json',
'identifyoption.json',
'inforesourcecategory.json',
'documenttype.json',
'module_01.json',
'module_02.json',
'module_03.json',
'module_04.json',
'module_05.json',
'module_06.json',
'module_07.json',
'module_08.json',
'module_09.json',
'module_10.json',
'slide_01.json',
'slide_02.json',
'slide_03.json',
'slide_04.json',
'slide_05.json',
'slide_06.json',
'slide_07.json',
'slide_08.json',
'slide_09.json',
'slide_10.json',
'question_01.json',
'question_02.json',
'question_03.json',
'question_04.json',
'question_05.json',
'question_06.json',
'question_07.json',
'question_08.json',
'question_09.json',
'question_10.json',
'notification.json',
'inforesource.json'
]
# Iterate through all the filenames and load them into database.
for file_name in ordered_file_names:
call_command('loaddata', file_name, verbosity=0, interactive=False)
self.stdout.write(
self.style.SUCCESS(_('Successfully populated tenant.'))
)
| [
"django.utils.translation.ugettext_lazy",
"django.core.management.call_command"
] | [((822, 865), 'django.utils.translation.ugettext_lazy', '_', (['"""Populates various data for the tenant."""'], {}), "('Populates various data for the tenant.')\n", (823, 865), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2365, 2432), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', 'file_name'], {'verbosity': '(0)', 'interactive': '(False)'}), "('loaddata', file_name, verbosity=0, interactive=False)\n", (2377, 2432), False, 'from django.core.management import call_command\n'), ((2492, 2527), 'django.utils.translation.ugettext_lazy', '_', (['"""Successfully populated tenant."""'], {}), "('Successfully populated tenant.')\n", (2493, 2527), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
#!/usr/bin/env python3
import argparse
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pycassa.system_manager import *
from pycassa.cassandra.ttypes import NotFoundException
import json
import timeit
import time
import uuid
import datetime
import random
from esmond.config import get_config, get_config_path
from esmond.cassandra import CASSANDRA_DB, RawRateData, BaseRateBin
PERFSONAR_NAMESPACE = 'ps'
class CassandraTester:
def __init__(self, keyspace_name, savedb=True):
config = get_config(get_config_path())
if not savedb:
config.db_clear_on_testing = True
self.db = CASSANDRA_DB(config)
def generate_int_data(self, key_prefix, metatdata_key, num_rows, start_ts, end_ts, summary_type, time_int, min_val, max_val):
row_keys = []
# data = []
for n in range(num_rows):
if metatdata_key is None:
metatdata_key, = uuid.uuid4().hex
path = [ PERFSONAR_NAMESPACE, key_prefix, metatdata_key ]
if summary_type and summary_type != 'base':
path = path + [ summary_type, str(time_int) ]
row_keys.append(BaseRateBin(path=path, ts=1).get_meta_key().lower())
for ts in range(start_ts, end_ts, time_int):
br = BaseRateBin(path=path, ts=ts*1000, val=random.randint(min_val, max_val), is_valid=1)
# data.append({'path': path, 'ts':ts*1000, 'val':random.randint(min_val, max_val), 'is_valid':1})
self.db.update_rate_bin(br)
self.db.flush()
return row_keys
def generate_histogram_data(self, key_prefix, metatdata_key, num_rows, start_ts, end_ts, summary_type, summ_window, sample_size, bucket_min, bucket_max):
row_keys = []
data = []
for n in range(num_rows):
if metatdata_key is None:
metatdata_key, = uuid.uuid4().hex
path = [ PERFSONAR_NAMESPACE, key_prefix, metatdata_key ]
if summary_type and summary_type != 'base':
path = path + [ summary_type, str(summ_window) ]
row_keys.append(RawRateData(path=path, ts=1).get_meta_key().lower())
for ts in range(start_ts, end_ts, summ_window):
histogram = {}
sample = sample_size
while(sample > 0):
bucket = random.randint(bucket_min, bucket_max)
val = random.randint(1,sample)
if str(bucket) not in histogram:
histogram[str(bucket)] = val
else:
histogram[str(bucket)] += val
sample -= val
rr = RawRateData(path=path, ts=ts*1000, val=json.dumps(histogram))
# data.append({'path':path, 'ts':ts*1000, 'val':json.dumps(histogram)})
self.db.set_raw_data(rr)
self.db.flush()
return row_keys
def get_data(self, cf_name, key, start_time, end_time, output_json=False):
cf = ColumnFamily(self.pool, cf_name)
try:
result = cf.multiget(self.gen_key_range(key, start_time, end_time), column_start=start_time*1000, column_finish=end_time*1000, column_count=10000000)
if output_json:
self.dump_json(result)
except NotFoundException:
pass
def dump_json(self, db_result):
time_series = []
for row in list(db_result.keys()):
for ts in list(db_result[row].keys()):
time_series.append({'time': ts, 'value': db_result[row][ts]})
print(json.dumps(time_series))
def gen_key(self, key, ts):
year = datetime.datetime.utcfromtimestamp(ts).year
key = "%s:%d" % (key,year)
return key.lower();
def gen_key_range(self, key, start_time, end_time):
key_range = []
start_year = datetime.datetime.utcfromtimestamp(start_time).year
end_year = datetime.datetime.utcfromtimestamp(end_time).year
year_range = list(range(start_year, end_year+1))
for year in year_range:
key_range.append("%s:%d" % (key,year))
return key_range
#create option parser
parser = argparse.ArgumentParser(description="Generate test data and time queries in cassandra")
parser.add_argument("-d", "--datatype", dest="data_type", help="the type of data to generate", choices=['base_rate', 'rate_agg', 'traceroute', 'histogram' ], default='base_rate')
parser.add_argument("-k", "--keyspace", dest="ks_name", help="the keyspace to use for testing", default='ma_test')
parser.add_argument("-s", "--sample-size", dest="sample_size", type=int, help="for histogram data, the size of each histogram sample", default=600)
parser.add_argument("-t", "--time-range", dest="time_range", type=int, help="the time range for which to generate data (in seconds)", default=(86400*365))
parser.add_argument("-T", "--summary-type", dest="summ_type", help="the type of sumnmarization", choices=['base', 'aggregation', 'composite', 'statistics', 'subinterval' ], default='base')
parser.add_argument("-w", "--summary-window", dest="summ_window", type=int, help="the frequency with which to gernerate columns (in seconds)", default=0)
parser.add_argument("-m", "--metadata-key", dest="metadata_key", help="the metadata key to use when generating data. --num-rows must be 1 when using this option.", default=None)
parser.add_argument("--minval", dest="min_val", type=int, help="the minimum value to be stored. This is the bucket value for histograms and the stored value for the other column-families.", default=1)
parser.add_argument("--maxval", dest="max_val", type=int, help="the maximum value to be stored. This is the bucket value for histograms and the stored value for the other column-families", default=1000)
parser.add_argument("--keep-data", dest="keep_data", action="store_true", help="if present data will not be deleted before running test")
parser.add_argument("--key-prefix", dest="key_prefix", help="the prefix to append to the key", default="ps:test")
parser.add_argument("--num-rows", dest="num_rows", type=int, help="the number of rows to generate and then query", default=1)
args = parser.parse_args()
#create tester
tester = CassandraTester(args.ks_name, args.keep_data)
#set column-family to test
data_type = args.data_type
#check if metadata key specified
if((args.metadata_key is not None) and args.num_rows > 1):
raise Exception("--num-rows must be 1 when providing metadata key")
#generate data
end_time= int(time.time())
gen_interval = args.time_range
print("Generating %d seconds of data..." % gen_interval)
gen_timer = time.time()
row_keys = []
if data_type == 'base_rate':
row_keys = tester.generate_int_data(args.key_prefix, args.metadata_key, args.num_rows, (end_time - gen_interval), end_time, args.summ_type, args.summ_window, args.min_val, args.max_val)
elif data_type == 'histogram':
row_keys = tester.generate_histogram_data(args.key_prefix, args.metadata_key, args.num_rows, (end_time - gen_interval), end_time, args.summ_type, args.summ_window, args.sample_size, args.min_val, args.max_val)
else:
raise Exception("Invalid data type: %s" % data_type)
print("Data generated in %f seconds." % (time.time() - gen_timer))
print("")
| [
"datetime.datetime.utcfromtimestamp",
"esmond.config.get_config_path",
"pycassa.columnfamily.ColumnFamily",
"argparse.ArgumentParser",
"esmond.cassandra.RawRateData",
"json.dumps",
"uuid.uuid4",
"esmond.cassandra.CASSANDRA_DB",
"esmond.cassandra.BaseRateBin",
"time.time",
"random.randint"
] | [((4268, 4360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate test data and time queries in cassandra"""'}), "(description=\n 'Generate test data and time queries in cassandra')\n", (4291, 4360), False, 'import argparse\n'), ((6721, 6732), 'time.time', 'time.time', ([], {}), '()\n', (6730, 6732), False, 'import time\n'), ((6608, 6619), 'time.time', 'time.time', ([], {}), '()\n', (6617, 6619), False, 'import time\n'), ((660, 680), 'esmond.cassandra.CASSANDRA_DB', 'CASSANDRA_DB', (['config'], {}), '(config)\n', (672, 680), False, 'from esmond.cassandra import CASSANDRA_DB, RawRateData, BaseRateBin\n'), ((3082, 3114), 'pycassa.columnfamily.ColumnFamily', 'ColumnFamily', (['self.pool', 'cf_name'], {}), '(self.pool, cf_name)\n', (3094, 3114), False, 'from pycassa.columnfamily import ColumnFamily\n'), ((553, 570), 'esmond.config.get_config_path', 'get_config_path', ([], {}), '()\n', (568, 570), False, 'from esmond.config import get_config, get_config_path\n'), ((3660, 3683), 'json.dumps', 'json.dumps', (['time_series'], {}), '(time_series)\n', (3670, 3683), False, 'import json\n'), ((3737, 3775), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (3771, 3775), False, 'import datetime\n'), ((3949, 3995), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['start_time'], {}), '(start_time)\n', (3983, 3995), False, 'import datetime\n'), ((4020, 4064), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['end_time'], {}), '(end_time)\n', (4054, 4064), False, 'import datetime\n'), ((7315, 7326), 'time.time', 'time.time', ([], {}), '()\n', (7324, 7326), False, 'import time\n'), ((967, 979), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (977, 979), False, 'import uuid\n'), ((1934, 1946), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1944, 1946), False, 'import uuid\n'), ((2415, 2453), 'random.randint', 'random.randint', (['bucket_min', 'bucket_max'], {}), '(bucket_min, bucket_max)\n', (2429, 2453), False, 'import random\n'), ((2480, 2505), 'random.randint', 'random.randint', (['(1)', 'sample'], {}), '(1, sample)\n', (2494, 2505), False, 'import random\n'), ((1370, 1402), 'random.randint', 'random.randint', (['min_val', 'max_val'], {}), '(min_val, max_val)\n', (1384, 1402), False, 'import random\n'), ((2785, 2806), 'json.dumps', 'json.dumps', (['histogram'], {}), '(histogram)\n', (2795, 2806), False, 'import json\n'), ((1200, 1228), 'esmond.cassandra.BaseRateBin', 'BaseRateBin', ([], {'path': 'path', 'ts': '(1)'}), '(path=path, ts=1)\n', (1211, 1228), False, 'from esmond.cassandra import CASSANDRA_DB, RawRateData, BaseRateBin\n'), ((2170, 2198), 'esmond.cassandra.RawRateData', 'RawRateData', ([], {'path': 'path', 'ts': '(1)'}), '(path=path, ts=1)\n', (2181, 2198), False, 'from esmond.cassandra import CASSANDRA_DB, RawRateData, BaseRateBin\n')] |
"""
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from tests.helpers import login_user
@pytest.fixture(scope='package')
def orga_admin(make_admin):
permission_ids = {
'admin.access',
'orga_birthday.view',
'orga_detail.view',
'orga_team.administrate_memberships',
}
admin = make_admin('OrgaAdmin', permission_ids)
login_user(admin.id)
return admin
@pytest.fixture(scope='package')
def orga_admin_client(make_client, admin_app, orga_admin):
return make_client(admin_app, user_id=orga_admin.id)
| [
"pytest.fixture",
"tests.helpers.login_user"
] | [((148, 179), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""package"""'}), "(scope='package')\n", (162, 179), False, 'import pytest\n'), ((462, 493), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""package"""'}), "(scope='package')\n", (476, 493), False, 'import pytest\n'), ((421, 441), 'tests.helpers.login_user', 'login_user', (['admin.id'], {}), '(admin.id)\n', (431, 441), False, 'from tests.helpers import login_user\n')] |
import logging
from phonenumbers.phonenumberutil import region_code_for_country_code
logger = logging.getLogger('interakt')
def require(name, field, data_type):
"""Require that the named `field` has the right `data_type`"""
if not isinstance(field, data_type):
msg = '{0} must have {1}, got: {2}'.format(name, data_type, type(field))
raise AssertionError(msg)
def verify_country_code(country_code: str):
"""Verifies country code of the phone number"""
country_code = country_code.replace("+", "")
if not country_code.isdigit():
raise AssertionError(f"Invalid country_code {country_code}")
region_code = region_code_for_country_code(int(country_code))
if region_code == "ZZ":
raise AssertionError(f"Invalid country_code {country_code}")
def remove_trailing_slash(host):
if host.endswith('/'):
return host[:-1]
return host
def stringify(val):
if val is None:
return None
if isinstance(val, str):
return val
return str(val)
| [
"logging.getLogger"
] | [((95, 124), 'logging.getLogger', 'logging.getLogger', (['"""interakt"""'], {}), "('interakt')\n", (112, 124), False, 'import logging\n')] |
"""User profile resource view"""
from sqlalchemy.exc import IntegrityError
from flask_jwt_extended import decode_token, create_access_token
from sqlalchemy.orm import exc
from flask import jsonify, request, session, make_response
from flask_restful import Resource
from flask_api import status
from marshmallow import ValidationError
from flask_mail import Message
from app_config import DB
from app_config import API
from models.user import User
from serializers.user_schema import UserSchema, LoginSchema
from app_config import BCRYPT
from utils.user_utils import get_reset_token, verify_reset_token
from app_config import MAIL
USER_SCHEMA = UserSchema(exclude=['id', 'user_registration_data'])
JWT_TOKEN = 'jwt_token'
def send_email(user_email, token):
"""
Implementation of sending message on email
Args:
user_email:
token:
Returns:
status
"""
try:
msg = Message("Hello, you tried to reset password!", sender='<EMAIL>',
recipients=[user_email])
msg.body = f'''To reset your password just follow this link: {API.url_for(ResetPasswordRequestResource,
token=token, _external=True)}
If you haven`t tried to reset your password just ignore this message'''
MAIL.send(msg)
except RuntimeError:
return status.HTTP_400_BAD_REQUEST
return status.HTTP_200_OK
class ResetPasswordRequestResource(Resource):
"""Implementation of reset password request on mail"""
def post(self):
"""Post method for reset password"""
try:
data = request.json
user_email = data['user_email']
except ValidationError as error:
return make_response(jsonify(error.messages), status.HTTP_400_BAD_REQUEST)
try:
user = User.query.filter_by(user_email=user_email).scalar()
token = get_reset_token(user)
try:
send_email(user_email, token)
return status.HTTP_200_OK
except ValueError:
response_object = {
'Error': 'No user found'
}
return make_response(response_object, status.HTTP_401_UNAUTHORIZED)
except:
return status.HTTP_400_BAD_REQUEST
def put(self):
"""Put method for reset password"""
try:
token = request.args.get('token')
except TimeoutError:
return status.HTTP_504_GATEWAY_TIMEOUT
try:
user = verify_reset_token(token)
data = request.json
user_password = data['<PASSWORD>']
user_password_confirm = data['user_password_confirm']
except ValidationError as error:
return make_response(jsonify(error.messages), status.HTTP_400_BAD_REQUEST)
try:
if user_password == user_password_confirm:
try:
user.user_password = BCRYPT.generate_password_hash(user_password, 10).decode('utf-8')
DB.session.commit()
return status.HTTP_200_OK
except IntegrityError:
DB.session.rollback()
response_object = {
'Error': 'Database error'
}
return make_response(jsonify(response_object), status.HTTP_400_BAD_REQUEST)
else:
raise TypeError
except TypeError:
response_object = {
'Error': 'Passwords do not match'
}
return make_response(response_object, status.HTTP_400_BAD_REQUEST)
class ProfileResource(Resource):
"""Implementation profile methods for editing user data"""
def post(self):
"""Post method for creating an user"""
try:
new_user = USER_SCHEMA.load(request.json)
except ValidationError as error:
return make_response(jsonify(error.messages),
status.HTTP_400_BAD_REQUEST)
try:
is_exists = DB.session.query(User.id).filter_by(user_name=new_user.user_name).scalar() is not None
if not is_exists:
try:
new_user.user_password = BCRYPT.generate_password_hash(new_user.user_password, round(10)).decode(
'utf-8')
except ValidationError as error:
return make_response(jsonify(error.messages), status.HTTP_400_BAD_REQUEST)
else:
raise ValueError
except ValueError:
response_object = {
'Error': 'This user already exists'
}
return make_response(response_object, status.HTTP_409_CONFLICT)
try:
DB.session.add(new_user)
DB.session.commit()
session.permanent = True
access_token = create_access_token(identity=new_user.id, expires_delta=False)
session[JWT_TOKEN] = access_token
return status.HTTP_200_OK
except IntegrityError:
DB.session.rollback()
response_object = {
'Error': 'Database error'
}
return make_response(jsonify(response_object), status.HTTP_400_BAD_REQUEST)
def get(self):
"""Get method for returning user data"""
try:
access = session[JWT_TOKEN]
except KeyError:
response_object = {
'Error': 'You`re unauthorized'
}
return make_response(response_object, status.HTTP_401_UNAUTHORIZED)
try:
user_info = decode_token(access)
user_id = user_info['identity']
current_user = User.find_user(id=user_id)
if current_user is not None:
try:
user_to_response = USER_SCHEMA.dump(current_user)
return make_response(jsonify(user_to_response), status.HTTP_200_OK)
except ValidationError as error:
return make_response(jsonify(error.messages), status.HTTP_400_BAD_REQUEST)
else:
raise ValueError
except ValueError:
response_object = {
'Error': "This user doesn`t exists"
}
return make_response(response_object, status.HTTP_400_BAD_REQUEST)
def put(self):
"""Put metod for editing user data"""
try:
new_user = USER_SCHEMA.load(request.json)
except ValidationError as error:
return make_response(jsonify(error.messages), status.HTTP_400_BAD_REQUEST)
try:
access = session[JWT_TOKEN]
user_info = decode_token(access)
user_id = user_info['identity']
except KeyError:
response_object = {
'Error': 'Session has been expired'
}
return make_response(response_object, status.HTTP_401_UNAUTHORIZED)
try:
current_user = User.find_user(id=user_id)
if current_user is not None:
current_user.user_email = new_user.user_email
current_user.user_password = BCRYPT.generate_password_hash(new_user.user_password).decode(
'utf-8')
current_user.user_first_name = new_user.user_first_name
current_user.user_last_name = new_user.user_last_name
current_user.user_image_file = new_user.user_image_file
else:
raise ValueError
except ValueError:
response_object = {
'Error': 'This user doesn`t exists'
}
return make_response(response_object, status.HTTP_400_BAD_REQUEST)
try:
DB.session.commit()
return status.HTTP_200_OK
except IntegrityError:
DB.session.rollback()
response_object = {
'Error': 'Database error'
}
return make_response(response_object, status.HTTP_400_BAD_REQUEST)
def delete(self):
"""Delete method for deleting user account"""
try:
access = session[JWT_TOKEN]
except KeyError:
response_object = {
'Error': 'You`re unauthorized'
}
return make_response(response_object, status.HTTP_401_UNAUTHORIZED)
try:
user_info = decode_token(access)
user_id = user_info['identity']
current_user = User.find_user(id=user_id)
DB.session.delete(current_user)
except exc.UnmappedInstanceError:
response_object = {
'Error': 'This user doesn`t exists'
}
return make_response(response_object, status.HTTP_400_BAD_REQUEST)
try:
DB.session.commit()
session.clear()
return status.HTTP_200_OK
except IntegrityError:
response_object = {
'Error': 'Database error'
}
DB.session.rollback()
return make_response(response_object, status.HTTP_400_BAD_REQUEST)
API.add_resource(ProfileResource, '/profile')
API.add_resource(ResetPasswordRequestResource, '/reset-password') | [
"flask.request.args.get",
"models.user.User.query.filter_by",
"flask_mail.Message",
"flask_jwt_extended.decode_token",
"flask.session.clear",
"flask.jsonify",
"serializers.user_schema.UserSchema",
"app_config.DB.session.query",
"flask_jwt_extended.create_access_token",
"utils.user_utils.get_reset_... | [((645, 697), 'serializers.user_schema.UserSchema', 'UserSchema', ([], {'exclude': "['id', 'user_registration_data']"}), "(exclude=['id', 'user_registration_data'])\n", (655, 697), False, 'from serializers.user_schema import UserSchema, LoginSchema\n'), ((9186, 9231), 'app_config.API.add_resource', 'API.add_resource', (['ProfileResource', '"""/profile"""'], {}), "(ProfileResource, '/profile')\n", (9202, 9231), False, 'from app_config import API\n'), ((9232, 9297), 'app_config.API.add_resource', 'API.add_resource', (['ResetPasswordRequestResource', '"""/reset-password"""'], {}), "(ResetPasswordRequestResource, '/reset-password')\n", (9248, 9297), False, 'from app_config import API\n'), ((920, 1014), 'flask_mail.Message', 'Message', (['"""Hello, you tried to reset password!"""'], {'sender': '"""<EMAIL>"""', 'recipients': '[user_email]'}), "('Hello, you tried to reset password!', sender='<EMAIL>', recipients\n =[user_email])\n", (927, 1014), False, 'from flask_mail import Message\n'), ((1272, 1286), 'app_config.MAIL.send', 'MAIL.send', (['msg'], {}), '(msg)\n', (1281, 1286), False, 'from app_config import MAIL\n'), ((1879, 1900), 'utils.user_utils.get_reset_token', 'get_reset_token', (['user'], {}), '(user)\n', (1894, 1900), False, 'from utils.user_utils import get_reset_token, verify_reset_token\n'), ((2380, 2405), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (2396, 2405), False, 'from flask import jsonify, request, session, make_response\n'), ((2518, 2543), 'utils.user_utils.verify_reset_token', 'verify_reset_token', (['token'], {}), '(token)\n', (2536, 2543), False, 'from utils.user_utils import get_reset_token, verify_reset_token\n'), ((4794, 4818), 'app_config.DB.session.add', 'DB.session.add', (['new_user'], {}), '(new_user)\n', (4808, 4818), False, 'from app_config import DB\n'), ((4831, 4850), 'app_config.DB.session.commit', 'DB.session.commit', ([], {}), '()\n', (4848, 4850), False, 'from app_config import DB\n'), ((4915, 4977), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'new_user.id', 'expires_delta': '(False)'}), '(identity=new_user.id, expires_delta=False)\n', (4934, 4977), False, 'from flask_jwt_extended import decode_token, create_access_token\n'), ((5660, 5680), 'flask_jwt_extended.decode_token', 'decode_token', (['access'], {}), '(access)\n', (5672, 5680), False, 'from flask_jwt_extended import decode_token, create_access_token\n'), ((5752, 5778), 'models.user.User.find_user', 'User.find_user', ([], {'id': 'user_id'}), '(id=user_id)\n', (5766, 5778), False, 'from models.user import User\n'), ((6736, 6756), 'flask_jwt_extended.decode_token', 'decode_token', (['access'], {}), '(access)\n', (6748, 6756), False, 'from flask_jwt_extended import decode_token, create_access_token\n'), ((7044, 7070), 'models.user.User.find_user', 'User.find_user', ([], {'id': 'user_id'}), '(id=user_id)\n', (7058, 7070), False, 'from models.user import User\n'), ((7804, 7823), 'app_config.DB.session.commit', 'DB.session.commit', ([], {}), '()\n', (7821, 7823), False, 'from app_config import DB\n'), ((8459, 8479), 'flask_jwt_extended.decode_token', 'decode_token', (['access'], {}), '(access)\n', (8471, 8479), False, 'from flask_jwt_extended import decode_token, create_access_token\n'), ((8551, 8577), 'models.user.User.find_user', 'User.find_user', ([], {'id': 'user_id'}), '(id=user_id)\n', (8565, 8577), False, 'from models.user import User\n'), ((8590, 8621), 'app_config.DB.session.delete', 'DB.session.delete', (['current_user'], {}), '(current_user)\n', (8607, 8621), False, 'from app_config import DB\n'), ((8866, 8885), 'app_config.DB.session.commit', 'DB.session.commit', ([], {}), '()\n', (8883, 8885), False, 'from app_config import DB\n'), ((8898, 8913), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (8911, 8913), False, 'from flask import jsonify, request, session, make_response\n'), ((1102, 1172), 'app_config.API.url_for', 'API.url_for', (['ResetPasswordRequestResource'], {'token': 'token', '_external': '(True)'}), '(ResetPasswordRequestResource, token=token, _external=True)\n', (1113, 1172), False, 'from app_config import API\n'), ((3594, 3653), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_400_BAD_REQUEST'], {}), '(response_object, status.HTTP_400_BAD_REQUEST)\n', (3607, 3653), False, 'from flask import jsonify, request, session, make_response\n'), ((4712, 4768), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_409_CONFLICT'], {}), '(response_object, status.HTTP_409_CONFLICT)\n', (4725, 4768), False, 'from flask import jsonify, request, session, make_response\n'), ((5105, 5126), 'app_config.DB.session.rollback', 'DB.session.rollback', ([], {}), '()\n', (5124, 5126), False, 'from app_config import DB\n'), ((5562, 5622), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_401_UNAUTHORIZED'], {}), '(response_object, status.HTTP_401_UNAUTHORIZED)\n', (5575, 5622), False, 'from flask import jsonify, request, session, make_response\n'), ((6338, 6397), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_400_BAD_REQUEST'], {}), '(response_object, status.HTTP_400_BAD_REQUEST)\n', (6351, 6397), False, 'from flask import jsonify, request, session, make_response\n'), ((6943, 7003), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_401_UNAUTHORIZED'], {}), '(response_object, status.HTTP_401_UNAUTHORIZED)\n', (6956, 7003), False, 'from flask import jsonify, request, session, make_response\n'), ((7719, 7778), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_400_BAD_REQUEST'], {}), '(response_object, status.HTTP_400_BAD_REQUEST)\n', (7732, 7778), False, 'from flask import jsonify, request, session, make_response\n'), ((7905, 7926), 'app_config.DB.session.rollback', 'DB.session.rollback', ([], {}), '()\n', (7924, 7926), False, 'from app_config import DB\n'), ((8034, 8093), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_400_BAD_REQUEST'], {}), '(response_object, status.HTTP_400_BAD_REQUEST)\n', (8047, 8093), False, 'from flask import jsonify, request, session, make_response\n'), ((8361, 8421), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_401_UNAUTHORIZED'], {}), '(response_object, status.HTTP_401_UNAUTHORIZED)\n', (8374, 8421), False, 'from flask import jsonify, request, session, make_response\n'), ((8781, 8840), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_400_BAD_REQUEST'], {}), '(response_object, status.HTTP_400_BAD_REQUEST)\n', (8794, 8840), False, 'from flask import jsonify, request, session, make_response\n'), ((9083, 9104), 'app_config.DB.session.rollback', 'DB.session.rollback', ([], {}), '()\n', (9102, 9104), False, 'from app_config import DB\n'), ((9124, 9183), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_400_BAD_REQUEST'], {}), '(response_object, status.HTTP_400_BAD_REQUEST)\n', (9137, 9183), False, 'from flask import jsonify, request, session, make_response\n'), ((1720, 1743), 'flask.jsonify', 'jsonify', (['error.messages'], {}), '(error.messages)\n', (1727, 1743), False, 'from flask import jsonify, request, session, make_response\n'), ((1806, 1849), 'models.user.User.query.filter_by', 'User.query.filter_by', ([], {'user_email': 'user_email'}), '(user_email=user_email)\n', (1826, 1849), False, 'from models.user import User\n'), ((2159, 2219), 'flask.make_response', 'make_response', (['response_object', 'status.HTTP_401_UNAUTHORIZED'], {}), '(response_object, status.HTTP_401_UNAUTHORIZED)\n', (2172, 2219), False, 'from flask import jsonify, request, session, make_response\n'), ((2763, 2786), 'flask.jsonify', 'jsonify', (['error.messages'], {}), '(error.messages)\n', (2770, 2786), False, 'from flask import jsonify, request, session, make_response\n'), ((3032, 3051), 'app_config.DB.session.commit', 'DB.session.commit', ([], {}), '()\n', (3049, 3051), False, 'from app_config import DB\n'), ((3960, 3983), 'flask.jsonify', 'jsonify', (['error.messages'], {}), '(error.messages)\n', (3967, 3983), False, 'from flask import jsonify, request, session, make_response\n'), ((5248, 5272), 'flask.jsonify', 'jsonify', (['response_object'], {}), '(response_object)\n', (5255, 5272), False, 'from flask import jsonify, request, session, make_response\n'), ((6605, 6628), 'flask.jsonify', 'jsonify', (['error.messages'], {}), '(error.messages)\n', (6612, 6628), False, 'from flask import jsonify, request, session, make_response\n'), ((3157, 3178), 'app_config.DB.session.rollback', 'DB.session.rollback', ([], {}), '()\n', (3176, 3178), False, 'from app_config import DB\n'), ((5952, 5977), 'flask.jsonify', 'jsonify', (['user_to_response'], {}), '(user_to_response)\n', (5959, 5977), False, 'from flask import jsonify, request, session, make_response\n'), ((7219, 7272), 'app_config.BCRYPT.generate_password_hash', 'BCRYPT.generate_password_hash', (['new_user.user_password'], {}), '(new_user.user_password)\n', (7248, 7272), False, 'from app_config import BCRYPT\n'), ((2947, 2995), 'app_config.BCRYPT.generate_password_hash', 'BCRYPT.generate_password_hash', (['user_password', '(10)'], {}), '(user_password, 10)\n', (2976, 2995), False, 'from app_config import BCRYPT\n'), ((3332, 3356), 'flask.jsonify', 'jsonify', (['response_object'], {}), '(response_object)\n', (3339, 3356), False, 'from flask import jsonify, request, session, make_response\n'), ((4463, 4486), 'flask.jsonify', 'jsonify', (['error.messages'], {}), '(error.messages)\n', (4470, 4486), False, 'from flask import jsonify, request, session, make_response\n'), ((6089, 6112), 'flask.jsonify', 'jsonify', (['error.messages'], {}), '(error.messages)\n', (6096, 6112), False, 'from flask import jsonify, request, session, make_response\n'), ((4084, 4109), 'app_config.DB.session.query', 'DB.session.query', (['User.id'], {}), '(User.id)\n', (4100, 4109), False, 'from app_config import DB\n')] |
"""
Set of programs and tools to read the outputs from RH (Han's version)
"""
import os
import sys
import io
import xdrlib
import numpy as np
class Rhout:
"""
Reads outputs from RH.
Currently the reading the following output files is supported:
- input.out
- geometry.out
- atmos.out
- spectrum.out (no Stokes)
- spectrum_XX (no Stokes, from solveray)
- brs.out
- J.dat
- opacity.out (no Stokes)
These output files are NOT supported:
- Atom (atom, collrate, damping, pops, radrate)
- Flux
- metals
- molecule
Parameters
----------
fdir : str, optional
Directory with output files.
verbose : str, optional
If True, will print more details.
Notes
-----
In general, the way to read all the XDR files should be:
Modify read_xdr_file so that it returns only xdata.
Then, on each read_xxx, read the necessary header variables,
rewind (xdata.set_position(0)), then read the variables in order.
This allows the flexibility of derived datatypes, and appending to dictionary
(e.g. as in readatmos for all the elements and etc.). It also allows one to
read directly into attribute of the class (with setattr(self,'aa',<data>))
"""
def __init__(self, fdir='.', verbose=True):
''' Reads all the output data from a RH run.'''
self.verbose = verbose
self.fdir = fdir
self.read_input('{0}/input.out'.format(fdir))
self.read_geometry('{0}/geometry.out'.format(fdir))
self.read_atmosphere('{0}/atmos.out'.format(fdir))
self.read_spectrum('{0}/spectrum.out'.format(fdir))
if os.path.isfile('{0}/spectrum_1.00'.format(fdir)):
self.read_ray('{0}/spectrum_1.00'.format(fdir))
def read_input(self, infile='input.out'):
''' Reads RH input.out file. '''
data = read_xdr_file(infile)
self.input = {}
input_vars = [('magneto_optical', 'i'), ('PRD_angle_dep', 'i'),
('XRD', 'i'), ('start_solution', 'i'),
('stokes_mode', 'i'), ('metallicity', 'd'),
('backgr_pol', 'i'), ('big_endian', 'i')]
for v in input_vars:
self.input[v[0]] = read_xdr_var(data, v[1:])
close_xdr(data, infile, verbose=self.verbose)
def read_geometry(self, infile='geometry.out'):
''' Reads RH geometry.out file. '''
data = read_xdr_file(infile)
self.geometry = {}
geom_type = ['ONE_D_PLANE', 'TWO_D_PLANE',
'SPHERICAL_SYMMETRIC', 'THREE_D_PLANE']
type = read_xdr_var(data, ('i',))
if type not in list(range(4)):
raise ValueError('read_geometry: invalid geometry type {0} in {1}'.
format(type, infile))
nrays = read_xdr_var(data, ('i',))
self.nrays = nrays
self.geometry_type = geom_type[type]
# read some parameters and define structure to be read
if self.geometry_type == 'ONE_D_PLANE':
ndep = read_xdr_var(data, ('i',))
self.ndep = ndep
geom_vars = [('xmu', 'd', (nrays,)), ('wmu', 'd', (nrays,)),
('height', 'd', (ndep,)), ('cmass', 'd', (ndep,)),
('tau500', 'd', (ndep,)), ('vz', 'd', (ndep,))]
elif self.geometry_type == 'TWO_D_PLANE':
nx = read_xdr_var(data, ('i',))
nz = read_xdr_var(data, ('i',))
self.nx = nx
self.nz = nz
geom_vars = [('angleSet', 'i'), ('xmu', 'd', (nrays,)),
('ymu', 'd', (nrays,)), ('wmu', 'd', (nrays,)),
('x', 'd', (nx,)), ('z', 'd', (nz,)),
('vx', 'd', (nx, nz)), ('vz', 'd', (nx, nz))]
elif self.geometry_type == 'THREE_D_PLANE':
nx = read_xdr_var(data, ('i',))
ny = read_xdr_var(data, ('i',))
nz = read_xdr_var(data, ('i',))
self.nx = nx
self.ny = ny
self.nz = nz
geom_vars = [('angleSet', 'i'), ('xmu', 'd', (nrays,)),
('ymu', 'd', (nrays,)), ('wmu', 'd', (nrays,)),
('dx', 'd'), ('dy', 'd'),
('z', 'd', (nz,)), ('vx', 'd', (nx, ny, nz)),
('vy', 'd', (nx, ny, nz)), ('vz', 'd', (nx, ny, nz))]
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
nradius = read_xdr_var(data, ('i',))
ncore = read_xdr_var(data, ('i',))
self.nradius = nradius
self.ncore = ncore
geom_vars = [('radius', 'd'), ('xmu', 'd', (nrays,)),
('wmu', 'd', (nrays,)), ('r', 'd', (nradius,)),
('cmass', 'd', (nradius,)), ('tau500', 'd', (nradius,)),
('vr', 'd', (nradius,))]
# read data
for v in geom_vars:
self.geometry[v[0]] = read_xdr_var(data, v[1:])
close_xdr(data, infile, verbose=self.verbose)
def read_atmosphere(self, infile='atmos.out'):
''' Reads RH atmos.out file '''
if not hasattr(self, 'geometry'):
em = ('read_atmosphere: geometry data not loaded, '
'call read_geometry() first!')
raise ValueError(em)
data = read_xdr_file(infile)
self.atmos = {}
nhydr = read_xdr_var(data, ('i',))
nelem = read_xdr_var(data, ('i',))
self.atmos['nhydr'] = nhydr
self.atmos['nelem'] = nelem
# read some parameters and define structure to be read
if self.geometry_type == 'ONE_D_PLANE':
ndep = self.ndep
atmos_vars = [('moving', 'i'), ('T', 'd', (ndep,)),
('n_elec', 'd', (ndep,)), ('vturb', 'd', (ndep,)),
('nh', 'd', (ndep, nhydr)), ('id', 's')]
elif self.geometry_type == 'TWO_D_PLANE':
nx, nz = self.nx, self.nz
atmos_vars = [('moving', 'i'), ('T', 'd', (nx, nz)),
('n_elec', 'd', (nx, nz)), ('vturb', 'd', (nx, nz)),
('nh', 'd', (nx, nz, nhydr)), ('id', 's')]
elif self.geometry_type == 'THREE_D_PLANE':
nx, ny, nz = self.nx, self.ny, self.nz
atmos_vars = [('moving', 'i'), ('T', 'd', (nx, ny, nz)),
('n_elec', 'd', (nx, ny, nz)
), ('vturb', 'd', (nx, ny, nz)),
('nh', 'd', (nx, ny, nz, nhydr)), ('id', 's')]
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
nradius = self.nradius
atmos_vars = [('moving', 'i'), ('T', 'd', (nradius,)),
('n_elec', 'd', (nradius,)), ('vturb', 'd', (nradius,)),
('nh', 'd', (nradius, nhydr)), ('id', 's')]
# read data
for v in atmos_vars:
self.atmos[v[0]] = read_xdr_var(data, v[1:])
# read elements into nested dictionaries
self.elements = {}
for v in range(nelem):
el = read_xdr_var(data, ('s',)).strip()
weight = read_xdr_var(data, ('d',))
abund = read_xdr_var(data, ('d',))
self.elements[el] = {'weight': weight, 'abund': abund}
# read stokes data, if present
self.stokes = False
if self.geometry_type != 'SPHERICAL_SYMMETRIC':
try:
stokes = read_xdr_var(data, ('i',))
except EOFError or IOError:
if self.verbose:
print('(WWW) read_atmos: no Stokes data in atmos.out,'
' skipping.')
return
self.stokes = True
ss = self.atmos['T'].shape
stokes_vars = [('B', 'd', ss), ('gamma_B', 'd', ss),
('chi_B', 'd', ss)]
for v in stokes_vars:
self.atmos[v[0]] = read_xdr_var(data, v[1:])
close_xdr(data, infile, verbose=self.verbose)
def read_spectrum(self, infile='spectrum.out'):
''' Reads RH spectrum.out file '''
if not hasattr(self, 'geometry'):
em = ('read_spectrum: geometry data not loaded, '
'call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'atmos'):
em = ('read_spectrum: atmos data not loaded, '
'call read_atmos() first!')
raise ValueError(em)
data = read_xdr_file(infile)
profs = {}
self.spec = {}
nspect = read_xdr_var(data, ('i',))
self.spec['nspect'] = nspect
nrays = self.nrays
self.wave = read_xdr_var(data, ('d', (nspect,)))
if self.geometry_type == 'ONE_D_PLANE':
ishape = (nrays, nspect)
elif self.geometry_type == 'TWO_D_PLANE':
ishape = (self.nx, nrays, nspect)
elif self.geometry_type == 'THREE_D_PLANE':
ishape = (self.nx, self.ny, nrays, nspect)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
ishape = (nrays, nspect)
self.imu = read_xdr_var(data, ('d', ishape))
self.spec['vacuum_to_air'] = read_xdr_var(data, ('i',))
self.spec['air_limit'] = read_xdr_var(data, ('d',))
if self.stokes:
self.stokes_Q = read_xdr_var(data, ('d', ishape))
self.stokes_U = read_xdr_var(data, ('d', ishape))
self.stokes_V = read_xdr_var(data, ('d', ishape))
close_xdr(data, infile, verbose=self.verbose)
# read as_rn, if it exists
if os.path.isfile('asrs.out'):
data = read_xdr_file('asrs.out')
if self.atmos['moving'] or self.stokes or self.input['PRD_angle_dep']:
self.spec['as_rn'] = read_xdr_var(data, ('i', (nrays, nspect)))
else:
self.spec['as_rn'] = read_xdr_var(data, ('i', (nspect,)))
close_xdr(data, 'asrs.out', verbose=self.verbose)
def read_ray(self, infile='spectrum_1.00'):
''' Reads spectra for single ray files (e.g. mu=1). '''
if not hasattr(self, 'geometry'):
em = ('read_spectrum: geometry data not loaded,'
' call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('read_spectrum: spectral data not loaded, '
'call read_spectrum() first!')
raise ValueError(em)
data = read_xdr_file(infile)
nspect = self.spec['nspect']
self.ray = {}
if self.geometry_type == 'ONE_D_PLANE':
self.muz = read_xdr_var(data, ('d',))
ishape = (nspect,)
sshape = (self.ndep,)
elif self.geometry_type == 'TWO_D_PLANE':
self.mux = read_xdr_var(data, ('d',))
self.muz = read_xdr_var(data, ('d',))
ishape = (self.nx, nspect)
sshape = (self.nx, self.nz)
elif self.geometry_type == 'THREE_D_PLANE':
self.mux = read_xdr_var(data, ('d',))
self.muy = read_xdr_var(data, ('d',))
ishape = (self.nx, self.ny, nspect)
sshape = (self.nx, self.ny, self.nz)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
self.muz = read_xdr_var(data, ('d',))
ishape = (nspect,)
sshape = (self.nradius,)
# read intensity
self.int = read_xdr_var(data, ('d', ishape))
# read absorption and source function if written
ns = read_xdr_var(data, ('i',))
if ns > 0:
nshape = (ns,) + sshape
self.ray['chi'] = np.zeros(nshape, dtype='d')
self.ray['S'] = np.zeros(nshape, dtype='d')
self.ray['wave_idx'] = np.zeros(ns, dtype='l')
for i in range(ns):
self.ray['wave_idx'][i] = read_xdr_var(data, ('i',))
self.ray['chi'][i] = read_xdr_var(data, ('d', sshape))
self.ray['S'][i] = read_xdr_var(data, ('d', sshape))
if self.stokes:
self.ray_stokes_Q = read_xdr_var(data, ('d', ishape))
self.ray_stokes_U = read_xdr_var(data, ('d', ishape))
self.ray_stokes_V = read_xdr_var(data, ('d', ishape))
close_xdr(data, infile, verbose=self.verbose)
def read_brs(self, infile='brs.out'):
''' Reads the file with the background opacity record settings,
in the old (xdr) format. '''
if not hasattr(self, 'geometry'):
em = ('read_brs: geometry data not loaded, call read_geometry()'
' first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('read_brs: spectrum data not loaded, call read_spectrum()'
' first!')
raise ValueError(em)
data = read_xdr_file(infile)
atmosID = read_xdr_var(data, ('s',)).strip()
nspace = read_xdr_var(data, ('i',))
nspect = read_xdr_var(data, ('i',))
if nspect != self.spec['nspect']:
em = ('(EEE) read_brs: nspect in file different from atmos. '
'Aborting.')
raise ValueError(em)
self.brs = {}
if self.atmos['moving'] or self.stokes:
ishape = (2, self.nrays, nspect)
else:
ishape = (nspect,)
self.brs['hasline'] = read_xdr_var(
data, ('i', (nspect,))).astype('Bool')
self.brs['ispolarized'] = read_xdr_var(
data, ('i', (nspect,))).astype('Bool')
self.brs['backgrrecno'] = read_xdr_var(data, ('i', ishape))
close_xdr(data, infile, verbose=self.verbose)
def read_j(self, infile='J.dat'):
''' Reads the mean radiation field, for all wavelengths. '''
if not hasattr(self, 'geometry'):
em = 'read_j: geometry data not loaded, call read_geometry() first!'
raise ValueError(em)
if not hasattr(self, 'spec'):
em = 'read_j: spectrum data not loaded, call read_spec() first!'
raise ValueError(em)
data_file = open(infile, 'r')
nspect = self.spec['nspect']
if self.geometry_type == 'ONE_D_PLANE':
rec_len = self.ndep * 8
ishape = (nspect, self.ndep)
elif self.geometry_type == 'TWO_D_PLANE':
rec_len = (self.nx * self.nz) * 8
ishape = (nspect, self.nx, self.nz)
elif self.geometry_type == 'THREE_D_PLANE':
rec_len = (self.nx * self.ny * self.nz) * 8
ishape = (nspect, self.nx, self.ny, self.nz)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
rec_len = self.nradius * 8
ishape = (nspect, self.nradius)
self.J = np.zeros(ishape)
for i in range(nspect):
# point background file to position and read
data_file.seek(i * rec_len)
self.J[i] = read_file_var(data_file, ('d', ishape[1:]))
data_file.close()
def read_opacity(self, infile_line='opacity.out', infile_bg='background.dat',
imu=0):
''' Reads RH atmos.out file '''
if not hasattr(self, 'geometry'):
em = ('read_opacity: geometry data not loaded,'
' call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('read_opacity: spectrum data not loaded,'
' call read_spec() first!')
raise ValueError(em)
if not hasattr(self.atmos, 'brs'):
self.read_brs()
data_line = read_xdr_file(infile_line)
file_bg = open(infile_bg, 'r')
nspect = self.spec['nspect']
if self.geometry_type == 'ONE_D_PLANE':
as_rec_len = 2 * self.ndep * 8
bg_rec_len = self.ndep * 8
ishape = (nspect, self.ndep)
elif self.geometry_type == 'TWO_D_PLANE':
as_rec_len = 2 * (self.nx * self.nz) * 8
bg_rec_len = (self.nx * self.nz) * 8
ishape = (nspect, self.nx, self.nz)
elif self.geometry_type == 'THREE_D_PLANE':
as_rec_len = 2 * (self.nx * self.ny * self.nz) * 8
bg_rec_len = (self.nx * self.ny * self.nz) * 8
ishape = (nspect, self.nx, self.ny, self.nz)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
as_rec_len = 2 * self.nradius * 8
bg_rec_len = self.nradius * 8
ishape = (nspect, self.nradius)
# create arrays
chi_as = np.zeros(ishape)
eta_as = np.zeros(ishape)
chi_c = np.zeros(ishape)
eta_c = np.zeros(ishape)
scatt = np.zeros(ishape)
# NOTE: this will not work when a line is polarised.
# For those cases these arrays must be read per wavelength, and will
# have different sizes for different wavelengths.
if np.sum(self.brs['ispolarized']):
em = ('read_opacity: Polarized line(s) detected, cannot continue'
' with opacity extraction')
raise ValueError(em)
# get record numbers
if self.atmos['moving'] or self.stokes or self.input['PRD_angle_dep']:
as_index = self.spec['as_rn'][imu] * as_rec_len
bg_index = self.brs['backgrrecno'][1, imu] * bg_rec_len
else:
as_index = self.spec['as_rn'] * as_rec_len
bg_index = self.brs['backgrrecno'] * bg_rec_len
# Read arrays
for i in range(nspect):
if as_index[i] >= 0: # avoid non-active set lines
# point xdr buffer to position and read
data_line.set_position(as_index[i])
chi_as[i] = read_xdr_var(data_line, ('d', ishape[1:]))
eta_as[i] = read_xdr_var(data_line, ('d', ishape[1:]))
# point background file to position and read
file_bg.seek(bg_index[i])
chi_c[i] = read_file_var(file_bg, ('d', ishape[1:]))
eta_c[i] = read_file_var(file_bg, ('d', ishape[1:]))
scatt[i] = read_file_var(file_bg, ('d', ishape[1:]))
self.chi_as = chi_as
self.eta_as = eta_as
self.chi_c = chi_c
self.eta_c = eta_c
self.scatt = scatt
close_xdr(data_line, infile_line, verbose=False)
file_bg.close()
def get_contrib_imu(self, imu, type='total', op_file='opacity.out',
bg_file='background.dat', j_file='J.dat'):
''' Calculates the contribution function for intensity, for a
particular ray, defined by imu.
type can be: \'total\', \'line, or \'continuum\'
The units of self.contribi are J m^-2 s^-1 Hz^-1 sr^-1 km^-1
NOTE: This only calculates the contribution function for
the quadrature rays (ie, often not for disk-centre)
For rays calculated with solve ray, one must use
get_contrib_ray
'''
type = type.lower()
if not hasattr(self, 'geometry'):
em = ('get_contrib_imu: geometry data not loaded,'
' call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('get_contrib_imu: spectrum data not loaded,'
' call read_spec() first!')
raise ValueError(em)
self.read_opacity(infile_line=op_file, infile_bg=bg_file, imu=imu)
self.read_j(infile=j_file)
mu = self.geometry['xmu'][imu]
# Calculate optical depth
ab = (self.chi_c + self.chi_as)
self.tau = get_tau(self.geometry['height'], mu, ab)
# Calculate source function
if type == 'total':
self.S = (self.eta_as + self.eta_c + self.J * self.scatt) / ab
elif type == 'line':
self.S = self.eta_as / ab
elif type == 'continuum':
self.S = (self.eta_c + self.J * self.scatt) / ab
else:
raise ValueError('get_contrib_imu: invalid type!')
# Calculate contribution function
self.contribi = get_contrib(
self.geometry['height'], mu, self.tau, self.S)
return
def get_contrib_ray(self, inray='ray.input', rayfile='spectrum_1.00'):
''' Calculates the contribution function for intensity, for a
particular ray
The units of self.contrib are J m^-2 s^-1 Hz^-1 sr^-1 km^-1
'''
inray = self.fdir + '/' + inray
rayfile = self.fdir + '/' + rayfile
if not hasattr(self, 'ray'):
self.read_ray(infile=rayfile)
if 'wave_idx' not in list(self.ray.keys()):
em = ('get_contrib_ray: no chi/source function written to '
'ray file, aborting.')
raise ValueError(em)
# read mu from ray.input file
mu = np.loadtxt(inray, dtype='f')[0]
if not (0 <= mu <= 1.):
em = 'get_contrib_ray: invalid mu read: %f' % mu
raise ValueError(em)
idx = self.ray['wave_idx']
# Calculate optical depth
self.tau = get_tau(self.geometry['height'], mu, self.ray['chi'])
# Calculate contribution function
self.contrib = get_contrib(self.geometry['height'], mu, self.tau,
self.ray['S'])
return
class RhAtmos:
"""
Reads input atmosphere from RH. Currently only 2D format supported.
Parameters
----------
format : str, optional
Atmosphere format. Currently only '2D' (default) supported.
filename : str, optional
File to read.
verbose : str, optional
If True, will print more details.
"""
def __init__(self, format="2D", filename=None, verbose=True):
''' Reads RH input atmospheres. '''
self.verbose = verbose
if format.lower() == "2d":
if filename is not None:
self.read_atmos2d(filename)
else:
raise NotImplementedError("Format %s not yet supported" % format)
def read_atmos2d(self, filename):
"""
Reads input 2D atmosphere
"""
data = read_xdr_file(filename)
self.nx = read_xdr_var(data, ('i',))
self.nz = read_xdr_var(data, ('i',))
self.nhydr = read_xdr_var(data, ('i',))
self.hboundary = read_xdr_var(data, ('i',))
self.bvalue = read_xdr_var(data, ('i', (2, )))
nx, nz, nhydr = self.nx, self.nz, self.nhydr
atmos_vars = [('dx', 'd', (nx,)), ('z', 'd', (nz,)),
('T', 'd', (nx, nz)), ('ne', 'd', (nx, nz)),
('vturb', 'd', (nx, nz)), ('vx', 'd', (nx, nz)),
('vz', 'd', (nx, nz)), ('nh', 'd', (nx, nz, nhydr))
]
for v in atmos_vars:
setattr(self, v[0], read_xdr_var(data, v[1:]))
def write_atmos2d(self, filename, dx, z, T, ne, vturb, vx, vz, nh,
hboundary, bvalue):
nx, nz = T.shape
nhydr = nh.shape[-1]
assert T.shape == ne.shape
assert ne.shape == vturb.shape
assert vturb.shape == nh.shape[:-1]
assert dx.shape[0] == nx
assert z.shape[0] == nz
# Pack as double
p = xdrlib.Packer()
p.pack_int(nx)
p.pack_int(nz)
p.pack_int(nhydr)
p.pack_int(hboundary)
p.pack_int(bvalue[0])
p.pack_int(bvalue[1])
p.pack_farray(nx, dx.ravel().astype('d'), p.pack_double)
p.pack_farray(nz, z.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, T.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, ne.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, vturb.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, vx.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, vz.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz * nhydr, nh.T.ravel().astype('d'), p.pack_double)
# Write to file
f = open(filename, 'wb')
f.write(p.get_buffer())
f.close()
#############################################################################
# TOOLS
#############################################################################
class EmptyData:
def __init__(self):
pass
def read_xdr_file(filename): # ,var,cl=None,verbose=False):
"""
Reads data from XDR file.
Because of the way xdrlib works, this reads the whole file to
memory at once. Avoid with very large files.
Parameters
----------
filename : string
File to read.
Returns
-------
result : xdrlib.Unpacker object
"""
try:
f = io.open(filename, 'rb')
data = f.read()
f.close()
except IOError as e:
raise IOError(
'read_xdr_file: problem reading {0}: {1}'.format(filename, e))
# return XDR data
return xdrlib.Unpacker(data)
def close_xdr(buf, ofile='', verbose=False):
"""
Closes the xdrlib.Unpacker object, gives warning if not all data read.
Parameters
----------
buf : xdrlib.Unpacker object
data object.
ofile : string, optional
Original file from which data was read.
verbose : bool, optional
Whether to print warning or not.
"""
try:
buf.done()
except: # .done() will raise error if data remaining
if verbose:
print(('(WWW) close_xdr: {0} not all data read!'.format(ofile)))
def read_xdr_var(buf, var):
"""
Reads a single variable/array from a xdrlib.Unpack buffer.
Parameters
----------
buf: xdrlib.Unpack object
Data buffer.
var: tuple with (type[,shape]), where type is 'f', 'd', 'i', 'ui',
or 's'. Shape is optional, and if true is shape of array.
Type and shape of variable to read
Returns
-------
out : int/float or array
Resulting variable.
"""
assert len(var) > 0
if var[0] not in ['f', 'd', 'i', 'ui', 's']:
raise ValueError('read_xdr_var: data type'
' {0} not currently supported'.format(var[0]))
fdict = {'f': buf.unpack_float,
'd': buf.unpack_double,
'i': buf.unpack_int,
'ui': buf.unpack_uint,
's': buf.unpack_string}
func = fdict[var[0]]
# Single or array?
if len(var) == 1:
# this is because RH seems to write the size of the string twice
if var[0] == 's':
buf.unpack_int()
out = func()
else:
nitems = np.prod(var[1])
out = np.array(buf.unpack_farray(nitems, func)).reshape(var[1][::-1])
# invert order of indices, to match IDL's
out = np.transpose(out, list(range(len(var[1])))[::-1])
return out
def read_file_var(buf, var):
''' Reads a single variable/array from a file buffer.
IN:
buf: open file object
var: tuple with (type[,shape]), where type is 'f', 'd', 'i', 'ui',
or 's'. Shape is optional, and if true is shape of array.
OUT:
variable/array
'''
assert len(var) > 0
if len(var) == 1:
out = np.fromfile(buf, dtype=var, count=1)
elif len(var) == 2:
out = np.fromfile(buf, dtype=var[0], count=var[1][0])
else:
nitems = np.prod(var[1])
out = np.array(np.fromfile(buf, dtype=var[0], count=nitems)).\
reshape(var[1][::-1])
out = np.transpose(out, list(range(len(var[1])))[::-1])
return out
def get_tau(x, mu, chi):
''' Calculates the optical depth, given x (height), mu (cos[theta]) and
chi, absorption coefficient. Chi can be n-dimensional, as long as
last index is depth.
'''
# With scipy, this could be done in one line with
# scipy.integrate.quadrature.cumtrapz, but we are avoiding scipy to keep
# these tools more independent
if len(x) != chi.shape[-1]:
raise ValueError('get_tau: x and chi have different sizes!')
path = x / mu
npts = len(x)
# bring depth to first index, to allow n-d algebra
chi_t = np.transpose(chi)
tau = np.zeros(chi_t.shape)
for i in range(1, npts):
tau[i] = tau[i - 1] + 0.5 * \
(chi_t[i - 1] + chi_t[i]) * (path[i - 1] - path[i])
return tau.T
def get_contrib(z, mu, tau_in, S):
''' Calculates contribution function using x, mu, tau, and the source
function. '''
# Tau truncated at 100 (large enough to be useless)
tau = tau_in.copy()
tau[tau_in > 100.] = 100.
# Calculate dtau (transpose to keep n-D generic form), and dx
dtau = np.zeros(tau_in.shape[::-1])
tt = np.transpose(tau_in)
dtau[1:] = tt[1:] - tt[:-1]
dtau = np.transpose(dtau)
dx = np.zeros(z.shape)
dx[1:] = (z[1:] - z[:-1]) / mu
dx[0] = dx[1]
# Calculate contribution function
contrib = S * np.exp(-tau) * (- dtau / dx) / mu
# convert from m^-1 to km^-1, units are now: J m^-2 s^-1 Hz^-1 sr^-1 km^-1
contrib *= 1.e3
return contrib
def write_B(outfile, Bx, By, Bz):
''' Writes a RH magnetic field file. Input B arrays can be any rank, as
they will be flattened before write. Bx, By, Bz units should be T.'''
if (Bx.shape != By.shape) or (By.shape != Bz.shape):
raise TypeError('writeB: B arrays have different shapes!')
n = np.prod(Bx.shape)
# Convert into spherical coordinates
B = np.sqrt(Bx**2 + By**2 + Bz**2)
gamma_B = np.arccos(Bz / B)
chi_B = np.arctan(By / Bx)
# Pack as double
p = xdrlib.Packer()
p.pack_farray(n, B.ravel().astype('d'), p.pack_double)
p.pack_farray(n, gamma_B.ravel().astype('d'), p.pack_double)
p.pack_farray(n, chi_B.ravel().astype('d'), p.pack_double)
# Write to file
f = open(outfile, 'wb')
f.write(p.get_buffer())
f.close()
return
| [
"xdrlib.Unpacker",
"numpy.prod",
"numpy.fromfile",
"numpy.sqrt",
"numpy.arccos",
"io.open",
"os.path.isfile",
"xdrlib.Packer",
"numpy.zeros",
"numpy.sum",
"numpy.exp",
"numpy.loadtxt",
"numpy.transpose",
"numpy.arctan"
] | [((24930, 24951), 'xdrlib.Unpacker', 'xdrlib.Unpacker', (['data'], {}), '(data)\n', (24945, 24951), False, 'import xdrlib\n'), ((28119, 28136), 'numpy.transpose', 'np.transpose', (['chi'], {}), '(chi)\n', (28131, 28136), True, 'import numpy as np\n'), ((28147, 28168), 'numpy.zeros', 'np.zeros', (['chi_t.shape'], {}), '(chi_t.shape)\n', (28155, 28168), True, 'import numpy as np\n'), ((28637, 28665), 'numpy.zeros', 'np.zeros', (['tau_in.shape[::-1]'], {}), '(tau_in.shape[::-1])\n', (28645, 28665), True, 'import numpy as np\n'), ((28675, 28695), 'numpy.transpose', 'np.transpose', (['tau_in'], {}), '(tau_in)\n', (28687, 28695), True, 'import numpy as np\n'), ((28739, 28757), 'numpy.transpose', 'np.transpose', (['dtau'], {}), '(dtau)\n', (28751, 28757), True, 'import numpy as np\n'), ((28767, 28784), 'numpy.zeros', 'np.zeros', (['z.shape'], {}), '(z.shape)\n', (28775, 28784), True, 'import numpy as np\n'), ((29368, 29385), 'numpy.prod', 'np.prod', (['Bx.shape'], {}), '(Bx.shape)\n', (29375, 29385), True, 'import numpy as np\n'), ((29435, 29471), 'numpy.sqrt', 'np.sqrt', (['(Bx ** 2 + By ** 2 + Bz ** 2)'], {}), '(Bx ** 2 + By ** 2 + Bz ** 2)\n', (29442, 29471), True, 'import numpy as np\n'), ((29480, 29497), 'numpy.arccos', 'np.arccos', (['(Bz / B)'], {}), '(Bz / B)\n', (29489, 29497), True, 'import numpy as np\n'), ((29510, 29528), 'numpy.arctan', 'np.arctan', (['(By / Bx)'], {}), '(By / Bx)\n', (29519, 29528), True, 'import numpy as np\n'), ((29558, 29573), 'xdrlib.Packer', 'xdrlib.Packer', ([], {}), '()\n', (29571, 29573), False, 'import xdrlib\n'), ((9632, 9658), 'os.path.isfile', 'os.path.isfile', (['"""asrs.out"""'], {}), "('asrs.out')\n", (9646, 9658), False, 'import os\n'), ((14763, 14779), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (14771, 14779), True, 'import numpy as np\n'), ((16544, 16560), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16552, 16560), True, 'import numpy as np\n'), ((16578, 16594), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16586, 16594), True, 'import numpy as np\n'), ((16611, 16627), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16619, 16627), True, 'import numpy as np\n'), ((16644, 16660), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16652, 16660), True, 'import numpy as np\n'), ((16677, 16693), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16685, 16693), True, 'import numpy as np\n'), ((16913, 16944), 'numpy.sum', 'np.sum', (["self.brs['ispolarized']"], {}), "(self.brs['ispolarized'])\n", (16919, 16944), True, 'import numpy as np\n'), ((23258, 23273), 'xdrlib.Packer', 'xdrlib.Packer', ([], {}), '()\n', (23271, 23273), False, 'import xdrlib\n'), ((24708, 24731), 'io.open', 'io.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (24715, 24731), False, 'import io\n'), ((26588, 26603), 'numpy.prod', 'np.prod', (['var[1]'], {}), '(var[1])\n', (26595, 26603), True, 'import numpy as np\n'), ((27185, 27221), 'numpy.fromfile', 'np.fromfile', (['buf'], {'dtype': 'var', 'count': '(1)'}), '(buf, dtype=var, count=1)\n', (27196, 27221), True, 'import numpy as np\n'), ((11676, 11703), 'numpy.zeros', 'np.zeros', (['nshape'], {'dtype': '"""d"""'}), "(nshape, dtype='d')\n", (11684, 11703), True, 'import numpy as np\n'), ((11732, 11759), 'numpy.zeros', 'np.zeros', (['nshape'], {'dtype': '"""d"""'}), "(nshape, dtype='d')\n", (11740, 11759), True, 'import numpy as np\n'), ((11795, 11818), 'numpy.zeros', 'np.zeros', (['ns'], {'dtype': '"""l"""'}), "(ns, dtype='l')\n", (11803, 11818), True, 'import numpy as np\n'), ((20869, 20897), 'numpy.loadtxt', 'np.loadtxt', (['inray'], {'dtype': '"""f"""'}), "(inray, dtype='f')\n", (20879, 20897), True, 'import numpy as np\n'), ((27260, 27307), 'numpy.fromfile', 'np.fromfile', (['buf'], {'dtype': 'var[0]', 'count': 'var[1][0]'}), '(buf, dtype=var[0], count=var[1][0])\n', (27271, 27307), True, 'import numpy as np\n'), ((27335, 27350), 'numpy.prod', 'np.prod', (['var[1]'], {}), '(var[1])\n', (27342, 27350), True, 'import numpy as np\n'), ((28894, 28906), 'numpy.exp', 'np.exp', (['(-tau)'], {}), '(-tau)\n', (28900, 28906), True, 'import numpy as np\n'), ((27374, 27418), 'numpy.fromfile', 'np.fromfile', (['buf'], {'dtype': 'var[0]', 'count': 'nitems'}), '(buf, dtype=var[0], count=nitems)\n', (27385, 27418), True, 'import numpy as np\n')] |
import logging, tqdm
import numpy as np
import rawpy
import colour_demosaicing as cd
import HDRutils.io as io
from HDRutils.utils import *
logger = logging.getLogger(__name__)
def merge(files, do_align=False, demosaic_first=True, normalize=False, color_space='sRGB',
wb=None, saturation_percent=0.98, black_level=0, bayer_pattern='RGGB',
exp=None, gain=None, aperture=None, estimate_exp='gfxdisp', cam='default',
perc=10, outlier='cerman'):
"""
Merge multiple SDR images into a single HDR image after demosacing. This is a wrapper
function that extracts metadata and calls the appropriate function.
:files: Filenames containing the inpt images
:do_align: Align by estimation homography
:demosaic_first: Order of operations
:color_space: Output color-space. Pick 1 of [sRGB, raw, Adobe, XYZ]
:normalize: Output pixels lie between 0 and 1
:wb: White-balance values after merging. Pick from [None, camera] or supply 3 values.
:saturation_percent: Saturation offset from reported white-point
:black_level: Camera's black level
:bayer_patter: Color filter array pattern of the camera
:exp: Exposure time (in seconds) required when metadata is not present
:gain: Camera gain (ISO/100) required when metadata is not present
:aperture: Aperture required when metadata is not present
:estimate_exp: Estimate exposure times by solving a system. Pick 1 of ['gfxdisp','cerman']
:cam: Camera noise model for exposure estimation
:perc: Estimate exposures using min-variance rows
:outlier: Iterative outlier removal. Pick 1 of [None, 'cerman', 'ransac']
:return: Merged FP32 HDR image
"""
data = get_metadata(files, color_space, saturation_percent, black_level, exp, gain, aperture)
if estimate_exp:
# TODO: Handle imamge stacks with varying gain and aperture
assert len(set(data['gain'])) == 1 and len(set(data['aperture'])) == 1
if do_align:
# TODO: Perform exposure alignment after homography (adds additional overhead since
# images need to be demosaiced)
logger.warning('Exposure alignment is done before homography, may cause it to fail')
Y = np.array([io.imread(f, libraw=False) for f in files], dtype=np.float32)
exif_exp = data['exp']
estimate = np.ones(data['N'], dtype=bool)
for i in range(data['N']):
# Skip images where > 90% of the pixels are saturated
if (Y[i] >= data['saturation_point']).sum() > 0.9*Y[i].size:
logger.warning(f'Skipping exposure estimation for file {files[i]} due to saturation')
estimate[i] = False
data['exp'][estimate] = estimate_exposures(Y[estimate], data['exp'][estimate], data,
estimate_exp, cam=cam, outlier=outlier)
if demosaic_first:
HDR, num_sat = imread_demosaic_merge(files, data, do_align, saturation_percent)
else:
HDR, num_sat = imread_merge_demosaic(files, data, do_align, bayer_pattern)
if num_sat > 0:
logger.warning(f'{num_sat/(data["h"]*data["w"]):.3f}% of pixels (n={num_sat}) are ' \
'saturated in the shortest exposure. The values for these pixels will ' \
'be inaccurate.')
if wb == 'camera':
wb = data['white_balance'][:3]
if wb is not None:
assert len(wb) == 3, 'Provide list [R G B] corresponding to white patch in the image'
HDR = HDR * np.array(wb)[None,None,:]
if HDR.min() < 0:
logger.info('Clipping negative pixels.')
HDR[HDR < 0] = 0
if normalize:
HDR = HDR / HDR.max()
return HDR.astype(np.float32)
def imread_demosaic_merge(files, metadata, do_align, sat_percent):
"""
First postprocess using libraw and then merge RGB images. This function merges in an online
way and can handle a large number of inputs with little memory.
"""
assert metadata['raw_format'], 'Libraw unsupported, use merge(..., demosaic_first=False)'
logger.info('Demosaicing before merging.')
# Check for saturation in shortest exposure
shortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] * metadata['aperture'])
logger.info(f'Shortest exposure is {shortest_exposure}')
if do_align:
ref_idx = np.argsort(metadata['exp'] * metadata['gain']
* metadata['aperture'])[len(files)//2]
ref_img = io.imread(files[ref_idx]) / metadata['exp'][ref_idx] \
/ metadata['gain'][ref_idx] \
/ metadata['aperture'][ref_idx]
num_saturated = 0
num, denom = np.zeros((2, metadata['h'], metadata['w'], 3))
for i, f in enumerate(tqdm.tqdm(files, leave=False)):
raw = rawpy.imread(f)
img = io.imread_libraw(raw, color_space=metadata['color_space'])
saturation_point_img = sat_percent * (2**(8*img.dtype.itemsize) - 1)
if do_align and i != ref_idx:
scaled_img = img / metadata['exp'][i] \
/ metadata['gain'][i] \
/ metadata['aperture'][i]
img = align(ref_img, scaled_img, img)
# Ignore saturated pixels in all but shortest exposure
if i == shortest_exposure:
unsaturated = np.ones_like(img, dtype=bool)
num_sat = np.count_nonzero(np.logical_not(
get_unsaturated(raw.raw_image_visible, metadata['saturation_point'],
img, saturation_point_img))) / 3
else:
unsaturated = get_unsaturated(raw.raw_image_visible, metadata['saturation_point'],
img, saturation_point_img)
X_times_t = img / metadata['gain'][i] / metadata['aperture'][i]
denom[unsaturated] += metadata['exp'][i]
num[unsaturated] += X_times_t[unsaturated]
HDR = num / denom
return HDR, num_sat
def imread_merge_demosaic(files, metadata, do_align, pattern):
"""
Merge RAW images before demosaicing. This function merges in an online
way and can handle a large number of inputs with little memory.
"""
if do_align:
ref_idx = np.argsort(metadata['exp'] * metadata['gain']
* metadata['aperture'])[len(files)//2]
ref_img = io.imread(files[ref_idx]).astype(np.float32)
if metadata['raw_format']:
ref_img = cd.demosaicing_CFA_Bayer_bilinear(ref_img, pattern=pattern)
ref_img = ref_img / metadata['exp'][ref_idx] \
/ metadata['gain'][ref_idx] \
/ metadata['aperture'][ref_idx]
logger.info('Merging before demosaicing.')
# More transforms available here:
# http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
if metadata['color_space'] == 'raw':
color_mat = np.eye(3)
else:
assert metadata['raw_format'], \
'Only RAW color_space supported. Use merge(..., color_space=\'raw\')'
raw = rawpy.imread(files[0])
assert (raw.rgb_xyz_matrix[-1] == 0).all()
native2xyz = np.linalg.inv(raw.rgb_xyz_matrix[:-1])
if metadata['color_space'] == 'xyz':
xyz2out = np.eye(3)
elif metadata['color_space'] == 'srgb':
xyz2out = np.array([[3.2406, -1.5372, -0.4986],
[-0.9689, 1.8758, 0.0415],
[0.0557, -0.2040, 1.0570]])
elif metadata['color_space'] == 'adobe':
xyz2out = np.array([[2.0413690, -0.5649464, -0.3446944],
[-0.9692660, 1.8760108, 0.0415560],
[0.0134474, -0.1183897, 1.0154096]])
else:
logger.warning('Unsupported color-space, switching to camara raw.')
native2xyz = np.eye(3)
xyz2out = np.eye(3)
color_mat = (xyz2out @ native2xyz).transpose()
# Check for saturation in shortest exposure
shortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] * metadata['aperture'])
logger.info(f'Shortest exposure is {shortest_exposure}')
num_saturated = 0
num, denom = np.zeros((2, metadata['h'], metadata['w']))
black_frame = np.tile(metadata['black_level'].reshape(2, 2),
(metadata['h']//2, metadata['w']//2))
for i, f in enumerate(tqdm.tqdm(files, leave=False)):
img = io.imread(f, libraw=False).astype(np.float32)
if do_align and i != ref_idx:
i_img = io.imread(f).astype(np.float32)
if metadata['raw_format']:
i_img = cd.demosaicing_CFA_Bayer_bilinear(i_img, pattern=pattern)
i_img = i_img / metadata['exp'][i] \
/ metadata['gain'][i] \
/ metadata['aperture'][i]
img = align(ref_img, i_img, img)
# Ignore saturated pixels in all but shortest exposure
if i == shortest_exposure:
unsaturated = np.ones_like(img, dtype=bool)
num_sat = np.count_nonzero(np.logical_not(get_unsaturated(
img, metadata['saturation_point'])))
else:
unsaturated = get_unsaturated(img, metadata['saturation_point'])
# Subtract black level for linearity
img -= black_frame
X_times_t = img / metadata['gain'][i] / metadata['aperture'][i]
denom[unsaturated] += metadata['exp'][i]
num[unsaturated] += X_times_t[unsaturated]
HDR_bayer = num / denom
# Libraw does not support 32-bit values. Use colour-demosaicing instead:
# https://colour-demosaicing.readthedocs.io/en/latest/manual.html
logger.info('Running bilinear demosaicing')
HDR = cd.demosaicing_CFA_Bayer_bilinear(HDR_bayer, pattern=pattern)
# Convert to output color-space
logger.info(f'Using color matrix: {color_mat}')
HDR = HDR @ color_mat
return HDR, num_sat
| [
"logging.getLogger",
"numpy.ones_like",
"numpy.eye",
"HDRutils.io.imread",
"numpy.ones",
"tqdm.tqdm",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"HDRutils.io.imread_libraw",
"numpy.argmin",
"rawpy.imread",
"colour_demosaicing.demosaicing_CFA_Bayer_bilinear"
] | [((151, 178), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (168, 178), False, 'import logging, tqdm\n'), ((3835, 3903), 'numpy.argmin', 'np.argmin', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (3844, 3903), True, 'import numpy as np\n'), ((4267, 4313), 'numpy.zeros', 'np.zeros', (["(2, metadata['h'], metadata['w'], 3)"], {}), "((2, metadata['h'], metadata['w'], 3))\n", (4275, 4313), True, 'import numpy as np\n'), ((7072, 7140), 'numpy.argmin', 'np.argmin', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (7081, 7140), True, 'import numpy as np\n'), ((7233, 7276), 'numpy.zeros', 'np.zeros', (["(2, metadata['h'], metadata['w'])"], {}), "((2, metadata['h'], metadata['w']))\n", (7241, 7276), True, 'import numpy as np\n'), ((8564, 8625), 'colour_demosaicing.demosaicing_CFA_Bayer_bilinear', 'cd.demosaicing_CFA_Bayer_bilinear', (['HDR_bayer'], {'pattern': 'pattern'}), '(HDR_bayer, pattern=pattern)\n', (8597, 8625), True, 'import colour_demosaicing as cd\n'), ((2203, 2233), 'numpy.ones', 'np.ones', (["data['N']"], {'dtype': 'bool'}), "(data['N'], dtype=bool)\n", (2210, 2233), True, 'import numpy as np\n'), ((4337, 4366), 'tqdm.tqdm', 'tqdm.tqdm', (['files'], {'leave': '(False)'}), '(files, leave=False)\n', (4346, 4366), False, 'import logging, tqdm\n'), ((4377, 4392), 'rawpy.imread', 'rawpy.imread', (['f'], {}), '(f)\n', (4389, 4392), False, 'import rawpy\n'), ((4401, 4459), 'HDRutils.io.imread_libraw', 'io.imread_libraw', (['raw'], {'color_space': "metadata['color_space']"}), "(raw, color_space=metadata['color_space'])\n", (4417, 4459), True, 'import HDRutils.io as io\n'), ((6154, 6163), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6160, 6163), True, 'import numpy as np\n'), ((6287, 6309), 'rawpy.imread', 'rawpy.imread', (['files[0]'], {}), '(files[0])\n', (6299, 6309), False, 'import rawpy\n'), ((6370, 6408), 'numpy.linalg.inv', 'np.linalg.inv', (['raw.rgb_xyz_matrix[:-1]'], {}), '(raw.rgb_xyz_matrix[:-1])\n', (6383, 6408), True, 'import numpy as np\n'), ((7408, 7437), 'tqdm.tqdm', 'tqdm.tqdm', (['files'], {'leave': '(False)'}), '(files, leave=False)\n', (7417, 7437), False, 'import logging, tqdm\n'), ((3989, 4058), 'numpy.argsort', 'np.argsort', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (3999, 4058), True, 'import numpy as np\n'), ((4817, 4846), 'numpy.ones_like', 'np.ones_like', (['img'], {'dtype': 'bool'}), '(img, dtype=bool)\n', (4829, 4846), True, 'import numpy as np\n'), ((5574, 5643), 'numpy.argsort', 'np.argsort', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (5584, 5643), True, 'import numpy as np\n'), ((5766, 5825), 'colour_demosaicing.demosaicing_CFA_Bayer_bilinear', 'cd.demosaicing_CFA_Bayer_bilinear', (['ref_img'], {'pattern': 'pattern'}), '(ref_img, pattern=pattern)\n', (5799, 5825), True, 'import colour_demosaicing as cd\n'), ((6462, 6471), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6468, 6471), True, 'import numpy as np\n'), ((7915, 7944), 'numpy.ones_like', 'np.ones_like', (['img'], {'dtype': 'bool'}), '(img, dtype=bool)\n', (7927, 7944), True, 'import numpy as np\n'), ((2103, 2129), 'HDRutils.io.imread', 'io.imread', (['f'], {'libraw': '(False)'}), '(f, libraw=False)\n', (2112, 2129), True, 'import HDRutils.io as io\n'), ((3217, 3229), 'numpy.array', 'np.array', (['wb'], {}), '(wb)\n', (3225, 3229), True, 'import numpy as np\n'), ((5679, 5704), 'HDRutils.io.imread', 'io.imread', (['files[ref_idx]'], {}), '(files[ref_idx])\n', (5688, 5704), True, 'import HDRutils.io as io\n'), ((6527, 6622), 'numpy.array', 'np.array', (['[[3.2406, -1.5372, -0.4986], [-0.9689, 1.8758, 0.0415], [0.0557, -0.204, 1.057]\n ]'], {}), '([[3.2406, -1.5372, -0.4986], [-0.9689, 1.8758, 0.0415], [0.0557, -\n 0.204, 1.057]])\n', (6535, 6622), True, 'import numpy as np\n'), ((7448, 7474), 'HDRutils.io.imread', 'io.imread', (['f'], {'libraw': '(False)'}), '(f, libraw=False)\n', (7457, 7474), True, 'import HDRutils.io as io\n'), ((7611, 7668), 'colour_demosaicing.demosaicing_CFA_Bayer_bilinear', 'cd.demosaicing_CFA_Bayer_bilinear', (['i_img'], {'pattern': 'pattern'}), '(i_img, pattern=pattern)\n', (7644, 7668), True, 'import colour_demosaicing as cd\n'), ((4094, 4119), 'HDRutils.io.imread', 'io.imread', (['files[ref_idx]'], {}), '(files[ref_idx])\n', (4103, 4119), True, 'import HDRutils.io as io\n'), ((6692, 6813), 'numpy.array', 'np.array', (['[[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, 0.041556], [\n 0.0134474, -0.1183897, 1.0154096]]'], {}), '([[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, \n 0.041556], [0.0134474, -0.1183897, 1.0154096]])\n', (6700, 6813), True, 'import numpy as np\n'), ((6923, 6932), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6929, 6932), True, 'import numpy as np\n'), ((6946, 6955), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6952, 6955), True, 'import numpy as np\n'), ((7537, 7549), 'HDRutils.io.imread', 'io.imread', (['f'], {}), '(f)\n', (7546, 7549), True, 'import HDRutils.io as io\n')] |
import pickle
import numpy as np
import sys
def eigen(num, split_num, layer_num):
prefix = 'min_'
layer_num = int(layer_num)
num = str(num)
#cur = [8, 8, 8, 8, 16, 16, 24, 24, 24, 24, 24, 24, 32, 32]
#cur = [10, 12, 13, 13, 21, 29, 35, 37, 35, 25, 28, 28, 37, 32]
#cur = [12, 12, 18, 17, 28, 54, 55, 45, 40, 25, 28, 28, 37, 32]
#cur = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]
cur = [22, 23, (20, 2), 25, (22, 2), 25, (24, 2), 20, 18, 19, 19, 20, (18, 2), 20]
cur = [27, 39, (22, 2), 39, (37, 2), 40, (30, 2), 20, 18, 21, 21, 21, (19, 2), 20]
cur = [29, 74, (24, 2), 54, (50, 2), 64, (42, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur = [33, 132, (28, 2), 69, (59, 2), 104, (53, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur = [33, 209, (34, 2), 90, (72, 2), 160, (64, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur[2] = cur[2][0]
cur[4] = cur[4][0]
cur[6] = cur[6][0]
cur[12] = cur[12][0]
cur = [4,4,4,4]
cur = [4, 7, 5, 4]
cur = [10, 12, 21, 11]
cur = [11, 18, 29, 12]
cur = [11, 18, 29, 12]
cur = [11, 30, 38, 12]
print(cur)
cur = pickle.load(open('cifar10max' + str(num) + '.pkl', 'rb'))
curt = []
DD = 0
layer_num = len(cur)
'''
for i in cur:
if i != 'M':
curt.append(i)
for i in range(layer_num):
#w = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
try:
w = pickle.load(open('eigenm/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#w1 = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
print(w)
#print(w.shape)
except:
DD = DD + 1
continue
if i == DD:
W = w
else:
W = np.concatenate([W, w], 0)
'''
prefix = 'max_'
r = [0.116849326, 0.038422294, 0.02061177, 0.02997986, 0.014377874, 0.0062844744, 0.012592447, 0.006363712, 0.008475702, 0.02377023, 0.038945824, 0.03370137, 0.03196905, 0.06754288]
r = np.ones([14])
#r = pickle.load(open('cifar10max' + str(num) + 'mag.pkl','rb'))
for i in range(layer_num):
#w = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
try:
w = pickle.load(open('eigenmax/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#print(np.mean(w))
w *= np.sqrt(r[i])
print(np.mean(w))
#w1 = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#print(w.shape)
except:
DD = DD + 1
continue
if i == DD:
W = w
else:
W = np.concatenate([W, -w], 0)
st = np.argsort(W)
L = W.shape[0]
t = int(0.15 * L)
thre = W[st[t]]
SP = {}
VP = {}
SP1 = {}
VP1 = {}
DL = []
dp = []
prefix = sys.argv[3] + '_'
for i in range(layer_num):
if i == 0:
k = 3
else:
k = 1
try:
w = pickle.load(open('eigenmax/' +prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
v = pickle.load(open('eigenmax/' +prefix+ 'V_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
w *= np.sqrt(r[i])
except:
print(i)
l = int(0.1 * curt[i])
D = np.random.randint(0, curt[i], size=[int(0.1 * curt[i]), 1])
SP[i] = D
VD = np.zeros([1, 1, 1])
#VP[i] = np.reshape(v, [v.shape[0], -1, k, k])
VP[i] = np.zeros([curt[i], curt[i-1], 1, 1])
DL.append(l)
continue
if prefix == 'max_':
ic = -1
else:
ic = 1
D = np.argwhere((ic * w) < thre)
l = D.shape[0]
SP[i] = np.squeeze(D)
#SP1[i] = np.random.randint(0, curt[i], size=[D.shape[0], 1])
VD = v[D].astype(float)
VP[i] = np.reshape(v, [v.shape[0], -1, k, k])
#VP1[i] = np.zeros_like(VD)
dp.append(l)
DL.append(l)
print(SP[i].shape)
print(VP[i].shape)
print(cur[i])
pickle.dump(SP, open('eigenmax/' + num + prefix + 'global.pkl', 'wb'))
pickle.dump(VP, open('eigenmax/' + num + prefix + 'globalv.pkl', 'wb'))
print(DL)
DL = np.array(DL)
ct = 0
DDL = []
for i in cur:
if i == 'M':
DDL.append('M')
continue
else:
DDL.append(int(i + DL[ct]))
ct += 1
for i in range(len(cur)):
cur[i] = int(DDL[i])
#print(DL)
print(DDL)
print(cur)
pickle.dump(DL, open('maxcfg' + str(int(num) + 1) + '.pkl', 'wb'))
#print(SP)
eigen(sys.argv[1], 1, sys.argv[2])
| [
"numpy.mean",
"numpy.reshape",
"numpy.ones",
"numpy.sqrt",
"numpy.squeeze",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"numpy.concatenate"
] | [((2128, 2141), 'numpy.ones', 'np.ones', (['[14]'], {}), '([14])\n', (2135, 2141), True, 'import numpy as np\n'), ((2887, 2900), 'numpy.argsort', 'np.argsort', (['W'], {}), '(W)\n', (2897, 2900), True, 'import numpy as np\n'), ((4491, 4503), 'numpy.array', 'np.array', (['DL'], {}), '(DL)\n', (4499, 4503), True, 'import numpy as np\n'), ((3908, 3934), 'numpy.argwhere', 'np.argwhere', (['(ic * w < thre)'], {}), '(ic * w < thre)\n', (3919, 3934), True, 'import numpy as np\n'), ((3989, 4002), 'numpy.squeeze', 'np.squeeze', (['D'], {}), '(D)\n', (3999, 4002), True, 'import numpy as np\n'), ((4121, 4158), 'numpy.reshape', 'np.reshape', (['v', '[v.shape[0], -1, k, k]'], {}), '(v, [v.shape[0], -1, k, k])\n', (4131, 4158), True, 'import numpy as np\n'), ((2533, 2546), 'numpy.sqrt', 'np.sqrt', (['r[i]'], {}), '(r[i])\n', (2540, 2546), True, 'import numpy as np\n'), ((2851, 2877), 'numpy.concatenate', 'np.concatenate', (['[W, -w]', '(0)'], {}), '([W, -w], 0)\n', (2865, 2877), True, 'import numpy as np\n'), ((3431, 3444), 'numpy.sqrt', 'np.sqrt', (['r[i]'], {}), '(r[i])\n', (3438, 3444), True, 'import numpy as np\n'), ((2565, 2575), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (2572, 2575), True, 'import numpy as np\n'), ((3632, 3651), 'numpy.zeros', 'np.zeros', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3640, 3651), True, 'import numpy as np\n'), ((3731, 3769), 'numpy.zeros', 'np.zeros', (['[curt[i], curt[i - 1], 1, 1]'], {}), '([curt[i], curt[i - 1], 1, 1])\n', (3739, 3769), True, 'import numpy as np\n')] |
from flask import Flask
from flask_pymongo import PyMongo
from flask_admin import Admin
#from flask_mongoengine import MongoEngine
from flask_login import LoginManager
# flask
app = Flask(__name__)
app.config.from_pyfile("config.py")
# mongo db
mongo = PyMongo(app)
#db = MongoEngine()
#db.init_app(app)
# login manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
login_manager.login_message = u"Please login to access this page."
login_manager.login_message_category = "warning"
# Flask BCrypt will be used to salt the user password
# flask_bcrypt = Bcrypt(app)
# Create admin
admin = Admin(app, name='VUSualizer admin', template_mode='bootstrap3')
from . import *
from .views import main, auth, api, admin_view
| [
"flask_pymongo.PyMongo",
"flask_login.LoginManager",
"flask_admin.Admin",
"flask.Flask"
] | [((183, 198), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (188, 198), False, 'from flask import Flask\n'), ((255, 267), 'flask_pymongo.PyMongo', 'PyMongo', (['app'], {}), '(app)\n', (262, 267), False, 'from flask_pymongo import PyMongo\n'), ((339, 353), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (351, 353), False, 'from flask_login import LoginManager\n'), ((641, 704), 'flask_admin.Admin', 'Admin', (['app'], {'name': '"""VUSualizer admin"""', 'template_mode': '"""bootstrap3"""'}), "(app, name='VUSualizer admin', template_mode='bootstrap3')\n", (646, 704), False, 'from flask_admin import Admin\n')] |
import requests
import os
from janny.config import logger
def kube_auth():
session = requests.Session()
# We're in-cluster
if not os.path.exists(os.path.expanduser("~/.kube/config")):
with open("/var/run/secrets/kubernetes.io/serviceaccount/token") as f:
token = f.read()
session.verify = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
session.headers.update({"Authorization": f"Bearer {token}"})
logger.info("Authenticated with the API server")
else:
logger.info("Not in-cluster, continuing as is")
return session
SESSION = kube_auth()
| [
"janny.config.logger.info",
"requests.Session",
"os.path.expanduser"
] | [((92, 110), 'requests.Session', 'requests.Session', ([], {}), '()\n', (108, 110), False, 'import requests\n'), ((465, 513), 'janny.config.logger.info', 'logger.info', (['"""Authenticated with the API server"""'], {}), "('Authenticated with the API server')\n", (476, 513), False, 'from janny.config import logger\n'), ((532, 579), 'janny.config.logger.info', 'logger.info', (['"""Not in-cluster, continuing as is"""'], {}), "('Not in-cluster, continuing as is')\n", (543, 579), False, 'from janny.config import logger\n'), ((161, 197), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.kube/config"""'], {}), "('~/.kube/config')\n", (179, 197), False, 'import os\n')] |
# Author: <NAME> (<EMAIL>) 08/25/2016
"""SqueezeDet Demo.
In image detection mode, for a given image, detect objects and draw bounding
boxes around them. In video detection mode, perform real-time detection on the
video stream.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import time
import sys
import os
import glob
import numpy as np
import tensorflow as tf
from config import *
from train import _draw_box
from nets import *
import RPi.GPIO as GPIO
os.system("espeak 'WELCOME TO PI VISION'")
os.system("espeak 'WE INCULCATE OBJECT DETECTION WITH MACHINE LEARNING'")
motor = 11
GPIO.setmode(GPIO.BOARD)
GPIO.setup(motor, GPIO.OUT)
pwm_motor = GPIO.PWM(motor, 50)
pwm_motor.start(0)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'mode', 'video', """'image' or 'video'.""")
tf.app.flags.DEFINE_string(
'checkpoint', './data/model_checkpoints/squeezeDet/model.ckpt-87000',
"""Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
'input_path', './data/road.mp4',
"""Input image or video to be detected. Can process glob input such as """
"""./data/00000*.png.""")
tf.app.flags.DEFINE_string(
'out_dir', './data/out/', """Directory to dump output image or video.""")
tf.app.flags.DEFINE_string(
'demo_net', 'squeezeDet', """Neural net architecture.""")
def video_demo():
"""Detect videos."""
cap = cv2.VideoCapture(r'/home/pi/Downloads/road.mp4') #r'/home/pi/Downloads/road.mp4')
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Define the codec and create VideoWriter object
# fourcc = cv2.cv.CV_FOURCC(*'XVID')
# fourcc = cv2.cv.CV_FOURCC(*'MJPG')
# in_file_name = os.path.split(FLAGS.input_path)[1]
# out_file_name = os.path.join(FLAGS.out_dir, 'out_'+in_file_name)
# out = cv2.VideoWriter(out_file_name, fourcc, 30.0, (375,1242), True)
# out = VideoWriter(out_file_name, frameSize=(1242, 375))
# out.open()
assert FLAGS.demo_net == 'squeezeDet' or FLAGS.demo_net == 'squeezeDet+', \
'Selected nueral net architecture not supported: {}'.format(FLAGS.demo_net)
with tf.Graph().as_default():
# Load model
if FLAGS.demo_net == 'squeezeDet':
mc = kitti_squeezeDet_config()
mc.BATCH_SIZE = 1
# model parameters will be restored from checkpoint
mc.LOAD_PRETRAINED_MODEL = False
model = SqueezeDet(mc, FLAGS.gpu)
elif FLAGS.demo_net == 'squeezeDet+':
mc = kitti_squeezeDetPlus_config()
mc.BATCH_SIZE = 1
mc.LOAD_PRETRAINED_MODEL = False
model = SqueezeDetPlus(mc, FLAGS.gpu)
saver = tf.train.Saver(model.model_params)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
saver.restore(sess, FLAGS.checkpoint)
times = {}
count = 0
while cap.isOpened():
t_start = time.time()
count += 1
out_im_name = os.path.join(FLAGS.out_dir, str(count).zfill(6)+'.jpg')
# Load images from video and crop
ret, frame = cap.read()
if ret==True:
# crop frames
#frame = frame[500:-205, 239:-439, :]
frame = cv2.resize(frame, (1248,384))
im_input = frame.astype(np.float32) - mc.BGR_MEANS
else:
break
t_reshape = time.time()
times['reshape']= t_reshape - t_start
start = time.time()
# Detect
det_boxes, det_probs, det_class = sess.run(
[model.det_boxes, model.det_probs, model.det_class],
feed_dict={model.image_input:[im_input]})
t_detect = time.time()
times['detect']= t_detect - t_reshape
# Filter
final_boxes, final_probs, final_class = model.filter_prediction(
det_boxes[0], det_probs[0], det_class[0])
keep_idx = [idx for idx in range(len(final_probs)) \
if final_probs[idx] > mc.PLOT_PROB_THRESH]
final_boxes = [final_boxes[idx] for idx in keep_idx]
final_probs = [final_probs[idx] for idx in keep_idx]
final_class = [final_class[idx] for idx in keep_idx]
end = time.time()
t_filter = time.time()
times['filter']= t_filter - t_detect
# Draw boxes
# TODO(bichen): move this color dict to configuration file
cls2clr = {
'car': (255, 191, 0),
'cyclist': (0, 191, 255),
'pedestrian':(255, 0, 191)
}
_draw_box(
frame, final_boxes,
[mc.CLASS_NAMES[idx]+': (%.2f)'% prob \
for idx, prob in zip(final_class, final_probs)],
cdict=cls2clr
)
for i,b in enumerate(det_boxes[0]):
if det_class[0][i] == 0 or det_class[0][i] == 1 or det_class[0][i] == 2:
if det_probs[0][i] >= 0.5:
mid_x = (det_boxes[0][i][1]+det_boxes[0][i][3])/2
mid_y = (det_boxes[0][i][0]+det_boxes[0][i][2])/2
apx_distance = round((1 - (det_boxes[0][i][3] - det_boxes[0][i][1])),1)
cv2.putText(frame, '{}'.format(apx_distance), (int(mid_x*800),int(mid_y*450)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2)
if apx_distance<=200 and apx_distance>180:
#if mid_x > 130 and mid_x < 140:
pwm_motor.ChangeDutyCycle(25)
print("object distance:" + str(apx_distance) + "motor is at 25% Duty Cycle")
time.sleep(0.5)
if apx_distance<=180 and apx_distance>160:
#if mid_x > 140 and mid_x < 150:
pwm_motor.ChangeDutyCycle(50)
print("object distance:" + str(apx_distance) + "motor is at 50% Duty Cycle")
time.sleep(0.5)
if apx_distance<=160 and apx_distance>140:
#if mid_x > 176 and mid_x < 184:
pwm_motor.ChangeDutyCycle(75)
print("object distance:" + str(apx_distance) + "motor is at 75% Duty Cycle")
time.sleep(0.5)
if apx_distance<=140 and apx_distance>=120:
#if mid_x > 168 and mid_x < 176:
pwm_motor.ChangeDutyCycle(100)
print("object distance:" + str(apx_distance) + "motor is at 100% Duty Cycle")
cv2.putText(frame, 'WARNING!!!', (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,0,255), 3)
total_time_ms = (end-start)*1000.
#print ('processing time:%.3fms'%(total_time_ms))
#frame /= 255.
cv2.putText(frame, 'fps:%.2f'%(1000/total_time_ms), (5,20), cv2.FONT_HERSHEY_PLAIN, 1.3, (0,255,0), 2)
t_draw = time.time()
times['draw']= t_draw - t_filter
#cv2.imwrite(out_im_name, frame)
# out.write(frame)
cv2.imshow('Video Detection', frame)
#times['total']= time.time() - t_start
# time_str = ''
# for t in times:
# time_str += '{} time: {:.4f} '.format(t[0], t[1])
# time_str += '\n'
'''time_str = 'Total time: {:.4f}, detection time: {:.4f}, filter time: '\
'{:.4f}'. \
format(times['total'], times['detect'], times['filter'])'''
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release everything if job is finished
cap.release()
# out.release()
cv2.destroyAllWindows()
def image_demo():
"""Detect image."""
assert FLAGS.demo_net == 'squeezeDet' or FLAGS.demo_net == 'squeezeDet+', \
'Selected nueral net architecture not supported: {}'.format(FLAGS.demo_net)
with tf.Graph().as_default():
# Load model
if FLAGS.demo_net == 'squeezeDet':
mc = kitti_squeezeDet_config()
mc.BATCH_SIZE = 1
# model parameters will be restored from checkpoint
mc.LOAD_PRETRAINED_MODEL = False
model = SqueezeDet(mc, FLAGS.gpu)
elif FLAGS.demo_net == 'squeezeDet+':
mc = kitti_squeezeDetPlus_config()
mc.BATCH_SIZE = 1
mc.LOAD_PRETRAINED_MODEL = False
model = SqueezeDetPlus(mc, FLAGS.gpu)
saver = tf.train.Saver(model.model_params)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
saver.restore(sess, FLAGS.checkpoint)
for f in glob.iglob(FLAGS.input_path):
im = cv2.imread(f)
im = im.astype(np.float32, copy=False)
im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
input_image = im - mc.BGR_MEANS
# Detect
det_boxes, det_probs, det_class = sess.run(
[model.det_boxes, model.det_probs, model.det_class],
feed_dict={model.image_input:[input_image]})
# Filter
final_boxes, final_probs, final_class = model.filter_prediction(
det_boxes[0], det_probs[0], det_class[0])
keep_idx = [idx for idx in range(len(final_probs)) \
if final_probs[idx] > mc.PLOT_PROB_THRESH]
final_boxes = [final_boxes[idx] for idx in keep_idx]
final_probs = [final_probs[idx] for idx in keep_idx]
final_class = [final_class[idx] for idx in keep_idx]
# TODO(bichen): move this color dict to configuration file
cls2clr = {
'car': (255, 191, 0),
'cyclist': (0, 191, 255),
'pedestrian':(255, 0, 191)
}
# Draw boxes
_draw_box(
im, final_boxes,
[mc.CLASS_NAMES[idx]+': (%.2f)'% prob \
for idx, prob in zip(final_class, final_probs)],
cdict=cls2clr,
)
file_name = os.path.split(f)[1]
out_file_name = os.path.join(FLAGS.out_dir, 'out_'+file_name)
cv2.imwrite(out_file_name, im)
print ('Image detection output saved to {}'.format(out_file_name))
def main(argv=None):
if not tf.gfile.Exists(FLAGS.out_dir):
tf.gfile.MakeDirs(FLAGS.out_dir)
if FLAGS.mode == 'image':
image_demo()
else:
video_demo()
if __name__ == '__main__':
tf.app.run()
| [
"glob.iglob",
"time.sleep",
"cv2.imshow",
"RPi.GPIO.PWM",
"cv2.destroyAllWindows",
"tensorflow.gfile.MakeDirs",
"RPi.GPIO.setmode",
"tensorflow.app.run",
"tensorflow.Graph",
"tensorflow.gfile.Exists",
"os.path.split",
"tensorflow.ConfigProto",
"cv2.waitKey",
"RPi.GPIO.setup",
"tensorflow... | [((539, 581), 'os.system', 'os.system', (['"""espeak \'WELCOME TO PI VISION\'"""'], {}), '("espeak \'WELCOME TO PI VISION\'")\n', (548, 581), False, 'import os\n'), ((582, 655), 'os.system', 'os.system', (['"""espeak \'WE INCULCATE OBJECT DETECTION WITH MACHINE LEARNING\'"""'], {}), '("espeak \'WE INCULCATE OBJECT DETECTION WITH MACHINE LEARNING\'")\n', (591, 655), False, 'import os\n'), ((667, 691), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (679, 691), True, 'import RPi.GPIO as GPIO\n'), ((692, 719), 'RPi.GPIO.setup', 'GPIO.setup', (['motor', 'GPIO.OUT'], {}), '(motor, GPIO.OUT)\n', (702, 719), True, 'import RPi.GPIO as GPIO\n'), ((732, 751), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor', '(50)'], {}), '(motor, 50)\n', (740, 751), True, 'import RPi.GPIO as GPIO\n'), ((800, 866), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""mode"""', '"""video"""', '"""\'image\' or \'video\'."""'], {}), '(\'mode\', \'video\', "\'image\' or \'video\'.")\n', (826, 866), True, 'import tensorflow as tf\n'), ((876, 1017), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint"""', '"""./data/model_checkpoints/squeezeDet/model.ckpt-87000"""', '"""Path to the model parameter file."""'], {}), "('checkpoint',\n './data/model_checkpoints/squeezeDet/model.ckpt-87000',\n 'Path to the model parameter file.')\n", (902, 1017), True, 'import tensorflow as tf\n'), ((1023, 1181), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""input_path"""', '"""./data/road.mp4"""', '"""Input image or video to be detected. Can process glob input such as ./data/00000*.png."""'], {}), "('input_path', './data/road.mp4',\n 'Input image or video to be detected. Can process glob input such as ./data/00000*.png.'\n )\n", (1049, 1181), True, 'import tensorflow as tf\n'), ((1197, 1297), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""out_dir"""', '"""./data/out/"""', '"""Directory to dump output image or video."""'], {}), "('out_dir', './data/out/',\n 'Directory to dump output image or video.')\n", (1223, 1297), True, 'import tensorflow as tf\n'), ((1303, 1388), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""demo_net"""', '"""squeezeDet"""', '"""Neural net architecture."""'], {}), "('demo_net', 'squeezeDet', 'Neural net architecture.'\n )\n", (1329, 1388), True, 'import tensorflow as tf\n'), ((1446, 1493), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""/home/pi/Downloads/road.mp4"""'], {}), "('/home/pi/Downloads/road.mp4')\n", (1462, 1493), False, 'import cv2\n'), ((7424, 7447), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7445, 7447), False, 'import cv2\n'), ((10038, 10050), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (10048, 10050), True, 'import tensorflow as tf\n'), ((2622, 2656), 'tensorflow.train.Saver', 'tf.train.Saver', (['model.model_params'], {}), '(model.model_params)\n', (2636, 2656), True, 'import tensorflow as tf\n'), ((8141, 8175), 'tensorflow.train.Saver', 'tf.train.Saver', (['model.model_params'], {}), '(model.model_params)\n', (8155, 8175), True, 'import tensorflow as tf\n'), ((9867, 9897), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.out_dir'], {}), '(FLAGS.out_dir)\n', (9882, 9897), True, 'import tensorflow as tf\n'), ((9903, 9935), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.out_dir'], {}), '(FLAGS.out_dir)\n', (9920, 9935), True, 'import tensorflow as tf\n'), ((8316, 8344), 'glob.iglob', 'glob.iglob', (['FLAGS.input_path'], {}), '(FLAGS.input_path)\n', (8326, 8344), False, 'import glob\n'), ((2140, 2150), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2148, 2150), True, 'import tensorflow as tf\n'), ((2861, 2872), 'time.time', 'time.time', ([], {}), '()\n', (2870, 2872), False, 'import time\n'), ((3299, 3310), 'time.time', 'time.time', ([], {}), '()\n', (3308, 3310), False, 'import time\n'), ((3374, 3385), 'time.time', 'time.time', ([], {}), '()\n', (3383, 3385), False, 'import time\n'), ((3603, 3614), 'time.time', 'time.time', ([], {}), '()\n', (3612, 3614), False, 'import time\n'), ((4146, 4157), 'time.time', 'time.time', ([], {}), '()\n', (4155, 4157), False, 'import time\n'), ((4178, 4189), 'time.time', 'time.time', ([], {}), '()\n', (4187, 4189), False, 'import time\n'), ((6619, 6733), 'cv2.putText', 'cv2.putText', (['frame', "('fps:%.2f' % (1000 / total_time_ms))", '(5, 20)', 'cv2.FONT_HERSHEY_PLAIN', '(1.3)', '(0, 255, 0)', '(2)'], {}), "(frame, 'fps:%.2f' % (1000 / total_time_ms), (5, 20), cv2.\n FONT_HERSHEY_PLAIN, 1.3, (0, 255, 0), 2)\n", (6630, 6733), False, 'import cv2\n'), ((6740, 6751), 'time.time', 'time.time', ([], {}), '()\n', (6749, 6751), False, 'import time\n'), ((6870, 6906), 'cv2.imshow', 'cv2.imshow', (['"""Video Detection"""', 'frame'], {}), "('Video Detection', frame)\n", (6880, 6906), False, 'import cv2\n'), ((7659, 7669), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7667, 7669), True, 'import tensorflow as tf\n'), ((8359, 8372), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (8369, 8372), False, 'import cv2\n'), ((8433, 8482), 'cv2.resize', 'cv2.resize', (['im', '(mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT)'], {}), '(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))\n', (8443, 8482), False, 'import cv2\n'), ((9675, 9722), 'os.path.join', 'os.path.join', (['FLAGS.out_dir', "('out_' + file_name)"], {}), "(FLAGS.out_dir, 'out_' + file_name)\n", (9687, 9722), False, 'import os\n'), ((9729, 9759), 'cv2.imwrite', 'cv2.imwrite', (['out_file_name', 'im'], {}), '(out_file_name, im)\n', (9740, 9759), False, 'import cv2\n'), ((2685, 2726), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2699, 2726), True, 'import tensorflow as tf\n'), ((3157, 3187), 'cv2.resize', 'cv2.resize', (['frame', '(1248, 384)'], {}), '(frame, (1248, 384))\n', (3167, 3187), False, 'import cv2\n'), ((8204, 8245), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (8218, 8245), True, 'import tensorflow as tf\n'), ((9631, 9647), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (9644, 9647), False, 'import os\n'), ((7293, 7307), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7304, 7307), False, 'import cv2\n'), ((5482, 5497), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5492, 5497), False, 'import time\n'), ((5789, 5804), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5799, 5804), False, 'import time\n'), ((6096, 6111), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6106, 6111), False, 'import time\n'), ((6387, 6481), 'cv2.putText', 'cv2.putText', (['frame', '"""WARNING!!!"""', '(50, 50)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1.0)', '(0, 0, 255)', '(3)'], {}), "(frame, 'WARNING!!!', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (\n 0, 0, 255), 3)\n", (6398, 6481), False, 'import cv2\n')] |
#!/usr/bin/env python3
import os
import sys
from datetime import date
import subprocess
from logzero import logger
from .utils import is_cloud_path, path_exists
class File():
def __init__(self, tag, path, description, source=None):
self.properties = {
'tag': tag,
'path': path,
'description': description,
'source': source
}
def impute_source(self):
source = 'cloud' if is_cloud_path(self['path']) else 'local'
return(source)
def all(self):
return(self.properties)
def __getitem__(self, property):
return(self.properties[property])
def __setitem__(self, index, value):
self.properties[index] = value
def __repr__(self):
return(str(self.properties))
def __str__(self):
return(str(self['path']))
@property
def path(self):
return(self['path'])
class InputFile(File):
def __init__(self, **kwargs):
init_dict = { p: kwargs.get(p, None) for p in [ 'path', 'description', 'source' ] }
init_dict['tag'] = kwargs['tag']
super().__init__(**init_dict)
self['entry_tag'] = kwargs.get('entry_tag', None)
self['version'] = kwargs.get('version', None)
tracker = kwargs.get('database', None)
#if tracker is None and 'tr' in globals():
# tracker = globals()['tr']
#print(globals())
#self.tracker = tracker
#sum([ self[p] is not None for p in ['entry_tag'] ]) == 3 # , 'version', 'database'
infer_boolean = (int(tracker is not None) + int(self['entry_tag'] is not None)) == 2
defined_boolean = sum([ self[p] is not None for p in ['path', 'description'] ]) == 2
if not infer_boolean and not defined_boolean:
raise('User must provide either the entry_tag, version, and database reference OR path and description.')
if infer_boolean:
self.__infer_file_properties(tracker)
self['exists'] = path_exists(self['path'])
def __infer_file_properties(self, tracker):
entry = tracker.get_entry(self['entry_tag'], self['version'])
file = tracker.get_file(self['entry_tag'], self['tag'], self['version'])
self['version'] = entry['version']
self['path'] = file['path']
self['description'] = file['description']
self['date'] = file['date']
class OutputFile(File):
def __init__(self, **kwargs):
init_dict = { p: kwargs.get(p, None) for p in [ 'tag', 'description', 'source' ] }
init_dict['path'] = kwargs['path']
super().__init__(**init_dict)
self['date'] = str(date.today())
self['source'] = self.impute_source() if self['source'] is None else self['source']
if self['tag'] is None:
self['tag'] = 'Artifact'
self['description'] = 'Data artifact' | [
"datetime.date.today"
] | [((2692, 2704), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2702, 2704), False, 'from datetime import date\n')] |
import json
from pprintpp import pprint
with open('terms.txt') as terms_file:
lines = terms_file.readlines()
main_list = list()
current = dict()
for term in lines:
if 'head:' in term:
if current:
main_list.append(current)
term = term.strip()
term = term.strip('head:')
term = term.strip()
current = dict()
current['value'] = term + ' emergency'
current['synonyms'] = list()
else:
term = term.strip()
if term:
current['synonyms'].append(term)
if current:
main_list.append(current)
pprint(main_list)
with open('data.json', 'w') as data_file:
json.dump(main_list, data_file) | [
"pprintpp.pprint",
"json.dump"
] | [((513, 530), 'pprintpp.pprint', 'pprint', (['main_list'], {}), '(main_list)\n', (519, 530), False, 'from pprintpp import pprint\n'), ((575, 606), 'json.dump', 'json.dump', (['main_list', 'data_file'], {}), '(main_list, data_file)\n', (584, 606), False, 'import json\n')] |
#!/usr/bin/python
useEMF=True
import sys
try:
import matplotlib
except:
print("Requires matplotlib from http://matplotlib.sourceforge.net.")
sys.exit()
if useEMF:
matplotlib.use('EMF')
ext=".emf"
else:
matplotlib.use('Agg')
ext=".png"
from pylab import *
semilogy([12,49,78,42,.15,24,.30,60,1],label="stuff")
semilogy([25,62,76,66,.6,54,30,53,.098],label="$10^{-1}$")
xlabel("nm")
ylabel("diff")
legend(loc='best')
title("Title of stuff and things. ;qjkxbwz/,.pyfgcr!@#$%^&*(")
savefig("test-matplotlib2"+ext,dpi=300)
| [
"matplotlib.use",
"sys.exit"
] | [((187, 208), 'matplotlib.use', 'matplotlib.use', (['"""EMF"""'], {}), "('EMF')\n", (201, 208), False, 'import matplotlib\n'), ((234, 255), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (248, 255), False, 'import matplotlib\n'), ((156, 166), 'sys.exit', 'sys.exit', ([], {}), '()\n', (164, 166), False, 'import sys\n')] |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
import sys
import os
import codecs
import io
import re
ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\x.. # 2-digit hex escapes
| \\[\\'"abfnrtv] # Single-character escapes
)''', re.UNICODE | re.VERBOSE)
def decode_escapes(s):
'''Unescape libconfig string literals'''
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return ESCAPE_SEQUENCE_RE.sub(decode_match, s)
class AttrDict(dict):
'''Dict subclass giving access to string keys via attribute access'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class ConfigParseError(RuntimeError):
'''Exception class raised on errors reading the libconfig input'''
pass
class Token(object):
'''Base class for all tokens produced by the libconf tokenizer'''
def __init__(self, type, text, filename, row, column):
self.type = type
self.text = text
self.filename = filename
self.row = row
self.column = column
def __str__(self):
return "%r in %r, row %d, column %d" % (
self.text, self.filename, self.row, self.column)
class FltToken(Token):
'''Token subclass for floating point values'''
def __init__(self, *args, **kwargs):
super(FltToken, self).__init__(*args, **kwargs)
self.value = float(self.text)
class IntToken(Token):
'''Token subclass for integral values'''
def __init__(self, *args, **kwargs):
super(IntToken, self).__init__(*args, **kwargs)
self.is_long = self.text.endswith('L')
self.is_hex = (self.text[1:2].lower() == 'x')
self.value = int(self.text.rstrip('L'), 0)
class BoolToken(Token):
'''Token subclass for booleans'''
def __init__(self, *args, **kwargs):
super(BoolToken, self).__init__(*args, **kwargs)
self.value = (self.text[0].lower() == 't')
class StrToken(Token):
'''Token subclass for strings'''
def __init__(self, *args, **kwargs):
super(StrToken, self).__init__(*args, **kwargs)
self.value = decode_escapes(self.text[1:-1])
class Tokenizer:
'''Tokenize an input string
Typical usage:
tokens = list(Tokenizer("<memory>").tokenize("""a = 7; b = ();"""))
The filename argument to the constructor is used only in error messages, no
data is loaded from the file. The input data is received as argument to the
tokenize function, which yields tokens or throws a ConfigParseError on
invalid input.
Include directives are not supported, they must be handled at a higher
level (cf. the TokenStream class).
'''
token_map = [
(FltToken, 'float', r'([-+]?([0-9]*)?\.[0-9]*([eE][-+]?[0-9]+)?)|'
r'([-+]([0-9]+)(\.[0-9]*)?[eE][-+]?[0-9]+)'),
(IntToken, 'hex64', r'0[Xx][0-9A-Fa-f]+(L(L)?)'),
(IntToken, 'hex', r'0[Xx][0-9A-Fa-f]+'),
(IntToken, 'integer64', r'[-+]?[0-9]+L(L)?'),
(IntToken, 'integer', r'[-+]?[0-9]+'),
(BoolToken, 'boolean', r'([Tt][Rr][Uu][Ee])|([Ff][Aa][Ll][Ss][Ee])'),
(StrToken, 'string', r'"([^"\\]|\\.)*"'),
(Token, 'name', r'[A-Za-z\*][-A-Za-z0-9_\*]*'),
(Token, '}', r'\}'),
(Token, '{', r'\{'),
(Token, ')', r'\)'),
(Token, '(', r'\('),
(Token, ']', r'\]'),
(Token, '[', r'\['),
(Token, ',', r','),
(Token, ';', r';'),
(Token, '=', r'='),
(Token, ':', r':'),
]
def __init__(self, filename):
self.filename = filename
self.row = 1
self.column = 1
def tokenize(self, string):
'''Yield tokens from the input string or throw ConfigParseError'''
while string:
m = re.match(r'\s+|#.*$|//.*$|/\*(.|\n)*?\*/', string, re.M)
if m:
skip_lines = m.group(0).split('\n')
if len(skip_lines) > 1:
self.row += len(skip_lines) - 1
self.column = 1 + len(skip_lines[-1])
else:
self.column += len(skip_lines[0])
string = string[m.end():]
continue
for cls, type, regex in self.token_map:
m = re.match(regex, string)
if m:
yield cls(type, m.group(0),
self.filename, self.row, self.column)
self.column += len(m.group(0))
string = string[m.end():]
break
else:
raise ConfigParseError(
"Couldn't load config in %r row %d, column %d: %r" %
(self.filename, self.row, self.column, string[:20]))
class TokenStream:
'''Offer a parsing-oriented view on tokens
Provide several methods that are useful to parsers, like ``accept()``,
``expect()``, ...
The ``from_file()`` method is the preferred way to read input files, as
it handles include directives, which the ``Tokenizer`` class does not do.
'''
def __init__(self, tokens):
self.position = 0
self.tokens = list(tokens)
@classmethod
def from_file(cls, f, filename=None, includedir='', seenfiles=None):
'''Create a token stream by reading an input file
Read tokens from `f`. If an include directive ('@include "file.cfg"')
is found, read its contents as well.
The `filename` argument is used for error messages and to detect
circular imports. ``includedir`` sets the lookup directory for included
files. ``seenfiles`` is used internally to detect circular includes,
and should normally not be supplied by users of is function.
'''
if filename is None:
filename = getattr(f, 'name', '<unknown>')
if seenfiles is None:
seenfiles = set()
if filename in seenfiles:
raise ConfigParseError("Circular include: %r" % (filename,))
seenfiles = seenfiles | {filename} # Copy seenfiles, don't alter it.
tokenizer = Tokenizer(filename=filename)
lines = []
tokens = []
for line in f:
m = re.match(r'@include "(.*)"$', line.strip())
if m:
tokens.extend(tokenizer.tokenize(''.join(lines)))
lines = [re.sub(r'\S', ' ', line)]
includefilename = decode_escapes(m.group(1))
includefilename = os.path.join(includedir, includefilename)
try:
includefile = open(includefilename, "r")
except IOError:
raise ConfigParseError("Could not open include file %r" %
(includefilename,))
with includefile:
includestream = cls.from_file(includefile,
filename=includefilename,
includedir=includedir,
seenfiles=seenfiles)
tokens.extend(includestream.tokens)
else:
lines.append(line)
tokens.extend(tokenizer.tokenize(''.join(lines)))
return cls(tokens)
def peek(self):
'''Return (but do not consume) the next token
At the end of input, ``None`` is returned.
'''
if self.position >= len(self.tokens):
return None
return self.tokens[self.position]
def accept(self, *args):
'''Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, return None.
'''
token = self.peek()
if token is None:
return None
for arg in args:
if token.type == arg:
self.position += 1
return token
return None
def expect(self, *args):
'''Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, raise a ConfigParseError.
'''
t = self.accept(*args)
if t is not None:
return t
self.error("expected: %r" % (args,))
def error(self, msg):
'''Raise a ConfigParseError at the current input position'''
if self.finished():
raise ConfigParseError("Unexpected end of input; %s" % (msg,))
else:
t = self.peek()
raise ConfigParseError("Unexpected token %s; %s" % (t, msg))
def finished(self):
'''Return ``True`` if the end of the token stream is reached.'''
return self.position >= len(self.tokens)
class Parser:
'''Recursive descent parser for libconfig files
Takes a ``TokenStream`` as input, the ``parse()`` method then returns
the config file data in a ``json``-module-style format.
'''
def __init__(self, tokenstream):
self.tokens = tokenstream
def parse(self):
return self.configuration()
def configuration(self):
result = self.setting_list_or_empty()
if not self.tokens.finished():
raise ConfigParseError("Expected end of input but found %s" %
(self.tokens.peek(),))
return result
def setting_list_or_empty(self):
result = AttrDict()
while True:
s = self.setting()
if s is None:
return result
result[s[0]] = s[1]
return result
def setting(self):
name = self.tokens.accept('name')
if name is None:
return None
self.tokens.expect(':', '=')
value = self.value()
if value is None:
self.tokens.error("expected a value")
self.tokens.accept(';', ',')
return (name.text, value)
def value(self):
acceptable = [self.scalar_value, self.array, self.list, self.group]
return self._parse_any_of(acceptable)
def scalar_value(self):
acceptable = [self.boolean, self.integer, self.integer64, self.hex,
self.hex64, self.float, self.string]
return self._parse_any_of(acceptable)
def value_list_or_empty(self):
return tuple(self._comma_separated_list_or_empty(self.value))
def scalar_value_list_or_empty(self):
return self._comma_separated_list_or_empty(self.scalar_value)
def array(self):
return self._enclosed_block('[', self.scalar_value_list_or_empty, ']')
def list(self):
return self._enclosed_block('(', self.value_list_or_empty, ')')
def group(self):
return self._enclosed_block('{', self.setting_list_or_empty, '}')
def boolean(self):
return self._create_value_node('boolean')
def integer(self):
return self._create_value_node('integer')
def integer64(self):
return self._create_value_node('integer64')
def hex(self):
return self._create_value_node('hex')
def hex64(self):
return self._create_value_node('hex64')
def float(self):
return self._create_value_node('float')
def string(self):
t_first = self.tokens.accept('string')
if t_first is None:
return None
values = [t_first.value]
while True:
t = self.tokens.accept('string')
if t is None:
break
values.append(t.value)
return ''.join(values)
def _create_value_node(self, tokentype):
t = self.tokens.accept(tokentype)
if t is None:
return None
return t.value
def _parse_any_of(self, nonterminals):
for fun in nonterminals:
result = fun()
if result is not None:
return result
return None
def _comma_separated_list_or_empty(self, nonterminal):
values = []
first = True
while True:
v = nonterminal()
if v is None:
if first:
return []
else:
self.tokens.error("expected value after ','")
values.append(v)
if not self.tokens.accept(','):
return values
first = False
def _enclosed_block(self, start, nonterminal, end):
if not self.tokens.accept(start):
return None
result = nonterminal()
self.tokens.expect(end)
return result
def load(f, filename=None, includedir=''):
'''Load the contents of ``f`` (a file-like object) to a Python object
The returned object is a subclass of ``dict`` that exposes string keys as
attributes as well.
Example:
>>> with open('test/example.cfg') as f:
... config = libconf.load(f)
>>> config['window']['title']
'libconfig example'
>>> config.window.title
'libconfig example'
'''
tokenstream = TokenStream.from_file(f,
filename=filename,
includedir=includedir)
return Parser(tokenstream).parse()
def loads(string, filename=None, includedir=''):
'''Load the contents of ``string`` to a Python object
The returned object is a subclass of ``dict`` that exposes string keys as
attributes as well.
Example:
>>> config = libconf.loads('window: { title: "libconfig example"; };')
>>> config['window']['title']
'libconfig example'
>>> config.window.title
'libconfig example'
'''
return load(io.StringIO(string),
filename=filename,
includedir=includedir)
def main():
'''Open the libconfig file specified by sys.argv[1] and pretty-print it'''
import pprint
global output
if len(sys.argv[1:]) == 1:
with io.open(sys.argv[1], 'r', encoding='utf-8') as f:
output = load(f)
else:
output = load(sys.stdin)
pprint.pprint(output)
if __name__ == '__main__':
main()
| [
"re.compile",
"re.match",
"os.path.join",
"io.open",
"re.sub",
"io.StringIO",
"pprint.pprint"
] | [((174, 334), 're.compile', 're.compile', (['"""\n ( \\\\\\\\x.. # 2-digit hex escapes\n | \\\\\\\\[\\\\\\\\\'"abfnrtv] # Single-character escapes\n )"""', '(re.UNICODE | re.VERBOSE)'], {}), '(\n """\n ( \\\\\\\\x.. # 2-digit hex escapes\n | \\\\\\\\[\\\\\\\\\'"abfnrtv] # Single-character escapes\n )"""\n , re.UNICODE | re.VERBOSE)\n', (184, 334), False, 'import re\n'), ((15097, 15118), 'pprint.pprint', 'pprint.pprint', (['output'], {}), '(output)\n', (15110, 15118), False, 'import pprint\n'), ((14685, 14704), 'io.StringIO', 'io.StringIO', (['string'], {}), '(string)\n', (14696, 14704), False, 'import io\n'), ((4168, 4227), 're.match', 're.match', (['"""\\\\s+|#.*$|//.*$|/\\\\*(.|\\\\n)*?\\\\*/"""', 'string', 're.M'], {}), "('\\\\s+|#.*$|//.*$|/\\\\*(.|\\\\n)*?\\\\*/', string, re.M)\n", (4176, 4227), False, 'import re\n'), ((14965, 15008), 'io.open', 'io.open', (['sys.argv[1]', '"""r"""'], {'encoding': '"""utf-8"""'}), "(sys.argv[1], 'r', encoding='utf-8')\n", (14972, 15008), False, 'import io\n'), ((4675, 4698), 're.match', 're.match', (['regex', 'string'], {}), '(regex, string)\n', (4683, 4698), False, 'import re\n'), ((6966, 7007), 'os.path.join', 'os.path.join', (['includedir', 'includefilename'], {}), '(includedir, includefilename)\n', (6978, 7007), False, 'import os\n'), ((6841, 6865), 're.sub', 're.sub', (['"""\\\\S"""', '""" """', 'line'], {}), "('\\\\S', ' ', line)\n", (6847, 6865), False, 'import re\n')] |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from base.StateTransitionHandler import StateTransitionHandler
import state_transition_test_utils
from Enums import EStateElementType, EStateTransitionType
from State import State
import RandomUtils
import StateTransition
## A test StateTransitionHandler that defers to the default StateTransitionHandler some of the time.
class PartialStateTransitionHandlerTest(StateTransitionHandler):
## Execute the State change represented by the StateElement. Only instances of the StateElement
# types for which the StateTransitionHandler has been registered will be passed to this method.
# Other StateTransitionHandlers will process the other StateElement types. It is important to
# avoid making changes to entities represented by StateElements that have already been
# processed. Changes to entities represented by StateElements that will be processed later are
# permitted.
#
# @param aStateElem A StateElement object.
def processStateElement(self, aStateElem):
processed = False
# Randomly decide whether to process the StateElement or defer to the default implementation
if RandomUtils.random32(0, 1) == 1:
(mem_block_ptr_index,) = self.getArbitraryGprs(1, aExclude=(0,))
self.initializeMemoryBlock(mem_block_ptr_index, (aStateElem,))
self.genInstruction('FLD##RISCV', {'rd': aStateElem.getRegisterIndex(), 'rs1': mem_block_ptr_index, 'simm12': 0, 'NoRestriction': 1})
processed = True
return processed
## This test verifies that a StateTransition handler can process some of the StateElements and defer
# to the default StateTransitionHandler for the remaining StateElements.
class MainSequence(Sequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mExpectedStateData = {}
def generate(self, **kargs):
state_trans_handler = PartialStateTransitionHandlerTest(self.genThread)
StateTransition.registerStateTransitionHandler(state_trans_handler, EStateTransitionType.Explicit, (EStateElementType.FloatingPointRegister,))
state = self._createState()
StateTransition.transitionToState(state)
state_transition_test_utils.verifyState(self, self._mExpectedStateData)
## Create a simple State to test an explicit StateTransition.
def _createState(self):
state = State()
self._mExpectedStateData[EStateElementType.FloatingPointRegister] = state_transition_test_utils.addRandomFloatingPointRegisterStateElements(self, state, RandomUtils.random32(0, 15))
return state
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| [
"StateTransition.transitionToState",
"StateTransition.registerStateTransitionHandler",
"RandomUtils.random32",
"state_transition_test_utils.verifyState",
"State.State"
] | [((2711, 2857), 'StateTransition.registerStateTransitionHandler', 'StateTransition.registerStateTransitionHandler', (['state_trans_handler', 'EStateTransitionType.Explicit', '(EStateElementType.FloatingPointRegister,)'], {}), '(state_trans_handler,\n EStateTransitionType.Explicit, (EStateElementType.FloatingPointRegister,))\n', (2757, 2857), False, 'import StateTransition\n'), ((2899, 2939), 'StateTransition.transitionToState', 'StateTransition.transitionToState', (['state'], {}), '(state)\n', (2932, 2939), False, 'import StateTransition\n'), ((2948, 3019), 'state_transition_test_utils.verifyState', 'state_transition_test_utils.verifyState', (['self', 'self._mExpectedStateData'], {}), '(self, self._mExpectedStateData)\n', (2987, 3019), False, 'import state_transition_test_utils\n'), ((3131, 3138), 'State.State', 'State', ([], {}), '()\n', (3136, 3138), False, 'from State import State\n'), ((1865, 1891), 'RandomUtils.random32', 'RandomUtils.random32', (['(0)', '(1)'], {}), '(0, 1)\n', (1885, 1891), False, 'import RandomUtils\n'), ((3300, 3327), 'RandomUtils.random32', 'RandomUtils.random32', (['(0)', '(15)'], {}), '(0, 15)\n', (3320, 3327), False, 'import RandomUtils\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 9 09:58:58 2021
@author: apauron
"""
import get_files_cluster ## To get the filename
import Compartments_SB3_cluster
"""
A pipeline for generating intrachromosomal compartments in the cluster.
Keyword arguments :
None
Returns :
None
"""
path_data_cluster = "/shared/projects/form_2021_21/trainers/dataforstudent/HiC/" ## Folder containing the data in cluster
a = get_files_cluster.getfiles(path_data_cluster,".RAWobserved") ## Get all raw data
list_files_intra = [] ## Will get all the filenames
list_resolutions = [] ## Will contain all resolutions
list_genes_density_files = [] ## Will contain all gene density files
for file in a :
if "intrachromosomal" in file :
list_files_intra.append(file) ## Get the file
## According to resolution append different resolutions
if "25kb_resolution" in file :
list_resolutions.append(25000)
density_path = "/shared/projects/form_2021_21/trainers/dataforstudent/genedensity/25kb/"
chr_name = file.split("/")[-1]
chr_name = chr_name.split("_")[0]
gdname = density_path + chr_name + ".hdf5" ## Get density file name
list_genes_density_files.append(gdname)
if "100kb_resolution" in file :
list_resolutions.append(100000)
density_path = "/shared/projects/form_2021_21/trainers/dataforstudent/genedensity/100kb/"
chr_name = file.split("/")[-1]
chr_name = chr_name.split("_")[0]
gdname = density_path + chr_name + ".hdf5" ## Get density file name
list_genes_density_files.append(gdname)
for (filetocomp,resolution,gdfile) in zip(list_files_intra,list_resolutions,list_genes_density_files) :
print(filetocomp)
print(resolution)
print(gdfile)
Compartments_SB3_cluster.pipeline_intra(resolution,filetocomp,gdfile) ## Call the main pipeline
| [
"Compartments_SB3_cluster.pipeline_intra",
"get_files_cluster.getfiles"
] | [((476, 537), 'get_files_cluster.getfiles', 'get_files_cluster.getfiles', (['path_data_cluster', '""".RAWobserved"""'], {}), "(path_data_cluster, '.RAWobserved')\n", (502, 537), False, 'import get_files_cluster\n'), ((1780, 1851), 'Compartments_SB3_cluster.pipeline_intra', 'Compartments_SB3_cluster.pipeline_intra', (['resolution', 'filetocomp', 'gdfile'], {}), '(resolution, filetocomp, gdfile)\n', (1819, 1851), False, 'import Compartments_SB3_cluster\n')] |
#!/usr/bin/env python
#
# Copyright 2007,2010,2011,2013,2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
import pmt
class test_ofdm_cyclic_prefixer (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_wo_tags_no_rolloff(self):
" The easiest test: make sure the CP is added correctly. "
fft_len = 8
cp_len = 2
expected_result = (6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
6, 7, 0, 1, 2, 3, 4, 5, 6, 7)
src = blocks.vector_source_c(range(fft_len) * 2, False, fft_len)
cp = digital.ofdm_cyclic_prefixer(fft_len, fft_len + cp_len)
sink = blocks.vector_sink_c()
self.tb.connect(src, cp, sink)
self.tb.run()
self.assertEqual(sink.data(), expected_result)
def test_wo_tags_2s_rolloff(self):
" No tags, but have a 2-sample rolloff "
fft_len = 8
cp_len = 2
rolloff = 2
expected_result = (7.0/2, 8, 1, 2, 3, 4, 5, 6, 7, 8, # 1.0/2
7.0/2+1.0/2, 8, 1, 2, 3, 4, 5, 6, 7, 8)
src = blocks.vector_source_c(range(1, fft_len+1) * 2, False, fft_len)
cp = digital.ofdm_cyclic_prefixer(fft_len, fft_len + cp_len, rolloff)
sink = blocks.vector_sink_c()
self.tb.connect(src, cp, sink)
self.tb.run()
self.assertEqual(sink.data(), expected_result)
def test_with_tags_2s_rolloff(self):
" With tags and a 2-sample rolloff "
fft_len = 8
cp_len = 2
tag_name = "ts_last"
expected_result = (7.0/2, 8, 1, 2, 3, 4, 5, 6, 7, 8, # 1.0/2
7.0/2+1.0/2, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1.0/2)
tag2 = gr.tag_t()
tag2.offset = 1
tag2.key = pmt.string_to_symbol("random_tag")
tag2.value = pmt.from_long(42)
src = blocks.vector_source_c(range(1, fft_len+1) * 2, False, fft_len, (tag2,))
cp = digital.ofdm_cyclic_prefixer(fft_len, fft_len + cp_len, 2, tag_name)
sink = blocks.tsb_vector_sink_c(tsb_key=tag_name)
self.tb.connect(src, blocks.stream_to_tagged_stream(gr.sizeof_gr_complex, fft_len, 2, tag_name), cp, sink)
self.tb.run()
self.assertEqual(sink.data()[0], expected_result)
tags = [gr.tag_to_python(x) for x in sink.tags()]
tags = sorted([(x.offset, x.key, x.value) for x in tags])
expected_tags = [
(fft_len+cp_len, "random_tag", 42)
]
self.assertEqual(tags, expected_tags)
if __name__ == '__main__':
gr_unittest.run(test_ofdm_cyclic_prefixer, "test_ofdm_cyclic_prefixer.xml")
| [
"gnuradio.gr.top_block",
"pmt.from_long",
"gnuradio.blocks.tsb_vector_sink_c",
"gnuradio.gr.tag_t",
"gnuradio.gr_unittest.run",
"gnuradio.digital.ofdm_cyclic_prefixer",
"gnuradio.gr.tag_to_python",
"gnuradio.blocks.vector_sink_c",
"gnuradio.blocks.stream_to_tagged_stream",
"pmt.string_to_symbol"
] | [((3369, 3444), 'gnuradio.gr_unittest.run', 'gr_unittest.run', (['test_ofdm_cyclic_prefixer', '"""test_ofdm_cyclic_prefixer.xml"""'], {}), "(test_ofdm_cyclic_prefixer, 'test_ofdm_cyclic_prefixer.xml')\n", (3384, 3444), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((993, 1007), 'gnuradio.gr.top_block', 'gr.top_block', ([], {}), '()\n', (1005, 1007), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((1404, 1459), 'gnuradio.digital.ofdm_cyclic_prefixer', 'digital.ofdm_cyclic_prefixer', (['fft_len', '(fft_len + cp_len)'], {}), '(fft_len, fft_len + cp_len)\n', (1432, 1459), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((1475, 1497), 'gnuradio.blocks.vector_sink_c', 'blocks.vector_sink_c', ([], {}), '()\n', (1495, 1497), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((1995, 2059), 'gnuradio.digital.ofdm_cyclic_prefixer', 'digital.ofdm_cyclic_prefixer', (['fft_len', '(fft_len + cp_len)', 'rolloff'], {}), '(fft_len, fft_len + cp_len, rolloff)\n', (2023, 2059), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((2075, 2097), 'gnuradio.blocks.vector_sink_c', 'blocks.vector_sink_c', ([], {}), '()\n', (2095, 2097), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((2533, 2543), 'gnuradio.gr.tag_t', 'gr.tag_t', ([], {}), '()\n', (2541, 2543), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((2587, 2621), 'pmt.string_to_symbol', 'pmt.string_to_symbol', (['"""random_tag"""'], {}), "('random_tag')\n", (2607, 2621), False, 'import pmt\n'), ((2643, 2660), 'pmt.from_long', 'pmt.from_long', (['(42)'], {}), '(42)\n', (2656, 2660), False, 'import pmt\n'), ((2761, 2829), 'gnuradio.digital.ofdm_cyclic_prefixer', 'digital.ofdm_cyclic_prefixer', (['fft_len', '(fft_len + cp_len)', '(2)', 'tag_name'], {}), '(fft_len, fft_len + cp_len, 2, tag_name)\n', (2789, 2829), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((2845, 2887), 'gnuradio.blocks.tsb_vector_sink_c', 'blocks.tsb_vector_sink_c', ([], {'tsb_key': 'tag_name'}), '(tsb_key=tag_name)\n', (2869, 2887), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((2917, 2991), 'gnuradio.blocks.stream_to_tagged_stream', 'blocks.stream_to_tagged_stream', (['gr.sizeof_gr_complex', 'fft_len', '(2)', 'tag_name'], {}), '(gr.sizeof_gr_complex, fft_len, 2, tag_name)\n', (2947, 2991), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n'), ((3099, 3118), 'gnuradio.gr.tag_to_python', 'gr.tag_to_python', (['x'], {}), '(x)\n', (3115, 3118), False, 'from gnuradio import gr, gr_unittest, digital, blocks\n')] |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import testtools
from mock import MagicMock
from trove.common.context import TroveContext
from trove.guestagent import volume
from trove.guestagent.datastore.mongodb import service as mongo_service
from trove.guestagent.datastore.mongodb import manager as mongo_manager
from trove.guestagent.volume import VolumeDevice
class GuestAgentMongoDBManagerTest(testtools.TestCase):
def setUp(self):
super(GuestAgentMongoDBManagerTest, self).setUp()
self.context = TroveContext()
self.manager = mongo_manager.Manager()
self.origin_MongoDbAppStatus = mongo_service.MongoDbAppStatus
self.origin_os_path_exists = os.path.exists
self.origin_format = volume.VolumeDevice.format
self.origin_migrate_data = volume.VolumeDevice.migrate_data
self.origin_mount = volume.VolumeDevice.mount
self.origin_stop_db = mongo_service.MongoDBApp.stop_db
self.origin_start_db = mongo_service.MongoDBApp.start_db
def tearDown(self):
super(GuestAgentMongoDBManagerTest, self).tearDown()
mongo_service.MongoDbAppStatus = self.origin_MongoDbAppStatus
os.path.exists = self.origin_os_path_exists
volume.VolumeDevice.format = self.origin_format
volume.VolumeDevice.migrate_data = self.origin_migrate_data
volume.VolumeDevice.mount = self.origin_mount
mongo_service.MongoDBApp.stop_db = self.origin_stop_db
mongo_service.MongoDBApp.start_db = self.origin_start_db
def test_update_status(self):
self.manager.status = MagicMock()
self.manager.update_status(self.context)
self.manager.status.update.assert_any_call()
def test_prepare_from_backup(self):
self._prepare_dynamic(backup_id='backup_id_123abc')
def _prepare_dynamic(self, device_path='/dev/vdb', is_db_installed=True,
backup_id=None):
# covering all outcomes is starting to cause trouble here
backup_info = {'id': backup_id,
'location': 'fake-location',
'type': 'MongoDBDump',
'checksum': 'fake-checksum'} if backup_id else None
mock_status = MagicMock()
mock_app = MagicMock()
self.manager.status = mock_status
self.manager.app = mock_app
mock_status.begin_install = MagicMock(return_value=None)
volume.VolumeDevice.format = MagicMock(return_value=None)
volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
volume.VolumeDevice.mount = MagicMock(return_value=None)
mock_app.stop_db = MagicMock(return_value=None)
mock_app.start_db = MagicMock(return_value=None)
mock_app.clear_storage = MagicMock(return_value=None)
os.path.exists = MagicMock(return_value=is_db_installed)
# invocation
self.manager.prepare(context=self.context, databases=None,
packages=['package'],
memory_mb='2048', users=None,
device_path=device_path,
mount_point='/var/lib/mongodb',
backup_info=backup_info)
# verification/assertion
mock_status.begin_install.assert_any_call()
mock_app.install_if_needed.assert_any_call(['package'])
mock_app.stop_db.assert_any_call()
VolumeDevice.format.assert_any_call()
VolumeDevice.migrate_data.assert_any_call('/var/lib/mongodb')
| [
"trove.guestagent.volume.VolumeDevice.format.assert_any_call",
"trove.guestagent.volume.VolumeDevice.migrate_data.assert_any_call",
"trove.guestagent.datastore.mongodb.manager.Manager",
"trove.common.context.TroveContext",
"mock.MagicMock"
] | [((1107, 1121), 'trove.common.context.TroveContext', 'TroveContext', ([], {}), '()\n', (1119, 1121), False, 'from trove.common.context import TroveContext\n'), ((1145, 1168), 'trove.guestagent.datastore.mongodb.manager.Manager', 'mongo_manager.Manager', ([], {}), '()\n', (1166, 1168), True, 'from trove.guestagent.datastore.mongodb import manager as mongo_manager\n'), ((2176, 2187), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2185, 2187), False, 'from mock import MagicMock\n'), ((2814, 2825), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2823, 2825), False, 'from mock import MagicMock\n'), ((2845, 2856), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2854, 2856), False, 'from mock import MagicMock\n'), ((2972, 3000), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (2981, 3000), False, 'from mock import MagicMock\n'), ((3038, 3066), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3047, 3066), False, 'from mock import MagicMock\n'), ((3110, 3138), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3119, 3138), False, 'from mock import MagicMock\n'), ((3175, 3203), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3184, 3203), False, 'from mock import MagicMock\n'), ((3232, 3260), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3241, 3260), False, 'from mock import MagicMock\n'), ((3289, 3317), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3298, 3317), False, 'from mock import MagicMock\n'), ((3351, 3379), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (3360, 3379), False, 'from mock import MagicMock\n'), ((3405, 3444), 'mock.MagicMock', 'MagicMock', ([], {'return_value': 'is_db_installed'}), '(return_value=is_db_installed)\n', (3414, 3444), False, 'from mock import MagicMock\n'), ((4014, 4051), 'trove.guestagent.volume.VolumeDevice.format.assert_any_call', 'VolumeDevice.format.assert_any_call', ([], {}), '()\n', (4049, 4051), False, 'from trove.guestagent.volume import VolumeDevice\n'), ((4060, 4121), 'trove.guestagent.volume.VolumeDevice.migrate_data.assert_any_call', 'VolumeDevice.migrate_data.assert_any_call', (['"""/var/lib/mongodb"""'], {}), "('/var/lib/mongodb')\n", (4101, 4121), False, 'from trove.guestagent.volume import VolumeDevice\n')] |
from __future__ import annotations
import subprocess
import pytest
from conftest import CustomTOMLFile
@pytest.mark.parametrize("command", [["update"], ["types", "update"]])
def test_update(command: list[str], toml_file: CustomTOMLFile):
content = toml_file.poetry
content["dependencies"].add("requests", "^2.27.1")
del content["dependencies"]["colorama"]
toml_file.write_poetry(content)
subprocess.run(["python", "-m", "poetry", *command])
assert "types-colorama" not in toml_file.poetry["group"]["types"]["dependencies"]
assert "types-requests" in toml_file.poetry["group"]["types"]["dependencies"]
@pytest.mark.parametrize("command", [["add", "requests"], ["types", "add", "requests"]])
def test_add(command: list[str], toml_file: CustomTOMLFile):
subprocess.run(["python", "-m", "poetry", *command])
assert "types-requests" in toml_file.poetry["group"]["types"]["dependencies"]
@pytest.mark.parametrize(
"command", [["remove", "colorama"], ["types", "remove", "colorama"]]
)
def test_remove(command: list[str], toml_file: CustomTOMLFile):
subprocess.run(["python", "-m", "poetry", *command])
assert "types-colorama" not in toml_file.poetry["group"]["types"]["dependencies"]
| [
"pytest.mark.parametrize",
"subprocess.run"
] | [((108, 177), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""command"""', "[['update'], ['types', 'update']]"], {}), "('command', [['update'], ['types', 'update']])\n", (131, 177), False, 'import pytest\n'), ((636, 727), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""command"""', "[['add', 'requests'], ['types', 'add', 'requests']]"], {}), "('command', [['add', 'requests'], ['types', 'add',\n 'requests']])\n", (659, 727), False, 'import pytest\n'), ((927, 1024), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""command"""', "[['remove', 'colorama'], ['types', 'remove', 'colorama']]"], {}), "('command', [['remove', 'colorama'], ['types',\n 'remove', 'colorama']])\n", (950, 1024), False, 'import pytest\n'), ((412, 464), 'subprocess.run', 'subprocess.run', (["['python', '-m', 'poetry', *command]"], {}), "(['python', '-m', 'poetry', *command])\n", (426, 464), False, 'import subprocess\n'), ((789, 841), 'subprocess.run', 'subprocess.run', (["['python', '-m', 'poetry', *command]"], {}), "(['python', '-m', 'poetry', *command])\n", (803, 841), False, 'import subprocess\n'), ((1095, 1147), 'subprocess.run', 'subprocess.run', (["['python', '-m', 'poetry', *command]"], {}), "(['python', '-m', 'poetry', *command])\n", (1109, 1147), False, 'import subprocess\n')] |
'''
Copyright (c) 2018 Modul 9/HiFiBerry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from time import sleep
from threading import Thread
from ac2.metadata import Metadata
class DummyMetadataCreator(Thread):
"""
A class just use for development. It creates dummy metadata records and
send it to the given MetadataDisplay objects
"""
def __init__(self, display=None, interval=10):
super().__init__()
self.stop = False
self.interval = interval
self.display = display
def run(self):
import random
covers = ["https://images-na.ssl-images-amazon.com/images/I/81R6Jcf5eoL._SL1500_.jpg",
"https://townsquare.media/site/443/files/2013/03/92rage.jpg?w=980&q=75",
"file://unknown.png",
None,
None,
None,
None
]
songs = [
["Madonna", "Like a Virgin"],
["Rammstein", "Mutter"],
["Iggy Pop", "<NAME>"],
["Porcupine Tree", "Normal"],
["Clinton Shorter", "Truth"],
["<NAME>", "The River"],
["Plan B", "Kidz"],
["The Spooks", "Things I've seen"],
["Aldous Harding", "The Barrel"]
]
states = ["playing", "paused", "stopped"]
while not(self.stop):
coverindex = random.randrange(len(covers))
songindex = random.randrange(len(songs))
stateindex = random.randrange(len(states))
md = Metadata(artist=songs[songindex][0],
title=songs[songindex][1],
artUrl=covers[coverindex],
playerName="dummy",
playerState=states[stateindex])
if self.display is not None:
self.display.notify(md)
sleep(self.interval)
def stop(self):
self.stop = True
| [
"ac2.metadata.Metadata",
"time.sleep"
] | [((2541, 2688), 'ac2.metadata.Metadata', 'Metadata', ([], {'artist': 'songs[songindex][0]', 'title': 'songs[songindex][1]', 'artUrl': 'covers[coverindex]', 'playerName': '"""dummy"""', 'playerState': 'states[stateindex]'}), "(artist=songs[songindex][0], title=songs[songindex][1], artUrl=\n covers[coverindex], playerName='dummy', playerState=states[stateindex])\n", (2549, 2688), False, 'from ac2.metadata import Metadata\n'), ((2882, 2902), 'time.sleep', 'sleep', (['self.interval'], {}), '(self.interval)\n', (2887, 2902), False, 'from time import sleep\n')] |
import numpy as np
import open3d as o3d
import pickle
import torch
import ipdb
st = ipdb.set_trace
def apply_4x4(RT, xyz):
B, N, _ = list(xyz.shape)
ones = torch.ones_like(xyz[:,:,0:1])
xyz1 = torch.cat([xyz, ones], 2)
xyz1_t = torch.transpose(xyz1, 1, 2)
# this is B x 4 x N
xyz2_t = torch.matmul(RT, xyz1_t)
xyz2 = torch.transpose(xyz2_t, 1, 2)
xyz2 = xyz2[:,:,:3]
return xyz2
def make_pcd(pts):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pts[:, :3])
# if the dim is greater than 3 I expect the color
if pts.shape[1] == 6:
pcd.colors = o3d.utility.Vector3dVector(pts[:, 3:] / 255.\
if pts[:, 3:].max() > 1. else pts[:, 3:])
return pcd
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1, origin=[0, 0, 0])
pcd_list = [mesh_frame]
# for i in range(10):
# path = f"/Users/shamitlal/Desktop/temp/convocc/pointcloud_0{i}.npz"
# pcd = np.load(path)['points']
# st()
# pcd = make_pcd(pcd)
# pcd_list.append(pcd)
path = f"/Users/shamitlal/Desktop/temp/convocc/pointcloud.npz"
pcd = np.load(path)['points']
print("max: ", np.max(pcd, axis=0))
print("min: ", np.min(pcd, axis=0))
pcd = make_pcd(pcd)
pcd_list.append(pcd)
print("Pcd list len is: ", len(pcd_list))
o3d.visualization.draw_geometries(pcd_list)
# Visualize inside points
# path = f"/Users/shamitlal/Desktop/temp/convocc/points.npz"
# pcd = np.load(path)
# occ = np.unpackbits(pcd['occupancies'])
# pcd = pcd['points']
# occ_pts_idx = np.where(occ==1)[0]
# pcd = pcd[occ_pts_idx]
# print("max: ", np.max(pcd, axis=0))
# print("min: ", np.min(pcd, axis=0))
# pcd = make_pcd(pcd)
# pcd_list.append(pcd)
# # Visualize actual pointcloud
# path = f"/Users/shamitlal/Desktop/temp/convocc/pointcloud.npz"
# pcd = np.load(path)['points']
# print("max: ", np.max(pcd, axis=0))
# print("min: ", np.min(pcd, axis=0))
# pcd = make_pcd(pcd)
# pcd_list.append(pcd)
# print("Pcd list len is: ", len(pcd_list))
o3d.visualization.draw_geometries(pcd_list)
#Visualize pydisco shapenet data
# path = f"/Users/shamitlal/Desktop/temp/convocc/02958343_c48a804986a819b4bda733a39f84326d.p"
# pfile = pickle.load(open(path, "rb"))
# xyz_camXs = torch.tensor(pfile['xyz_camXs_raw'])
# origin_T_camXs = torch.tensor(pfile['origin_T_camXs_raw'])
# xyz_origin = apply_4x4(origin_T_camXs, xyz_camXs)
# pcd = xyz_origin.reshape(-1, 3)
# x, y, z = torch.abs(pcd[:,0]), torch.abs(pcd[:,1]), torch.abs(pcd[:,2])
# cond1 = (x<10)
# cond2 = (y<10)
# cond3 = (z<10)
# cond = cond1 & cond2 & cond3
# pcd = pcd[cond]
# pcd_list.append(make_pcd(pcd))
# o3d.visualization.draw_geometries(pcd_list)
# st()
# aa=1 | [
"torch.ones_like",
"torch.transpose",
"numpy.max",
"open3d.utility.Vector3dVector",
"open3d.visualization.draw_geometries",
"torch.matmul",
"open3d.geometry.PointCloud",
"numpy.min",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"numpy.load",
"torch.cat"
] | [((760, 835), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(1)', 'origin': '[0, 0, 0]'}), '(size=1, origin=[0, 0, 0])\n', (809, 835), True, 'import open3d as o3d\n'), ((1315, 1358), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['pcd_list'], {}), '(pcd_list)\n', (1348, 1358), True, 'import open3d as o3d\n'), ((2012, 2055), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['pcd_list'], {}), '(pcd_list)\n', (2045, 2055), True, 'import open3d as o3d\n'), ((167, 198), 'torch.ones_like', 'torch.ones_like', (['xyz[:, :, 0:1]'], {}), '(xyz[:, :, 0:1])\n', (182, 198), False, 'import torch\n'), ((208, 233), 'torch.cat', 'torch.cat', (['[xyz, ones]', '(2)'], {}), '([xyz, ones], 2)\n', (217, 233), False, 'import torch\n'), ((247, 274), 'torch.transpose', 'torch.transpose', (['xyz1', '(1)', '(2)'], {}), '(xyz1, 1, 2)\n', (262, 274), False, 'import torch\n'), ((312, 336), 'torch.matmul', 'torch.matmul', (['RT', 'xyz1_t'], {}), '(RT, xyz1_t)\n', (324, 336), False, 'import torch\n'), ((348, 377), 'torch.transpose', 'torch.transpose', (['xyz2_t', '(1)', '(2)'], {}), '(xyz2_t, 1, 2)\n', (363, 377), False, 'import torch\n'), ((448, 473), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (471, 473), True, 'import open3d as o3d\n'), ((491, 529), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pts[:, :3]'], {}), '(pts[:, :3])\n', (517, 529), True, 'import open3d as o3d\n'), ((1136, 1149), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1143, 1149), True, 'import numpy as np\n'), ((1175, 1194), 'numpy.max', 'np.max', (['pcd'], {'axis': '(0)'}), '(pcd, axis=0)\n', (1181, 1194), True, 'import numpy as np\n'), ((1211, 1230), 'numpy.min', 'np.min', (['pcd'], {'axis': '(0)'}), '(pcd, axis=0)\n', (1217, 1230), True, 'import numpy as np\n')] |
"""
Helper functions for image processing
The color space conversion functions are modified from functions of the
Python package scikit-image, https://github.com/scikit-image/scikit-image.
scikit-image has the following license.
Copyright (C) 2019, the scikit-image team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of skimage nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from PIL import Image
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import pairwise_distances
import requests
from io import BytesIO
import os
from dotenv import load_dotenv
load_dotenv()
sls_stage = os.getenv("SLS_STAGE")
if sls_stage == 'local':
import plotly.graph_objects as go
default_k = 4
xyz_ref_white = np.asarray((0.95047, 1.0, 1.08883))
xyz_from_rgb = np.array(
[
[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227],
]
)
rgb_from_xyz = np.linalg.inv(xyz_from_rgb)
def rgb2xyz(rgb_arr):
"""
Convert colur from RGB to CIE 1931 XYZ
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
------
xyz_arr: ndarray
Color in CIE 1931 XYZ
"""
xyz_arr = np.copy(rgb_arr)
mask = xyz_arr > 0.04045
xyz_arr[mask] = np.power((xyz_arr[mask] + 0.055) / 1.055, 2.4)
xyz_arr[~mask] /= 12.92
return xyz_arr @ np.transpose(xyz_from_rgb)
def xyz2lab(xyz_arr):
"""
Convert colur from CIE 1931 XYZ to CIE 1976 L*a*b*
Parameters
----------
xyz_arr: ndarray
Color in CIE 1931 XYZ
Returns
------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
lab_arr = np.copy(xyz_arr) / xyz_ref_white
mask = lab_arr > 0.008856
lab_arr[mask] = np.cbrt(lab_arr[mask])
lab_arr[~mask] = 7.787 * lab_arr[~mask] + 16.0 / 116.0
x, y, z = lab_arr[:, 0], lab_arr[:, 1], lab_arr[:, 2]
L = (116.0 * y) - 16.0
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return np.transpose(np.asarray((L, a, b)))
def lab2xyz(lab_arr):
"""
Convert colur from CIE 1976 L*a*b* to CIE 1931 XYZ
Parameters
----------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
Returns
------
xyz_arr: ndarray
Color in CIE 1931 XYZ
"""
L, a, b = lab_arr[:, 0], lab_arr[:, 1], lab_arr[:, 2]
y = (L + 16.0) / 116.0
x = (a / 500.0) + y
z = y - (b / 200.0)
if np.any(z < 0):
invalid = np.nonzero(z < 0)
warn(
"Color data out of range: Z < 0 in %s pixels" % invalid[0].size,
stacklevel=2,
)
z[invalid] = 0
xyz_arr = np.transpose(np.asarray((x, y, z)))
mask = xyz_arr > 0.2068966
xyz_arr[mask] = np.power(xyz_arr[mask], 3.0)
xyz_arr[~mask] = (xyz_arr[~mask] - 16.0 / 116.0) / 7.787
# rescale to the reference white (illuminant)
xyz_arr *= xyz_ref_white
return xyz_arr
def xyz2rgb(xyz_arr):
"""
Convert colur from CIE 1931 XYZ to RGB
Parameters
----------
xyz_arr: ndarray
Color in CIE 1931 XYZ
Returns
------
rgb_arr: ndarray
Color in RGB
"""
rgb_arr = xyz_arr @ np.transpose(rgb_from_xyz)
mask = rgb_arr > 0.0031308
rgb_arr[mask] = 1.055 * np.power(rgb_arr[mask], 1 / 2.4) - 0.055
rgb_arr[~mask] *= 12.92
rgb_arr = np.clip(rgb_arr, 0, 1)
return rgb_arr
def rgb2lab(rgb_arr):
"""
Convert colur from RGB to CIE 1976 L*a*b*
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
-------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
return xyz2lab(rgb2xyz(rgb_arr))
def lab2rgb(lab_arr):
"""
Convert colur from CIE 1976 L*a*b* to RGB
Parameters
----------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
Returns
------
rgb_arr: ndarray
Color in RGB
"""
return xyz2rgb(lab2xyz(lab_arr))
def get_lab_data(im):
"""
Convert colur from CIE 1976 L*a*b* to RGB
Parameters
----------
im: Image
Image to create palette
Returns
------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
img_size = 150, 150
im.thumbnail(img_size)
pixel_rgb = np.asarray(im)
# Range of RGB in Pillow is [0, 255], that in skimage is [0, 1]
pixel_lab = rgb2lab(pixel_rgb.reshape(-1, pixel_rgb.shape[-1]) / 255)
return pixel_lab.reshape(-1, pixel_lab.shape[-1])
def make_img(colors, counts):
"""
Create image from colors
Parameters
----------
colors: ndarray
Color in RGB
counts: ndarray
Number of data points in each color cluster
Returns
------
img: Image
Generated image
"""
img_size = 512, 512
n_clusters = len(colors)
lengths = (
((counts / np.sum(counts)) + (1.0 / n_clusters)) / 2.0 * img_size[0]
).astype(np.uint16)
# Ensure sum of lengths equals img_size[0]
lengths[0] = lengths[0] + (img_size[0] - np.sum(lengths))
pixel_group = np.array(
[np.tile(colors[i], (lengths[i], img_size[1], 1)) for i in range(n_clusters)]
)
pixel_rgb = np.transpose(np.concatenate(pixel_group), (1, 0, 2))
return Image.fromarray(pixel_rgb, mode="RGB")
def get_hex_string(rgb_arr):
"""
Covert RGB color to HEX values
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
------
hex_names: str
HEX values of color
"""
def int2hex(integer):
hex_string = hex(integer)[2:]
if len(hex_string) < 2:
return "0" + hex_string
return hex_string
return "".join(np.vectorize(int2hex)(rgb_arr)).upper()
def cluster_kmeans(data, n_clusters):
"""
Partition data with k-means clustering
Parameters
----------
data: ndarray
Data points
n_clusters: int
Number of clusters
Returns
------
centers: ndarray
Clusters centers
labels: ndarray
Center label of every data point
"""
kmeans = KMeans(n_clusters)
labels = kmeans.fit_predict(data)
centers = kmeans.cluster_centers_
return centers, labels
def compute_medoid(data):
"""
Get medoid of data
Parameters
----------
data: ndarray
Data points
Returns
------
medoid: ndarray
Medoid
"""
dist_mat = pairwise_distances(data)
return data[np.argmin(dist_mat.sum(axis=0))]
def cluster_agglo(data, n_clusters):
"""
Partition data with agglomerative clustering
Parameters
----------
data: ndarray
Data points
n_clusters: int
Number of clusters
Returns
------
centers: ndarray
Clusters centers
labels: ndarray
Center label of every data point
"""
ac = AgglomerativeClustering(n_clusters)
labels = ac.fit_predict(data)
print("Completed agglomerative clustering")
centers = np.empty([n_clusters, 3])
for i in range(n_clusters):
centers[i] = compute_medoid(data[labels == i])
return centers, labels
def get_cluster(centers, labels):
"""
Sort cluster centers and count number of labels
Parameters
----------
centers: ndarray
Clusters centers
labels: ndarray
Center label of every data point
Returns
------
sort_centers: ndarray
Clusters centers sorted by number of label descending
sort_labels: ndarray
Sorted center label of every data point
sort_counts: ndarray
Number of data points of sorted centers
"""
_, counts = np.unique(labels, return_counts=True)
sort_idx = (-counts).argsort()
sort_labels = np.vectorize(lambda i: list(sort_idx).index(i))(labels)
return centers[sort_idx], sort_labels, counts[sort_idx]
def get_palette(im, k):
"""
Create a palette from an image
Parameters
----------
im: Image
Image to create palette from
k: int
Number of pallete colors
If None or k is outside of the range [2, 10], uses default_k as k
Returns
------
im_output: Image
Image of palette colors
hex_colors: ndarray
Palette colors in HEX values
"""
if k is None:
k = default_k
elif k < 2 or k > 10:
k = default_k
data = get_lab_data(im)
print("Get {} clusters".format(k))
centers, labels = cluster_agglo(data, k)
sorted_centers, _, counts = get_cluster(centers, labels)
# Range of RGB in Pillow is [0, 255], that in skimage is [0, 1]
centers_rgb = (255 * lab2rgb(sorted_centers)).astype(np.uint8)
print("Clusters are")
print(centers_rgb)
return (
make_img(centers_rgb, counts),
np.apply_along_axis(get_hex_string, 1, centers_rgb),
)
def get_palette_plot(im, k):
"""
Create a palette from an image and plot clusters in 3D
Parameters
----------
img: Image
Image to create palette.
k: int
Number of pallete colors.
If None or k is outside of the range [2, 10], uses default_k as k
Returns
------
im_output: Image
Image of palette colors
hex_colors: ndarray
Palette colors in HEX values
"""
if k is None:
k = default_k
elif k < 2 or k > 10:
k = default_k
data = get_lab_data(im)
print("Get {} clusters".format(k))
centers, labels = cluster_agglo(data, k)
sorted_centers, sorted_labels, counts = get_cluster(centers, labels)
# Range of RGB in Pillow is [0, 255], that in skimage is [0, 1]
centers_rgb = (255 * lab2rgb(sorted_centers)).astype(np.uint8)
print("Clusters in RGB are")
print(centers_rgb)
centers_hex = np.apply_along_axis(get_hex_string, 1, centers_rgb)
plot_3d(data, sorted_labels, centers_hex)
return (
make_img(centers_rgb, counts),
centers_hex,
)
def plot_3d(data, labels, centers_hex):
"""
Plot clustered data in 3D
Parameters
----------
data: ndarray
Data points
labels: ndarray
Labels of every data point
centers_hex: ndarray
Color in HEX values
"""
l, a, b = np.transpose(data)
fig = go.Figure(
data=[
go.Scatter3d(
x=a,
y=b,
z=l,
mode="markers",
marker={
"size": 3,
"color": np.vectorize(lambda hex: "#" + hex)(centers_hex)[labels],
"opacity": 0.1,
},
)
]
)
fig.update_layout(
scene={"xaxis_title": "a", "yaxis_title": "b", "zaxis_title": "L",}
)
fig.show()
def get_image_from_url(url):
"""
Download and create image
Parameters
----------
url: str
Image link
Returns
------
im: Image
Image downloaded
"""
print("Get image from ", url)
response = requests.get(url)
return Image.open(BytesIO(response.content))
| [
"numpy.clip",
"io.BytesIO",
"numpy.array",
"sklearn.cluster.AgglomerativeClustering",
"numpy.asarray",
"dotenv.load_dotenv",
"numpy.empty",
"numpy.concatenate",
"numpy.tile",
"numpy.any",
"requests.get",
"numpy.nonzero",
"numpy.transpose",
"numpy.vectorize",
"sklearn.cluster.KMeans",
"... | [((1918, 1931), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1929, 1931), False, 'from dotenv import load_dotenv\n'), ((1945, 1967), 'os.getenv', 'os.getenv', (['"""SLS_STAGE"""'], {}), "('SLS_STAGE')\n", (1954, 1967), False, 'import os\n'), ((2064, 2099), 'numpy.asarray', 'np.asarray', (['(0.95047, 1.0, 1.08883)'], {}), '((0.95047, 1.0, 1.08883))\n', (2074, 2099), True, 'import numpy as np\n'), ((2116, 2225), 'numpy.array', 'np.array', (['[[0.412453, 0.35758, 0.180423], [0.212671, 0.71516, 0.072169], [0.019334, \n 0.119193, 0.950227]]'], {}), '([[0.412453, 0.35758, 0.180423], [0.212671, 0.71516, 0.072169], [\n 0.019334, 0.119193, 0.950227]])\n', (2124, 2225), True, 'import numpy as np\n'), ((2276, 2303), 'numpy.linalg.inv', 'np.linalg.inv', (['xyz_from_rgb'], {}), '(xyz_from_rgb)\n', (2289, 2303), True, 'import numpy as np\n'), ((2558, 2574), 'numpy.copy', 'np.copy', (['rgb_arr'], {}), '(rgb_arr)\n', (2565, 2574), True, 'import numpy as np\n'), ((2624, 2670), 'numpy.power', 'np.power', (['((xyz_arr[mask] + 0.055) / 1.055)', '(2.4)'], {}), '((xyz_arr[mask] + 0.055) / 1.055, 2.4)\n', (2632, 2670), True, 'import numpy as np\n'), ((3108, 3130), 'numpy.cbrt', 'np.cbrt', (['lab_arr[mask]'], {}), '(lab_arr[mask])\n', (3115, 3130), True, 'import numpy as np\n'), ((3774, 3787), 'numpy.any', 'np.any', (['(z < 0)'], {}), '(z < 0)\n', (3780, 3787), True, 'import numpy as np\n'), ((4076, 4104), 'numpy.power', 'np.power', (['xyz_arr[mask]', '(3.0)'], {}), '(xyz_arr[mask], 3.0)\n', (4084, 4104), True, 'import numpy as np\n'), ((4697, 4719), 'numpy.clip', 'np.clip', (['rgb_arr', '(0)', '(1)'], {}), '(rgb_arr, 0, 1)\n', (4704, 4719), True, 'import numpy as np\n'), ((5623, 5637), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (5633, 5637), True, 'import numpy as np\n'), ((6602, 6640), 'PIL.Image.fromarray', 'Image.fromarray', (['pixel_rgb'], {'mode': '"""RGB"""'}), "(pixel_rgb, mode='RGB')\n", (6617, 6640), False, 'from PIL import Image\n'), ((7454, 7472), 'sklearn.cluster.KMeans', 'KMeans', (['n_clusters'], {}), '(n_clusters)\n', (7460, 7472), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((7791, 7815), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['data'], {}), '(data)\n', (7809, 7815), False, 'from sklearn.metrics import pairwise_distances\n'), ((8230, 8265), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', (['n_clusters'], {}), '(n_clusters)\n', (8253, 8265), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((8362, 8387), 'numpy.empty', 'np.empty', (['[n_clusters, 3]'], {}), '([n_clusters, 3])\n', (8370, 8387), True, 'import numpy as np\n'), ((9023, 9060), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (9032, 9060), True, 'import numpy as np\n'), ((11152, 11203), 'numpy.apply_along_axis', 'np.apply_along_axis', (['get_hex_string', '(1)', 'centers_rgb'], {}), '(get_hex_string, 1, centers_rgb)\n', (11171, 11203), True, 'import numpy as np\n'), ((11614, 11632), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (11626, 11632), True, 'import numpy as np\n'), ((12399, 12416), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (12411, 12416), False, 'import requests\n'), ((2720, 2746), 'numpy.transpose', 'np.transpose', (['xyz_from_rgb'], {}), '(xyz_from_rgb)\n', (2732, 2746), True, 'import numpy as np\n'), ((3025, 3041), 'numpy.copy', 'np.copy', (['xyz_arr'], {}), '(xyz_arr)\n', (3032, 3041), True, 'import numpy as np\n'), ((3347, 3368), 'numpy.asarray', 'np.asarray', (['(L, a, b)'], {}), '((L, a, b))\n', (3357, 3368), True, 'import numpy as np\n'), ((3807, 3824), 'numpy.nonzero', 'np.nonzero', (['(z < 0)'], {}), '(z < 0)\n', (3817, 3824), True, 'import numpy as np\n'), ((4002, 4023), 'numpy.asarray', 'np.asarray', (['(x, y, z)'], {}), '((x, y, z))\n', (4012, 4023), True, 'import numpy as np\n'), ((4528, 4554), 'numpy.transpose', 'np.transpose', (['rgb_from_xyz'], {}), '(rgb_from_xyz)\n', (4540, 4554), True, 'import numpy as np\n'), ((6551, 6578), 'numpy.concatenate', 'np.concatenate', (['pixel_group'], {}), '(pixel_group)\n', (6565, 6578), True, 'import numpy as np\n'), ((10159, 10210), 'numpy.apply_along_axis', 'np.apply_along_axis', (['get_hex_string', '(1)', 'centers_rgb'], {}), '(get_hex_string, 1, centers_rgb)\n', (10178, 10210), True, 'import numpy as np\n'), ((12439, 12464), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (12446, 12464), False, 'from io import BytesIO\n'), ((4614, 4646), 'numpy.power', 'np.power', (['rgb_arr[mask]', '(1 / 2.4)'], {}), '(rgb_arr[mask], 1 / 2.4)\n', (4622, 4646), True, 'import numpy as np\n'), ((6385, 6400), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (6391, 6400), True, 'import numpy as np\n'), ((6439, 6487), 'numpy.tile', 'np.tile', (['colors[i]', '(lengths[i], img_size[1], 1)'], {}), '(colors[i], (lengths[i], img_size[1], 1))\n', (6446, 6487), True, 'import numpy as np\n'), ((7050, 7071), 'numpy.vectorize', 'np.vectorize', (['int2hex'], {}), '(int2hex)\n', (7062, 7071), True, 'import numpy as np\n'), ((6211, 6225), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (6217, 6225), True, 'import numpy as np\n'), ((11875, 11910), 'numpy.vectorize', 'np.vectorize', (["(lambda hex: '#' + hex)"], {}), "(lambda hex: '#' + hex)\n", (11887, 11910), True, 'import numpy as np\n')] |
"""
TopView is the main Widget with the related ControllerTopView Class
There are several SliceView windows (sagittal, coronal, possibly tilted etc...) that each have
a SliceController object
The underlying data model object is an ibllib.atlas.AllenAtlas object
TopView(QMainWindow)
ControllerTopView(PgImageController)
SliceView(QWidget)
SliceController(PgImageController)
"""
from dataclasses import dataclass, field
from pathlib import Path
import numpy as np
from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QTransform
import pyqtgraph as pg
import matplotlib
from ibllib.atlas import AllenAtlas
import qt
class TopView(QtWidgets.QMainWindow):
"""
Main Window of the application.
This is a top view of the brain with 2 movable lines allowing to select sagittal and coronal
slices.
"""
@staticmethod
def _instances():
app = QtWidgets.QApplication.instance()
return [w for w in app.topLevelWidgets() if isinstance(w, TopView)]
@staticmethod
def _get_or_create(title=None, **kwargs):
av = next(filter(lambda e: e.isVisible() and e.windowTitle() == title,
TopView._instances()), None)
if av is None:
av = TopView(**kwargs)
av.setWindowTitle(title)
return av
def __init__(self, **kwargs):
super(TopView, self).__init__()
self.ctrl = ControllerTopView(self, **kwargs)
self.ctrl.image_layers = [ImageLayer()]
uic.loadUi(Path(__file__).parent.joinpath('topview.ui'), self)
self.plotItem_topview.setAspectLocked(True)
self.plotItem_topview.addItem(self.ctrl.imageItem)
# setup one horizontal and one vertical line that can be moved
line_kwargs = {'movable': True, 'pen': pg.mkPen((0, 255, 0), width=3)}
self.line_coronal = pg.InfiniteLine(angle=0, pos=0, **line_kwargs)
self.line_sagittal = pg.InfiniteLine(angle=90, pos=0, **line_kwargs)
self.line_coronal.sigDragged.connect(self._refresh_coronal) # sigPositionChangeFinished
self.line_sagittal.sigDragged.connect(self._refresh_sagittal)
self.plotItem_topview.addItem(self.line_coronal)
self.plotItem_topview.addItem(self.line_sagittal)
# connect signals and slots: mouse moved
s = self.plotItem_topview.getViewBox().scene()
self.proxy = pg.SignalProxy(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)
# combobox for the atlas remapping choices
self.comboBox_mappings.addItems(self.ctrl.atlas.regions.mappings.keys())
self.comboBox_mappings.currentIndexChanged.connect(self._refresh)
# slider for transparency between image and labels
self.slider_alpha.sliderMoved.connect(self.slider_alpha_move)
self.ctrl.set_top()
def add_scatter_feature(self, data):
self.ctrl.scatter_data = data / 1e6
self.ctrl.scatter_data_ind = self.ctrl.atlas.bc.xyz2i(self.ctrl.scatter_data)
self.ctrl.fig_coronal.add_scatter()
self.ctrl.fig_sagittal.add_scatter()
self.line_coronal.sigDragged.connect(
lambda: self.ctrl.set_scatter(self.ctrl.fig_coronal, self.line_coronal.value()))
self.line_sagittal.sigDragged.connect(
lambda: self.ctrl.set_scatter(self.ctrl.fig_sagittal, self.line_sagittal.value()))
self.ctrl.set_scatter(self.ctrl.fig_coronal)
self.ctrl.set_scatter(self.ctrl.fig_sagittal)
def add_image_layer(self, **kwargs):
"""
:param pg_kwargs: pyqtgraph setImage arguments: {'levels': None, 'lut': None,
'opacity': 1.0}
:param slice_kwargs: ibllib.atlas.slice arguments: {'volume': 'image', 'mode': 'clip'}
:return:
"""
self.ctrl.fig_sagittal.add_image_layer(**kwargs)
self.ctrl.fig_coronal.add_image_layer(**kwargs)
def add_regions_feature(self, values, cmap, opacity=1.0):
self.ctrl.values = values
# creat cmap look up table
colormap = matplotlib.cm.get_cmap(cmap)
colormap._init()
lut = (colormap._lut * 255).view(np.ndarray)
lut = np.insert(lut, 0, [0, 0, 0, 0], axis=0)
self.add_image_layer(pg_kwargs={'lut': lut, 'opacity': opacity}, slice_kwargs={
'volume': 'value', 'region_values': values, 'mode': 'clip'})
self._refresh()
def slider_alpha_move(self):
annotation_alpha = self.slider_alpha.value() / 100
self.ctrl.fig_coronal.ctrl.image_layers[0].pg_kwargs['opacity'] = 1 - annotation_alpha
self.ctrl.fig_sagittal.ctrl.image_layers[0].pg_kwargs['opacity'] = 1 - annotation_alpha
self.ctrl.fig_coronal.ctrl.image_layers[1].pg_kwargs['opacity'] = annotation_alpha
self.ctrl.fig_sagittal.ctrl.image_layers[1].pg_kwargs['opacity'] = annotation_alpha
self._refresh()
def mouseMoveEvent(self, scenepos):
if isinstance(scenepos, tuple):
scenepos = scenepos[0]
else:
return
pass
# qpoint = self.imageItem.mapFromScene(scenepos)
def _refresh(self):
self._refresh_sagittal()
self._refresh_coronal()
def _refresh_coronal(self):
self.ctrl.set_slice(self.ctrl.fig_coronal, self.line_coronal.value(),
mapping=self.comboBox_mappings.currentText())
def _refresh_sagittal(self):
self.ctrl.set_slice(self.ctrl.fig_sagittal, self.line_sagittal.value(),
mapping=self.comboBox_mappings.currentText())
class SliceView(QtWidgets.QWidget):
"""
Window containing a volume slice
"""
def __init__(self, topview: TopView, waxis, haxis, daxis):
super(SliceView, self).__init__()
self.topview = topview
self.ctrl = SliceController(self, waxis, haxis, daxis)
uic.loadUi(Path(__file__).parent.joinpath('sliceview.ui'), self)
self.add_image_layer(slice_kwargs={'volume': 'image', 'mode': 'clip'},
pg_kwargs={'opacity': 0.8})
self.add_image_layer(slice_kwargs={'volume': 'annotation', 'mode': 'clip'},
pg_kwargs={'opacity': 0.2})
# init the image display
self.plotItem_slice.setAspectLocked(True)
# connect signals and slots
s = self.plotItem_slice.getViewBox().scene()
self.proxy = pg.SignalProxy(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)
s.sigMouseClicked.connect(self.mouseClick)
def add_scatter(self):
self.scatterItem = pg.ScatterPlotItem()
self.plotItem_slice.addItem(self.scatterItem)
def add_image_layer(self, **kwargs):
"""
:param pg_kwargs: pyqtgraph setImage arguments: {'levels': None, 'lut': None,
'opacity': 1.0}
:param slice_kwargs: ibllib.atlas.slice arguments: {'volume': 'image', 'mode': 'clip'}
:return:
"""
il = ImageLayer(**kwargs)
self.ctrl.image_layers.append(il)
self.plotItem_slice.addItem(il.image_item)
def closeEvent(self, event):
self.destroy()
def keyPressEvent(self, e):
pass
def mouseClick(self, event):
if not event.double():
return
def mouseMoveEvent(self, scenepos):
if isinstance(scenepos, tuple):
scenepos = scenepos[0]
else:
return
qpoint = self.ctrl.image_layers[0].image_item.mapFromScene(scenepos)
iw, ih, w, h, v, region = self.ctrl.cursor2xyamp(qpoint)
self.label_x.setText(f"{w:.4f}")
self.label_y.setText(f"{h:.4f}")
self.label_ix.setText(f"{iw:.0f}")
self.label_iy.setText(f"{ih:.0f}")
if isinstance(v, np.ndarray):
self.label_v.setText(str(v))
else:
self.label_v.setText(f"{v:.4f}")
if region is None:
self.label_region.setText("")
self.label_acronym.setText("")
else:
self.label_region.setText(region['name'][0])
self.label_acronym.setText(region['acronym'][0])
def replace_image_layer(self, index, **kwargs):
if index and len(self.imageItem) >= index:
il = self.image_layers.pop(index)
self.plotItem_slice.removeItem(il.image_item)
self.add_image_layer(**kwargs)
class PgImageController:
"""
Abstract class that implements mapping from axes to voxels for any window.
Not instantiated directly.
"""
def __init__(self, win, res=25):
self.qwidget = win
self.transform = None # affine transform image indices 2 data domain
self.image_layers = []
def cursor2xyamp(self, qpoint):
"""Used for the mouse hover function over image display"""
iw, ih = self.cursor2ind(qpoint)
v = self.im[iw, ih]
w, h, _ = np.matmul(self.transform, np.array([iw, ih, 1]))
return iw, ih, w, h, v
def cursor2ind(self, qpoint):
""" image coordinates over the image display"""
iw = np.max((0, np.min((int(np.floor(qpoint.x())), self.nw - 1))))
ih = np.max((0, np.min((int(np.round(qpoint.y())), self.nh - 1))))
return iw, ih
@property
def imageItem(self):
"""returns the first image item"""
return self.image_layers[0].image_item
def set_image(self, pg_image_item, im, dw, dh, w0, h0, **pg_kwargs):
"""
:param im:
:param dw:
:param dh:
:param w0:
:param h0:
:param pgkwargs: og.ImageItem.setImage() parameters: level=None, lut=None, opacity=1
:return:
"""
self.im = im
self.nw, self.nh = self.im.shape[0:2]
pg_image_item.setImage(self.im, **pg_kwargs)
transform = [dw, 0., 0., 0., dh, 0., w0, h0, 1.]
self.transform = np.array(transform).reshape((3, 3)).T
pg_image_item.setTransform(QTransform(*transform))
def set_points(self, x=None, y=None):
# at the moment brush and size are fixed! These need to be arguments
# For the colour need to convert the colour to QtGui.QColor
self.qwidget.scatterItem.setData(x=x, y=y, brush='b', size=5)
class ControllerTopView(PgImageController):
"""
TopView ControllerTopView
"""
def __init__(self, qmain: TopView, res: int = 25, volume='image', brainmap='Allen'):
super(ControllerTopView, self).__init__(qmain)
self.volume = volume
self.atlas = AllenAtlas(res, brainmap=brainmap)
self.fig_top = self.qwidget = qmain
# Setup Coronal slice: width: ml, height: dv, depth: ap
self.fig_coronal = SliceView(qmain, waxis=0, haxis=2, daxis=1)
self.fig_coronal.setWindowTitle('Coronal Slice')
self.set_slice(self.fig_coronal)
self.fig_coronal.show()
# Setup Sagittal slice: width: ap, height: dv, depth: ml
self.fig_sagittal = SliceView(qmain, waxis=1, haxis=2, daxis=0)
self.fig_sagittal.setWindowTitle('Sagittal Slice')
self.set_slice(self.fig_sagittal)
self.fig_sagittal.show()
def set_slice(self, fig, coord=0, mapping="Allen"):
waxis, haxis, daxis = (fig.ctrl.waxis, fig.ctrl.haxis, fig.ctrl.daxis)
# construct the transform matrix image 2 ibl coordinates
dw = self.atlas.bc.dxyz[waxis]
dh = self.atlas.bc.dxyz[haxis]
wl = self.atlas.bc.lim(waxis) - dw / 2
hl = self.atlas.bc.lim(haxis) - dh / 2
# the ImageLayer object carries slice kwargs and pyqtgraph ImageSet kwargs
# reversed order so the self.im is set with the base layer
for layer in reversed(fig.ctrl.image_layers):
_slice = self.atlas.slice(coord, axis=daxis, mapping=mapping, **layer.slice_kwargs)
fig.ctrl.set_image(layer.image_item, _slice, dw, dh, wl[0], hl[0], **layer.pg_kwargs)
fig.ctrl.slice_coord = coord
def set_top(self):
img = self.atlas.top.transpose()
img[np.isnan(img)] = np.nanmin(img) # img has dims ml, ap
dw, dh = (self.atlas.bc.dxyz[0], self.atlas.bc.dxyz[1])
wl, hl = (self.atlas.bc.xlim, self.atlas.bc.ylim)
self.set_image(self.image_layers[0].image_item, img, dw, dh, wl[0], hl[0])
def set_scatter(self, fig, coord=0):
waxis = fig.ctrl.waxis
# dealing with coronal slice
if waxis == 0:
idx = np.where(self.scatter_data_ind[:, 1] == self.atlas.bc.y2i(coord))[0]
x = self.scatter_data[idx, 0]
y = self.scatter_data[idx, 2]
else:
idx = np.where(self.scatter_data_ind[:, 0] == self.atlas.bc.x2i(coord))[0]
x = self.scatter_data[idx, 1]
y = self.scatter_data[idx, 2]
fig.ctrl.set_points(x, y)
def set_volume(self, volume):
self.volume = volume
class SliceController(PgImageController):
def __init__(self, fig, waxis=None, haxis=None, daxis=None):
"""
:param waxis: brain atlas axis corresponding to display abscissa (coronal: 0, sagittal: 1)
:param haxis: brain atlas axis corresponding to display ordinate (coronal: 2, sagittal: 2)
:param daxis: brain atlas axis corresponding to display abscissa (coronal: 1, sagittal: 0)
"""
super(SliceController, self).__init__(fig)
self.waxis = waxis
self.haxis = haxis
self.daxis = daxis
def cursor2xyamp(self, qpoint):
"""
Extends the superclass method to also get the brain region from the model
:param qpoint:
:return:
"""
iw, ih, w, h, v = super(SliceController, self).cursor2xyamp(qpoint)
ba = self.qwidget.topview.ctrl.atlas
xyz = np.zeros(3)
xyz[np.array([self.waxis, self.haxis, self.daxis])] = [w, h, self.slice_coord]
mapping = self.qwidget.topview.comboBox_mappings.currentText()
try:
region = ba.regions.get(ba.get_labels(xyz, mapping=mapping))
except ValueError:
region = None
return iw, ih, w, h, v, region
@dataclass
class ImageLayer:
"""
Class for keeping track of image layers.
:param image_item
:param pg_kwargs: pyqtgraph setImage arguments: {'levels': None, 'lut': None, 'opacity': 1.0}
:param slice_kwargs: ibllib.atlas.slice arguments: {'volume': 'image', 'mode': 'clip'}
:param
"""
image_item: pg.ImageItem = field(default_factory=pg.ImageItem)
pg_kwargs: dict = field(default_factory=lambda: {})
slice_kwargs: dict = field(default_factory=lambda: {'volume': 'image', 'mode': 'clip'})
def view(res=25, title=None, brainmap='Allen'):
"""
"""
qt.create_app()
av = TopView._get_or_create(title=title, res=res, brainmap=brainmap)
av.show()
return av
| [
"numpy.insert",
"PyQt5.QtWidgets.QApplication.instance",
"PyQt5.QtGui.QTransform",
"matplotlib.cm.get_cmap",
"pathlib.Path",
"pyqtgraph.ScatterPlotItem",
"pyqtgraph.InfiniteLine",
"numpy.array",
"ibllib.atlas.AllenAtlas",
"numpy.zeros",
"numpy.isnan",
"pyqtgraph.mkPen",
"numpy.nanmin",
"qt... | [((14373, 14408), 'dataclasses.field', 'field', ([], {'default_factory': 'pg.ImageItem'}), '(default_factory=pg.ImageItem)\n', (14378, 14408), False, 'from dataclasses import dataclass, field\n'), ((14431, 14465), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : {})'}), '(default_factory=lambda : {})\n', (14436, 14465), False, 'from dataclasses import dataclass, field\n'), ((14490, 14557), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : {'volume': 'image', 'mode': 'clip'})"}), "(default_factory=lambda : {'volume': 'image', 'mode': 'clip'})\n", (14495, 14557), False, 'from dataclasses import dataclass, field\n'), ((14627, 14642), 'qt.create_app', 'qt.create_app', ([], {}), '()\n', (14640, 14642), False, 'import qt\n'), ((894, 927), 'PyQt5.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (925, 927), False, 'from PyQt5 import QtWidgets, uic\n'), ((1852, 1898), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'angle': '(0)', 'pos': '(0)'}), '(angle=0, pos=0, **line_kwargs)\n', (1867, 1898), True, 'import pyqtgraph as pg\n'), ((1928, 1975), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'angle': '(90)', 'pos': '(0)'}), '(angle=90, pos=0, **line_kwargs)\n', (1943, 1975), True, 'import pyqtgraph as pg\n'), ((2383, 2454), 'pyqtgraph.SignalProxy', 'pg.SignalProxy', (['s.sigMouseMoved'], {'rateLimit': '(60)', 'slot': 'self.mouseMoveEvent'}), '(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)\n', (2397, 2454), True, 'import pyqtgraph as pg\n'), ((4019, 4047), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (4041, 4047), False, 'import matplotlib\n'), ((4140, 4179), 'numpy.insert', 'np.insert', (['lut', '(0)', '[0, 0, 0, 0]'], {'axis': '(0)'}), '(lut, 0, [0, 0, 0, 0], axis=0)\n', (4149, 4179), True, 'import numpy as np\n'), ((6372, 6443), 'pyqtgraph.SignalProxy', 'pg.SignalProxy', (['s.sigMouseMoved'], {'rateLimit': '(60)', 'slot': 'self.mouseMoveEvent'}), '(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)\n', (6386, 6443), True, 'import pyqtgraph as pg\n'), ((6550, 6570), 'pyqtgraph.ScatterPlotItem', 'pg.ScatterPlotItem', ([], {}), '()\n', (6568, 6570), True, 'import pyqtgraph as pg\n'), ((10451, 10485), 'ibllib.atlas.AllenAtlas', 'AllenAtlas', (['res'], {'brainmap': 'brainmap'}), '(res, brainmap=brainmap)\n', (10461, 10485), False, 'from ibllib.atlas import AllenAtlas\n'), ((11968, 11982), 'numpy.nanmin', 'np.nanmin', (['img'], {}), '(img)\n', (11977, 11982), True, 'import numpy as np\n'), ((13680, 13691), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13688, 13691), True, 'import numpy as np\n'), ((1792, 1822), 'pyqtgraph.mkPen', 'pg.mkPen', (['(0, 255, 0)'], {'width': '(3)'}), '((0, 255, 0), width=3)\n', (1800, 1822), True, 'import pyqtgraph as pg\n'), ((8858, 8879), 'numpy.array', 'np.array', (['[iw, ih, 1]'], {}), '([iw, ih, 1])\n', (8866, 8879), True, 'import numpy as np\n'), ((9883, 9905), 'PyQt5.QtGui.QTransform', 'QTransform', (['*transform'], {}), '(*transform)\n', (9893, 9905), False, 'from PyQt5.QtGui import QTransform\n'), ((11951, 11964), 'numpy.isnan', 'np.isnan', (['img'], {}), '(img)\n', (11959, 11964), True, 'import numpy as np\n'), ((13704, 13750), 'numpy.array', 'np.array', (['[self.waxis, self.haxis, self.daxis]'], {}), '([self.waxis, self.haxis, self.daxis])\n', (13712, 13750), True, 'import numpy as np\n'), ((9810, 9829), 'numpy.array', 'np.array', (['transform'], {}), '(transform)\n', (9818, 9829), True, 'import numpy as np\n'), ((1511, 1525), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1515, 1525), False, 'from pathlib import Path\n'), ((5848, 5862), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5852, 5862), False, 'from pathlib import Path\n')] |
import os
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import logging
from pandas import DataFrame
from common.gen_samples import *
from common.data_plotter import *
from aad.aad_globals import *
from aad.aad_support import *
from aad.forest_description import *
from aad.anomaly_dataset_support import *
# from percept.percept import *
"""
pythonw -m aad.plot_anomalies_rectangle
"""
def get_x_tau(x, w, tau):
v = x.dot(w)
ranked = np.argsort(-v)
tau_id = ranked[int(tau * len(v))]
return tau_id, x[tau_id]
def plot_anomalies_ifor(outdir, plot=False, plot_legends=False):
u_theta = np.pi * 4. / 4 + np.pi * 5 / 180
x, y = get_sphere_samples([(50, 0, np.pi * 4. / 4, np.pi * 4. / 4 + np.pi * 2 / 4),
(15, 1, u_theta - np.pi * 5 / 180, u_theta + np.pi * 5 / 180),
(15, 1, np.pi * 6. / 4 - np.pi * 1.5 / 180, np.pi * 6. / 4)])
n, d = x.shape
id_nomls = np.where(y == 0)[0]
id_anoms = np.where(y == 1)[0]
n_anoms = len(id_anoms)
x_nomls, y_nomls = x[id_nomls, :], y[id_nomls]
x_anoms, y_anoms = x[id_anoms, :], y[id_anoms]
if plot:
axis_fontsize = 16
line_colors = ["blue", "red", "red"]
line_types = ["--", "--", "-"]
line_widths = [2, 2, 2]
lines = list()
line_labels = list()
tau = n_anoms * 1. / n # multiplying by a factor to move the plane lower
w = normalize(np.ones(2))
r = np.array([np.min(x[:, 0]), np.max(x[:, 0])])
tau_id, x_tau = get_x_tau(x, w, tau)
q_tau = w.dot(x_tau)
# plot the true weight vector
u = interpolate_2D_line_by_point_and_vec(np.array([-1., 1.]), [0., 0.],
[np.cos(u_theta + np.pi * 1 / 4), np.sin(u_theta + np.pi * 1 / 4)])
lines.append(u)
line_labels.append(r"True weights ${\bf u}$")
zd = interpolate_2D_line_by_point_and_vec(np.array([-1., 1.0]), [0., 0.], w)
lines.append(zd)
line_labels.append(r"Uniform weights ${\bf w}_{unif}$")
zw = interpolate_2D_line_by_slope_and_intercept(np.array([-1., 1.]), -w[0] / w[1], q_tau / w[1])
lines.append(zw)
line_labels.append(r"hyperplane $\perp$ ${\bf w}_{unif}$")
pdffile = os.path.join(outdir, "anomalies_in_ifor.pdf")
dp = DataPlotter(pdfpath=pdffile, rows=1, cols=1)
pl = dp.get_next_plot()
pl.set_aspect('equal')
# plt.xlabel('x', fontsize=axis_fontsize)
# plt.ylabel('y', fontsize=axis_fontsize)
plt.xticks([])
plt.yticks([])
plt.xlim([-1.05, 1.05])
plt.ylim([-1.05, 1.05])
pl.scatter(x_nomls[:, 0], x_nomls[:, 1], s=45, c="blue", marker="+", label="Nominal")
pl.scatter(x_anoms[:, 0], x_anoms[:, 1], s=45, c="red", marker="+", label="Anomaly")
for i, line in enumerate(lines):
color = "blue" if line_colors is None else line_colors[i]
pl.plot(line[:, 0], line[:, 1], line_types[i], color=color, linewidth=line_widths[i],
label=line_labels[i] if plot_legends else None)
plt.axhline(0, linestyle="--", color="lightgrey")
plt.axvline(0, linestyle="--", color="lightgrey")
if plot_legends:
pl.legend(loc='lower right', prop={'size': 12})
dp.close()
return x, y
def plot_anomalies_rect(outdir, plot=False, plot_legends=False):
x_nomls = rnd.uniform(0., 1., 500)
x_nomls = np.reshape(x_nomls, newshape=(250, -1))
anom_mu = (0.83, 0.95)
u_theta = np.arctan(0.9 / 0.8)
anom_score_dist = MVNParams(
mu=np.array([anom_mu[0], anom_mu[1]]),
mcorr=np.array([
[1, -0.5],
[0, 1.0]]),
dvar=np.array([0.002, 0.0005])
)
n_anoms = 30
x_anoms = generate_dependent_normal_samples(n_anoms,
anom_score_dist.mu,
anom_score_dist.mcorr,
anom_score_dist.dvar)
x = np.vstack([x_nomls, x_anoms])
y = np.array(np.zeros(x_nomls.shape[0], dtype=int))
y = np.append(y, np.ones(x_anoms.shape[0], dtype=int))
if plot:
n, d = x.shape
# tau is computed assuming that the anomalies occupy tau-proportion
# of the circumference
tau = n_anoms * 1.3 / n # multiplying by a factor to move the plane lower
w = normalize(np.ones(2))
r = np.array([np.min(x[:, 0]), np.max(x[:, 0])])
line_colors = ["blue", "red", "red"]
line_types = ["--", "--", "-"]
line_widths = [2, 2, 2]
lines = list()
line_labels = list()
tau_id, x_tau = get_x_tau(x, w, tau)
q_tau = w.dot(x_tau)
# plot the true weight vector
u = interpolate_2D_line_by_point_and_vec(np.array([0., 1.]), [0., 0.],
[np.cos(u_theta), np.sin(u_theta)])
lines.append(u)
line_labels.append(r"True weights ${\bf u}$")
zd = interpolate_2D_line_by_point_and_vec(np.array([0., 1.0]), [0., 0.], w)
lines.append(zd)
line_labels.append(r"Uniform weights ${\bf w}_{unif}$")
zw = interpolate_2D_line_by_slope_and_intercept(np.array([0., 1.05]), -w[0] / w[1], q_tau / w[1])
lines.append(zw)
line_labels.append(r"hyperplane $\perp$ ${\bf w}_{unif}$")
axis_fontsize = 16
pdffile = os.path.join(outdir, "anomalies_in_rect.pdf")
dp = DataPlotter(pdfpath=pdffile, rows=1, cols=1)
pl = dp.get_next_plot()
pl.set_aspect('equal')
# plt.xlabel('x', fontsize=axis_fontsize)
# plt.ylabel('y', fontsize=axis_fontsize)
plt.xticks([])
plt.yticks([])
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
pl.scatter(x_nomls[:, 0], x_nomls[:, 1], s=45, c="blue", marker="+", label="Nominal")
pl.scatter(x_anoms[:, 0], x_anoms[:, 1], s=45, c="red", marker="+", label="Anomaly")
for i, line in enumerate(lines):
color = "blue" if line_colors is None else line_colors[i]
pl.plot(line[:, 0], line[:, 1], line_types[i], color=color, linewidth=line_widths[i],
label=line_labels[i] if plot_legends else None)
if plot_legends:
pl.legend(loc='lower right', prop={'size': 12})
dp.close()
return x, y
if __name__ == "__main__":
logger = logging.getLogger(__name__)
args = get_command_args(debug=True, debug_args=["--debug",
"--plot",
"--log_file=temp/plot_anomalies_rectangle.log"])
# print "log file: %s" % args.log_file
configure_logger(args)
rnd.seed(42)
outdir = "./temp/illustration"
dir_create(outdir)
# plot isolation forest score distribution illustration
# plot_anomalies_ifor(outdir, plot=True, plot_legends=False)
plot_anomalies_rect(outdir, plot=True, plot_legends=False) | [
"logging.getLogger",
"numpy.argsort",
"numpy.array",
"numpy.sin",
"numpy.reshape",
"numpy.where",
"numpy.max",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.random.seed",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.arctan",
"numpy.ones",
"matplotlib.... | [((481, 495), 'numpy.argsort', 'np.argsort', (['(-v)'], {}), '(-v)\n', (491, 495), True, 'import numpy as np\n'), ((3499, 3525), 'numpy.random.uniform', 'rnd.uniform', (['(0.0)', '(1.0)', '(500)'], {}), '(0.0, 1.0, 500)\n', (3510, 3525), True, 'import numpy.random as rnd\n'), ((3538, 3577), 'numpy.reshape', 'np.reshape', (['x_nomls'], {'newshape': '(250, -1)'}), '(x_nomls, newshape=(250, -1))\n', (3548, 3577), True, 'import numpy as np\n'), ((3620, 3640), 'numpy.arctan', 'np.arctan', (['(0.9 / 0.8)'], {}), '(0.9 / 0.8)\n', (3629, 3640), True, 'import numpy as np\n'), ((4131, 4160), 'numpy.vstack', 'np.vstack', (['[x_nomls, x_anoms]'], {}), '([x_nomls, x_anoms])\n', (4140, 4160), True, 'import numpy as np\n'), ((6545, 6572), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6562, 6572), False, 'import logging\n'), ((6875, 6887), 'numpy.random.seed', 'rnd.seed', (['(42)'], {}), '(42)\n', (6883, 6887), True, 'import numpy.random as rnd\n'), ((988, 1004), 'numpy.where', 'np.where', (['(y == 0)'], {}), '(y == 0)\n', (996, 1004), True, 'import numpy as np\n'), ((1023, 1039), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (1031, 1039), True, 'import numpy as np\n'), ((2339, 2384), 'os.path.join', 'os.path.join', (['outdir', '"""anomalies_in_ifor.pdf"""'], {}), "(outdir, 'anomalies_in_ifor.pdf')\n", (2351, 2384), False, 'import os\n'), ((2614, 2628), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2624, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2637, 2651), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2647, 2651), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2683), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1.05, 1.05]'], {}), '([-1.05, 1.05])\n', (2668, 2683), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2715), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1.05, 1.05]'], {}), '([-1.05, 1.05])\n', (2700, 2715), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3238), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'linestyle': '"""--"""', 'color': '"""lightgrey"""'}), "(0, linestyle='--', color='lightgrey')\n", (3200, 3238), True, 'import matplotlib.pyplot as plt\n'), ((3247, 3296), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'linestyle': '"""--"""', 'color': '"""lightgrey"""'}), "(0, linestyle='--', color='lightgrey')\n", (3258, 3296), True, 'import matplotlib.pyplot as plt\n'), ((4178, 4215), 'numpy.zeros', 'np.zeros', (['x_nomls.shape[0]'], {'dtype': 'int'}), '(x_nomls.shape[0], dtype=int)\n', (4186, 4215), True, 'import numpy as np\n'), ((4238, 4274), 'numpy.ones', 'np.ones', (['x_anoms.shape[0]'], {'dtype': 'int'}), '(x_anoms.shape[0], dtype=int)\n', (4245, 4274), True, 'import numpy as np\n'), ((5539, 5584), 'os.path.join', 'os.path.join', (['outdir', '"""anomalies_in_rect.pdf"""'], {}), "(outdir, 'anomalies_in_rect.pdf')\n", (5551, 5584), False, 'import os\n'), ((5814, 5828), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5824, 5828), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5851), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5847, 5851), True, 'import matplotlib.pyplot as plt\n'), ((5860, 5883), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (5868, 5883), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5915), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (5900, 5915), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1499), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1496, 1499), True, 'import numpy as np\n'), ((1721, 1742), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (1729, 1742), True, 'import numpy as np\n'), ((1998, 2019), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (2006, 2019), True, 'import numpy as np\n'), ((2179, 2200), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (2187, 2200), True, 'import numpy as np\n'), ((3686, 3720), 'numpy.array', 'np.array', (['[anom_mu[0], anom_mu[1]]'], {}), '([anom_mu[0], anom_mu[1]])\n', (3694, 3720), True, 'import numpy as np\n'), ((3736, 3767), 'numpy.array', 'np.array', (['[[1, -0.5], [0, 1.0]]'], {}), '([[1, -0.5], [0, 1.0]])\n', (3744, 3767), True, 'import numpy as np\n'), ((3807, 3832), 'numpy.array', 'np.array', (['[0.002, 0.0005]'], {}), '([0.002, 0.0005])\n', (3815, 3832), True, 'import numpy as np\n'), ((4526, 4536), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4533, 4536), True, 'import numpy as np\n'), ((4927, 4947), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (4935, 4947), True, 'import numpy as np\n'), ((5171, 5191), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5179, 5191), True, 'import numpy as np\n'), ((5351, 5372), 'numpy.array', 'np.array', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (5359, 5372), True, 'import numpy as np\n'), ((1523, 1538), 'numpy.min', 'np.min', (['x[:, 0]'], {}), '(x[:, 0])\n', (1529, 1538), True, 'import numpy as np\n'), ((1540, 1555), 'numpy.max', 'np.max', (['x[:, 0]'], {}), '(x[:, 0])\n', (1546, 1555), True, 'import numpy as np\n'), ((1802, 1833), 'numpy.cos', 'np.cos', (['(u_theta + np.pi * 1 / 4)'], {}), '(u_theta + np.pi * 1 / 4)\n', (1808, 1833), True, 'import numpy as np\n'), ((1835, 1866), 'numpy.sin', 'np.sin', (['(u_theta + np.pi * 1 / 4)'], {}), '(u_theta + np.pi * 1 / 4)\n', (1841, 1866), True, 'import numpy as np\n'), ((4560, 4575), 'numpy.min', 'np.min', (['x[:, 0]'], {}), '(x[:, 0])\n', (4566, 4575), True, 'import numpy as np\n'), ((4577, 4592), 'numpy.max', 'np.max', (['x[:, 0]'], {}), '(x[:, 0])\n', (4583, 4592), True, 'import numpy as np\n'), ((5007, 5022), 'numpy.cos', 'np.cos', (['u_theta'], {}), '(u_theta)\n', (5013, 5022), True, 'import numpy as np\n'), ((5024, 5039), 'numpy.sin', 'np.sin', (['u_theta'], {}), '(u_theta)\n', (5030, 5039), True, 'import numpy as np\n')] |
import sys
from PySide2.QtCore import QCoreApplication, QFile
def load_ui_file(filename):
ui_file = QFile(filename)
if not ui_file.open(QFile.ReadOnly):
print("Cannot open {}: {}".format(filename, ui_file.errorString()))
sys.exit(-1)
return ui_file
def translate(context, text):
return QCoreApplication.translate(context, text, None)
| [
"PySide2.QtCore.QFile",
"PySide2.QtCore.QCoreApplication.translate",
"sys.exit"
] | [((107, 122), 'PySide2.QtCore.QFile', 'QFile', (['filename'], {}), '(filename)\n', (112, 122), False, 'from PySide2.QtCore import QCoreApplication, QFile\n'), ((323, 370), 'PySide2.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['context', 'text', 'None'], {}), '(context, text, None)\n', (349, 370), False, 'from PySide2.QtCore import QCoreApplication, QFile\n'), ((248, 260), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (256, 260), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
"""
From
https://github.com/msmsajjadi/precision-recall-distributions/blob/master/prd_from_image_folders.py
"""
# coding=utf-8
# Copyright: <NAME> (msajjadi.com)
import prd_score as prd
from improved_precision_recall import knn_precision_recall_features
def compute_prc(orig_data,synth_data, params=None, plot_path=None, improved_version=True, verbose=True):
if verbose:
print('computing PRD')
if improved_version:
prd_data = knn_precision_recall_features(orig_data,synth_data)
else:
if params is None:
params = {}
params['num_clusters'] = 20
params['num_angles'] = 1001
params['num_runs'] = 10
prd_data = prd.compute_prd_from_embedding(
eval_data=synth_data,
ref_data=orig_data,
num_clusters=params['num_clusters'],
num_angles=params['num_angles'],
num_runs=params['num_runs'])
precision, recall = prd_data
if verbose:
print('plotting results')
f_beta = prd.prd_to_max_f_beta_pair(precision, recall, beta=8)
print('%.3f %.3f' % (f_beta[0], f_beta[1]))
return prd_data
| [
"prd_score.compute_prd_from_embedding",
"improved_precision_recall.knn_precision_recall_features",
"prd_score.prd_to_max_f_beta_pair"
] | [((1087, 1140), 'prd_score.prd_to_max_f_beta_pair', 'prd.prd_to_max_f_beta_pair', (['precision', 'recall'], {'beta': '(8)'}), '(precision, recall, beta=8)\n', (1113, 1140), True, 'import prd_score as prd\n'), ((479, 531), 'improved_precision_recall.knn_precision_recall_features', 'knn_precision_recall_features', (['orig_data', 'synth_data'], {}), '(orig_data, synth_data)\n', (508, 531), False, 'from improved_precision_recall import knn_precision_recall_features\n'), ((727, 906), 'prd_score.compute_prd_from_embedding', 'prd.compute_prd_from_embedding', ([], {'eval_data': 'synth_data', 'ref_data': 'orig_data', 'num_clusters': "params['num_clusters']", 'num_angles': "params['num_angles']", 'num_runs': "params['num_runs']"}), "(eval_data=synth_data, ref_data=orig_data,\n num_clusters=params['num_clusters'], num_angles=params['num_angles'],\n num_runs=params['num_runs'])\n", (757, 906), True, 'import prd_score as prd\n')] |
import pandas as pd
from autogluon.utils.tabular.utils.savers import save_pd
from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml
from autogluon_utils.benchmarking.evaluation.constants import *
def run():
results_dir = 'data/results/'
results_dir_input = results_dir + 'input/raw/original/'
results_dir_output = results_dir + 'input/prepared/openml/'
other_results_large_4h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_large-8c4h.csv', framework_suffix='_4h')
other_results_medium_4h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_medium-8c4h.csv', framework_suffix='_4h')
other_results_small_4h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_small-8c4h.csv', framework_suffix='_4h')
other_results_medium_1h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_medium-8c1h.csv', framework_suffix='_1h')
other_results_small_1h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_small-8c1h.csv', framework_suffix='_1h')
results_list = [other_results_large_4h, other_results_medium_4h, other_results_small_4h, other_results_medium_1h, other_results_small_1h]
results_raw = pd.concat(results_list, ignore_index=True, sort=True)
results_raw[FRAMEWORK] = ['orig_' + name[0] for name in zip(results_raw[FRAMEWORK])]
frameworks_original = [
'orig_H2OAutoML_1h',
'orig_autosklearn_1h',
'orig_TPOT_1h',
'orig_AutoWEKA_1h',
'orig_H2OAutoML_4h',
'orig_autosklearn_4h',
'orig_TPOT_4h',
'orig_AutoWEKA_4h',
]
results_original = results_raw[results_raw[FRAMEWORK].isin(frameworks_original)]
save_pd.save(path=results_dir_output + 'openml_original.csv', df=results_original)
if __name__ == '__main__':
run()
| [
"autogluon.utils.tabular.utils.savers.save_pd.save",
"pandas.concat",
"autogluon_utils.benchmarking.evaluation.preprocess.preprocess_openml.preprocess_openml_input"
] | [((425, 545), 'autogluon_utils.benchmarking.evaluation.preprocess.preprocess_openml.preprocess_openml_input', 'preprocess_openml.preprocess_openml_input', ([], {'path': "(results_dir_input + 'results_large-8c4h.csv')", 'framework_suffix': '"""_4h"""'}), "(path=results_dir_input +\n 'results_large-8c4h.csv', framework_suffix='_4h')\n", (466, 545), False, 'from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml\n'), ((572, 693), 'autogluon_utils.benchmarking.evaluation.preprocess.preprocess_openml.preprocess_openml_input', 'preprocess_openml.preprocess_openml_input', ([], {'path': "(results_dir_input + 'results_medium-8c4h.csv')", 'framework_suffix': '"""_4h"""'}), "(path=results_dir_input +\n 'results_medium-8c4h.csv', framework_suffix='_4h')\n", (613, 693), False, 'from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml\n'), ((719, 839), 'autogluon_utils.benchmarking.evaluation.preprocess.preprocess_openml.preprocess_openml_input', 'preprocess_openml.preprocess_openml_input', ([], {'path': "(results_dir_input + 'results_small-8c4h.csv')", 'framework_suffix': '"""_4h"""'}), "(path=results_dir_input +\n 'results_small-8c4h.csv', framework_suffix='_4h')\n", (760, 839), False, 'from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml\n'), ((866, 987), 'autogluon_utils.benchmarking.evaluation.preprocess.preprocess_openml.preprocess_openml_input', 'preprocess_openml.preprocess_openml_input', ([], {'path': "(results_dir_input + 'results_medium-8c1h.csv')", 'framework_suffix': '"""_1h"""'}), "(path=results_dir_input +\n 'results_medium-8c1h.csv', framework_suffix='_1h')\n", (907, 987), False, 'from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml\n'), ((1013, 1133), 'autogluon_utils.benchmarking.evaluation.preprocess.preprocess_openml.preprocess_openml_input', 'preprocess_openml.preprocess_openml_input', ([], {'path': "(results_dir_input + 'results_small-8c1h.csv')", 'framework_suffix': '"""_1h"""'}), "(path=results_dir_input +\n 'results_small-8c1h.csv', framework_suffix='_1h')\n", (1054, 1133), False, 'from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml\n'), ((1292, 1345), 'pandas.concat', 'pd.concat', (['results_list'], {'ignore_index': '(True)', 'sort': '(True)'}), '(results_list, ignore_index=True, sort=True)\n', (1301, 1345), True, 'import pandas as pd\n'), ((1786, 1873), 'autogluon.utils.tabular.utils.savers.save_pd.save', 'save_pd.save', ([], {'path': "(results_dir_output + 'openml_original.csv')", 'df': 'results_original'}), "(path=results_dir_output + 'openml_original.csv', df=\n results_original)\n", (1798, 1873), False, 'from autogluon.utils.tabular.utils.savers import save_pd\n')] |
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotAvailableHours
import simplejson as json
from django.test.utils import override_settings
from mock import patch
from django.core import cache
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok')
class SpotHoursGETTest(TestCase):
def setUp(self):
spot = Spot.objects.create(name="This spot has available hours")
# Intentionally out of order - make sure windows are sorted, not just in db happenstance order
hours2 = SpotAvailableHours.objects.create(spot=spot, day="m", start_time="11:00", end_time="14:00")
hours1 = SpotAvailableHours.objects.create(spot=spot, day="m", start_time="00:00", end_time="10:00")
hours3 = SpotAvailableHours.objects.create(spot=spot, day="t", start_time="11:00", end_time="14:00")
hours4 = SpotAvailableHours.objects.create(spot=spot, day="w", start_time="11:00", end_time="14:00")
hours5 = SpotAvailableHours.objects.create(spot=spot, day="th", start_time="11:00", end_time="14:00")
hours6 = SpotAvailableHours.objects.create(spot=spot, day="f", start_time="11:00", end_time="14:00")
# Saturday is intentionally missing
hours8 = SpotAvailableHours.objects.create(spot=spot, day="su", start_time="11:00", end_time="14:00")
self.spot = spot
def test_hours(self):
""" Tests that a Spot's available hours can be retrieved successfully.
"""
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
url = "/api/v1/spot/%s" % self.spot.pk
response = c.get(url)
spot_dict = json.loads(response.content)
valid_data = {
'monday': [["00:00", "10:00"], ["11:00", "14:00"]],
'tuesday': [["11:00", "14:00"]],
'wednesday': [["11:00", "14:00"]],
'thursday': [["11:00", "14:00"]],
'friday': [["11:00", "14:00"]],
'saturday': [],
'sunday': [["11:00", "14:00"]],
}
available_hours = spot_dict["available_hours"]
self.assertEquals(available_hours, valid_data, "Data from the web service matches the data for the spot")
| [
"django.core.cache.get_cache",
"django.test.client.Client",
"spotseeker_server.models.Spot.objects.create",
"mock.patch.object",
"spotseeker_server.models.SpotAvailableHours.objects.create",
"django.test.utils.override_settings",
"simplejson.loads"
] | [((972, 1045), 'django.test.utils.override_settings', 'override_settings', ([], {'SPOTSEEKER_AUTH_MODULE': '"""spotseeker_server.auth.all_ok"""'}), "(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok')\n", (989, 1045), False, 'from django.test.utils import override_settings\n'), ((1117, 1174), 'spotseeker_server.models.Spot.objects.create', 'Spot.objects.create', ([], {'name': '"""This spot has available hours"""'}), "(name='This spot has available hours')\n", (1136, 1174), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1295, 1390), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""m"""', 'start_time': '"""11:00"""', 'end_time': '"""14:00"""'}), "(spot=spot, day='m', start_time='11:00',\n end_time='14:00')\n", (1328, 1390), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1404, 1499), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""m"""', 'start_time': '"""00:00"""', 'end_time': '"""10:00"""'}), "(spot=spot, day='m', start_time='00:00',\n end_time='10:00')\n", (1437, 1499), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1513, 1608), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""t"""', 'start_time': '"""11:00"""', 'end_time': '"""14:00"""'}), "(spot=spot, day='t', start_time='11:00',\n end_time='14:00')\n", (1546, 1608), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1622, 1717), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""w"""', 'start_time': '"""11:00"""', 'end_time': '"""14:00"""'}), "(spot=spot, day='w', start_time='11:00',\n end_time='14:00')\n", (1655, 1717), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1731, 1827), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""th"""', 'start_time': '"""11:00"""', 'end_time': '"""14:00"""'}), "(spot=spot, day='th', start_time='11:00',\n end_time='14:00')\n", (1764, 1827), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1841, 1936), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""f"""', 'start_time': '"""11:00"""', 'end_time': '"""14:00"""'}), "(spot=spot, day='f', start_time='11:00',\n end_time='14:00')\n", (1874, 1936), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((1994, 2090), 'spotseeker_server.models.SpotAvailableHours.objects.create', 'SpotAvailableHours.objects.create', ([], {'spot': 'spot', 'day': '"""su"""', 'start_time': '"""11:00"""', 'end_time': '"""14:00"""'}), "(spot=spot, day='su', start_time='11:00',\n end_time='14:00')\n", (2027, 2090), False, 'from spotseeker_server.models import Spot, SpotAvailableHours\n'), ((2253, 2315), 'django.core.cache.get_cache', 'cache.get_cache', (['"""django.core.cache.backends.dummy.DummyCache"""'], {}), "('django.core.cache.backends.dummy.DummyCache')\n", (2268, 2315), False, 'from django.core import cache\n'), ((2329, 2371), 'mock.patch.object', 'patch.object', (['models', '"""cache"""', 'dummy_cache'], {}), "(models, 'cache', dummy_cache)\n", (2341, 2371), False, 'from mock import patch\n'), ((2389, 2397), 'django.test.client.Client', 'Client', ([], {}), '()\n', (2395, 2397), False, 'from django.test.client import Client\n'), ((2507, 2535), 'simplejson.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (2517, 2535), True, 'import simplejson as json\n')] |
from copy import deepcopy
from src.CourseMaterials.Week3.Oef1.place import Place
class Board:
starting_board = [[Place(x, y) for x in range(3)] for y in range(3)]
def __init__(self, inner_board=starting_board, value="", children=[], parent_board=deepcopy(starting_board)):
self.inner_board = deepcopy(inner_board)
self.value = value
self.children = deepcopy(children)
self.parent_board = parent_board
def get_free_places(self):
free_spaces = []
for i in range(len(self.inner_board)):
for j in range(len(self.inner_board[i])):
if self.inner_board[i][j].value is "~":
free_spaces.append(self.inner_board[i][j])
return free_spaces
def mark_place(self, place, symbol):
self.children.append(Board(self.inner_board))
self.children[len(self.children) - 1].inner_board[place.row][place.column].value = symbol
def __str__(self):
output = ""
for places in self.inner_board:
for place in places:
output += str(place.value) + " "
output += "\n"
return output
def __repr__(self):
return repr(self.__str__())
| [
"src.CourseMaterials.Week3.Oef1.place.Place",
"copy.deepcopy"
] | [((256, 280), 'copy.deepcopy', 'deepcopy', (['starting_board'], {}), '(starting_board)\n', (264, 280), False, 'from copy import deepcopy\n'), ((310, 331), 'copy.deepcopy', 'deepcopy', (['inner_board'], {}), '(inner_board)\n', (318, 331), False, 'from copy import deepcopy\n'), ((383, 401), 'copy.deepcopy', 'deepcopy', (['children'], {}), '(children)\n', (391, 401), False, 'from copy import deepcopy\n'), ((118, 129), 'src.CourseMaterials.Week3.Oef1.place.Place', 'Place', (['x', 'y'], {}), '(x, y)\n', (123, 129), False, 'from src.CourseMaterials.Week3.Oef1.place import Place\n')] |
'''
.. module:: eosfactory.core.vscode
:platform: Unix, Darwin
:synopsis: Default configuration items of a contract project.
.. moduleauthor:: Tokenika
'''
import json
import argparse
import eosfactory.core.config as config
INCLUDE_PATH = "includePath"
LIBS = "libs"
CODE_OPTIONS = "codeOptions"
TEST_OPTIONS = "testOptions"
def get_includes():
includes = config.eosio_cpp_includes()
retval = []
root = config.wsl_root()
for include in includes:
retval.append(root + include)
retval.append("${workspaceFolder}")
retval.append("${workspaceFolder}/include")
return retval
LIB_LIST = [
]
OPTIONS = [
]
TASKS = '''
{
"version": "2.0.0",
"tasks": [
{
"label": "Compile",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"osx": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"linux": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "Build",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"osx": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"linux": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [
]
},
{
"label": "Test",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "python3 ./tests/test1.py"
},
"osx": {
"command": "python3 ./tests/test1.py"
},
"linux": {
"command": "python3 ./tests/test1.py"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "Unittest",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "python3 ./tests/unittest1.py"
},
"osx": {
"command": "python3 ./tests/unittest1.py"
},
"linux": {
"command": "python3 ./tests/unittest1.py"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "EOSIO API",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "explorer.exe"
},
"osx": {
"command": "open"
},
"linux": {
"command": "sensible-browser"
},
"args": [
"https://developers.eos.io/"
],
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
}
]
}
'''
def c_cpp_properties():
includes = get_includes()
retval = """
{
"configurations": [
{
"%s": %s,
"%s": %s,
"%s": %s,
"%s": %s,
"defines": [],
"intelliSenseMode": "clang-x64",
"browse": {
"path": %s,
"limitSymbolsToIncludedHeaders": true,
"databaseFilename": ""
}
}
],
"version": 4
}
""" % (
INCLUDE_PATH,
json.dumps(includes, indent=4),
LIBS,
json.dumps(LIB_LIST, indent=4),
CODE_OPTIONS,
json.dumps(OPTIONS, indent=4),
TEST_OPTIONS,
json.dumps(OPTIONS, indent=4),
json.dumps(includes, indent=4))
return retval
def main(c_cpp_properties_path=None):
if c_cpp_properties_path:
config.update_vscode(c_cpp_properties_path)
else:
print(c_cpp_properties())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--c_cpp_prop_path", default="")
args = parser.parse_args()
main(args.c_cpp_prop_path) | [
"eosfactory.core.config.eosio_cpp_includes",
"argparse.ArgumentParser",
"json.dumps",
"eosfactory.core.config.wsl_root",
"eosfactory.core.config.update_vscode"
] | [((372, 399), 'eosfactory.core.config.eosio_cpp_includes', 'config.eosio_cpp_includes', ([], {}), '()\n', (397, 399), True, 'import eosfactory.core.config as config\n'), ((427, 444), 'eosfactory.core.config.wsl_root', 'config.wsl_root', ([], {}), '()\n', (442, 444), True, 'import eosfactory.core.config as config\n'), ((5994, 6019), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6017, 6019), False, 'import argparse\n'), ((5864, 5907), 'eosfactory.core.config.update_vscode', 'config.update_vscode', (['c_cpp_properties_path'], {}), '(c_cpp_properties_path)\n', (5884, 5907), True, 'import eosfactory.core.config as config\n'), ((5547, 5577), 'json.dumps', 'json.dumps', (['includes'], {'indent': '(4)'}), '(includes, indent=4)\n', (5557, 5577), False, 'import json\n'), ((5593, 5623), 'json.dumps', 'json.dumps', (['LIB_LIST'], {'indent': '(4)'}), '(LIB_LIST, indent=4)\n', (5603, 5623), False, 'import json\n'), ((5647, 5676), 'json.dumps', 'json.dumps', (['OPTIONS'], {'indent': '(4)'}), '(OPTIONS, indent=4)\n', (5657, 5676), False, 'import json\n'), ((5700, 5729), 'json.dumps', 'json.dumps', (['OPTIONS'], {'indent': '(4)'}), '(OPTIONS, indent=4)\n', (5710, 5729), False, 'import json\n'), ((5735, 5765), 'json.dumps', 'json.dumps', (['includes'], {'indent': '(4)'}), '(includes, indent=4)\n', (5745, 5765), False, 'import json\n')] |
from tests.functional.services.policy_engine.utils.api.conf import (
policy_engine_api_conf,
)
from tests.functional.services.utils import http_utils
def get_vulnerabilities(
vulnerability_ids=[],
affected_package=None,
affected_package_version=None,
namespace=None,
):
if not vulnerability_ids:
raise ValueError("Cannot fetch vulnerabilities without ids")
query = {
"id": ",".join(vulnerability_ids),
"affected_package": affected_package,
"affected_package_version": affected_package_version,
"namespace": namespace,
}
vulnerabilities_resp = http_utils.http_get(
["query", "vulnerabilities"], query, config=policy_engine_api_conf
)
if vulnerabilities_resp.code != 200:
raise http_utils.RequestFailedError(
vulnerabilities_resp.url,
vulnerabilities_resp.code,
vulnerabilities_resp.body,
)
return vulnerabilities_resp
| [
"tests.functional.services.utils.http_utils.RequestFailedError",
"tests.functional.services.utils.http_utils.http_get"
] | [((622, 714), 'tests.functional.services.utils.http_utils.http_get', 'http_utils.http_get', (["['query', 'vulnerabilities']", 'query'], {'config': 'policy_engine_api_conf'}), "(['query', 'vulnerabilities'], query, config=\n policy_engine_api_conf)\n", (641, 714), False, 'from tests.functional.services.utils import http_utils\n'), ((780, 893), 'tests.functional.services.utils.http_utils.RequestFailedError', 'http_utils.RequestFailedError', (['vulnerabilities_resp.url', 'vulnerabilities_resp.code', 'vulnerabilities_resp.body'], {}), '(vulnerabilities_resp.url,\n vulnerabilities_resp.code, vulnerabilities_resp.body)\n', (809, 893), False, 'from tests.functional.services.utils import http_utils\n')] |
from flake8_aaa.line_markers import LineMarkers
from flake8_aaa.types import LineType
def test():
result = LineMarkers(5 * [''], 7)
assert result.types == [
LineType.unprocessed,
LineType.unprocessed,
LineType.unprocessed,
LineType.unprocessed,
LineType.unprocessed,
]
assert result.lines == ['', '', '', '', '']
assert result.fn_offset == 7
| [
"flake8_aaa.line_markers.LineMarkers"
] | [((113, 137), 'flake8_aaa.line_markers.LineMarkers', 'LineMarkers', (["(5 * [''])", '(7)'], {}), "(5 * [''], 7)\n", (124, 137), False, 'from flake8_aaa.line_markers import LineMarkers\n')] |
import pytest
from helpers.cluster import ClickHouseCluster
import urllib.request, urllib.parse
import ssl
import os.path
HTTPS_PORT = 8443
NODE_IP = '10.5.172.77' # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
NODE_IP_WITH_HTTPS_PORT = NODE_IP + ':' + str(HTTPS_PORT)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('node', ipv4_address=NODE_IP,
main_configs=['configs/ssl_config.xml', 'certs/server-key.pem', 'certs/server-cert.pem', 'certs/ca-cert.pem'],
user_configs=["configs/users_with_ssl_auth.xml"])
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_ssl_context(cert_name):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(cafile=f'{SCRIPT_DIR}/certs/ca-cert.pem')
if cert_name:
context.load_cert_chain(f'{SCRIPT_DIR}/certs/{cert_name}-cert.pem', f'{SCRIPT_DIR}/certs/{cert_name}-key.pem')
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
return context
def execute_query_https(query, user, enable_ssl_auth=True, cert_name=None, password=None):
url = f'https://{NODE_IP_WITH_HTTPS_PORT}/?query={urllib.parse.quote(query)}'
request = urllib.request.Request(url)
request.add_header('X-ClickHouse-User', user)
if enable_ssl_auth:
request.add_header('X-ClickHouse-SSL-Certificate-Auth', 'on')
if password:
request.add_header('X-ClickHouse-Key', password)
response = urllib.request.urlopen(request, context=get_ssl_context(cert_name)).read()
return response.decode('utf-8')
def test_https():
assert execute_query_https("SELECT currentUser()", user="john", cert_name='client1') == "john\n"
assert execute_query_https("SELECT currentUser()", user="lucy", cert_name='client2') == "lucy\n"
assert execute_query_https("SELECT currentUser()", user="lucy", cert_name='client3') == "lucy\n"
def test_https_wrong_cert():
# Wrong certificate: different user's certificate
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="john", cert_name='client2')
assert "HTTP Error 403" in str(err.value)
# Wrong certificate: self-signed certificate.
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="john", cert_name='wrong')
assert "unknown ca" in str(err.value)
# No certificate.
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="john")
assert "HTTP Error 403" in str(err.value)
# No header enabling SSL authentication.
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="john", enable_ssl_auth=False, cert_name='client1')
def test_https_non_ssl_auth():
# Users with non-SSL authentication are allowed, in this case we can skip sending a client certificate at all (because "verificationMode" is set to "relaxed").
#assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False) == "peter\n"
assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='<PASSWORD>') == "jane\n"
# But we still can send a certificate if we want.
assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='client1') == "peter\n"
assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='client2') == "peter\n"
assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='client3') == "peter\n"
assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='<PASSWORD>', cert_name='client1') == "jane\n"
assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='<PASSWORD>', cert_name='client2') == "jane\n"
assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='<PASSWORD>', cert_name='client3') == "jane\n"
# However if we send a certificate it must not be wrong.
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='wrong')
assert "unknown ca" in str(err.value)
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='<PASSWORD>', cert_name='wrong')
assert "unknown ca" in str(err.value)
def test_create_user():
instance.query("CREATE USER emma IDENTIFIED WITH ssl_certificate CN 'client3'")
assert execute_query_https("SELECT currentUser()", user="emma", cert_name='client3') == "emma\n"
assert instance.query("SHOW CREATE USER emma") == "CREATE USER emma IDENTIFIED WITH ssl_certificate CN \\'client3\\'\n"
instance.query("ALTER USER emma IDENTIFIED WITH ssl_certificate CN 'client2'")
assert execute_query_https("SELECT currentUser()", user="emma", cert_name='client2') == "emma\n"
assert instance.query("SHOW CREATE USER emma") == "CREATE USER emma IDENTIFIED WITH ssl_certificate CN \\'client2\\'\n"
with pytest.raises(Exception) as err:
execute_query_https("SELECT currentUser()", user="emma", cert_name='client3')
assert "HTTP Error 403" in str(err.value)
assert instance.query("SHOW CREATE USER lucy") == "CREATE USER lucy IDENTIFIED WITH ssl_certificate CN \\'client2\\', \\'client3\\'\n"
assert instance.query("SELECT name, auth_type, auth_params FROM system.users WHERE name IN ['emma', 'lucy'] ORDER BY name") ==\
"emma\tssl_certificate\t{\"common_names\":[\"client2\"]}\n"\
"lucy\tssl_certificate\t{\"common_names\":[\"client2\",\"client3\"]}\n"
| [
"pytest.fixture",
"ssl.SSLContext",
"helpers.cluster.ClickHouseCluster",
"pytest.raises"
] | [((402, 429), 'helpers.cluster.ClickHouseCluster', 'ClickHouseCluster', (['__file__'], {}), '(__file__)\n', (419, 429), False, 'from helpers.cluster import ClickHouseCluster\n'), ((720, 764), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (734, 764), False, 'import pytest\n'), ((932, 971), 'ssl.SSLContext', 'ssl.SSLContext', (['ssl.PROTOCOL_TLS_CLIENT'], {}), '(ssl.PROTOCOL_TLS_CLIENT)\n', (946, 971), False, 'import ssl\n'), ((2264, 2288), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2277, 2288), False, 'import pytest\n'), ((2489, 2513), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2502, 2513), False, 'import pytest\n'), ((2680, 2704), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2693, 2704), False, 'import pytest\n'), ((2879, 2903), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2892, 2903), False, 'import pytest\n'), ((4400, 4424), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4413, 4424), False, 'import pytest\n'), ((4592, 4616), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4605, 4616), False, 'import pytest\n'), ((5451, 5475), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5464, 5475), False, 'import pytest\n')] |
import inspect
from functools import partial
from typing import Any, Callable, Dict, List
from .service_builder import ServiceBuilder
from .types import NameType
class Container:
def __init__(self, params):
self._services: Dict[NameType, ServiceBuilder] = {}
self._params: Dict[NameType, Any] = params
self._aliases: Dict[NameType, NameType] = {}
self._by_tag: Dict[str, List[NameType]] = {}
def add_alias(self, alias: NameType, service: NameType):
self._aliases[alias] = service
def has(self, name: NameType):
if self.get(name, raise_if_none=False):
return True
return False
def get(self, name: NameType, raise_if_none: bool = True):
if isinstance(name, str) and name.startswith("#"):
return self.get_tagged(name[1:])
if alias := self._aliases.get(name):
return self.get(alias)
if service := self._services.get(name):
return service.get_instance()
if param := self._params.get(name):
return param
if raise_if_none:
raise Exception(f"Cannot find {name}")
return None
def add_service(self, name: NameType, factory: Callable[[], Any], is_singleton: bool = True, tags: List[str] = []):
self._services[name] = ServiceBuilder(factory, is_singleton=is_singleton)
for tag in tags:
try:
self._by_tag[tag].append(name)
except KeyError:
self._by_tag[tag] = [name]
def get_tagged(self, tag: str):
names = self._by_tag.get(tag)
if not names:
return []
return [self.get(name) for name in names]
def inject(self, *, ignore_missing=True, **bindings):
container = self
def wrapper(f):
args = inspect.getfullargspec(f)
kwargs = {}
for arg_name in args.kwonlyargs:
if binding := bindings.get(arg_name):
value = container.get(binding, raise_if_none=not ignore_missing)
if value:
kwargs[arg_name] = value
continue
try:
value = container.get(args.annotations[arg_name], raise_if_none=False) or container.get(
arg_name, raise_if_none=not ignore_missing
)
if value:
kwargs[arg_name] = value
except KeyError:
value = container.get(arg_name, raise_if_none=not ignore_missing)
kwargs[arg_name] = value
return partial(f, **kwargs)
return wrapper
| [
"functools.partial",
"inspect.getfullargspec"
] | [((1832, 1857), 'inspect.getfullargspec', 'inspect.getfullargspec', (['f'], {}), '(f)\n', (1854, 1857), False, 'import inspect\n'), ((2657, 2677), 'functools.partial', 'partial', (['f'], {}), '(f, **kwargs)\n', (2664, 2677), False, 'from functools import partial\n')] |
from exceptions.gpm.auth_exceptions import AuthException
from gmusicapi import Mobileclient
from oauth2client.client import OAuth2Credentials
from wrappers.gpm.auth_wrapper import AuthWrapper
from unittest.mock import MagicMock
from unittest import mock
import unittest
class AuthWrapperTest(unittest.TestCase):
def test_authenticate_mobile_client(self):
mocked_credentials: OAuth2Credentials = MagicMock(OAuth2Credentials)
mocked_mobile_client: Mobileclient = MagicMock(Mobileclient)
AuthWrapper.authenticate_mobile_client(mobile_client=mocked_mobile_client, oauth_credentials=mocked_credentials)
mocked_mobile_client.perform_oauth.assert_not_called()
mocked_mobile_client.oauth_login.assert_called_once_with(oauth_credentials=mocked_credentials,
device_id=mock.ANY)
def test_auth_failure_with_invalid_code(self):
mocked_mobile_client: Mobileclient = MagicMock(Mobileclient)
mocked_mobile_client.perform_oauth.side_effect = Exception('Perform Oauth Exception')
with self.assertRaises(AuthException) as context:
AuthWrapper.authenticate_mobile_client(mobile_client=mocked_mobile_client)
self.assertEqual(AuthException, type(context.exception), "Failed to login, auth Exception raised")
def test_auth_failure_with_invalid_oauth_cred(self):
invalid_mocked_credentials: OAuth2Credentials = MagicMock(OAuth2Credentials)
mocked_mobile_client: Mobileclient = MagicMock(Mobileclient)
mocked_mobile_client.oauth_login.side_effect = Exception('Oauth Login Exception')
with self.assertRaises(AuthException) as context:
AuthWrapper.authenticate_mobile_client(mobile_client=mocked_mobile_client,
oauth_credentials=invalid_mocked_credentials)
self.assertEqual(AuthException, type(context.exception), "Failed to login, auth Exception raised")
| [
"wrappers.gpm.auth_wrapper.AuthWrapper.authenticate_mobile_client",
"unittest.mock.MagicMock"
] | [((411, 439), 'unittest.mock.MagicMock', 'MagicMock', (['OAuth2Credentials'], {}), '(OAuth2Credentials)\n', (420, 439), False, 'from unittest.mock import MagicMock\n'), ((485, 508), 'unittest.mock.MagicMock', 'MagicMock', (['Mobileclient'], {}), '(Mobileclient)\n', (494, 508), False, 'from unittest.mock import MagicMock\n'), ((518, 634), 'wrappers.gpm.auth_wrapper.AuthWrapper.authenticate_mobile_client', 'AuthWrapper.authenticate_mobile_client', ([], {'mobile_client': 'mocked_mobile_client', 'oauth_credentials': 'mocked_credentials'}), '(mobile_client=mocked_mobile_client,\n oauth_credentials=mocked_credentials)\n', (556, 634), False, 'from wrappers.gpm.auth_wrapper import AuthWrapper\n'), ((980, 1003), 'unittest.mock.MagicMock', 'MagicMock', (['Mobileclient'], {}), '(Mobileclient)\n', (989, 1003), False, 'from unittest.mock import MagicMock\n'), ((1466, 1494), 'unittest.mock.MagicMock', 'MagicMock', (['OAuth2Credentials'], {}), '(OAuth2Credentials)\n', (1475, 1494), False, 'from unittest.mock import MagicMock\n'), ((1540, 1563), 'unittest.mock.MagicMock', 'MagicMock', (['Mobileclient'], {}), '(Mobileclient)\n', (1549, 1563), False, 'from unittest.mock import MagicMock\n'), ((1169, 1243), 'wrappers.gpm.auth_wrapper.AuthWrapper.authenticate_mobile_client', 'AuthWrapper.authenticate_mobile_client', ([], {'mobile_client': 'mocked_mobile_client'}), '(mobile_client=mocked_mobile_client)\n', (1207, 1243), False, 'from wrappers.gpm.auth_wrapper import AuthWrapper\n'), ((1725, 1849), 'wrappers.gpm.auth_wrapper.AuthWrapper.authenticate_mobile_client', 'AuthWrapper.authenticate_mobile_client', ([], {'mobile_client': 'mocked_mobile_client', 'oauth_credentials': 'invalid_mocked_credentials'}), '(mobile_client=mocked_mobile_client,\n oauth_credentials=invalid_mocked_credentials)\n', (1763, 1849), False, 'from wrappers.gpm.auth_wrapper import AuthWrapper\n')] |
from datetime import datetime
from django.shortcuts import render
from django.http import Http404
from .models import ProgramSchedule
from utils.datedeux import DateDeux
# Create your views here.
def display_single_schedule(request, schedule_id):
try:
schedule = ProgramSchedule.objects.get(id=int(schedule_id))
except:
raise Http404("Schedule does not exist")
schedule_data = {
'program': schedule.program.name,
'center': schedule.center.center_name,
'zone': schedule.center.zone.zone_name,
'start_date': DateDeux.frompydate(schedule.start_date).dateformat("dd-mmm-yyyy"),
'end_date': DateDeux.frompydate(schedule.end_date).dateformat("dd-mmm-yyyy"),
'fee': schedule.donation_amount,
'offline_reg_code': schedule.event_management_code,
'online_reg_code': schedule.online_registration_code,
'contact': '%s @ %s, %s' % (schedule.contact_name or "Unknown",
schedule.contact_email,
schedule.contact_phone1),
'timestamp': datetime.now()
}
return render(request, "schedulemaster/view_single_program.html", {'schedule':
schedule_data})
| [
"django.shortcuts.render",
"datetime.datetime.now",
"utils.datedeux.DateDeux.frompydate",
"django.http.Http404"
] | [((1140, 1231), 'django.shortcuts.render', 'render', (['request', '"""schedulemaster/view_single_program.html"""', "{'schedule': schedule_data}"], {}), "(request, 'schedulemaster/view_single_program.html', {'schedule':\n schedule_data})\n", (1146, 1231), False, 'from django.shortcuts import render\n'), ((1103, 1117), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1115, 1117), False, 'from datetime import datetime\n'), ((354, 388), 'django.http.Http404', 'Http404', (['"""Schedule does not exist"""'], {}), "('Schedule does not exist')\n", (361, 388), False, 'from django.http import Http404\n'), ((571, 611), 'utils.datedeux.DateDeux.frompydate', 'DateDeux.frompydate', (['schedule.start_date'], {}), '(schedule.start_date)\n', (590, 611), False, 'from utils.datedeux import DateDeux\n'), ((659, 697), 'utils.datedeux.DateDeux.frompydate', 'DateDeux.frompydate', (['schedule.end_date'], {}), '(schedule.end_date)\n', (678, 697), False, 'from utils.datedeux import DateDeux\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit test for status"""
import sys
import unittest
from os.path import dirname, realpath
from pm.status import Y, Conserved, PM, NA
class RoutineTest(unittest.TestCase):
"""Routine test."""
def test_pm_status_gt_order(self):
"""Status should have a right order when doing gt-comparison"""
r = Y() > Conserved(aa_pm=0) > Conserved(nt_pm=8, aa_pm=0) \
> PM(aa_pm=0) > PM(aa_pm=5, nt_pm=10) > NA() > NA(gaps=1)
self.assertTrue(r)
self.assertTrue(NA(aa_pm=9999999) > NA(aa_pm=None))
def test_pm_status_lt_order(self):
"""Status should have a right order when doing lt-comparison"""
r = NA() < PM() < Conserved(aa_pm=0) < Y()
self.assertTrue(r)
def test_pm_status_le_order(self):
"""Status should give right value when doing le-comparison"""
r = (Y() <= Y()) and (Conserved(aa_pm=0) <= Conserved(aa_pm=0)) \
and (PM() <= PM()) and (NA() <= NA())
self.assertTrue(r)
def test_pm_status_ge_order(self):
"""Status should give right value when doing ge-comparison"""
r = (Y() >= Y()) and (Conserved(aa_pm=0) >= Conserved(aa_pm=0)) \
and (PM() >= PM()) and (NA() >= NA())
self.assertTrue(r)
def test_pm_status_eq_order(self):
"""Status should give right value when doing eq-comparison"""
r = (Y() == Y()) and (Conserved(aa_pm=0) == Conserved(aa_pm=0)) \
and (PM() == PM()) and (NA() == NA())
self.assertTrue(r)
def test_pm_status_ne_order(self):
"""Status should give right value when doing ne-comparison"""
r = NA() != PM() != Conserved(aa_pm=0) != Y()
self.assertTrue(r)
def test_convert_pm_status_to_string(self):
"""Convert status object to string"""
input_pairs = ((Y(), 'Y'),
(Conserved(aa_pm=0), 'Conserved'),
(PM(), 'PM'),
(NA(), 'NA'))
for status, str_status in input_pairs:
self.assertEqual(str(status), str_status)
def test_pm_status_orderablity(self):
"""pm.status should be orderable with gaps-removed but still consistent stdseq"""
self.assertTrue(PM(stdseq='ATGATT', nt_pm=1) > NA(stdseq='ATG-ATT', gaps=1, nt_pm=1))
class ErrorTest(unittest.TestCase):
def test_raise_TypeError1(self):
"""status should raise TypeError when comparing between status operand with incosistent stdseq"""
with self.assertRaises(TypeError):
Y(stdseq='atg') > Conserved(stdseq='tga', aa_pm=0) \
> PM(stdseq='aaa') > NA(stdseq='tgg')
if __name__ == '__main__':
unittest.main()
| [
"pm.status.Y",
"pm.status.NA",
"pm.status.PM",
"unittest.main",
"pm.status.Conserved"
] | [((2725, 2740), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2738, 2740), False, 'import unittest\n'), ((372, 375), 'pm.status.Y', 'Y', ([], {}), '()\n', (373, 375), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((378, 396), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (387, 396), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((399, 426), 'pm.status.Conserved', 'Conserved', ([], {'nt_pm': '(8)', 'aa_pm': '(0)'}), '(nt_pm=8, aa_pm=0)\n', (408, 426), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((443, 454), 'pm.status.PM', 'PM', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (445, 454), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((457, 478), 'pm.status.PM', 'PM', ([], {'aa_pm': '(5)', 'nt_pm': '(10)'}), '(aa_pm=5, nt_pm=10)\n', (459, 478), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((481, 485), 'pm.status.NA', 'NA', ([], {}), '()\n', (483, 485), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((488, 498), 'pm.status.NA', 'NA', ([], {'gaps': '(1)'}), '(gaps=1)\n', (490, 498), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((712, 716), 'pm.status.NA', 'NA', ([], {}), '()\n', (714, 716), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((719, 723), 'pm.status.PM', 'PM', ([], {}), '()\n', (721, 723), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((726, 744), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (735, 744), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((747, 750), 'pm.status.Y', 'Y', ([], {}), '()\n', (748, 750), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1687, 1691), 'pm.status.NA', 'NA', ([], {}), '()\n', (1689, 1691), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1695, 1699), 'pm.status.PM', 'PM', ([], {}), '()\n', (1697, 1699), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1703, 1721), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (1712, 1721), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1725, 1728), 'pm.status.Y', 'Y', ([], {}), '()\n', (1726, 1728), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((550, 567), 'pm.status.NA', 'NA', ([], {'aa_pm': '(9999999)'}), '(aa_pm=9999999)\n', (552, 567), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((570, 584), 'pm.status.NA', 'NA', ([], {'aa_pm': 'None'}), '(aa_pm=None)\n', (572, 584), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((902, 905), 'pm.status.Y', 'Y', ([], {}), '()\n', (903, 905), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((909, 912), 'pm.status.Y', 'Y', ([], {}), '()\n', (910, 912), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((919, 937), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (928, 937), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((941, 959), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (950, 959), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((980, 984), 'pm.status.PM', 'PM', ([], {}), '()\n', (982, 984), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((988, 992), 'pm.status.PM', 'PM', ([], {}), '()\n', (990, 992), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((999, 1003), 'pm.status.NA', 'NA', ([], {}), '()\n', (1001, 1003), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1007, 1011), 'pm.status.NA', 'NA', ([], {}), '()\n', (1009, 1011), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1164, 1167), 'pm.status.Y', 'Y', ([], {}), '()\n', (1165, 1167), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1171, 1174), 'pm.status.Y', 'Y', ([], {}), '()\n', (1172, 1174), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1181, 1199), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (1190, 1199), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1203, 1221), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (1212, 1221), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1242, 1246), 'pm.status.PM', 'PM', ([], {}), '()\n', (1244, 1246), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1250, 1254), 'pm.status.PM', 'PM', ([], {}), '()\n', (1252, 1254), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1261, 1265), 'pm.status.NA', 'NA', ([], {}), '()\n', (1263, 1265), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1269, 1273), 'pm.status.NA', 'NA', ([], {}), '()\n', (1271, 1273), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1426, 1429), 'pm.status.Y', 'Y', ([], {}), '()\n', (1427, 1429), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1433, 1436), 'pm.status.Y', 'Y', ([], {}), '()\n', (1434, 1436), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1443, 1461), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (1452, 1461), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1465, 1483), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (1474, 1483), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1504, 1508), 'pm.status.PM', 'PM', ([], {}), '()\n', (1506, 1508), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1512, 1516), 'pm.status.PM', 'PM', ([], {}), '()\n', (1514, 1516), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1523, 1527), 'pm.status.NA', 'NA', ([], {}), '()\n', (1525, 1527), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1531, 1535), 'pm.status.NA', 'NA', ([], {}), '()\n', (1533, 1535), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1876, 1879), 'pm.status.Y', 'Y', ([], {}), '()\n', (1877, 1879), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1912, 1930), 'pm.status.Conserved', 'Conserved', ([], {'aa_pm': '(0)'}), '(aa_pm=0)\n', (1921, 1930), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((1971, 1975), 'pm.status.PM', 'PM', ([], {}), '()\n', (1973, 1975), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2009, 2013), 'pm.status.NA', 'NA', ([], {}), '()\n', (2011, 2013), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2281, 2309), 'pm.status.PM', 'PM', ([], {'stdseq': '"""ATGATT"""', 'nt_pm': '(1)'}), "(stdseq='ATGATT', nt_pm=1)\n", (2283, 2309), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2312, 2349), 'pm.status.NA', 'NA', ([], {'stdseq': '"""ATG-ATT"""', 'gaps': '(1)', 'nt_pm': '(1)'}), "(stdseq='ATG-ATT', gaps=1, nt_pm=1)\n", (2314, 2349), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2589, 2604), 'pm.status.Y', 'Y', ([], {'stdseq': '"""atg"""'}), "(stdseq='atg')\n", (2590, 2604), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2607, 2639), 'pm.status.Conserved', 'Conserved', ([], {'stdseq': '"""tga"""', 'aa_pm': '(0)'}), "(stdseq='tga', aa_pm=0)\n", (2616, 2639), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2656, 2672), 'pm.status.PM', 'PM', ([], {'stdseq': '"""aaa"""'}), "(stdseq='aaa')\n", (2658, 2672), False, 'from pm.status import Y, Conserved, PM, NA\n'), ((2675, 2691), 'pm.status.NA', 'NA', ([], {'stdseq': '"""tgg"""'}), "(stdseq='tgg')\n", (2677, 2691), False, 'from pm.status import Y, Conserved, PM, NA\n')] |
#! /usr/bin/env python3
import math
import decimal
N_DIRECTIONS = 256
# If rounded up to 384 and last element has max value, it has the bit
# pattern, that prevents bisection from searching past the last element.
N_PRECALC_DRAW_DIST = 384
# Python round() uses "banker's rounding", i.e. "round half even"
def round_half_up(d):
return int(decimal.Decimal(d).to_integral_value(rounding=decimal.ROUND_HALF_UP))
def fixed_from_float(d):
if d >= 127.0:
return 0x7FFF
elif d <= -128.0:
return -0x8000
elif d >= 0.0:
return round_half_up(d * 256.0)
else:
return round_half_up(-d * -256.0)
def float_from_fixed(f):
return f/256.0
def phi_from_direction(i):
return i * 2.0 * math.pi / N_DIRECTIONS
def generate_array(f, el_type, name, comment, n_el, geterateValue):
f.write(f"// {comment}\n")
f.write(f"{el_type} {name}[] = {{")
for i in range(n_el):
if (i > 0):
f.write(",")
f.write("\n")
f.write(str(geterateValue(i)))
f.write("};\n")
def generate_direction_array(f, el_type, name, func, comment=None):
generate_array(f, el_type, name, comment if comment else name, N_DIRECTIONS, lambda i : func(i, phi_from_direction(i), math.sin(phi_from_direction(i)), math.cos(phi_from_direction(i))))
def generate_distance_array(f, el_type, name, func, comment=None):
generate_array(f, el_type, name, comment if comment else name, N_PRECALC_DRAW_DIST, lambda i : func(i, 16*i))
f = open("../tables.c", "w")
f.write("// Precalculated Look-up tables\n")
f.write("// Generated by " + __file__ + "\n")
f.write("\n")
f.write("#include <stdlib.h>\n")
f.write("\n")
generate_direction_array(f, "int", "f_dir_sin", \
lambda i, phi, sin_phi, cos_phi : fixed_from_float(sin_phi))
generate_direction_array(f, "int", "f_dir_cos", \
lambda i, phi, sin_phi, cos_phi : fixed_from_float(cos_phi))
generate_direction_array(f, "int", "f_dir_ctan", \
lambda i, phi, sin_phi, cos_phi : fixed_from_float(cos_phi / sin_phi if sin_phi != 0.0 else (127.0 if cos_phi > 0 else -128.0)), comment="ctan of direction, saturated")
generate_direction_array(f, "int", "f_dir_tan", \
lambda i, phi, sin_phi, cos_phi : fixed_from_float(sin_phi / cos_phi if cos_phi != 0.0 else (127.0 if sin_phi > 0 else -128.0)), comment="tan of direction, saturated")
generate_distance_array(f, "long", "f16_sqrs", \
lambda distidx, f_dist: (f_dist * f_dist) if (distidx < N_PRECALC_DRAW_DIST-1) else 0x7FFFFFFF, comment="Squares of distance, in 16-shifted fixed point, for fast sqrt")
def gen_height(distidx, f_dist):
if f_dist > 0:
height = round_half_up(65.0 / float_from_fixed(f_dist))
result = 95 - height;
return max(result, 0);
else:
return 0
generate_distance_array(f, "uchar", "draw_heigths", gen_height, comment="Heights of given distance index")
def gen_height1(distidx, f_dist):
if f_dist > 0:
height = round_half_up(100.0 / float_from_fixed(f_dist))
result = 97 + height;
return min(result, 191);
else:
return 191
generate_distance_array(f, "uchar", "draw_heigths1", gen_height1, comment="Heights of given distance index")
def gen_height(distidx, f_dist):
N_INTENSITIES = 17
INTENSITY_WHITE = 16
if f_dist > 0:
distance = float_from_fixed(f_dist)
intensity = INTENSITY_WHITE - round_half_up((16.0 * (1.0 / ((distance + 2.0) * 0.34))))
return max(0, min(intensity, INTENSITY_WHITE));
else:
return 0
generate_distance_array(f, "uchar", "draw_intens", gen_height, comment="Color intensities of given distance index")
f.close()
| [
"decimal.Decimal"
] | [((349, 367), 'decimal.Decimal', 'decimal.Decimal', (['d'], {}), '(d)\n', (364, 367), False, 'import decimal\n')] |
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by <NAME>, <<EMAIL>>
from KicadModTree.Vector import *
from KicadModTree.nodes.Node import Node
from .PolygoneLine import PolygoneLine
class RectLine(PolygoneLine):
r"""Add a Rect to the render tree
:param \**kwargs:
See below
:Keyword Arguments:
* *start* (``Vector2D``) --
start edge of the rect
* *end* (``Vector2D``) --
end edge of the rect
* *layer* (``str``) --
layer on which the rect is drawn
* *width* (``float``) --
width of the outer line (default: None, which means auto detection)
* *offset* (``Vector2D``, ``float``) --
offset of the rect line to the specified one
:Example:
>>> from KicadModTree import *
>>> RectLine(start=[-3, -2], end=[3, 2], layer='F.SilkS')
"""
def __init__(self, **kwargs):
self.start_pos = Vector2D(kwargs['start'])
self.end_pos = Vector2D(kwargs['end'])
# If specified, an 'offset' can be applied to the RectLine.
# For example, creating a border around a given Rect of a specified size
if kwargs.get('offset'):
# offset for the rect line
# e.g. for creating a rectLine 0.5mm LARGER than the given rect, or similar
offset = [0, 0]
# Has an offset / inset been specified?
if type(kwargs['offset']) in [int, float]:
offset[0] = offset[1] = kwargs['offset']
elif type(kwargs['offset']) in [list, tuple] and len(kwargs['offset']) == 2:
# Ensure that all offset params are numerical
if all([type(i) in [int, float] for i in kwargs['offset']]):
offset = kwargs['offset']
# For the offset to work properly, start-pos must be top-left, and end-pos must be bottom-right
x1 = min(self.start_pos.x, self.end_pos.x)
x2 = max(self.start_pos.x, self.end_pos.x)
y1 = min(self.start_pos.y, self.end_pos.y)
y2 = max(self.start_pos.y, self.end_pos.y)
# Put the offset back in
self.start_pos.x = x1 - offset[0]
self.start_pos.y = y1 - offset[1]
self.end_pos.x = x2 + offset[0]
self.end_pos.y = y2 + offset[1]
polygone_line = [{'x': self.start_pos.x, 'y': self.start_pos.y},
{'x': self.start_pos.x, 'y': self.end_pos.y},
{'x': self.end_pos.x, 'y': self.end_pos.y},
{'x': self.end_pos.x, 'y': self.start_pos.y},
{'x': self.start_pos.x, 'y': self.start_pos.y}]
PolygoneLine.__init__(self, polygone=polygone_line, layer=kwargs['layer'], width=kwargs.get('width'))
def _getRenderTreeText(self):
render_text = Node._getRenderTreeText(self)
render_text += " [start: [x: {sx}, y: {sy}] end: [x: {ex}, y: {ey}]]".format(sx=self.start_pos.x,
sy=self.start_pos.y,
ex=self.end_pos.x,
ey=self.end_pos.y)
return render_text
| [
"KicadModTree.nodes.Node.Node._getRenderTreeText"
] | [((3461, 3490), 'KicadModTree.nodes.Node.Node._getRenderTreeText', 'Node._getRenderTreeText', (['self'], {}), '(self)\n', (3484, 3490), False, 'from KicadModTree.nodes.Node import Node\n')] |
# Generated by Django 3.0.3 on 2020-05-30 20:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('microimprocessing', '0005_auto_20200530_2231'),
]
operations = [
migrations.AlterField(
model_name='serverdatafilename',
name='file_path',
field=models.FilePathField(path='C:\\Users\\Jirik\\data', recursive=True, verbose_name='File Path on server'),
),
]
| [
"django.db.models.FilePathField"
] | [((361, 468), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'path': '"""C:\\\\Users\\\\Jirik\\\\data"""', 'recursive': '(True)', 'verbose_name': '"""File Path on server"""'}), "(path='C:\\\\Users\\\\Jirik\\\\data', recursive=True,\n verbose_name='File Path on server')\n", (381, 468), False, 'from django.db import migrations, models\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
import json
import unittest
import webtest
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import ndb
from google.appengine.ext import testbed
import apprtc
import constants
import gcm_register
import gcmrecord
import room
import test_utilities
class DeclinePageHandlerTest(test_utilities.BasePageHandlerTest):
HOST = 'http://localhost'
def requestDeclineAndVerify(self, room_id, callee_gcm_id, expected_response):
body = {
constants.PARAM_CALLEE_GCM_ID: callee_gcm_id
}
response = self.makePostRequest('/decline/' + room_id, json.dumps(body))
self.verifyResultCode(response, expected_response)
def testDecline(self):
self.addTestData()
room_id = 'callercallee'
self.requestCallAndVerify(room_id, 'caller1gcm1', 'callee1',
constants.RESPONSE_SUCCESS)
self.assertEqual(True, room.has_room(self.HOST, room_id))
self.requestDeclineAndVerify(room_id, 'callee1gcm1',
constants.RESPONSE_SUCCESS)
self.assertEqual(room.Room.STATE_EMPTY, room.get_room_state(self.HOST, room_id))
def testJoinAfterDecline(self):
self.addTestData()
room_id = 'callercallee'
self.requestCallAndVerify(room_id, 'caller1gcm1', 'callee1',
constants.RESPONSE_SUCCESS)
self.requestDeclineAndVerify(room_id, 'callee1gcm1',
constants.RESPONSE_SUCCESS)
self.requestCallAndVerify(room_id, 'caller1gcm1', 'callee1',
constants.RESPONSE_SUCCESS)
def testDeclineInvalidInput(self):
body = {
constants.PARAM_CALLEE_GCM_ID: 'callee1gcm1'
}
self.checkInvalidRequests('/bind/verify', body.keys())
def testDeclineRoomNotFound(self):
self.addTestData()
room_id = 'callercallee'
self.requestDeclineAndVerify(room_id, 'callee1gcm1',
constants.RESPONSE_INVALID_ROOM)
def testDeclineInvalidCallee(self):
self.addTestData()
room_id = 'callercallee'
self.requestCallAndVerify(room_id, 'caller1gcm1', 'callee4',
constants.RESPONSE_SUCCESS)
# Wrong callee for room.
self.requestDeclineAndVerify(room_id, 'callee2gcm1',
constants.RESPONSE_INVALID_CALLEE)
# Right callee, but unverified gcm id.
# TODO (chuckhays): Once registration is enabled, this test should
# return a result code of constants.RESPONSE_INVALID_CALLEE.
self.requestDeclineAndVerify(room_id, 'callee4gcm2',
constants.RESPONSE_SUCCESS)
def testDeclineAcceptedRoom(self):
self.addTestData()
room_id = 'callercallee'
self.requestCallAndVerify(room_id, 'caller1gcm1', 'callee1',
constants.RESPONSE_SUCCESS)
# Accept the room so it is full.
self.requestAcceptAndVerify(room_id, 'callee1gcm1',
constants.RESPONSE_SUCCESS)
# Attempt to decline the full room.
self.requestDeclineAndVerify(room_id, 'callee1gcm2',
constants.RESPONSE_INVALID_ROOM)
def testDeclineByCaller(self):
self.addTestData()
room_id = 'callercallee'
self.requestCallAndVerify(room_id, 'caller1gcm1', 'callee1',
constants.RESPONSE_SUCCESS)
# Attempt to decline as the caller.
self.requestDeclineAndVerify(room_id, 'caller1gcm1',
constants.RESPONSE_INVALID_CALLEE)
def testDeclineWrongRoomType(self):
self.addTestData()
# Room created by apprtc.
room_id = 'room2'
response = self.makePostRequest('/join/' + room_id)
self.verifyResultCode(response, constants.RESPONSE_SUCCESS)
# Attempt to decline the room created by apprtc.
self.requestDeclineAndVerify(room_id, 'callee1gcm1',
constants.RESPONSE_INVALID_ROOM)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"json.dumps",
"room.has_room",
"room.get_room_state"
] | [((3685, 3700), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3698, 3700), False, 'import unittest\n'), ((644, 660), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (654, 660), False, 'import json\n'), ((923, 956), 'room.has_room', 'room.has_room', (['self.HOST', 'room_id'], {}), '(self.HOST, room_id)\n', (936, 956), False, 'import room\n'), ((1095, 1134), 'room.get_room_state', 'room.get_room_state', (['self.HOST', 'room_id'], {}), '(self.HOST, room_id)\n', (1114, 1134), False, 'import room\n')] |
import math
import torch
import numpy as np
from collections import OrderedDict, defaultdict
from transformers import BertTokenizer
sentiment2id = {'negative': 3, 'neutral': 4, 'positive': 5}
label = ['N', 'B-A', 'I-A', 'A', 'B-O', 'I-O', 'O', 'negative', 'neutral', 'positive']
# label2id = {'N': 0, 'B-A': 1, 'I-A': 2, 'A': 3, 'B-O': 4, 'I-O': 5, 'O': 6, 'negative': 7, 'neutral': 8, 'positive': 9}
label2id, id2label = OrderedDict(), OrderedDict()
for i, v in enumerate(label):
label2id[v] = i
id2label[i] = v
def get_spans(tags):
'''for BIO tag'''
tags = tags.strip().split()
length = len(tags)
spans = []
start = -1
for i in range(length):
if tags[i].endswith('B'):
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[i].endswith('O'):
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
def get_evaluate_spans(tags, length, token_range):
'''for BIO tag'''
spans = []
start = -1
for i in range(length):
l, r = token_range[i]
if tags[l] == -1:
continue
elif tags[l] == 1:
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[l] == 0:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
class Instance(object):
def __init__(self, tokenizer, sentence_pack, post_vocab, deprel_vocab, postag_vocab, synpost_vocab, args):
self.id = sentence_pack['id']
self.sentence = sentence_pack['sentence']
self.tokens = self.sentence.strip().split()
self.postag = sentence_pack['postag']
self.head = sentence_pack['head']
self.deprel = sentence_pack['deprel']
self.sen_length = len(self.tokens)
self.token_range = []
self.bert_tokens = tokenizer.encode(self.sentence)
self.length = len(self.bert_tokens)
self.bert_tokens_padding = torch.zeros(args.max_sequence_len).long()
self.aspect_tags = torch.zeros(args.max_sequence_len).long()
self.opinion_tags = torch.zeros(args.max_sequence_len).long()
self.tags = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
self.tags_symmetry = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
self.mask = torch.zeros(args.max_sequence_len)
for i in range(self.length):
self.bert_tokens_padding[i] = self.bert_tokens[i]
self.mask[:self.length] = 1
token_start = 1
for i, w, in enumerate(self.tokens):
token_end = token_start + len(tokenizer.encode(w, add_special_tokens=False))
self.token_range.append([token_start, token_end-1])
token_start = token_end
assert self.length == self.token_range[-1][-1]+2
self.aspect_tags[self.length:] = -1
self.aspect_tags[0] = -1
self.aspect_tags[self.length-1] = -1
self.opinion_tags[self.length:] = -1
self.opinion_tags[0] = -1
self.opinion_tags[self.length - 1] = -1
self.tags[:, :] = -1
self.tags_symmetry[:, :] = -1
for i in range(1, self.length-1):
for j in range(i, self.length-1):
self.tags[i][j] = 0
for triple in sentence_pack['triples']:
aspect = triple['target_tags']
opinion = triple['opinion_tags']
aspect_span = get_spans(aspect)
opinion_span = get_spans(opinion)
'''set tag for aspect'''
for l, r in aspect_span:
start = self.token_range[l][0]
end = self.token_range[r][1]
for i in range(start, end+1):
for j in range(i, end+1):
if j == start:
self.tags[i][j] = label2id['B-A']
elif j == i:
self.tags[i][j] = label2id['I-A']
else:
self.tags[i][j] = label2id['A']
for i in range(l, r+1):
set_tag = 1 if i == l else 2
al, ar = self.token_range[i]
self.aspect_tags[al] = set_tag
self.aspect_tags[al+1:ar+1] = -1
'''mask positions of sub words'''
self.tags[al+1:ar+1, :] = -1
self.tags[:, al+1:ar+1] = -1
'''set tag for opinion'''
for l, r in opinion_span:
start = self.token_range[l][0]
end = self.token_range[r][1]
for i in range(start, end+1):
for j in range(i, end+1):
if j == start:
self.tags[i][j] = label2id['B-O']
elif j == i:
self.tags[i][j] = label2id['I-O']
else:
self.tags[i][j] = label2id['O']
for i in range(l, r+1):
set_tag = 1 if i == l else 2
pl, pr = self.token_range[i]
self.opinion_tags[pl] = set_tag
self.opinion_tags[pl+1:pr+1] = -1
self.tags[pl+1:pr+1, :] = -1
self.tags[:, pl+1:pr+1] = -1
for al, ar in aspect_span:
for pl, pr in opinion_span:
for i in range(al, ar+1):
for j in range(pl, pr+1):
sal, sar = self.token_range[i]
spl, spr = self.token_range[j]
self.tags[sal:sar+1, spl:spr+1] = -1
if args.task == 'pair':
if i > j:
self.tags[spl][sal] = 7
else:
self.tags[sal][spl] = 7
elif args.task == 'triplet':
if i > j:
self.tags[spl][sal] = label2id[triple['sentiment']]
else:
self.tags[sal][spl] = label2id[triple['sentiment']]
for i in range(1, self.length-1):
for j in range(i, self.length-1):
self.tags_symmetry[i][j] = self.tags[i][j]
self.tags_symmetry[j][i] = self.tags_symmetry[i][j]
'''1. generate position index of the word pair'''
self.word_pair_position = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
for i in range(len(self.tokens)):
start, end = self.token_range[i][0], self.token_range[i][1]
for j in range(len(self.tokens)):
s, e = self.token_range[j][0], self.token_range[j][1]
for row in range(start, end + 1):
for col in range(s, e + 1):
self.word_pair_position[row][col] = post_vocab.stoi.get(abs(row - col), post_vocab.unk_index)
"""2. generate deprel index of the word pair"""
self.word_pair_deprel = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
for i in range(len(self.tokens)):
start = self.token_range[i][0]
end = self.token_range[i][1]
for j in range(start, end + 1):
s, e = self.token_range[self.head[i] - 1] if self.head[i] != 0 else (0, 0)
for k in range(s, e + 1):
self.word_pair_deprel[j][k] = deprel_vocab.stoi.get(self.deprel[i])
self.word_pair_deprel[k][j] = deprel_vocab.stoi.get(self.deprel[i])
self.word_pair_deprel[j][j] = deprel_vocab.stoi.get('self')
"""3. generate POS tag index of the word pair"""
self.word_pair_pos = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
for i in range(len(self.tokens)):
start, end = self.token_range[i][0], self.token_range[i][1]
for j in range(len(self.tokens)):
s, e = self.token_range[j][0], self.token_range[j][1]
for row in range(start, end + 1):
for col in range(s, e + 1):
self.word_pair_pos[row][col] = postag_vocab.stoi.get(tuple(sorted([self.postag[i], self.postag[j]])))
"""4. generate synpost index of the word pair"""
self.word_pair_synpost = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
tmp = [[0]*len(self.tokens) for _ in range(len(self.tokens))]
for i in range(len(self.tokens)):
j = self.head[i]
if j == 0:
continue
tmp[i][j - 1] = 1
tmp[j - 1][i] = 1
tmp_dict = defaultdict(list)
for i in range(len(self.tokens)):
for j in range(len(self.tokens)):
if tmp[i][j] == 1:
tmp_dict[i].append(j)
word_level_degree = [[4]*len(self.tokens) for _ in range(len(self.tokens))]
for i in range(len(self.tokens)):
node_set = set()
word_level_degree[i][i] = 0
node_set.add(i)
for j in tmp_dict[i]:
if j not in node_set:
word_level_degree[i][j] = 1
node_set.add(j)
for k in tmp_dict[j]:
if k not in node_set:
word_level_degree[i][k] = 2
node_set.add(k)
for g in tmp_dict[k]:
if g not in node_set:
word_level_degree[i][g] = 3
node_set.add(g)
for i in range(len(self.tokens)):
start, end = self.token_range[i][0], self.token_range[i][1]
for j in range(len(self.tokens)):
s, e = self.token_range[j][0], self.token_range[j][1]
for row in range(start, end + 1):
for col in range(s, e + 1):
self.word_pair_synpost[row][col] = synpost_vocab.stoi.get(word_level_degree[i][j], synpost_vocab.unk_index)
def load_data_instances(sentence_packs, post_vocab, deprel_vocab, postag_vocab, synpost_vocab, args):
instances = list()
tokenizer = BertTokenizer.from_pretrained(args.bert_model_path)
for sentence_pack in sentence_packs:
instances.append(Instance(tokenizer, sentence_pack, post_vocab, deprel_vocab, postag_vocab, synpost_vocab, args))
return instances
class DataIterator(object):
def __init__(self, instances, args):
self.instances = instances
self.args = args
self.batch_count = math.ceil(len(instances)/args.batch_size)
def get_batch(self, index):
sentence_ids = []
sentences = []
sens_lens = []
token_ranges = []
bert_tokens = []
lengths = []
masks = []
aspect_tags = []
opinion_tags = []
tags = []
tags_symmetry = []
word_pair_position = []
word_pair_deprel = []
word_pair_pos = []
word_pair_synpost = []
for i in range(index * self.args.batch_size,
min((index + 1) * self.args.batch_size, len(self.instances))):
sentence_ids.append(self.instances[i].id)
sentences.append(self.instances[i].sentence)
sens_lens.append(self.instances[i].sen_length)
token_ranges.append(self.instances[i].token_range)
bert_tokens.append(self.instances[i].bert_tokens_padding)
lengths.append(self.instances[i].length)
masks.append(self.instances[i].mask)
aspect_tags.append(self.instances[i].aspect_tags)
opinion_tags.append(self.instances[i].opinion_tags)
tags.append(self.instances[i].tags)
tags_symmetry.append(self.instances[i].tags_symmetry)
word_pair_position.append(self.instances[i].word_pair_position)
word_pair_deprel.append(self.instances[i].word_pair_deprel)
word_pair_pos.append(self.instances[i].word_pair_pos)
word_pair_synpost.append(self.instances[i].word_pair_synpost)
bert_tokens = torch.stack(bert_tokens).to(self.args.device)
lengths = torch.tensor(lengths).to(self.args.device)
masks = torch.stack(masks).to(self.args.device)
aspect_tags = torch.stack(aspect_tags).to(self.args.device)
opinion_tags = torch.stack(opinion_tags).to(self.args.device)
tags = torch.stack(tags).to(self.args.device)
tags_symmetry = torch.stack(tags_symmetry).to(self.args.device)
word_pair_position = torch.stack(word_pair_position).to(self.args.device)
word_pair_deprel = torch.stack(word_pair_deprel).to(self.args.device)
word_pair_pos = torch.stack(word_pair_pos).to(self.args.device)
word_pair_synpost = torch.stack(word_pair_synpost).to(self.args.device)
return sentence_ids, sentences, bert_tokens, lengths, masks, sens_lens, token_ranges, aspect_tags, tags, \
word_pair_position, word_pair_deprel, word_pair_pos, word_pair_synpost, tags_symmetry
| [
"collections.OrderedDict",
"torch.stack",
"transformers.BertTokenizer.from_pretrained",
"torch.tensor",
"collections.defaultdict",
"torch.zeros"
] | [((426, 439), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (437, 439), False, 'from collections import OrderedDict, defaultdict\n'), ((441, 454), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (452, 454), False, 'from collections import OrderedDict, defaultdict\n'), ((10672, 10723), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_model_path'], {}), '(args.bert_model_path)\n', (10701, 10723), False, 'from transformers import BertTokenizer\n'), ((2568, 2602), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len'], {}), '(args.max_sequence_len)\n', (2579, 2602), False, 'import torch\n'), ((9108, 9125), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9119, 9125), False, 'from collections import OrderedDict, defaultdict\n'), ((2188, 2222), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len'], {}), '(args.max_sequence_len)\n', (2199, 2222), False, 'import torch\n'), ((2257, 2291), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len'], {}), '(args.max_sequence_len)\n', (2268, 2291), False, 'import torch\n'), ((2327, 2361), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len'], {}), '(args.max_sequence_len)\n', (2338, 2361), False, 'import torch\n'), ((2389, 2446), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len', 'args.max_sequence_len'], {}), '(args.max_sequence_len, args.max_sequence_len)\n', (2400, 2446), False, 'import torch\n'), ((2483, 2540), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len', 'args.max_sequence_len'], {}), '(args.max_sequence_len, args.max_sequence_len)\n', (2494, 2540), False, 'import torch\n'), ((6813, 6870), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len', 'args.max_sequence_len'], {}), '(args.max_sequence_len, args.max_sequence_len)\n', (6824, 6870), False, 'import torch\n'), ((7421, 7478), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len', 'args.max_sequence_len'], {}), '(args.max_sequence_len, args.max_sequence_len)\n', (7432, 7478), False, 'import torch\n'), ((8140, 8197), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len', 'args.max_sequence_len'], {}), '(args.max_sequence_len, args.max_sequence_len)\n', (8151, 8197), False, 'import torch\n'), ((8774, 8831), 'torch.zeros', 'torch.zeros', (['args.max_sequence_len', 'args.max_sequence_len'], {}), '(args.max_sequence_len, args.max_sequence_len)\n', (8785, 8831), False, 'import torch\n'), ((12617, 12641), 'torch.stack', 'torch.stack', (['bert_tokens'], {}), '(bert_tokens)\n', (12628, 12641), False, 'import torch\n'), ((12681, 12702), 'torch.tensor', 'torch.tensor', (['lengths'], {}), '(lengths)\n', (12693, 12702), False, 'import torch\n'), ((12740, 12758), 'torch.stack', 'torch.stack', (['masks'], {}), '(masks)\n', (12751, 12758), False, 'import torch\n'), ((12802, 12826), 'torch.stack', 'torch.stack', (['aspect_tags'], {}), '(aspect_tags)\n', (12813, 12826), False, 'import torch\n'), ((12871, 12896), 'torch.stack', 'torch.stack', (['opinion_tags'], {}), '(opinion_tags)\n', (12882, 12896), False, 'import torch\n'), ((12933, 12950), 'torch.stack', 'torch.stack', (['tags'], {}), '(tags)\n', (12944, 12950), False, 'import torch\n'), ((12996, 13022), 'torch.stack', 'torch.stack', (['tags_symmetry'], {}), '(tags_symmetry)\n', (13007, 13022), False, 'import torch\n'), ((13074, 13105), 'torch.stack', 'torch.stack', (['word_pair_position'], {}), '(word_pair_position)\n', (13085, 13105), False, 'import torch\n'), ((13154, 13183), 'torch.stack', 'torch.stack', (['word_pair_deprel'], {}), '(word_pair_deprel)\n', (13165, 13183), False, 'import torch\n'), ((13229, 13255), 'torch.stack', 'torch.stack', (['word_pair_pos'], {}), '(word_pair_pos)\n', (13240, 13255), False, 'import torch\n'), ((13305, 13335), 'torch.stack', 'torch.stack', (['word_pair_synpost'], {}), '(word_pair_synpost)\n', (13316, 13335), False, 'import torch\n')] |
import platform as platform_module
import pytest
from cibuildwheel.__main__ import get_build_identifiers
from cibuildwheel.environment import parse_environment
from cibuildwheel.options import Options, _get_pinned_docker_images
from .utils import get_default_command_line_arguments
PYPROJECT_1 = """
[tool.cibuildwheel]
build = ["cp38*", "cp37*"]
environment = {FOO="BAR"}
test-command = "pyproject"
manylinux-x86_64-image = "manylinux1"
environment-pass = ["<PASSWORD>"]
[tool.cibuildwheel.macos]
test-requires = "else"
[[tool.cibuildwheel.overrides]]
select = "cp37*"
test-command = "pyproject-override"
manylinux-x86_64-image = "manylinux2014"
"""
def test_options_1(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
options = Options(platform="linux", command_line_arguments=args)
identifiers = get_build_identifiers(
platform="linux",
build_selector=options.globals.build_selector,
architectures=options.globals.architectures,
)
override_display = """\
test_command: 'pyproject'
cp37-manylinux_x86_64: 'pyproject-override'"""
print(options.summary(identifiers))
assert override_display in options.summary(identifiers)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment == parse_environment('FOO="BAR"')
all_pinned_docker_images = _get_pinned_docker_images()
pinned_x86_64_docker_image = all_pinned_docker_images["x86_64"]
local = options.build_options("cp38-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject"
assert local.manylinux_images["x86_64"] == pinned_x86_64_docker_image["manylinux1"]
local = options.build_options("cp37-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject-override"
assert local.manylinux_images["x86_64"] == pinned_x86_64_docker_image["manylinux2014"]
def test_passthrough(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
monkeypatch.setenv("EXAMPLE_ENV", "ONE")
options = Options(platform="linux", command_line_arguments=args)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment.as_dictionary(prev_environment={}) == {
"FOO": "BAR",
"EXAMPLE_ENV": "ONE",
}
@pytest.mark.parametrize(
"env_var_value",
[
"normal value",
'"value wrapped in quotes"',
"an unclosed single-quote: '",
'an unclosed double-quote: "',
"string\nwith\ncarriage\nreturns\n",
"a trailing backslash \\",
],
)
def test_passthrough_evil(tmp_path, monkeypatch, env_var_value):
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
monkeypatch.setenv("CIBW_ENVIRONMENT_PASS_LINUX", "ENV_VAR")
options = Options(platform="linux", command_line_arguments=args)
monkeypatch.setenv("ENV_VAR", env_var_value)
parsed_environment = options.build_options(identifier=None).environment
assert parsed_environment.as_dictionary(prev_environment={}) == {"ENV_VAR": env_var_value}
| [
"cibuildwheel.options.Options",
"cibuildwheel.__main__.get_build_identifiers",
"pytest.mark.parametrize",
"cibuildwheel.options._get_pinned_docker_images",
"cibuildwheel.environment.parse_environment"
] | [((2800, 3025), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env_var_value"""', '[\'normal value\', \'"value wrapped in quotes"\', "an unclosed single-quote: \'",\n \'an unclosed double-quote: "\', """string\nwith\ncarriage\nreturns\n""",\n \'a trailing backslash \\\\\']'], {}), '(\'env_var_value\', [\'normal value\',\n \'"value wrapped in quotes"\', "an unclosed single-quote: \'",\n \'an unclosed double-quote: "\', """string\nwith\ncarriage\nreturns\n""",\n \'a trailing backslash \\\\\'])\n', (2823, 3025), False, 'import pytest\n'), ((967, 1021), 'cibuildwheel.options.Options', 'Options', ([], {'platform': '"""linux"""', 'command_line_arguments': 'args'}), "(platform='linux', command_line_arguments=args)\n", (974, 1021), False, 'from cibuildwheel.options import Options, _get_pinned_docker_images\n'), ((1041, 1177), 'cibuildwheel.__main__.get_build_identifiers', 'get_build_identifiers', ([], {'platform': '"""linux"""', 'build_selector': 'options.globals.build_selector', 'architectures': 'options.globals.architectures'}), "(platform='linux', build_selector=options.globals.\n build_selector, architectures=options.globals.architectures)\n", (1062, 1177), False, 'from cibuildwheel.__main__ import get_build_identifiers\n'), ((1590, 1617), 'cibuildwheel.options._get_pinned_docker_images', '_get_pinned_docker_images', ([], {}), '()\n', (1615, 1617), False, 'from cibuildwheel.options import Options, _get_pinned_docker_images\n'), ((2530, 2584), 'cibuildwheel.options.Options', 'Options', ([], {'platform': '"""linux"""', 'command_line_arguments': 'args'}), "(platform='linux', command_line_arguments=args)\n", (2537, 2584), False, 'from cibuildwheel.options import Options, _get_pinned_docker_images\n'), ((3380, 3434), 'cibuildwheel.options.Options', 'Options', ([], {'platform': '"""linux"""', 'command_line_arguments': 'args'}), "(platform='linux', command_line_arguments=args)\n", (3387, 3434), False, 'from cibuildwheel.options import Options, _get_pinned_docker_images\n'), ((1527, 1557), 'cibuildwheel.environment.parse_environment', 'parse_environment', (['"""FOO="BAR\\""""'], {}), '(\'FOO="BAR"\')\n', (1544, 1557), False, 'from cibuildwheel.environment import parse_environment\n')] |
import re
from discord.ext import commands
from utils import sql
from utils.functions import func
from cogs.Core.Rules import Rules
from cogs.Core.Language import Language
class MassEmoji(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.store = {} # Guilds Cache
@commands.Cog.listener()
async def on_message(self, message):
if not self.Init(message):
return
guild = message.guild
self.store[guild.id][message.author.id]["String"] = message.content
self.store[guild.id][message.author.id]["Count"] = 0
self.store[guild.id][message.author.id]["Count"] = len(re.findall(u'[\U00010000-\U0010ffff]', message.content)) # Normal Emojis
self.store[guild.id][message.author.id]["Count"] += len(re.findall('<(?P<animated>a)?:(?P<name>[0-9a-zA-Z_]{2,32}):(?P<id>[0-9]{15,21})>', message.content)) # Custom Emojis
if self.store[guild.id][message.author.id]["Count"] > self.bot.cache[guild.id]["AutoMod"][4]["Ratelimit"]:
member = message.author
await message.channel.send(embed=func.AutoModInfraction(self, guild, member, len(sql.GetInfractions(guild.id, member.id)), Language.get(message.guild.id, self.bot.cache, "AutoMod", "Emojis")), delete_after=30)
if self.bot.cache[message.guild.id]["AutoMod"][4]["Enabled"] == 1:
if self.bot.cache[message.guild.id]["Logs"]:
channel = await self.bot.fetch_channel(self.bot.cache[message.guild.id]["Logs"])
await channel.send(embed=func.AutoModInfraction(self, guild, member, len(sql.GetInfractions(guild.id, member.id)), Language.get(message.guild.id, self.bot.cache, "AutoMod", "Emojis")))
sql.Warn(guild.id, member.id, 702141833437773824, Language.get(message.guild.id, self.bot.cache, "AutoMod", "Emojis"))
sql.LogMe(guild.id, 9, "{} {}. {}: {}".format(Language.get(message.guild.id, self.bot.cache, "AutoMod", "Warned"), member.name, Language.get(message.guild.id, self.bot.cache, "AutoMod", "Reason"), Language.get(message.guild.id, self.bot.cache, "AutoMod", "Emojis")))
await Rules.DoRule(self, member, message.guild, self.bot.tobemuted, self.bot.tobekicked, self.bot.tobebanned)
try:
await message.delete()
except:
pass
self.store[guild.id][member.id]['Count'] = 0
self.store[guild.id][member.id]['String'] = None
def Init(self, message):
if not message.guild or message.author.bot:
return
if len(message.content) <= 5:
return
try:
if self.bot.cache[message.guild.id]["AutoMod"][4]["Enabled"] < 1:
return False
except:
return False
if not self.bot.cache[message.guild.id]["AutoMod"][4]["Ratelimit"]:
return False
if self.bot.cache[message.guild.id]["AutoMod"][4]["Ignored"] and str(message.channel.id) in self.bot.cache[message.guild.id]["AutoMod"][4]["Ignored"]:
return False
if self.bot.cache[message.guild.id]["IgnoredRoles"]:
for role in message.author.roles:
if str(role.id) in self.bot.cache[message.guild.id]["IgnoredRoles"]:
return False
if not message.guild.id in self.store:
self.store[message.guild.id] = {}
if not message.author.id in self.store[message.guild.id]:
self.store[message.guild.id][message.author.id] = {"String": None, "Count": 0}
return True
def setup(bot):
bot.add_cog(MassEmoji(bot))
| [
"discord.ext.commands.Cog.listener",
"utils.sql.GetInfractions",
"cogs.Core.Language.Language.get",
"cogs.Core.Rules.Rules.DoRule",
"re.findall"
] | [((304, 327), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (325, 327), False, 'from discord.ext import commands\n'), ((656, 702), 're.findall', 're.findall', (['u"""[𐀀-\U0010ffff]"""', 'message.content'], {}), "(u'[𐀀-\\U0010ffff]', message.content)\n", (666, 702), False, 'import re\n'), ((793, 901), 're.findall', 're.findall', (['"""<(?P<animated>a)?:(?P<name>[0-9a-zA-Z_]{2,32}):(?P<id>[0-9]{15,21})>"""', 'message.content'], {}), "(\n '<(?P<animated>a)?:(?P<name>[0-9a-zA-Z_]{2,32}):(?P<id>[0-9]{15,21})>',\n message.content)\n", (803, 901), False, 'import re\n'), ((1798, 1865), 'cogs.Core.Language.Language.get', 'Language.get', (['message.guild.id', 'self.bot.cache', '"""AutoMod"""', '"""Emojis"""'], {}), "(message.guild.id, self.bot.cache, 'AutoMod', 'Emojis')\n", (1810, 1865), False, 'from cogs.Core.Language import Language\n'), ((2172, 2280), 'cogs.Core.Rules.Rules.DoRule', 'Rules.DoRule', (['self', 'member', 'message.guild', 'self.bot.tobemuted', 'self.bot.tobekicked', 'self.bot.tobebanned'], {}), '(self, member, message.guild, self.bot.tobemuted, self.bot.\n tobekicked, self.bot.tobebanned)\n', (2184, 2280), False, 'from cogs.Core.Rules import Rules\n'), ((1929, 1996), 'cogs.Core.Language.Language.get', 'Language.get', (['message.guild.id', 'self.bot.cache', '"""AutoMod"""', '"""Warned"""'], {}), "(message.guild.id, self.bot.cache, 'AutoMod', 'Warned')\n", (1941, 1996), False, 'from cogs.Core.Language import Language\n'), ((2011, 2078), 'cogs.Core.Language.Language.get', 'Language.get', (['message.guild.id', 'self.bot.cache', '"""AutoMod"""', '"""Reason"""'], {}), "(message.guild.id, self.bot.cache, 'AutoMod', 'Reason')\n", (2023, 2078), False, 'from cogs.Core.Language import Language\n'), ((2080, 2147), 'cogs.Core.Language.Language.get', 'Language.get', (['message.guild.id', 'self.bot.cache', '"""AutoMod"""', '"""Emojis"""'], {}), "(message.guild.id, self.bot.cache, 'AutoMod', 'Emojis')\n", (2092, 2147), False, 'from cogs.Core.Language import Language\n'), ((1197, 1264), 'cogs.Core.Language.Language.get', 'Language.get', (['message.guild.id', 'self.bot.cache', '"""AutoMod"""', '"""Emojis"""'], {}), "(message.guild.id, self.bot.cache, 'AutoMod', 'Emojis')\n", (1209, 1264), False, 'from cogs.Core.Language import Language\n'), ((1155, 1194), 'utils.sql.GetInfractions', 'sql.GetInfractions', (['guild.id', 'member.id'], {}), '(guild.id, member.id)\n', (1173, 1194), False, 'from utils import sql\n'), ((1661, 1728), 'cogs.Core.Language.Language.get', 'Language.get', (['message.guild.id', 'self.bot.cache', '"""AutoMod"""', '"""Emojis"""'], {}), "(message.guild.id, self.bot.cache, 'AutoMod', 'Emojis')\n", (1673, 1728), False, 'from cogs.Core.Language import Language\n'), ((1619, 1658), 'utils.sql.GetInfractions', 'sql.GetInfractions', (['guild.id', 'member.id'], {}), '(guild.id, member.id)\n', (1637, 1658), False, 'from utils import sql\n')] |
import os, sys
import time
import argparse
import yaml
from collections import namedtuple
from yattag import Doc, indent
parser = argparse.ArgumentParser(description="A tool for consolidating YAML changelogs.")
parser.add_argument("-l", "--location", action="store", help="Only specify if changelog location differs from the script's location.")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-n", "--no-changes", action="store_true", help="Dry run, no file changes.")
args = parser.parse_args()
def verbose(msg):
print(msg)
# not verbose => replace the def
if not args.verbose:
def _verbose(msg):
pass
verbose = _verbose
date = time.strftime("%Y-%m-%d")
change = namedtuple("change", ("type", "value"))
#log = namedtuple("log", ("author", "changes"))
class log():
__slots__ = ("author", "changes")
def __init__(self, author, changes):
self.author = author
self.changes = changes
def __repr__(self):
return "log(author={}, changes={})".format(repr(self.author), repr(self.changes))
class Loader():
allowed_types = {"add", "remove", "tweak", "bugfix"}
def __init__(self, loc):
self.counter = 0
self.logs = {}
def load_old(self, data):
# self.logs = yaml.load(data)
y = yaml.safe_load(data)
for d in y:
self.logs[d] = []
for item in y[d]:
self.load_log(item, d, False)
def find_by_author(self, name, logdate=date):
for item in self.logs[logdate]:
if item.author == name:
return item
return None
def load_log(self, data, logdate=date, count=True):
author = data["author"]
changes = []
for entry in data["changes"]:
for ch in [change(type=k, value=v) for k, v in entry.items()]:
if ch.type in self.allowed_types:
changes.append(ch)
else:
raise Exception("Invalid change type.")
if logdate not in self.logs:
self.logs[logdate] = []
l = self.find_by_author(author, logdate)
if l:
l.changes += changes
else:
self.logs[logdate].append(log(author=author, changes=changes))
if count:
self.counter += 1
def tranform_logs(self):
r = {}
for d in sorted(self.logs.keys()):
r[d] = [{"author": item.author, "changes": [{ch.type: ch.value} for ch in item.changes]} for item in self.logs[d]]
return r
def save_yaml(self, path):
data = "# Automatically generated changelog archive. Not to be modified manually.\n"
data += yaml.safe_dump(self.tranform_logs(), default_flow_style=False)
if not args.no_changes:
with open(os.path.join(path, "changelog.yml"), "w") as f:
f.write(data)
verbose("Saving changelog.yml")
else:
verbose("Would save changelog.yml")
def save(self, path):
print("Adding {} new changelogs.".format(self.counter))
self.save_yaml(path)
doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
doc.asis("<!-- AUTOMATICALLY GENERATED - NOT TO BE MODIFIED MANUALLY -->")
with tag('html'):
with tag("head"):
doc.stag("meta", charset="utf-8")
doc.line("title", "GJAR IoT Changelog")
with open(os.path.join(path, "changelog.css"), "r") as f:
doc.line("style", f.read())
with tag('body'):
doc.line("h1", "GJAR IoT Changelog")
doc.line("i", "Last generated: {}".format(date))
for d in sorted(self.logs.keys()):
with tag("div"):
doc.line("h2", d)
with tag("ul"):
for item in self.logs[d]:
with tag("li"):
doc.line("h3", item.author)
with tag("ul"):
for ch in item.changes:
doc.line("li", ch.value, klass="type-" + ch.type)
if not args.no_changes:
with open(os.path.join(path, "changelog.html"), "w") as f:
f.write(indent(doc.getvalue()))
verbose("Saving changelog.html")
else:
verbose("Would save changelog.html")
if args.location:
loc = args.location
else:
loc = os.path.dirname(os.path.realpath(__file__))
files = []
loader = Loader(loc)
print("Running in directory {}...".format(loc))
if os.path.isfile(os.path.join(loc, "changelog.yml")):
verbose("changelog.yml file exists, loading old changelogs.")
with open(os.path.join(loc, "changelog.yml"), "r") as f:
loader.load_old(f.read())
for f in os.listdir(loc):
if not f.endswith(".yml") or f in ("template.yml", "changelog.yml"):
verbose("Skipping file {}.".format(f))
continue
try:
with open(os.path.join(loc, f), "r") as lf:
verbose("Reading file {}.".format(f))
try:
loader.load_log(yaml.safe_load(lf.read()))
except Exception:
print("Error when reading file {}. Skipping.".format(f), end="\t")
print(sys.exc_info()[1])
continue
except Exception:
print("Error when opening file {}. Skipping.".format(f))
continue
files.append(f)
loader.save(loc)
if not args.no_changes:
print("Purging directory {}...".format(loc))
for f in files:
try:
os.remove(os.path.join(loc, f))
verbose("Removing file {}.".format(f))
except:
print("Could not remove file {}.".format(f))
| [
"os.listdir",
"collections.namedtuple",
"argparse.ArgumentParser",
"time.strftime",
"os.path.join",
"os.path.realpath",
"yaml.safe_load",
"sys.exc_info",
"yattag.Doc"
] | [((131, 216), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A tool for consolidating YAML changelogs."""'}), "(description='A tool for consolidating YAML changelogs.'\n )\n", (154, 216), False, 'import argparse\n'), ((688, 713), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (701, 713), False, 'import time\n'), ((724, 763), 'collections.namedtuple', 'namedtuple', (['"""change"""', "('type', 'value')"], {}), "('change', ('type', 'value'))\n", (734, 763), False, 'from collections import namedtuple\n'), ((4937, 4952), 'os.listdir', 'os.listdir', (['loc'], {}), '(loc)\n', (4947, 4952), False, 'import os, sys\n'), ((4729, 4763), 'os.path.join', 'os.path.join', (['loc', '"""changelog.yml"""'], {}), "(loc, 'changelog.yml')\n", (4741, 4763), False, 'import os, sys\n'), ((1310, 1330), 'yaml.safe_load', 'yaml.safe_load', (['data'], {}), '(data)\n', (1324, 1330), False, 'import yaml\n'), ((4601, 4627), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4617, 4627), False, 'import os, sys\n'), ((4846, 4880), 'os.path.join', 'os.path.join', (['loc', '"""changelog.yml"""'], {}), "(loc, 'changelog.yml')\n", (4858, 4880), False, 'import os, sys\n'), ((3151, 3156), 'yattag.Doc', 'Doc', ([], {}), '()\n', (3154, 3156), False, 'from yattag import Doc, indent\n'), ((5118, 5138), 'os.path.join', 'os.path.join', (['loc', 'f'], {}), '(loc, f)\n', (5130, 5138), False, 'import os, sys\n'), ((5728, 5748), 'os.path.join', 'os.path.join', (['loc', 'f'], {}), '(loc, f)\n', (5740, 5748), False, 'import os, sys\n'), ((2822, 2857), 'os.path.join', 'os.path.join', (['path', '"""changelog.yml"""'], {}), "(path, 'changelog.yml')\n", (2834, 2857), False, 'import os, sys\n'), ((4319, 4355), 'os.path.join', 'os.path.join', (['path', '"""changelog.html"""'], {}), "(path, 'changelog.html')\n", (4331, 4355), False, 'import os, sys\n'), ((3474, 3509), 'os.path.join', 'os.path.join', (['path', '"""changelog.css"""'], {}), "(path, 'changelog.css')\n", (3486, 3509), False, 'import os, sys\n'), ((5413, 5427), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5425, 5427), False, 'import os, sys\n')] |
"""Indexing domain models
Revision ID: 9960bbbe4d92
Revises: d<PASSWORD>
Create Date: 2017-09-06 13:09:21.210982
"""
# revision identifiers, used by Alembic.
revision = '9960bbbe4d92'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_cfda_program_archived_date'), 'cfda_program', ['archived_date'], unique=False)
op.create_index(op.f('ix_cfda_program_program_number'), 'cfda_program', ['program_number'], unique=False)
op.create_index(op.f('ix_cfda_program_published_date'), 'cfda_program', ['published_date'], unique=False)
op.create_index(op.f('ix_city_code_city_code'), 'city_code', ['city_code'], unique=False)
op.create_index(op.f('ix_city_code_state_code'), 'city_code', ['state_code'], unique=False)
op.create_index(op.f('ix_county_code_county_number'), 'county_code', ['county_number'], unique=False)
op.create_index(op.f('ix_county_code_state_code'), 'county_code', ['state_code'], unique=False)
op.create_index(op.f('ix_program_activity_account_number'), 'program_activity', ['account_number'], unique=False)
op.create_index(op.f('ix_program_activity_agency_id'), 'program_activity', ['agency_id'], unique=False)
op.create_index(op.f('ix_program_activity_budget_year'), 'program_activity', ['budget_year'], unique=False)
op.create_index(op.f('ix_program_activity_program_activity_code'), 'program_activity', ['program_activity_code'], unique=False)
op.create_index(op.f('ix_program_activity_program_activity_name'), 'program_activity', ['program_activity_name'], unique=False)
op.create_index(op.f('ix_sf_133_agency_identifier'), 'sf_133', ['agency_identifier'], unique=False)
op.create_index(op.f('ix_sf_133_allocation_transfer_agency'), 'sf_133', ['allocation_transfer_agency'], unique=False)
op.create_index(op.f('ix_sf_133_fiscal_year'), 'sf_133', ['fiscal_year'], unique=False)
op.create_index(op.f('ix_sf_133_period'), 'sf_133', ['period'], unique=False)
op.create_index('ix_sf_133_tas_group', 'sf_133', ['tas', 'fiscal_year', 'period', 'line'], unique=True)
op.drop_index('ix_sf_133_tas', table_name='sf_133')
op.create_index(op.f('ix_sf_133_tas'), 'sf_133', ['tas'], unique=False)
op.create_index(op.f('ix_states_state_code'), 'states', ['state_code'], unique=False)
op.create_index(op.f('ix_zips_congressional_district_no'), 'zips', ['congressional_district_no'], unique=False)
op.create_index(op.f('ix_zips_county_number'), 'zips', ['county_number'], unique=False)
op.create_index(op.f('ix_zips_state_abbreviation'), 'zips', ['state_abbreviation'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_zips_state_abbreviation'), table_name='zips')
op.drop_index(op.f('ix_zips_county_number'), table_name='zips')
op.drop_index(op.f('ix_zips_congressional_district_no'), table_name='zips')
op.drop_index(op.f('ix_states_state_code'), table_name='states')
op.drop_index(op.f('ix_sf_133_tas'), table_name='sf_133')
op.create_index('ix_sf_133_tas', 'sf_133', ['tas', 'fiscal_year', 'period', 'line'], unique=True)
op.drop_index('ix_sf_133_tas_group', table_name='sf_133')
op.drop_index(op.f('ix_sf_133_period'), table_name='sf_133')
op.drop_index(op.f('ix_sf_133_fiscal_year'), table_name='sf_133')
op.drop_index(op.f('ix_sf_133_allocation_transfer_agency'), table_name='sf_133')
op.drop_index(op.f('ix_sf_133_agency_identifier'), table_name='sf_133')
op.drop_index(op.f('ix_program_activity_program_activity_name'), table_name='program_activity')
op.drop_index(op.f('ix_program_activity_program_activity_code'), table_name='program_activity')
op.drop_index(op.f('ix_program_activity_budget_year'), table_name='program_activity')
op.drop_index(op.f('ix_program_activity_agency_id'), table_name='program_activity')
op.drop_index(op.f('ix_program_activity_account_number'), table_name='program_activity')
op.drop_index(op.f('ix_county_code_state_code'), table_name='county_code')
op.drop_index(op.f('ix_county_code_county_number'), table_name='county_code')
op.drop_index(op.f('ix_city_code_state_code'), table_name='city_code')
op.drop_index(op.f('ix_city_code_city_code'), table_name='city_code')
op.drop_index(op.f('ix_cfda_program_published_date'), table_name='cfda_program')
op.drop_index(op.f('ix_cfda_program_program_number'), table_name='cfda_program')
op.drop_index(op.f('ix_cfda_program_archived_date'), table_name='cfda_program')
### end Alembic commands ###
| [
"alembic.op.drop_index",
"alembic.op.f",
"alembic.op.create_index"
] | [((2272, 2379), 'alembic.op.create_index', 'op.create_index', (['"""ix_sf_133_tas_group"""', '"""sf_133"""', "['tas', 'fiscal_year', 'period', 'line']"], {'unique': '(True)'}), "('ix_sf_133_tas_group', 'sf_133', ['tas', 'fiscal_year',\n 'period', 'line'], unique=True)\n", (2287, 2379), False, 'from alembic import op\n'), ((2380, 2431), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_sf_133_tas"""'], {'table_name': '"""sf_133"""'}), "('ix_sf_133_tas', table_name='sf_133')\n", (2393, 2431), False, 'from alembic import op\n'), ((3392, 3493), 'alembic.op.create_index', 'op.create_index', (['"""ix_sf_133_tas"""', '"""sf_133"""', "['tas', 'fiscal_year', 'period', 'line']"], {'unique': '(True)'}), "('ix_sf_133_tas', 'sf_133', ['tas', 'fiscal_year', 'period',\n 'line'], unique=True)\n", (3407, 3493), False, 'from alembic import op\n'), ((3494, 3551), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_sf_133_tas_group"""'], {'table_name': '"""sf_133"""'}), "('ix_sf_133_tas_group', table_name='sf_133')\n", (3507, 3551), False, 'from alembic import op\n'), ((562, 599), 'alembic.op.f', 'op.f', (['"""ix_cfda_program_archived_date"""'], {}), "('ix_cfda_program_archived_date')\n", (566, 599), False, 'from alembic import op\n'), ((670, 708), 'alembic.op.f', 'op.f', (['"""ix_cfda_program_program_number"""'], {}), "('ix_cfda_program_program_number')\n", (674, 708), False, 'from alembic import op\n'), ((780, 818), 'alembic.op.f', 'op.f', (['"""ix_cfda_program_published_date"""'], {}), "('ix_cfda_program_published_date')\n", (784, 818), False, 'from alembic import op\n'), ((890, 920), 'alembic.op.f', 'op.f', (['"""ix_city_code_city_code"""'], {}), "('ix_city_code_city_code')\n", (894, 920), False, 'from alembic import op\n'), ((984, 1015), 'alembic.op.f', 'op.f', (['"""ix_city_code_state_code"""'], {}), "('ix_city_code_state_code')\n", (988, 1015), False, 'from alembic import op\n'), ((1080, 1116), 'alembic.op.f', 'op.f', (['"""ix_county_code_county_number"""'], {}), "('ix_county_code_county_number')\n", (1084, 1116), False, 'from alembic import op\n'), ((1186, 1219), 'alembic.op.f', 'op.f', (['"""ix_county_code_state_code"""'], {}), "('ix_county_code_state_code')\n", (1190, 1219), False, 'from alembic import op\n'), ((1286, 1328), 'alembic.op.f', 'op.f', (['"""ix_program_activity_account_number"""'], {}), "('ix_program_activity_account_number')\n", (1290, 1328), False, 'from alembic import op\n'), ((1404, 1441), 'alembic.op.f', 'op.f', (['"""ix_program_activity_agency_id"""'], {}), "('ix_program_activity_agency_id')\n", (1408, 1441), False, 'from alembic import op\n'), ((1512, 1551), 'alembic.op.f', 'op.f', (['"""ix_program_activity_budget_year"""'], {}), "('ix_program_activity_budget_year')\n", (1516, 1551), False, 'from alembic import op\n'), ((1624, 1673), 'alembic.op.f', 'op.f', (['"""ix_program_activity_program_activity_code"""'], {}), "('ix_program_activity_program_activity_code')\n", (1628, 1673), False, 'from alembic import op\n'), ((1756, 1805), 'alembic.op.f', 'op.f', (['"""ix_program_activity_program_activity_name"""'], {}), "('ix_program_activity_program_activity_name')\n", (1760, 1805), False, 'from alembic import op\n'), ((1888, 1923), 'alembic.op.f', 'op.f', (['"""ix_sf_133_agency_identifier"""'], {}), "('ix_sf_133_agency_identifier')\n", (1892, 1923), False, 'from alembic import op\n'), ((1992, 2036), 'alembic.op.f', 'op.f', (['"""ix_sf_133_allocation_transfer_agency"""'], {}), "('ix_sf_133_allocation_transfer_agency')\n", (1996, 2036), False, 'from alembic import op\n'), ((2114, 2143), 'alembic.op.f', 'op.f', (['"""ix_sf_133_fiscal_year"""'], {}), "('ix_sf_133_fiscal_year')\n", (2118, 2143), False, 'from alembic import op\n'), ((2206, 2230), 'alembic.op.f', 'op.f', (['"""ix_sf_133_period"""'], {}), "('ix_sf_133_period')\n", (2210, 2230), False, 'from alembic import op\n'), ((2452, 2473), 'alembic.op.f', 'op.f', (['"""ix_sf_133_tas"""'], {}), "('ix_sf_133_tas')\n", (2456, 2473), False, 'from alembic import op\n'), ((2528, 2556), 'alembic.op.f', 'op.f', (['"""ix_states_state_code"""'], {}), "('ix_states_state_code')\n", (2532, 2556), False, 'from alembic import op\n'), ((2618, 2659), 'alembic.op.f', 'op.f', (['"""ix_zips_congressional_district_no"""'], {}), "('ix_zips_congressional_district_no')\n", (2622, 2659), False, 'from alembic import op\n'), ((2734, 2763), 'alembic.op.f', 'op.f', (['"""ix_zips_county_number"""'], {}), "('ix_zips_county_number')\n", (2738, 2763), False, 'from alembic import op\n'), ((2826, 2860), 'alembic.op.f', 'op.f', (['"""ix_zips_state_abbreviation"""'], {}), "('ix_zips_state_abbreviation')\n", (2830, 2860), False, 'from alembic import op\n'), ((3054, 3088), 'alembic.op.f', 'op.f', (['"""ix_zips_state_abbreviation"""'], {}), "('ix_zips_state_abbreviation')\n", (3058, 3088), False, 'from alembic import op\n'), ((3127, 3156), 'alembic.op.f', 'op.f', (['"""ix_zips_county_number"""'], {}), "('ix_zips_county_number')\n", (3131, 3156), False, 'from alembic import op\n'), ((3195, 3236), 'alembic.op.f', 'op.f', (['"""ix_zips_congressional_district_no"""'], {}), "('ix_zips_congressional_district_no')\n", (3199, 3236), False, 'from alembic import op\n'), ((3275, 3303), 'alembic.op.f', 'op.f', (['"""ix_states_state_code"""'], {}), "('ix_states_state_code')\n", (3279, 3303), False, 'from alembic import op\n'), ((3344, 3365), 'alembic.op.f', 'op.f', (['"""ix_sf_133_tas"""'], {}), "('ix_sf_133_tas')\n", (3348, 3365), False, 'from alembic import op\n'), ((3570, 3594), 'alembic.op.f', 'op.f', (['"""ix_sf_133_period"""'], {}), "('ix_sf_133_period')\n", (3574, 3594), False, 'from alembic import op\n'), ((3635, 3664), 'alembic.op.f', 'op.f', (['"""ix_sf_133_fiscal_year"""'], {}), "('ix_sf_133_fiscal_year')\n", (3639, 3664), False, 'from alembic import op\n'), ((3705, 3749), 'alembic.op.f', 'op.f', (['"""ix_sf_133_allocation_transfer_agency"""'], {}), "('ix_sf_133_allocation_transfer_agency')\n", (3709, 3749), False, 'from alembic import op\n'), ((3790, 3825), 'alembic.op.f', 'op.f', (['"""ix_sf_133_agency_identifier"""'], {}), "('ix_sf_133_agency_identifier')\n", (3794, 3825), False, 'from alembic import op\n'), ((3866, 3915), 'alembic.op.f', 'op.f', (['"""ix_program_activity_program_activity_name"""'], {}), "('ix_program_activity_program_activity_name')\n", (3870, 3915), False, 'from alembic import op\n'), ((3966, 4015), 'alembic.op.f', 'op.f', (['"""ix_program_activity_program_activity_code"""'], {}), "('ix_program_activity_program_activity_code')\n", (3970, 4015), False, 'from alembic import op\n'), ((4066, 4105), 'alembic.op.f', 'op.f', (['"""ix_program_activity_budget_year"""'], {}), "('ix_program_activity_budget_year')\n", (4070, 4105), False, 'from alembic import op\n'), ((4156, 4193), 'alembic.op.f', 'op.f', (['"""ix_program_activity_agency_id"""'], {}), "('ix_program_activity_agency_id')\n", (4160, 4193), False, 'from alembic import op\n'), ((4244, 4286), 'alembic.op.f', 'op.f', (['"""ix_program_activity_account_number"""'], {}), "('ix_program_activity_account_number')\n", (4248, 4286), False, 'from alembic import op\n'), ((4337, 4370), 'alembic.op.f', 'op.f', (['"""ix_county_code_state_code"""'], {}), "('ix_county_code_state_code')\n", (4341, 4370), False, 'from alembic import op\n'), ((4416, 4452), 'alembic.op.f', 'op.f', (['"""ix_county_code_county_number"""'], {}), "('ix_county_code_county_number')\n", (4420, 4452), False, 'from alembic import op\n'), ((4498, 4529), 'alembic.op.f', 'op.f', (['"""ix_city_code_state_code"""'], {}), "('ix_city_code_state_code')\n", (4502, 4529), False, 'from alembic import op\n'), ((4573, 4603), 'alembic.op.f', 'op.f', (['"""ix_city_code_city_code"""'], {}), "('ix_city_code_city_code')\n", (4577, 4603), False, 'from alembic import op\n'), ((4647, 4685), 'alembic.op.f', 'op.f', (['"""ix_cfda_program_published_date"""'], {}), "('ix_cfda_program_published_date')\n", (4651, 4685), False, 'from alembic import op\n'), ((4732, 4770), 'alembic.op.f', 'op.f', (['"""ix_cfda_program_program_number"""'], {}), "('ix_cfda_program_program_number')\n", (4736, 4770), False, 'from alembic import op\n'), ((4817, 4854), 'alembic.op.f', 'op.f', (['"""ix_cfda_program_archived_date"""'], {}), "('ix_cfda_program_archived_date')\n", (4821, 4854), False, 'from alembic import op\n')] |
# -*- coding: utf-8 -*-
"""
Barycenters
===========
This example shows three methods to compute barycenters of time series.
For an overview over the available methods see the :mod:`tslearn.barycenters`
module.
*tslearn* provides three methods for calculating barycenters for a given set of
time series:
* *Euclidean barycenter* is simply the arithmetic mean for
each individual point in time, minimizing the summed euclidean distance
for each of them. As can be seen below, it is very different from the
DTW-based methods and may often be inappropriate. However, it is the
fastest of the methods shown.
* *DTW Barycenter Averaging (DBA)* is an iteratively refined barycenter,
starting out with a (potentially) bad candidate and improving it
until convergence criteria are met. The optimization can be accomplished
with (a) expectation-maximization [1] and (b) stochastic subgradient
descent [2]. Empirically, the latter "is [often] more stable and finds better
solutions in shorter time" [2].
* *Soft-DTW barycenter* uses a differentiable loss function to iteratively
find a barycenter [3]. The method itself and the parameter
:math:`\\gamma=1.0` is described in more detail in the section on
:ref:`DTW<dtw>`. There is also a dedicated
:ref:`example<sphx_glr_auto_examples_plot_barycenter_interpolate.py>`
available.
[1] <NAME>, <NAME> & <NAME>. A global averaging method for
dynamic time warping, with applications to clustering. Pattern Recognition,
Elsevier, 2011, Vol. 44, Num. 3, pp. 678-693.
[2] <NAME> & <NAME>. Nonsmooth Analysis and Subgradient Methods for
Averaging in Dynamic Time Warping Spaces. Pattern Recognition, 74, 340-358.
[3] <NAME> & <NAME>. Soft-DTW: a Differentiable Loss Function for
Time-Series. ICML 2017.
"""
# Author: <NAME>, <NAME>
# License: BSD 3 clause
import numpy
import matplotlib.pyplot as plt
from tslearn.barycenters import \
euclidean_barycenter, \
dtw_barycenter_averaging, \
dtw_barycenter_averaging_subgradient, \
softdtw_barycenter
from tslearn.datasets import CachedDatasets
# fetch the example data set
numpy.random.seed(0)
X_train, y_train, _, _ = CachedDatasets().load_dataset("Trace")
X = X_train[y_train == 2]
length_of_sequence = X.shape[1]
def plot_helper(barycenter):
# plot all points of the data set
for series in X:
plt.plot(series.ravel(), "k-", alpha=.2)
# plot the given barycenter of them
plt.plot(barycenter.ravel(), "r-", linewidth=2)
# plot the four variants with the same number of iterations and a tolerance of
# 1e-3 where applicable
ax1 = plt.subplot(4, 1, 1)
plt.title("Euclidean barycenter")
plot_helper(euclidean_barycenter(X))
plt.subplot(4, 1, 2, sharex=ax1)
plt.title("DBA (vectorized version of Petitjean's EM)")
plot_helper(dtw_barycenter_averaging(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 3, sharex=ax1)
plt.title("DBA (subgradient descent approach)")
plot_helper(dtw_barycenter_averaging_subgradient(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 4, sharex=ax1)
plt.title("Soft-DTW barycenter ($\gamma$=1.0)")
plot_helper(softdtw_barycenter(X, gamma=1., max_iter=50, tol=1e-3))
# clip the axes for better readability
ax1.set_xlim([0, length_of_sequence])
# show the plot(s)
plt.tight_layout()
plt.show()
| [
"tslearn.barycenters.dtw_barycenter_averaging",
"tslearn.barycenters.dtw_barycenter_averaging_subgradient",
"tslearn.barycenters.euclidean_barycenter",
"tslearn.datasets.CachedDatasets",
"numpy.random.seed",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"t... | [((2102, 2122), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (2119, 2122), False, 'import numpy\n'), ((2587, 2607), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (2598, 2607), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2641), 'matplotlib.pyplot.title', 'plt.title', (['"""Euclidean barycenter"""'], {}), "('Euclidean barycenter')\n", (2617, 2641), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2712), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {'sharex': 'ax1'}), '(4, 1, 2, sharex=ax1)\n', (2691, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2768), 'matplotlib.pyplot.title', 'plt.title', (['"""DBA (vectorized version of Petitjean\'s EM)"""'], {}), '("DBA (vectorized version of Petitjean\'s EM)")\n', (2722, 2768), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2866), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {'sharex': 'ax1'}), '(4, 1, 3, sharex=ax1)\n', (2845, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2914), 'matplotlib.pyplot.title', 'plt.title', (['"""DBA (subgradient descent approach)"""'], {}), "('DBA (subgradient descent approach)')\n", (2876, 2914), True, 'import matplotlib.pyplot as plt\n'), ((2992, 3024), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {'sharex': 'ax1'}), '(4, 1, 4, sharex=ax1)\n', (3003, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3025, 3073), 'matplotlib.pyplot.title', 'plt.title', (['"""Soft-DTW barycenter ($\\\\gamma$=1.0)"""'], {}), "('Soft-DTW barycenter ($\\\\gamma$=1.0)')\n", (3034, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3239, 3257), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3255, 3257), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3266, 3268), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2677), 'tslearn.barycenters.euclidean_barycenter', 'euclidean_barycenter', (['X'], {}), '(X)\n', (2674, 2677), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((2781, 2832), 'tslearn.barycenters.dtw_barycenter_averaging', 'dtw_barycenter_averaging', (['X'], {'max_iter': '(50)', 'tol': '(0.001)'}), '(X, max_iter=50, tol=0.001)\n', (2805, 2832), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((2927, 2990), 'tslearn.barycenters.dtw_barycenter_averaging_subgradient', 'dtw_barycenter_averaging_subgradient', (['X'], {'max_iter': '(50)', 'tol': '(0.001)'}), '(X, max_iter=50, tol=0.001)\n', (2963, 2990), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((3085, 3141), 'tslearn.barycenters.softdtw_barycenter', 'softdtw_barycenter', (['X'], {'gamma': '(1.0)', 'max_iter': '(50)', 'tol': '(0.001)'}), '(X, gamma=1.0, max_iter=50, tol=0.001)\n', (3103, 3141), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((2148, 2164), 'tslearn.datasets.CachedDatasets', 'CachedDatasets', ([], {}), '()\n', (2162, 2164), False, 'from tslearn.datasets import CachedDatasets\n')] |
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from ..utils import resolution
from ..doctools import document
from .stat import stat
@document
class stat_boxplot(stat):
"""
Compute boxplot statistics
{usage}
Parameters
----------
{common_parameters}
coef : float, optional (default: 1.5)
Length of the whiskers as a multiple of the Interquartile
Range.
See Also
--------
plotnine.geoms.geom_boxplot
"""
_aesthetics_doc = """
{aesthetics_table}
.. rubric:: Options for computed aesthetics
::
'width' # width of boxplot
'lower' # lower hinge, 25% quantile
'middle' # median, 50% quantile
'upper' # upper hinge, 75% quantile
'notchlower' # lower edge of notch, computed as;
# :py:`median - 1.58 * IQR / sqrt(n)`
'notchupper' # upper edge of notch, computed as;
# :py:`median + 1.58 * IQR / sqrt(n)`
'ymin' # lower whisker, computed as; smallest observation
# greater than or equal to lower hinge - 1.5 * IQR
'ymax' # upper whisker, computed as; largest observation
# less than or equal to upper hinge + 1.5 * IQR
Calculated aesthetics are accessed using the `after_stat` function.
e.g. :py:`after_stat('width')`.
"""
REQUIRED_AES = {'x', 'y'}
NON_MISSING_AES = {'weight'}
DEFAULT_PARAMS = {'geom': 'boxplot', 'position': 'dodge',
'na_rm': False, 'coef': 1.5, 'width': None}
CREATES = {'lower', 'upper', 'middle', 'ymin', 'ymax',
'outliers', 'notchupper', 'notchlower', 'width',
'relvarwidth'}
def setup_params(self, data):
if self.params['width'] is None:
self.params['width'] = resolution(data['x'], False) * 0.75
return self.params
@classmethod
def compute_group(cls, data, scales, **params):
y = data['y'].to_numpy()
weights = data.get('weight', None)
total_weight = len(y) if weights is None else np.sum(weights)
res = weighted_boxplot_stats(y, weights=weights, whis=params['coef'])
if len(np.unique(data['x'])) > 1:
width = np.ptp(data['x']) * 0.9
else:
width = params['width']
if pdtypes.is_categorical_dtype(data['x']):
x = data['x'].iloc[0]
else:
x = np.mean([data['x'].min(), data['x'].max()])
d = {
'ymin': res['whislo'],
'lower': res['q1'],
'middle': [res['med']],
'upper': res['q3'],
'ymax': res['whishi'],
'outliers': [res['fliers']],
'notchupper': res['cihi'],
'notchlower': res['cilo'],
'x': x,
'width': width,
'relvarwidth': np.sqrt(total_weight)
}
return pd.DataFrame(d)
def weighted_percentile(a, q, weights=None):
"""
Compute the weighted q-th percentile of data
Parameters
----------
a : array_like
Input that can be converted into an array.
q : array_like[float]
Percentile or sequence of percentiles to compute. Must be int
the range [0, 100]
weights : array_like
Weights associated with the input values.
"""
# Calculate and interpolate weighted percentiles
# method derived from https://en.wikipedia.org/wiki/Percentile
# using numpy's standard C = 1
if weights is None:
weights = np.ones(len(a))
weights = np.asarray(weights)
q = np.asarray(q)
C = 1
idx_s = np.argsort(a)
a_s = a[idx_s]
w_n = weights[idx_s]
S_N = np.sum(weights)
S_n = np.cumsum(w_n)
p_n = (S_n - C * w_n) / (S_N + (1 - 2 * C) * w_n)
pcts = np.interp(q / 100.0, p_n, a_s)
return pcts
def weighted_boxplot_stats(x, weights=None, whis=1.5):
"""
Calculate weighted boxplot plot statistics
Parameters
----------
x : array_like
Data
weights : array_like, optional
Weights associated with the data.
whis : float, optional (default: 1.5)
Position of the whiskers beyond the interquartile range.
The data beyond the whisker are considered outliers.
If a float, the lower whisker is at the lowest datum above
``Q1 - whis*(Q3-Q1)``, and the upper whisker at the highest
datum below ``Q3 + whis*(Q3-Q1)``, where Q1 and Q3 are the
first and third quartiles. The default value of
``whis = 1.5`` corresponds to Tukey's original definition of
boxplots.
Notes
-----
This method adapted from Matplotlibs boxplot_stats. The key difference
is the use of a weighted percentile calculation and then using linear
interpolation to map weight percentiles back to data.
"""
if weights is None:
q1, med, q3 = np.percentile(x, (25, 50, 75))
n = len(x)
else:
q1, med, q3 = weighted_percentile(x, (25, 50, 75), weights)
n = np.sum(weights)
iqr = q3 - q1
mean = np.average(x, weights=weights)
cilo = med - 1.58 * iqr / np.sqrt(n)
cihi = med + 1.58 * iqr / np.sqrt(n)
# low extreme
loval = q1 - whis * iqr
lox = x[x >= loval]
if len(lox) == 0 or np.min(lox) > q1:
whislo = q1
else:
whislo = np.min(lox)
# high extreme
hival = q3 + whis * iqr
hix = x[x <= hival]
if len(hix) == 0 or np.max(hix) < q3:
whishi = q3
else:
whishi = np.max(hix)
bpstats = {
'fliers': x[(x < whislo) | (x > whishi)],
'mean': mean,
'med': med,
'q1': q1,
'q3': q3,
'iqr': iqr,
'whislo': whislo,
'whishi': whishi,
'cilo': cilo,
'cihi': cihi,
}
return bpstats
| [
"numpy.ptp",
"numpy.sqrt",
"numpy.unique",
"numpy.average",
"numpy.asarray",
"numpy.max",
"numpy.argsort",
"numpy.sum",
"pandas.api.types.is_categorical_dtype",
"numpy.percentile",
"numpy.interp",
"numpy.min",
"pandas.DataFrame",
"numpy.cumsum"
] | [((3580, 3599), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (3590, 3599), True, 'import numpy as np\n'), ((3608, 3621), 'numpy.asarray', 'np.asarray', (['q'], {}), '(q)\n', (3618, 3621), True, 'import numpy as np\n'), ((3645, 3658), 'numpy.argsort', 'np.argsort', (['a'], {}), '(a)\n', (3655, 3658), True, 'import numpy as np\n'), ((3713, 3728), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3719, 3728), True, 'import numpy as np\n'), ((3739, 3753), 'numpy.cumsum', 'np.cumsum', (['w_n'], {}), '(w_n)\n', (3748, 3753), True, 'import numpy as np\n'), ((3819, 3849), 'numpy.interp', 'np.interp', (['(q / 100.0)', 'p_n', 'a_s'], {}), '(q / 100.0, p_n, a_s)\n', (3828, 3849), True, 'import numpy as np\n'), ((5101, 5131), 'numpy.average', 'np.average', (['x'], {'weights': 'weights'}), '(x, weights=weights)\n', (5111, 5131), True, 'import numpy as np\n'), ((2350, 2389), 'pandas.api.types.is_categorical_dtype', 'pdtypes.is_categorical_dtype', (["data['x']"], {}), "(data['x'])\n", (2378, 2389), True, 'import pandas.api.types as pdtypes\n'), ((2925, 2940), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (2937, 2940), True, 'import pandas as pd\n'), ((4915, 4945), 'numpy.percentile', 'np.percentile', (['x', '(25, 50, 75)'], {}), '(x, (25, 50, 75))\n', (4928, 4945), True, 'import numpy as np\n'), ((5055, 5070), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (5061, 5070), True, 'import numpy as np\n'), ((5374, 5385), 'numpy.min', 'np.min', (['lox'], {}), '(lox)\n', (5380, 5385), True, 'import numpy as np\n'), ((5547, 5558), 'numpy.max', 'np.max', (['hix'], {}), '(hix)\n', (5553, 5558), True, 'import numpy as np\n'), ((2107, 2122), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (2113, 2122), True, 'import numpy as np\n'), ((2878, 2899), 'numpy.sqrt', 'np.sqrt', (['total_weight'], {}), '(total_weight)\n', (2885, 2899), True, 'import numpy as np\n'), ((5162, 5172), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (5169, 5172), True, 'import numpy as np\n'), ((5203, 5213), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (5210, 5213), True, 'import numpy as np\n'), ((5309, 5320), 'numpy.min', 'np.min', (['lox'], {}), '(lox)\n', (5315, 5320), True, 'import numpy as np\n'), ((5482, 5493), 'numpy.max', 'np.max', (['hix'], {}), '(hix)\n', (5488, 5493), True, 'import numpy as np\n'), ((2217, 2237), 'numpy.unique', 'np.unique', (["data['x']"], {}), "(data['x'])\n", (2226, 2237), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.ptp', 'np.ptp', (["data['x']"], {}), "(data['x'])\n", (2270, 2281), True, 'import numpy as np\n')] |
# This document contains definitions of functions and variables used
# in multiple tutorial examples. Each code fragment is actually passed to Sphinx
# twice:
# - This whole file is imported in a hidden, global setup block
# - The example in which the function is defined included its code from this
# file.
#
# The reason for all this is that definitions in one document, even if contained
# in a 'testsetup:: *' block, are not visible in other documents. The global
# setup block is the only way of sharing definitions, but we still want them
# presented in the appropriate part of the tutorial.
#
# Each fragment is surrounded by START-<NAME> and END-<NAME> markers to allow
# easy inclusions through the literalinclude directive.
# BEGIN-CONNECT
from franz.openrdf.connect import ag_connect
conn = ag_connect('python-tutorial', create=True, clear=True)
# END-CONNECT
| [
"franz.openrdf.connect.ag_connect"
] | [((816, 870), 'franz.openrdf.connect.ag_connect', 'ag_connect', (['"""python-tutorial"""'], {'create': '(True)', 'clear': '(True)'}), "('python-tutorial', create=True, clear=True)\n", (826, 870), False, 'from franz.openrdf.connect import ag_connect\n')] |
from threading import Thread
from statistics import mean
import nvidia_smi
import time
class MemoryMonitor(Thread):
def __init__(self, delay):
super(MemoryMonitor, self).__init__()
self.__stopped = False
self.__delay = delay
nvidia_smi.nvmlInit()
handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
self.__info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)
self.__used_memory = [self.__info.used / (1024**2)]
self.__total_memory = self.__info.total / (1024**2)
self.start()
@property
def used_memory(self):
return mean(self.__used_memory)
def run(self) -> None:
while not self.__stopped:
self.__used_memory.append(self.__info.used / (1024**2))
time.sleep(self.__delay)
def stop(self):
self.__stopped = True
nvidia_smi.nvmlShutdown() | [
"nvidia_smi.nvmlDeviceGetHandleByIndex",
"statistics.mean",
"nvidia_smi.nvmlDeviceGetMemoryInfo",
"nvidia_smi.nvmlInit",
"time.sleep",
"nvidia_smi.nvmlShutdown"
] | [((264, 285), 'nvidia_smi.nvmlInit', 'nvidia_smi.nvmlInit', ([], {}), '()\n', (283, 285), False, 'import nvidia_smi\n'), ((303, 343), 'nvidia_smi.nvmlDeviceGetHandleByIndex', 'nvidia_smi.nvmlDeviceGetHandleByIndex', (['(0)'], {}), '(0)\n', (340, 343), False, 'import nvidia_smi\n'), ((366, 408), 'nvidia_smi.nvmlDeviceGetMemoryInfo', 'nvidia_smi.nvmlDeviceGetMemoryInfo', (['handle'], {}), '(handle)\n', (400, 408), False, 'import nvidia_smi\n'), ((608, 632), 'statistics.mean', 'mean', (['self.__used_memory'], {}), '(self.__used_memory)\n', (612, 632), False, 'from statistics import mean\n'), ((859, 884), 'nvidia_smi.nvmlShutdown', 'nvidia_smi.nvmlShutdown', ([], {}), '()\n', (882, 884), False, 'import nvidia_smi\n'), ((775, 799), 'time.sleep', 'time.sleep', (['self.__delay'], {}), '(self.__delay)\n', (785, 799), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, re
sys.path.append("{0}/Desktop/cbmi/reproduce/python/MedicalResearchTool/objects".format(os.environ['HOME'])) #TODO
sys.path.append("{0}/Desktop/cbmi/reproduce/python/MedicalResearchTool".format(os.environ['HOME']))
import nltk
import requests
from pprint import pprint
from stemming.porter2 import stem
from ArticleManager import ArticleManager
from DatabaseManager import DatabaseManager
from bs4 import BeautifulSoup
class ArticleExtractor(ArticleManager):
"""
Extract study information from the article text
Depends on:
os -- https://docs.python.org/3/library/os.html
sys -- https://docs.python.org/3/library/sys.html
re -- https://docs.python.org/3/library/re.html
nltk -- http://www.nltk.org/
requests -- http://docs.python-requests.org/en/master/
pprint -- https://docs.python.org/3/library/pprint.html
stemming -- https://pypi.python.org/pypi/stemming/1.0
beautiful soup -- https://www.crummy.com/software/BeautifulSoup/bs4/doc/
Functions from inherited from ArticleManager:
get_choices
check
check_boolean
ask
ask_without_choices
See ArticleManager for additional documentation
"""
def __init__(self,**kwargs):
super(ArticleExtractor,self).__init__(**kwargs) #pass run_style and metadata keyword argument on to ArticleManager constructor (if provided)
def clean_entry(self):
"""
For fields in ArticleExtractor.entry attribute with multiple entries, remove duplicates and format the final input
Runs on ArticleExtractor.entry attribute
Return: ArticleExtractor.entry attribute (dictionary)
Article.entry must be type: dictionary
Example:
>>> ae = ArticleExtractor()
...
>>> ae.entry
{
'article_doi':'10.1016/j.arth.2015.12.012',
'analysis_sw':'SAS,SPSS,SAS',
'grant_ids':'#29861982, #EI98239',
'primary_research':1
}
>>> clean_entry()
{
'article_doi':'10.1016/j.arth.2015.12.012',
'analysis_sw':'SAS, SPSS',
'grant_ids':'#29861982, #EI98239',
'primary_research':1
}
Raise TypeError when entry is not a dictionary
>>> print(ae.entry)
['a', 'b']
>>> ae.clean_entry()
TypeError: clean_entry called on: ['a', 'b']
invalid type: <class 'list'>
"""
if (type(self.entry) is not dict):
raise TypeError("clean_entry called on: {0} \ninvalid type: {1}".format(self.entry,type(self.entry)))
return self.entry
for (k,v) in self.entry.items():
copy = v
try:
val = copy.split(',')
val = list(map(str.strip, val))
val = set(val)
val = ', '.join(val)
self.entry[k] = val
except AttributeError:
#copy.split(',') failed because val was not a string
#v was already clean
pass
return self.entry
def get_reviewer(self):
"""
Get the name of the person reviewing the article
Use computer's username to take a guess, or ask for input if cant determine a guess
Return: Void
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae.get_reviewer()
I think 'Whos reviewing the article?' should be 'Elvis' based on: 'user of computer'
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'reviewer':'Elvis'}
If a guess cant be determined, ask user whos reviewing the article
>>> ae.get_reviewer()
#popup window appears
>>> ae = ArticleExtractor(run_style=1)
>>> ae.get_reviewer()
>>> ae.entry
{'reviewer':'Elvis'}
"""
username = os.getlogin() or pwd.getpwuid(os.getuid())[0] #username of the person using the computer
users = self.get_choices("reviewer")
for user in users:
if (re.search(username,user,re.I)):
self.check("Whos reviewing the article?",users[user],"user of computer","reviewer",display=user)
return
self.ask("Whos reviewing the article?","reviewer")
def chunker(self,sentence):
"""
Chunk a sentence
Args: sentence -- (string)
Return: nltk.tree.Tree object, traversable,
chunks begin with and end with proper noun (singular or plural)
and these may occur between the two proper nouns:
proper noun, noun, ',', '(', ')', ':', demonstrative adjective, conjuction, preposition
For more information, see:
http://www.nltk.org/book/ch07.html
Example:
>>> ae = ArticleExtractor()
>>> ae.chunker("The Institute for American Greatness has partnered with The University of Iceland")
Tree('S', [('The', 'DT'), \
Tree('Chunk', [('Institute', 'NNP'), ('for', 'IN'), ('American', 'NNP'), ('Greatness', 'NNP')]), \
('has', 'VBZ'), ('partnered', 'VBN'), ('with', 'IN'), ('The', 'DT'), \
Tree('Chunk', [('University', 'NNP'), ('of', 'IN'), ('Iceland', 'NNP')]) \
])
Except TypeError when sentence is not a string, retry by casting sentence to string
>>> ae.chunker(12)
chunker called on: '12'
12 is type <class 'int'> but must be a string or bytes-like object
retrying with cast to string
Tree('S', [('12', 'CD')])
"""
try:
words = nltk.word_tokenize(sentence)
tagged = nltk.pos_tag(words)
#tagged = nltk.pos_tag([word.rstrip(''.join([str(i) for i in range(10)])) for word in words])
chunkGram = r"Chunk: {<NNP.?><NNP.?|NN.?|,|\(|\)|:|IN|CC|DT>*<NNP.?|\)>}"
chunkedParser = nltk.RegexpParser(chunkGram)
chunked = chunkedParser.parse(tagged)
return chunked
except TypeError as e:
print("chunker called on: '{}' \n{} is type: {} but must be a string or bytes-like object".format(sentence,sentence,type(sentence)))
print("retrying with cast to string")
return self.chunker(str(sentence))
def get_clinical_domain(self,key_words):
"""
Get the clinical domain of the article
Args: key_words -- words to search against the clinical domain choices (list of strings)
Return: int value corresponding to redcap key for given domain, or 0 if no keyword matches (unknown domain) or keywords is invalid type
Example:
>>> ae = ArticleExtractor()
>>> ae.get_clinical_domain(['Neurology'])
23
>>> ae.get_clinical_domain(['The American Dream'])
0
>>> ae.get_clinical_domain(12)
0
"""
if ('clinical_domain' in self.entry):
return
if (type(key_words) is not list):
return 0
stopwords = nltk.corpus.stopwords.words('english') + ['health','disease','medicine','medical','sciences','medicine','international']
key_words = [stem(word.lower().strip()) for word in key_words if word.lower() not in stopwords]
domains = self.get_choices("clinical_domain")
for word in key_words:
for domain in domains:
if (re.search(re.escape(word),domain,re.I)):
return domain
return 0
def _get_hypotheses(self,text):
"""
Determine whether the study in the article was 'Hypothesis Driven or Hypothesis Generating'
and if they stated the null and alternative hypotheses
assign the value to entry
Args: text -- text from the article to be extracted (string)
Return: void
Article that were 'hypothesis driven' usually presented their hypotheses in the format:
"we hypothesized ..."
Example:
>>> ae = ArticleExtractor()
>>> ae._get_hypotheses("We hypothesized that patients undergoing extended BEV therapy could have altered patterns of recurrence and symptoms of recurrence due to its novel mechanism of action")
>>> ae.entry
{'hypothesis_gen_or_driv':1}
#from article: 23632207.pdf (doi: 10.1016/j.ygyno.2013.04.055)
>>> ae.entry
{'hypothesis_gen_or_driv':1}
>>> ae._get_hypotheses("We randomly collected data with no direction *hand slap")
>>> ae.entry
{'hypothesis_gen_or_driv':2}
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'we.*?hypothes',each_sent,re.I)):
self.check("Hypothesis Driven or Hypothesis Generating",1,each_sent,"hypothesis_gen_or_driv",display="driven")
if ("hypothesis_gen_or_driv" in self.entry):
#we didnt encounter any articles that stated null and alternate hypotheses. Here's how we might ask
self.generate_chooser("Does the publication state null and alternative hypotheses?",self.get_choices("clear_hypothesis"),info=each_sent)
if (self.user_choice != -1):
self.entry['clear_hypothesis'] = self.user_choice
return
self.entry['hypothesis_gen_or_driv'] = 2
return
def _get_funding(self,text):
"""
Get funding and grant information for the study in an article
Args: text -- text from the article to be extracted (string)
Return: void
Articles usually presented their funding in the format:
"This study was funded in part by ... (grant #NIH8982)"
"This study was funded by a grant from ..."
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_funding("Our study was funded by the NIH (grant id: #3234Gj8)")
I think 'Grant ID' should be: '3234Gj8' based on:
Our study was funded by the NIH (grant id: #3234Gj8)
Is this correct? (if no, type no and press enter; otherwise, press enter):
I think 'Funders' should be: 'the NIH' based on:
Our study was funded by the NIH (grant id: #3234Gj8)
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'funders': 'the NIH', 'grant_ids': '3234Gj8'}
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_funding("Our study was not funded by the NIH, but rather my mom")
I think 'Funders' should be: 'the NIH' based on:
Our study was not funded by the NIH, but rather my mom
Is this correct? (if no, type no and press enter; otherwise, press enter): no
Do you know the correct value? (if yes, type yes and press enter; otherwise, press enter): yes
Type the correct value for 'Funders': researchers mom
>>> ae.entry
{'funders': 'researchers mom'}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_funding("Our study was funded by the NIH (grant id: #3234Gj8)")
>>> ae.entry
{'funders': 'the NIH', 'grant_ids': '3234Gj8'}
>>> ae._get_funding("The research was funded by Wayne Enterprises.")
>>> ae.entry
{'funders': 'Wayne Enterprises'}
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'funded.*?by',each_sent,re.I|re.S)):
search = re.search(r"grant.*?(\w*\d[\w\d/-]*)",each_sent,re.I)
if (search):
self.check("Grant ID",search.group(1).strip(),each_sent,"grant_ids")
search = re.search(r'grant.*?from (.*?)[^\w\s-]',each_sent,re.I|re.S)
if (search):
self.check("Funders",search.group(1).strip(),each_sent,"funders")
else:
search = re.search(r'funded.*?by (.*?)[^\w\s-]',each_sent,re.I|re.S)
self.check("Funders",search.group(1).strip(),each_sent,"funders")
def _get_inex_criteria(self,text):
"""
Determine if the study in an article documented their inclusion / exclusion criteria
Args: text -- text from the article to be extracted (string)
Return: void
Search for:
"... were included", "... were excluded", "inclusion", "exclusion"
indicator phrases
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_inex_criteria("Aliens and other reptiles were excluded from our study")
I think 'Inclusion Exclusion Criteria Stated' should be: 'yes' based on:
Aliens and other reptiles were excluded from our study
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'inclusion_and_exclusion_stated': '1', 'inclusion_exclu_location___3': 1}
#location___3 is body of article
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_inex_criteria("Aliens and other reptiles were excluded from our study")
>>> ae.entry
{'inclusion_and_exclusion_stated': '1', 'inclusion_exclu_location___3': 1}
"""
for each_sent in nltk.sent_tokenize(text):
copy = each_sent
if(re.search(r'were\W*includ',each_sent,re.I) or re.search(r'were\W*exclud',each_sent,re.I) or
re.search(r'inclus',each_sent,re.I) or (re.search(r'exclus',each_sent,re.I) and not re.search(r'exclusively',each_sent,re.I))):
if ("inclusion_and_exclusion_stated" not in self.entry):
self.check_boolean("Inclusion Exclusion Criteria Stated",1,each_sent,"inclusion_and_exclusion_stated",display='yes')
if ("inclusion_and_exclusion_stated" in self.entry):
self.entry['inclusion_exclu_location___3'] = 1
self.check_ontol(each_sent)
return
def check_ontol(self,info):
"""
Ask user if any inclusion / exclusion criteria are presenting relative to standard ontologies
(method is called when a sentence is found indicating that inclusion / exclusion criteria was stated)
Args: info -- sentence that indicated inclusion / exclusion criteria was stated (string)
Return: void
#TODO, this needs to be expanded to check for standard ontologies that occur outside of the sentence which
indicated that inclusion / exclusion criteria were stated
This is difficult because a variety of names exist (ICD-9, ICD9, 'International Classification of Diseases')
and it is difficult to discern which category ('Procedure', 'Diagnosis', 'Medication', 'Laboratory')
the ontology belongs to
For now, prompts user whenever 'inclusion_and_exclusion_stated' set to True
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae.check_ontol("Patients suffering from sleep apnea (ICD-9 327.23) were excluded")
based on:
Patients suffering from sleep apnea (ICD-9 327.23) were excluded
Are any standard ontologies stated (such as CPT, ICD9, etc)? (if yes, type yes and press enter; otherwise, press enter): yes
#user interacts with GUI
>>> ae.entry
{'ontol_and_vocab_stated': 1, 'diag_vocabulary': 1}
"""
if ("ontol_and_vocab_stated" in self.entry):
return
if (not self.run_style):
print("based on:")
print(info)
if (self.ask_question("Are any standard ontologies stated (such as CPT, ICD9, etc)?")):
self.entry['ontol_and_vocab_stated'] = 1
c1 = {
"Procedure":1,
"Diagnosis":2,
"Medication":3,
"Laboratory":4
}
c2 = {
"Procedure":"proc_vocabulary",
"Diagnosis":"diag_vocabulary",
"Medication":"med_vocab",
"Laboratory":"lab_vocab"
}
c3 = dict((v,k) for k,v in c1.items())
self.generate_chooser("What categories are the ontologies a part of?",c1)
if (self.user_choice != -1):
self.ask("What ontologies are given for the category {}?".format(c3[self.user_choice]),c2[c3[self.user_choice]])
#TODO, **** I think this isnt correct, check when redcap comes back online
def _get_databases(self,text):
"""
Determine if article cited databases used in the study
Args: text -- text from the article to be extracted (string)
Return: void
If a sentence has the word 'database', guesses that the database is the largest chunk (see chunker method for more information)
Sometimes correct, but probaby the best default
#TODO, if there's more than one database
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_databases("The study database was obtained retrospectively from the Swedish National Stroke Register, Riksstroke and comprised all consecutive patients diagnosed with acute ischemic stroke or intracerebral hemorrhage admitted to Danderyd Hospital within 7 days of symptom onset be- tween January 1, 2005, and January 1, 2006 (n 5 725)")
#from article: {doi: 10.1016/j.jstrokecerebrovasdis.2015.06.043, pmid: 26236002}
I think 'Database Name' should be: 'National Stroke Register , Riksstroke' based on:
The study database was obtained retrospectively from the Swedish National Stroke Register, Riksstroke and comprised all consecutive patients diagnosed with acute ischemic stroke or intracerebral hemorrhage admitted to Danderyd Hospital within 7 days of symptom onset be- tween January 1, 2005, and January 1, 2006 (n 5 725)
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'db_citation_1': 'National Stroke Register , Riksstroke', 'state_data_sources': 1}
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_databases("This is a really short article")
>>> ae.entry
{'state_data_sources':0}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_databases("The study database was obtained retrospectively from the Swedish National Stroke Register, Riksstroke and comprised all consecutive patients diagnosed with acute ischemic stroke or intracerebral hemorrhage admitted to Danderyd Hospital within 7 days of symptom onset be- tween January 1, 2005, and January 1, 2006 (n 5 725)")
#from article: {doi: 10.1016/j.jstrokecerebrovasdis.2015.06.043, pmid: 26236002}
>>> ae.entry
{'db_citation_1': 'National Stroke Register , Riksstroke', 'state_data_sources': 1}
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'database',each_sent,re.I)):
tree = self.chunker(each_sent)
sts = []
try:
for st in tree.subtrees(lambda tree: tree.height() == 3):
for st2 in st.subtrees(lambda tree: tree.height() == 2):
sts.append([str(tup[0]) for tup in st2.leaves()])
if (len(sts) > 0):
longest_chunk = max(sts,key=len)
self.check("Database Name",' '.join(longest_chunk),each_sent,"db_citation_1")
if ('db_citation_1' in self.entry):
self.entry['state_data_sources'] = 1
self.entry['state_database_where___4'] = 1
return
except AttributeError as e:
#chunker run on invalid data type, didnt return a tree
pass
self.entry['state_data_sources'] = 0
def _get_query(self,text):
"""
Determine method used for extracting data
Args: text -- text from the article to be extracted (string)
Return: void
Look for word indicators:
'abstracted', 'manually', 'query',
'records' and 'review'
Few articles documented query methods. Further review should be conducted
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_query("Data were manually abstracted")
I think 'Query Method Stated' should be: 'yes' based on:
Data were manually abstracted
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'query_method_stated': '1', 'query_method_location___4': 1}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_query("Data were manually abstracted")
>>> ae.entry
{'query_method_stated': '1', 'query_method_location___4': 1}
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'abstracted',each_sent,re.I) or
re.search(r'manual',each_sent,re.I) or
re.search(r'query',each_sent,re.I) or
(re.search('records',each_sent,re.I) and re.search('review',each_sent,re.I))):
self.check_boolean("Query Method Stated",1,each_sent,"query_method_stated",display='yes')
if ('query_method_stated' in self.entry):
self.entry['query_method_location___4'] = 1 #query method given in body of article
return
else:
self.entry['query_method_stated'] = 0
return
self.entry['query_method_stated'] = 0
def _get_nlp(self,text):
"""
Check if study used natural language Processing
Args: text -- text from the article to be extracted (string)
Return: void
Look for word indicators:
'language proc', 'nlp'
#TODO, when redcap gets back online
Only one article used natural language processing. Further review should be conducted
Ask user if article states source of text from which data were mined
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'language\spro',each_sent,re.I) or re.search(r'\snlp\s',each_sent,re.I)):
self.check_boolean("Research Involves Natural Language Processing",1,each_sent,"text_nlp_yn",display='yes')
if ("text_nlp_yn" in self.entry):
if (self.ask_without_choices("Does the publication state source of the text from which data were mined? (ex: emergency department summary, operative notes, etc)\n","Enter the source of text: ","text_mine_source")):
if (re.search(r'appendix',each_sent,re.I)):
if (self.check_boolean("Manuscript shares a pre-processed sample text source in",9,each_sent,"nlp_source_shared_loc",display="appendix")):
self.assign("text_mining_preprocess",1)
elif (re.search(r'\Wgit',each_sent,re.I)):
if (self.check_boolean("Manuscript shares a pre-processed sample text source in",5,each_sent,"nlp_source_shared_loc",display="GitHub")):
self.assign("text_mining_preprocess",1)
if ("text_mining_preprocess" not in self.entry):
if (self.ask_question("Do they share a pre-processed sample of the text source?")):
self.assign("text_mining_preprocess",1)
self.ask("Where is the sample shared?","nlp_source_shared_loc")
if (self.ask_without_choices("Does the publication state software used for text mining?","Enter softwares used: ","nlp_software")):
self.ask("Is the software open or proprietary?","nlp_software_open")
return
def _get_analysis(self,text):
return #TODO, run machine learning algorithm
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'statistical analys[ie]s',each_sent,re.I) or re.search(r'data analys[ie]s',each_sent,re.I)):
if (self.check_boolean("Publications States Analysis Methodology And Process",1,each_sent,"analysis_processes_clear",display='yes')):
self.entry['data_analysis_doc_loc'] = 4
return
def _get_stats(self,text):
"""
Determine software, version, and operating system used for statistical Analyses
Args: text -- text from the article to be extracted (string)
Return: void
Articles often presented their analyses in the format:
"analyses were performed on ..."
"analysis was performed using ..."
Since a few domains cover almost all cases, search for common software (SAS, SPSS, etc)
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_stats("Analyses were performed using SAS (version 9.1)")
I think 'Analysis Software' should be: 'SAS' based on:
Analyses were performed using SAS (version 9.1)
Is this correct? (if no, type no and press enter; otherwise, press enter):
I think 'Analysis Software Version' should be: '9.1' based on:
Analyses were performed using SAS (version 9.1)
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'analysis_sw': 'SAS','analysis_sw_version': '9.1'}
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_stats("Analyses were performed using SAS (version 9.1)")
>>> ae.entry
{'analysis_sw': 'SAS','analysis_sw_version': '9.1'}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_stats("Analyses were performed using GraphPad for Windows")
{'analysis_sw': 'GraphPad', 'software_analysis_code': 1, 'analysis_os': 'Windows'}
"""
self.check_standards(text)
if ("analysis_sw" in self.entry):
self.entry['software_analysis_code'] = 1
return
for each_sent in nltk.sent_tokenize(text):
search = re.search(r'analys[ie]s (were)?(was)? performed\s+\w+\W+(.*?)\s',each_sent,re.I)
if (search):
self.check("Analyses Software",search.group(3).strip(),each_sent,"analysis_sw")
else:
search = re.search(r'analys',each_sent,re.I) and re.search(r'were\s\w*\susing\s(.*?)\s',each_sent,re.I)
if (search):
self.check("Analyses Software",search.group(1),each_sent,"analysis_sw")
if ("analysis_sw" in self.entry):
self.entry['software_analysis_code'] = 1
search = re.search(self.entry['analysis_sw'] + r'.*?(\d[\d\.]*\d)',each_sent,re.I|re.S)
if (search):
self.check("Analysis Software Version",search.group(1),each_sent,"analysis_sw_version")
self.check_operating_system(each_sent)
return
self.entry['software_analysis_code'] = 0
def check_standards(self,text):
"""
Check if 'STATA','SAS','SPSS','R' statistical softwares used in analyses
Method is called by _get_stats method (user doesnt need to execute both)
Args: text -- text from the article to be extracted (string)
Return: void
Example:
>>> ae = ArticleExtractor(run_style=1)
>>> ae.check_standards('SAS and SPSS were used in this study')
I think 'Analysis Software' should be: 'SAS' based on:
SAS and SPSS were used in this study
Is this correct? (if no, type no and press enter; otherwise, press enter):
I think 'Analysis Software' should be: 'SPSS' based on:
SAS and SPSS were used in this study
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'analysis_sw':'SAS,SPSS',}
#TODO, when redcap works again
"""
stands = ["STATA","SAS","SPSS"]
for each_sent in nltk.sent_tokenize(text):
for stand in stands:
if re.search(stand,each_sent):
self.check("Analysis Software",stand,each_sent,"analysis_sw")
if ("analysis_sw" in self.entry and stand in self.entry['analysis_sw']):
self.entry['analysis_software_open___1'] = 1 #software is proprietary
search = re.search(stand + r'.*?(\d[\d\.]*\d)',each_sent)
if (search):
self.check("Analysis Software Version",search.group(1),each_sent,"analysis_sw_version")
self.check_operating_system(each_sent)
if (re.search(r'analys',each_sent,re.I) and re.search(r'\sR\s',each_sent)):
self.check("Analysis Software","R",each_sent,"analysis_sw")
if ("analysis_sw" in self.entry and "R" in self.entry['analysis_sw']):
search = re.search(r'\sR\s.*?(\d[\d\.]*\d)',each_sent)
if (search):
self.check("Analysis Software Version",search.group(1),each_sent,"analysis_sw_version")
self.entry['analysis_software_open___2'] = 1 #software is open-source
def check_operating_system(self,sentence):
"""
Determine if article records what operating system was used for statistical analyses
Called by _get_stats and check_standards methods when an analysis software is found
Args: sentence -- sentence that indicated what analysis software was used in analyses (string)
Return: void
Searches for only the most common operating systems: 'Windows', 'Mac', 'Linux', 'Unix'
"""
for os in ['Windows','Mac','Linux','Unix']:
if (re.search(os,sentence,re.I)):
self.check_boolean("Operating System Used For Analyses",os,text,"analysis_os")
if ("analysis_os" in self.entry):
return
def _get_limitations(self,text):
"""
Determine if the article documented limitations of the study
Args: text -- text from the article to be extracted (string)
Return: void
Articles often presented their limitations in the format:
"There were several limitations to the study"
"The study was limited by ..."
"Our study had several shortcomings"
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_limitations("This study faced several limitations")
I think 'Publication Documents Limitations Of The Study' should be: 'yes' based on:
This study faced several limitations
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'limitations_where___7': '1'}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_limitations("This study faced several limitations")
>>> ae.entry
{'limitations_where___7': '1'}
"""
for each_sent in nltk.sent_tokenize(text):
if (re.search(r'shortcomings',each_sent,re.I) or re.search(r'limitation',each_sent,re.I) or re.search(r'(was)?(is)? limited',each_sent,re.I)):
self.check_boolean("Publication Documents Limitations Of The Study",1,each_sent,"limitations_where___7",display='yes')
if ("limitations_where___7" in self.entry):
return
def _get_institution(self,affiliation):
"""
Determine the institution of the primary author of the article
Args: affiliation -- affiliation of the first author extracted from xml page of the article's corresponding ncbi page
Return: void
The affiliation tag of the ncbi site follows this format:
<Affiliation>John Jay College of Criminal Justice, The City University of New York, Department of Criminal Justice, 524 West 59th Street, New York, NY 10019, United States. Electronic address: <EMAIL>.</Affiliation>
#from article:
doi: 10.1016/j.burns.2013.12.002
pubmed code: 24433938
The institution section often has one of the keywords:
'hospital','university','school','college','institute'
If none of the indicator words is found, guess the second section of the affiliation tag (sections broken by commas)
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae._get_institution("John Jay College of Criminal Justice, The City University of New York, Department of Criminal Justice, 524 West 59th Street, New York, NY 10019, United States. Electronic address: <EMAIL>.")
I think 'Institution' should be: 'John Jay College of Criminal Justice' based on:
affiliation: '<NAME> College of Criminal Justice, The City University of New York, Department of Criminal Justice, 524 West 59th Street, New York, NY 10019, United States. Electronic address: <EMAIL>.'
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'institution_corr_author': 'John Jay College of Criminal Justice'}
>>> ae = ArticleExtractor(run_style=1)
>>> ae._get_institution("John Jay College of Criminal Justice, The City University of New York, Department of Criminal Justice, 524 West 59th Street, New York, NY 10019, United States. Electronic address: <EMAIL>.")
>>> ae.entry
{'institution_corr_author': '<NAME> of Criminal Justice'}
"""
af_from_xml = affiliation.split(", ")
for option in af_from_xml: #could tweak slightly
if (re.search(r'hospital',option,re.I) or re.search(r'university',option,re.I) or re.search(r'school',option,re.I) or re.search(r'college',option,re.I) or re.search(r'institute',option,re.I)):
self.check("Institution",option,"affiliation: '{}'".format(affiliation),"institution_corr_author")
if ("institution_corr_author" in self.entry):
return
try:
self.check("Institution",af_from_xml[1],"affiliation: '{}'".format(affiliation),"institution_corr_author")
except IndexError:
pass
def get_clinical_domain_from_xml(self,institution):
"""
Determine the clinical domain of the article (oncology, Neurology, etc)
Args: affiliation -- affiliation of the first author extracted from xml page of the article's corresponding ncbi page
Return: void
The affiliation tag of the ncbi site follows this format:
<Affiliation>Department of Neurology, Aizawa Hospital, Matsumoto, Japan. Electronic address: <EMAIL>.</Affiliation>
#from article:
doi: 10.1016/j.clineuro.2015.10.004
pubmed code: 26513432
Compare against possible clinical domain categorites:
department
division
journal title
article title
Example:
>>> ae = ArticleExtractor(run_style=0)
>>> ae.get_clinical_domain_from_xml("Department of Neurology, Aizawa Hospital, Matsumoto, Japan. Electronic address: <EMAIL>.")
I think 'Clinical Domain' should be: 'Neurology' based on:
Department: Neurology
Is this correct? (if no, type no and press enter; otherwise, press enter):
>>> ae.entry
{'clinical_domain': '23'}
>>> ae = ArticleExtractor(run_style=1)
>>> ae.get_clinical_domain_from_xml("Department of Neurology, Aizawa Hospital, Matsumoto, Japan. Electronic address: <EMAIL>.")
>>> ae.entry
{'clinical_domain': '23'}
"""
def _clinical_asides(afs):
"""
Retrieve department and divison from the affiliation info
Args: afs -- affiliation tag from the ncbi site split into sections (list)
Return: (department,division) tuple (string,string)
Can only be called within the get_clinical_domain_from_xml method
"""
department = division = ""
for option in afs:
search = re.search(r'departments? of(.*)',option,re.I)
if (search):
department = search.group(1).strip()
search = re.search(r'division of(.*)',option,re.I)
if (search):
division = search.group(1).strip()
return (department,division)
af_from_xml = institution.split(", ")
(department, division) = _clinical_asides(af_from_xml)
cd_from_department = self.get_clinical_domain(department.split())
if (cd_from_department):
self.check_boolean("Clinical Domain",self.get_choices("clinical_domain")[cd_from_department],"Department: {0}".format(department),"clinical_domain",display=cd_from_department)
cd_from_division = self.get_clinical_domain(division.split())
if (cd_from_division):
self.check_boolean("Clinical Domain",self.get_choices("clinical_domain")[cd_from_division],"Division: {0}".format(division),"clinical_domain",display=cd_from_division)
try:
cd_from_journal = self.get_clinical_domain(self.entry['journal_publication'].split())
if (cd_from_journal):
self.check_boolean("Clinical Domain",self.get_choices("clinical_domain")[cd_from_journal],"Journal Title: " + self.entry['journal_publication'],"clinical_domain",display=cd_from_journal)
cd_from_title = self.get_clinical_domain(self.entry['article_title'].split())
if (cd_from_title):
self.check_boolean("Clinical Domain",self.get_choices("clinical_domain")[cd_from_title],"Article Title: " + self.entry['article_title'],"clinical_domain",display=cd_from_title)
except KeyError as e:
#journal_publication or article_title not in entry
#xml_extract hasnt been run
pass
| [
"nltk.pos_tag",
"nltk.RegexpParser",
"re.escape",
"nltk.corpus.stopwords.words",
"nltk.word_tokenize",
"os.getuid",
"os.getlogin",
"nltk.sent_tokenize",
"re.search"
] | [((7484, 7508), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (7502, 7508), False, 'import nltk\n'), ((9877, 9901), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (9895, 9901), False, 'import nltk\n'), ((11468, 11492), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (11486, 11492), False, 'import nltk\n'), ((16426, 16450), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (16444, 16450), False, 'import nltk\n'), ((18050, 18074), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (18068, 18074), False, 'import nltk\n'), ((19077, 19101), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (19095, 19101), False, 'import nltk\n'), ((20641, 20665), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (20659, 20665), False, 'import nltk\n'), ((22490, 22514), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (22508, 22514), False, 'import nltk\n'), ((24174, 24198), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (24192, 24198), False, 'import nltk\n'), ((26736, 26760), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (26754, 26760), False, 'import nltk\n'), ((3438, 3451), 'os.getlogin', 'os.getlogin', ([], {}), '()\n', (3449, 3451), False, 'import os, sys, re\n'), ((3594, 3625), 're.search', 're.search', (['username', 'user', 're.I'], {}), '(username, user, re.I)\n', (3603, 3625), False, 'import os, sys, re\n'), ((4916, 4944), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (4934, 4944), False, 'import nltk\n'), ((4957, 4976), 'nltk.pos_tag', 'nltk.pos_tag', (['words'], {}), '(words)\n', (4969, 4976), False, 'import nltk\n'), ((5170, 5198), 'nltk.RegexpParser', 'nltk.RegexpParser', (['chunkGram'], {}), '(chunkGram)\n', (5187, 5198), False, 'import nltk\n'), ((6121, 6159), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (6148, 6159), False, 'import nltk\n'), ((7517, 7560), 're.search', 're.search', (['"""we.*?hypothes"""', 'each_sent', 're.I'], {}), "('we.*?hypothes', each_sent, re.I)\n", (7526, 7560), False, 'import os, sys, re\n'), ((9910, 9958), 're.search', 're.search', (['"""funded.*?by"""', 'each_sent', '(re.I | re.S)'], {}), "('funded.*?by', each_sent, re.I | re.S)\n", (9919, 9958), False, 'import os, sys, re\n'), ((16459, 16497), 're.search', 're.search', (['"""database"""', 'each_sent', 're.I'], {}), "('database', each_sent, re.I)\n", (16468, 16497), False, 'import os, sys, re\n'), ((22528, 22617), 're.search', 're.search', (['"""analys[ie]s (were)?(was)? performed\\\\s+\\\\w+\\\\W+(.*?)\\\\s"""', 'each_sent', 're.I'], {}), "('analys[ie]s (were)?(was)? performed\\\\s+\\\\w+\\\\W+(.*?)\\\\s',\n each_sent, re.I)\n", (22537, 22617), False, 'import os, sys, re\n'), ((25657, 25686), 're.search', 're.search', (['os', 'sentence', 're.I'], {}), '(os, sentence, re.I)\n', (25666, 25686), False, 'import os, sys, re\n'), ((9971, 10029), 're.search', 're.search', (['"""grant.*?(\\\\w*\\\\d[\\\\w\\\\d/-]*)"""', 'each_sent', 're.I'], {}), "('grant.*?(\\\\w*\\\\d[\\\\w\\\\d/-]*)', each_sent, re.I)\n", (9980, 10029), False, 'import os, sys, re\n'), ((10129, 10194), 're.search', 're.search', (['"""grant.*?from (.*?)[^\\\\w\\\\s-]"""', 'each_sent', '(re.I | re.S)'], {}), "('grant.*?from (.*?)[^\\\\w\\\\s-]', each_sent, re.I | re.S)\n", (10138, 10194), False, 'import os, sys, re\n'), ((11520, 11564), 're.search', 're.search', (['"""were\\\\W*includ"""', 'each_sent', 're.I'], {}), "('were\\\\W*includ', each_sent, re.I)\n", (11529, 11564), False, 'import os, sys, re\n'), ((11566, 11610), 're.search', 're.search', (['"""were\\\\W*exclud"""', 'each_sent', 're.I'], {}), "('were\\\\W*exclud', each_sent, re.I)\n", (11575, 11610), False, 'import os, sys, re\n'), ((11616, 11652), 're.search', 're.search', (['"""inclus"""', 'each_sent', 're.I'], {}), "('inclus', each_sent, re.I)\n", (11625, 11652), False, 'import os, sys, re\n'), ((18083, 18123), 're.search', 're.search', (['"""abstracted"""', 'each_sent', 're.I'], {}), "('abstracted', each_sent, re.I)\n", (18092, 18123), False, 'import os, sys, re\n'), ((18131, 18167), 're.search', 're.search', (['"""manual"""', 'each_sent', 're.I'], {}), "('manual', each_sent, re.I)\n", (18140, 18167), False, 'import os, sys, re\n'), ((18175, 18210), 're.search', 're.search', (['"""query"""', 'each_sent', 're.I'], {}), "('query', each_sent, re.I)\n", (18184, 18210), False, 'import os, sys, re\n'), ((19110, 19154), 're.search', 're.search', (['"""language\\\\spro"""', 'each_sent', 're.I'], {}), "('language\\\\spro', each_sent, re.I)\n", (19119, 19154), False, 'import os, sys, re\n'), ((19156, 19195), 're.search', 're.search', (['"""\\\\snlp\\\\s"""', 'each_sent', 're.I'], {}), "('\\\\snlp\\\\s', each_sent, re.I)\n", (19165, 19195), False, 'import os, sys, re\n'), ((20674, 20727), 're.search', 're.search', (['"""statistical analys[ie]s"""', 'each_sent', 're.I'], {}), "('statistical analys[ie]s', each_sent, re.I)\n", (20683, 20727), False, 'import os, sys, re\n'), ((20730, 20776), 're.search', 're.search', (['"""data analys[ie]s"""', 'each_sent', 're.I'], {}), "('data analys[ie]s', each_sent, re.I)\n", (20739, 20776), False, 'import os, sys, re\n'), ((23016, 23106), 're.search', 're.search', (["(self.entry['analysis_sw'] + '.*?(\\\\d[\\\\d\\\\.]*\\\\d)')", 'each_sent', '(re.I | re.S)'], {}), "(self.entry['analysis_sw'] + '.*?(\\\\d[\\\\d\\\\.]*\\\\d)', each_sent, re\n .I | re.S)\n", (23025, 23106), False, 'import os, sys, re\n'), ((24231, 24258), 're.search', 're.search', (['stand', 'each_sent'], {}), '(stand, each_sent)\n', (24240, 24258), False, 'import os, sys, re\n'), ((24711, 24747), 're.search', 're.search', (['"""analys"""', 'each_sent', 're.I'], {}), "('analys', each_sent, re.I)\n", (24720, 24747), False, 'import os, sys, re\n'), ((24751, 24782), 're.search', 're.search', (['"""\\\\sR\\\\s"""', 'each_sent'], {}), "('\\\\sR\\\\s', each_sent)\n", (24760, 24782), False, 'import os, sys, re\n'), ((26769, 26811), 're.search', 're.search', (['"""shortcomings"""', 'each_sent', 're.I'], {}), "('shortcomings', each_sent, re.I)\n", (26778, 26811), False, 'import os, sys, re\n'), ((26814, 26854), 're.search', 're.search', (['"""limitation"""', 'each_sent', 're.I'], {}), "('limitation', each_sent, re.I)\n", (26823, 26854), False, 'import os, sys, re\n'), ((26857, 26906), 're.search', 're.search', (['"""(was)?(is)? limited"""', 'each_sent', 're.I'], {}), "('(was)?(is)? limited', each_sent, re.I)\n", (26866, 26906), False, 'import os, sys, re\n'), ((29096, 29131), 're.search', 're.search', (['"""hospital"""', 'option', 're.I'], {}), "('hospital', option, re.I)\n", (29105, 29131), False, 'import os, sys, re\n'), ((29134, 29171), 're.search', 're.search', (['"""university"""', 'option', 're.I'], {}), "('university', option, re.I)\n", (29143, 29171), False, 'import os, sys, re\n'), ((29174, 29207), 're.search', 're.search', (['"""school"""', 'option', 're.I'], {}), "('school', option, re.I)\n", (29183, 29207), False, 'import os, sys, re\n'), ((29210, 29244), 're.search', 're.search', (['"""college"""', 'option', 're.I'], {}), "('college', option, re.I)\n", (29219, 29244), False, 'import os, sys, re\n'), ((29247, 29283), 're.search', 're.search', (['"""institute"""', 'option', 're.I'], {}), "('institute', option, re.I)\n", (29256, 29283), False, 'import os, sys, re\n'), ((31239, 31285), 're.search', 're.search', (['"""departments? of(.*)"""', 'option', 're.I'], {}), "('departments? of(.*)', option, re.I)\n", (31248, 31285), False, 'import os, sys, re\n'), ((31357, 31399), 're.search', 're.search', (['"""division of(.*)"""', 'option', 're.I'], {}), "('division of(.*)', option, re.I)\n", (31366, 31399), False, 'import os, sys, re\n'), ((3468, 3479), 'os.getuid', 'os.getuid', ([], {}), '()\n', (3477, 3479), False, 'import os, sys, re\n'), ((6457, 6472), 're.escape', 're.escape', (['word'], {}), '(word)\n', (6466, 6472), False, 'import os, sys, re\n'), ((10302, 10366), 're.search', 're.search', (['"""funded.*?by (.*?)[^\\\\w\\\\s-]"""', 'each_sent', '(re.I | re.S)'], {}), "('funded.*?by (.*?)[^\\\\w\\\\s-]', each_sent, re.I | re.S)\n", (10311, 10366), False, 'import os, sys, re\n'), ((11656, 11692), 're.search', 're.search', (['"""exclus"""', 'each_sent', 're.I'], {}), "('exclus', each_sent, re.I)\n", (11665, 11692), False, 'import os, sys, re\n'), ((18219, 18256), 're.search', 're.search', (['"""records"""', 'each_sent', 're.I'], {}), "('records', each_sent, re.I)\n", (18228, 18256), False, 'import os, sys, re\n'), ((18259, 18295), 're.search', 're.search', (['"""review"""', 'each_sent', 're.I'], {}), "('review', each_sent, re.I)\n", (18268, 18295), False, 'import os, sys, re\n'), ((22731, 22767), 're.search', 're.search', (['"""analys"""', 'each_sent', 're.I'], {}), "('analys', each_sent, re.I)\n", (22740, 22767), False, 'import os, sys, re\n'), ((22771, 22831), 're.search', 're.search', (['"""were\\\\s\\\\w*\\\\susing\\\\s(.*?)\\\\s"""', 'each_sent', 're.I'], {}), "('were\\\\s\\\\w*\\\\susing\\\\s(.*?)\\\\s', each_sent, re.I)\n", (22780, 22831), False, 'import os, sys, re\n'), ((24936, 24987), 're.search', 're.search', (['"""\\\\sR\\\\s.*?(\\\\d[\\\\d\\\\.]*\\\\d)"""', 'each_sent'], {}), "('\\\\sR\\\\s.*?(\\\\d[\\\\d\\\\.]*\\\\d)', each_sent)\n", (24945, 24987), False, 'import os, sys, re\n'), ((11700, 11741), 're.search', 're.search', (['"""exclusively"""', 'each_sent', 're.I'], {}), "('exclusively', each_sent, re.I)\n", (11709, 11741), False, 'import os, sys, re\n'), ((19575, 19613), 're.search', 're.search', (['"""appendix"""', 'each_sent', 're.I'], {}), "('appendix', each_sent, re.I)\n", (19584, 19613), False, 'import os, sys, re\n'), ((24496, 24548), 're.search', 're.search', (["(stand + '.*?(\\\\d[\\\\d\\\\.]*\\\\d)')", 'each_sent'], {}), "(stand + '.*?(\\\\d[\\\\d\\\\.]*\\\\d)', each_sent)\n", (24505, 24548), False, 'import os, sys, re\n'), ((19821, 19857), 're.search', 're.search', (['"""\\\\Wgit"""', 'each_sent', 're.I'], {}), "('\\\\Wgit', each_sent, re.I)\n", (19830, 19857), False, 'import os, sys, re\n')] |
# -*- coding: utf-8 -*-
import math
import numpy as np
import matplotlib.pyplot as plt
#Constants
GNa = 120 #Maximal conductance(Na+) ms/cm2
Gk = 36 #Maximal condectance(K+) ms/cm2
Gleak = 0.3 #Maximal conductance(leak) ms/cm2
cm = 1 #Cell capacitance uF/cm2
delta = 0.01 #Axon condectivity ms2
ENa = 50 #Nernst potential (Na+) mV
Ek = -77 #Nernst potential (K+) mV
Eleak = -54.4 #Nernst potential (leak) mV
#Simulation Parameters
simulation_time = 25.0 #ms
domain_length = 4 #cm
dt = 0.001 #ms
dx = 0.1 #cm
x = np.arange(0,domain_length,dx)
time = np.arange(0,simulation_time,dt)
#Convenience variables
a1 = delta*dt/(dx*dx*cm) #V(i-1,t)
a2 = 1 - 2*delta*dt/(dx*dx*cm) #V(i,t)
a3 = delta*dt/(dx*dx*cm) #V(i+1,t)
#Solution matrix
V = np.zeros((len(x),len(time)))
M = np.zeros((len(x),len(time)))
N = np.zeros((len(x),len(time)))
H = np.zeros((len(x),len(time)))
V_initial = -70 #mV
#Initial condition
#When t=0, V=-70mV M=N=H=0
V[:,0] = V_initial
M[:,0] = 0
N[:,0] = 0
H[:,0] = 0
#time loop
for n in range(0,len(time)-1):
#loop over space
for i in range(1,len(x)-1):
if n*dt <= 0.9 and n*dt >= 0.5 and i*dx <= 0.3:
Istim = -25 #uA/cm2
else:
Istim = 0
#Convenience variables
INa = GNa*math.pow(M[i,n],3)*H[i,n]*(V[i,n]-ENa)
Ik = Gk*math.pow(N[i,n],4)*(V[i,n]-Ek)
Ileak = Gleak*(V[i,n]-Eleak)
Iion = INa + Ik + Ileak + Istim
#FTCS
V[i,n+1] = a1*V[i-1,n] + a2*V[i,n] + a3*V[i+1,n] - dt*Iion/cm
#Gating variables:M
aM = (40+V[i,n])/(1-math.exp(-0.1*(40+V[i,n])))
bM = 0.108*math.exp(-V[i,n]/18)
Minf = aM/(aM+bM)
tM = 1/(aM+bM)
M[i,n+1] = M[i,n] + dt*(Minf-M[i,n])/tM
#Gating variables:H
aH = 0.0027*math.exp(-V[i,n]/20)
bH = 1/(1+math.exp(-0.1*(35-V[i,n])))
Hinf = aH/(aH+bH)
tH = 1/(aH+bH)
H[i,n+1] = H[i,n] + dt*(Hinf-H[i,n])/tH
#Gating variables:N
aN = 0.01*(55+V[i,n])/(1-math.exp(-0.1*(55+V[i,n])))
bN = 0.055*math.exp(-V[i,n]/80)
Ninf = aN/(aN+bN)
tN = 1/(aN+bN)
N[i,n+1] = N[i,n] + dt*(Ninf-N[i,n])/tN
#No flux boundary condition at both end
V[0,n+1] = V[1,n+1]
V[len(x)-1,n+1] = V[len(x)-2,n+1]
#Conduction velocity
Max1 = np.argmax(V[0,:])
Max2 = np.argmax(V[len(x)-1,:])
print(domain_length/((Max2-Max1)*dt))
#Plot V versus time for the first and last node of the axon.
plt.figure(1)
plt.clf()
plt.plot(time,V[0,:],'r-',time,V[len(x)-1,:],'b-')
plt.show()
| [
"math.pow",
"matplotlib.pyplot.clf",
"numpy.argmax",
"matplotlib.pyplot.figure",
"math.exp",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((518, 549), 'numpy.arange', 'np.arange', (['(0)', 'domain_length', 'dx'], {}), '(0, domain_length, dx)\n', (527, 549), True, 'import numpy as np\n'), ((555, 588), 'numpy.arange', 'np.arange', (['(0)', 'simulation_time', 'dt'], {}), '(0, simulation_time, dt)\n', (564, 588), True, 'import numpy as np\n'), ((2350, 2368), 'numpy.argmax', 'np.argmax', (['V[0, :]'], {}), '(V[0, :])\n', (2359, 2368), True, 'import numpy as np\n'), ((2502, 2515), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2512, 2515), True, 'import matplotlib.pyplot as plt\n'), ((2516, 2525), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2523, 2525), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2585, 2587), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1665), 'math.exp', 'math.exp', (['(-V[i, n] / 18)'], {}), '(-V[i, n] / 18)\n', (1650, 1665), False, 'import math\n'), ((1809, 1832), 'math.exp', 'math.exp', (['(-V[i, n] / 20)'], {}), '(-V[i, n] / 20)\n', (1817, 1832), False, 'import math\n'), ((2085, 2108), 'math.exp', 'math.exp', (['(-V[i, n] / 80)'], {}), '(-V[i, n] / 80)\n', (2093, 2108), False, 'import math\n'), ((1337, 1357), 'math.pow', 'math.pow', (['N[i, n]', '(4)'], {}), '(N[i, n], 4)\n', (1345, 1357), False, 'import math\n'), ((1595, 1626), 'math.exp', 'math.exp', (['(-0.1 * (40 + V[i, n]))'], {}), '(-0.1 * (40 + V[i, n]))\n', (1603, 1626), False, 'import math\n'), ((1850, 1881), 'math.exp', 'math.exp', (['(-0.1 * (35 - V[i, n]))'], {}), '(-0.1 * (35 - V[i, n]))\n', (1858, 1881), False, 'import math\n'), ((2038, 2069), 'math.exp', 'math.exp', (['(-0.1 * (55 + V[i, n]))'], {}), '(-0.1 * (55 + V[i, n]))\n', (2046, 2069), False, 'import math\n'), ((1282, 1302), 'math.pow', 'math.pow', (['M[i, n]', '(3)'], {}), '(M[i, n], 3)\n', (1290, 1302), False, 'import math\n')] |
# from flask_login import LoginManager
from flask_restless import APIManager
from flask_sqlalchemy import SQLAlchemy
from flask import logging
__author__ = 'sharp'
db = SQLAlchemy()
restless = APIManager(app=None, flask_sqlalchemy_db=db)
logger = logging.getLogger()
# login_manager = LoginManager()
| [
"flask_sqlalchemy.SQLAlchemy",
"flask.logging.getLogger",
"flask_restless.APIManager"
] | [((171, 183), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (181, 183), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((196, 240), 'flask_restless.APIManager', 'APIManager', ([], {'app': 'None', 'flask_sqlalchemy_db': 'db'}), '(app=None, flask_sqlalchemy_db=db)\n', (206, 240), False, 'from flask_restless import APIManager\n'), ((251, 270), 'flask.logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (268, 270), False, 'from flask import logging\n')] |
from application.factories import vcf_handler_api
application = vcf_handler_api(
name="VCF Handler API",
)
| [
"application.factories.vcf_handler_api"
] | [((65, 104), 'application.factories.vcf_handler_api', 'vcf_handler_api', ([], {'name': '"""VCF Handler API"""'}), "(name='VCF Handler API')\n", (80, 104), False, 'from application.factories import vcf_handler_api\n')] |
from flask import Flask, Blueprint
from flask_ask import Ask
from flask_sqlalchemy import SQLAlchemy
from config import config
# Extensions
db = SQLAlchemy()
alexa = Ask(route = '/')
# Main blueprint
main = Blueprint('main', __name__)
from . import models, views
def create_app(config_name = 'development'):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init_app(app)
alexa.init_app(app)
from . import main as main_blueprint
app.register_blueprint(main_blueprint)
return app | [
"flask_sqlalchemy.SQLAlchemy",
"flask_ask.Ask",
"flask.Blueprint",
"flask.Flask"
] | [((147, 159), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (157, 159), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((168, 182), 'flask_ask.Ask', 'Ask', ([], {'route': '"""/"""'}), "(route='/')\n", (171, 182), False, 'from flask_ask import Ask\n'), ((210, 237), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (219, 237), False, 'from flask import Flask, Blueprint\n'), ((323, 338), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (328, 338), False, 'from flask import Flask, Blueprint\n')] |
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
ArticlePage.objects.all().update(
featured_in_latest=False,
featured_in_latest_start_date=None,
featured_in_latest_end_date=None,
featured_in_homepage=False,
featured_in_homepage_start_date=None,
featured_in_homepage_end_date=None)
| [
"molo.core.models.ArticlePage.objects.all"
] | [((222, 247), 'molo.core.models.ArticlePage.objects.all', 'ArticlePage.objects.all', ([], {}), '()\n', (245, 247), False, 'from molo.core.models import ArticlePage\n')] |
from markovch import markov
diagram = markov.Markov('./data_tr.txt')
print(diagram.result_list(50))
| [
"markovch.markov.Markov"
] | [((39, 69), 'markovch.markov.Markov', 'markov.Markov', (['"""./data_tr.txt"""'], {}), "('./data_tr.txt')\n", (52, 69), False, 'from markovch import markov\n')] |