"""
The base for all plugins to derive from. It also implements an auto registering pattern so that
the plugins do not have to explicitly register.
"""
import abc
from marshmallow_jsonschema import JSONSchema
from Lego.Datatypes import InputParams
from .decorators import check_chart_configuration
from .decorators import check_input_configuration
from .decorators import check_modes_of_operation
from .decorators import run_async
# Plugin implementation
class PluginBase(metaclass=abc.ABCMeta):
"""
The base class for all plugins that want to register with this application.
"""
plugin_registry = {}
def __init__(self, name, group):
"""
Constructor to initialize basic fields.
"""
self.name = name
self.group = group
self.input_params = InputParams()
def __new__(cls, name, group, *args, **kwargs):
"""
Factory method for base/subtype creation. Simply creates an
(new-style class) object instance and sets a base property.
"""
del args
del kwargs
instance = object.__new__(cls)
# Call base class constructors by default to avoid doing them in each plugin.
super(cls, instance).__init__(name, group)
typedef = cls.__dict__
for attr in typedef:
func = typedef[attr]
if hasattr(func, "__dont_decorate__"):
pass
elif callable(func) and func.__name__ == 'get_input_configuration':
setattr(cls, attr, check_input_configuration(func))
elif callable(func) and func.__name__ == 'get_chart_configuration':
setattr(cls, attr, check_chart_configuration(func))
elif callable(func) and func.__name__ == 'get_modes_of_operation':
setattr(cls, attr, check_modes_of_operation(func))
elif callable(func) and func.__name__ == 'run':
setattr(cls, attr, run_async(func))
        if group not in cls.plugin_registry:
cls.plugin_registry[group] = []
cls.plugin_registry[group].append(instance)
return instance
@classmethod
def get_plugins(cls):
"""
Gets the list of all plugins registered.
"""
return cls.plugin_registry
@classmethod
def get_plugins_group(cls, group):
"""
Gets plugins registered under a single group name.
"""
        if group not in cls.plugin_registry:
return None
return cls.plugin_registry[group]
def get_plugin_name(self):
"""
returns the plugin name.
"""
return self.name
def get_plugin_group(self):
"""
returns plugin family name.
"""
return self.group
def get_input_configuration(self):
"""
Get name and type json value for input parameters required by this plugin to operate.
"""
json_schema = JSONSchema()
schema_blue_print = self.input_params.generate_schema(self.name + 'InputParams')
schema_desc = schema_blue_print()
return json_schema.dump(schema_desc).data
@abc.abstractmethod
def get_chart_configuration(self):
"""
Get name and type json value for chart display for this plugin.
"""
return None
@abc.abstractmethod
def get_modes_of_operation(self):
"""
Get supported modes of operation online or offline
"""
return ['online', 'offline']
@abc.abstractmethod
def run(self):
"""
Run method to call for the plugin processing.
"""
print("Running abstract method")
return
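A minimal sketch of the auto-registration pattern in use (the HelloPlugin class and its return values are hypothetical, and the Lego package and decorators above are assumed importable):
# Hypothetical subclass: instantiation alone registers it in the shared registry.
class HelloPlugin(PluginBase):
    def get_chart_configuration(self):
        return {"type": "bar"}
    def get_modes_of_operation(self):
        return ["offline"]
    def run(self):
        print("hello from", self.get_plugin_name())
HelloPlugin("hello", "demo")
print(PluginBase.get_plugins_group("demo"))  # -> [<HelloPlugin instance>]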
#!/usr/bin/env python
#
# Copyright (c) 2021 <NAME>
# URL: https://github.com/jamescherti/monitor-xfconf-changes/
#
# Distributed under terms of the MIT license.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# --
# pylint: disable=invalid-name
#
"""This command-line tool will help you to configure XFCE 4 programmatically.
It will display the xfconf-query commands of all the Xfconf settings that
are bring modified by xfce4-settings-manager (or by any other software that
modifies Xfconf like Thunar, Catfish, Ristretto...).
You can then add the xfconf-query commands to a Shell script that you can use
to configure XFCE 4 programmatically.
"""
import os
import signal
from typing import Set, Union, Any
from pathlib import Path
import psutil
from lxml import etree as ETree
__author__ = "<NAME>"
__license__ = "MIT"
class XfconfError(Exception):
"""Exception raised by the class Xfconf() or its children."""
class XfconfItem:
"""Xfconf item."""
def __init__(self,
channel: str,
property_path: str,
property_type: str,
property_value: Union[str, list]):
"""Init the class."""
self.channel = channel
self.property_path = property_path
self.property_type = property_type
self.property_value = property_value
def __repr__(self) -> str:
"""Object representation in string format."""
result = "{}{} : {} = {}".format(self.channel,
self.property_path,
self.property_type,
self.property_value)
return result
class Xfconf:
"""Load Xfconf settings."""
@staticmethod
def escape_command(command: str) -> str:
"""Quote a command."""
return "'{}'".format(command.replace("'", "'\\''"))
@staticmethod
def reload_xfconfd():
"""Reload the process 'xfconfd'."""
for proc in psutil.process_iter():
try:
if proc.name() == "xfconfd":
# reload the process
os.kill(proc.pid, signal.SIGHUP)
except psutil.Error:
pass
def __init__(self):
"""Load Xfconf settings."""
self.xfconf_items: set = set()
dir_xfconf = Path("~/.config/xfce4/xfconf/xfce-perchannel-xml")
for xml_file in dir_xfconf.expanduser().glob("*.xml"):
self._parse_xfconf_perchannel_xml(str(xml_file))
def diff(self) -> Set[str]:
"""Return the settings that have been changed."""
Xfconf.reload_xfconfd()
new_xfce_config = Xfconf()
before = set(str(self).splitlines())
after = set(str(new_xfce_config).splitlines())
self.xfconf_items = new_xfce_config.xfconf_items
return after - before
def __iter__(self):
"""Iterate through 'self.items'."""
yield from self.xfconf_items
def __repr__(self) -> str:
"""Object representation in string format."""
commands = []
for item in self:
cmd = "{} --create -c {} -p {}" \
.format("xfconf-query",
self.escape_command(item.channel),
self.escape_command(item.property_path))
if item.property_type == "array":
for array_item_type, array_item_value in item.property_value:
cmd = "{} --type {} --set {}".format(
cmd,
self.escape_command(array_item_type),
self.escape_command(array_item_value)
)
else:
cmd = "{} --type {} --set {}".format(
cmd,
self.escape_command(item.property_type),
self.escape_command(str(item.property_value))
)
commands.append(cmd)
return "{}\n".format("\n".join(commands))
def _parse_xfconf_perchannel_xml(self,
xml_file: str,
root: Any = None,
channel_name: str = "",
property_path: str = ""):
"""Parse the Xfconf XML."""
if root is None:
tree = ETree.parse(xml_file)
root = tree.getroot()
channel_name = root.attrib.get("name")
if root.attrib.get("version") != "1.0" \
or root.tag.lower() != "channel":
err_str = ("invalid XML file: '{}'").format(xml_file)
raise XfconfError(err_str)
        for elem in root:
            if elem.tag.lower() != "property":
                continue
property_type = elem.attrib.get("type").strip().lower()
cur_property_path = "{}/{}".format(property_path,
elem.attrib.get("name").strip())
if property_type not in ["empty", "uint", "int", "string", "bool",
"array", "double"]:
err_str = ("the type '{}' of '{}{}' is not supported. "
"XML file: '{}'") \
.format(property_type, channel_name, cur_property_path,
xml_file)
raise XfconfError(err_str)
# 'empty' = contains sub items
if property_type == "empty":
self._parse_xfconf_perchannel_xml(
xml_file=xml_file,
root=elem,
channel_name=channel_name,
property_path=cur_property_path
)
continue
# Modify the variable property_value
if property_type == "array":
property_value = []
                for elem_property_value in elem:
array_item_type = elem_property_value.attrib.get("type")
array_item_value = elem_property_value.attrib.get("value")
if array_item_value is None:
array_item_value = ""
property_value.append((array_item_type, array_item_value))
else:
property_value = elem.attrib.get("value")
self.xfconf_items.add(
XfconfItem(
channel=channel_name,
property_path=cur_property_path,
property_type=property_type,
property_value=property_value
)
)
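The monitoring loop described in the module docstring can be sketched roughly as follows (the polling interval and output format here are assumptions, not part of the original tool):
# Hypothetical driver loop: print the xfconf-query commands for any
# settings that changed since the last poll.
import time
if __name__ == "__main__":
    xfconf = Xfconf()
    while True:
        for command in sorted(xfconf.diff()):
            print(command)
        time.sleep(2)  # arbitrary polling interval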
# ==========================================================================================================================================================
#import the libraries
# ==========================================================================================================================================================
import tweepy
import re
import matplotlib.pyplot as plt
from tweepy import OAuthHandler
from textblob import TextBlob
import numpy as np
# ==========================================================================================================================================================
#initialize the keys
# ==========================================================================================================================================================
consumer_key = 'xxxxxxxxxxxxxxxxxxxxxxx'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret = '<KEY>'
# ==========================================================================================================================================================
#initialize the tokens
# ==========================================================================================================================================================
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, timeout=10)
# ==========================================================================================================================================================
#function to clean the tweets: standard procedure
# ==========================================================================================================================================================
def clean_tweet(tweet):
    # use a raw string so the regex escapes (\w, \S, \/) are passed through intact
    return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
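# Illustration (hypothetical input): mentions and URLs are dropped and
# punctuation is replaced by spaces before re-joining on single spaces.
assert clean_tweet("@user PyTorch 1.0 is out! https://t.co/xyz") == "PyTorch 1 0 is out"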
#function to classify the sentiment of a tweet
def get_tweet_sentiment(tweet):
analysis = TextBlob(clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 'positive'
elif analysis.sentiment.polarity == 0:
return 'neutral'
else:
return 'negative'
#function to get the tweets
def get_tweets(query, count = 10):
tweets = []
try:
fetched_tweets = api.search(q = query, count = count)
for tweet in fetched_tweets:
parsed_tweet = {}
parsed_tweet['text'] = tweet.text
parsed_tweet['sentiment'] = get_tweet_sentiment(tweet.text)
if tweet.retweet_count > 0:
if parsed_tweet not in tweets:
tweets.append(parsed_tweet)
else:
tweets.append(parsed_tweet)
return tweets
    except tweepy.TweepError as e:
        print("Error : " + str(e))
        return []
# ==========================================================================================================================================================
#function to get the tweets and plot the graph
# ==========================================================================================================================================================
def main(queryname):
    tweets = get_tweets(queryname, count = 200)
    if not tweets:
        print("No tweets fetched.")
        return
    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
print("Positive tweets percentage: {} %".format(100*len(ptweets)/len(tweets)))
ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
print("Negative tweets percentage: {} %".format(100*len(ntweets)/len(tweets)))
print("Neutral tweets percentage: {} % ".format(100*(len(tweets) - len(ntweets) - len(ptweets))/len(tweets)))
print("\n\nPositive tweets:")
for tweet in ptweets[:10]:
print(tweet['text'])
print("\n\nNegative tweets:")
for tweet in ntweets[:10]:
print(tweet['text'])
objects = ['Positive','Negative','Neutral']
y_pos = np.arange(len(objects))
performance = [100*len(ptweets)/len(tweets),100*len(ntweets)/len(tweets),100*(len(tweets) - len(ntweets) - len(ptweets))/len(tweets)]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Percentage')
plt.title('Sentiment')
plt.show()
# ==========================================================================================================================================================
# USAGE
# ==========================================================================================================================================================
main("PyTorch")
import pygame, sys
from pygame.locals import *
from math import cos, sin, sqrt, tan, pi
# Initialize pygame
pygame.init()
pygame.display.set_caption('Calculator')
clock = pygame.time.Clock()
SURF = pygame.display.set_mode((450, 550))
font = pygame.font.SysFont(None, 30)
calc = pygame.font.SysFont('ocraextended', 25)
FPS = 60
WHITE = (255, 255, 255)
BLUE = (0, 0, 120)
BLACK = (0, 0, 0)
GREEN = (36, 204, 68)
mouse_pos = (0, 0)
equation = ''
y = 0
btn_width = 45
screen = pygame.Rect(50, 50, 300, 50)
# A 1x1 rect at the mouse position; its Rect is reused for button collision tests
mouse = pygame.draw.rect(SURF, WHITE, Rect(mouse_pos, (1, 1)))
# dict-key aliases so the button entries below read as button[text], button[pos], etc.
text, pos, rect, face, text_rect = 'text', 'pos', 'rect', 'face', 'text_rect'
buttons = {
'btn_clear': {text: 'C', pos: (100, 150)},
'btn_bksp': {text: '<x', pos: (150, 150)},
'btn_left': {text: '(', pos: (200, 150)},
'btn_right': {text: ')', pos: (250, 150)},
'btn_7': {text: '7', pos: (100, 200)},
'btn_8': {text: '8', pos: (150, 200)},
'btn_9': {text: '9', pos: (200, 200)},
'btn_divide': {text: '/', pos: (250, 200)},
'btn_4': {text: '4', pos: (100, 250)},
'btn_5': {text: '5', pos: (150, 250)},
'btn_6': {text: '6', pos: (200, 250)},
'btn_multiply': {text: '*', pos: (250, 250)},
'btn_1': {text: '1', pos: (100, 300)},
'btn_2': {text: '2', pos: (150, 300)},
'btn_3': {text: '3', pos: (200, 300)},
'btn_minus': {text: '-', pos: (250, 300)},
'btn_decimal': {text: '.', pos: (100, 350)},
'btn_0': {text: '0', pos: (150, 350)},
'btn_equals': {text: '=', pos: (200, 350)},
'btn_plus': {text: '+', pos: (250, 350)},
'btn_cos': {text: 'cos(', pos: (100, 400)},
'btn_tan': {text: 'tan(', pos: (150, 400)},
'btn_sin': {text: 'sin(', pos: (200, 400)},
'btn_sqrt': {text: 'sqrt(', pos: (250, 400)},
'btn_pi': {text: 'pi', pos: (100, 450)},
'btn_modulo': {text: '%', pos: (150, 450)}
}
keys = "1234567890."
for button in buttons:
b_pos = buttons[button][pos]
b_text = buttons[button][text]
# Create a rectangle object and store it in the dict
rect_params = list(b_pos)
rect_params.extend((btn_width, btn_width))
b_rectangle = pygame.Rect(rect_params)
buttons[button][rect] = b_rectangle
# Create a "face" and store it in the dict
if b_text[-1] == '(' and b_text[0] != '(':
b_text = b_text[:-1] # trim the trailing paren off the math functions
b_face = font.render(b_text, True, WHITE)
b_text_rect = b_face.get_rect()
b_text_rect.center = b_rectangle.center # center the text in the rect
buttons[button][face] = b_face
buttons[button][text_rect] = b_text_rect
while True:
try:
for event in pygame.event.get():
if event.type == pygame.MOUSEMOTION:
mouse_pos = event.pos
if event.type == pygame.MOUSEBUTTONDOWN:
for button in buttons:
if mouse.colliderect(buttons[button][rect]):
current_button = buttons[button][text]
if current_button == '=' and equation == '':
equation = ''
elif current_button == '=':
equation = f"{eval(equation)}"
elif current_button == 'C':
equation = ''
elif current_button == '<x':
equation = equation[:-1]
elif current_button == 'pi':
equation += str(pi)
else:
equation += buttons[button][text]
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
mods = pygame.key.get_mods()
if event.key == K_c:
equation = ''
elif event.key == K_EQUALS and mods & pygame.KMOD_SHIFT:
equation += '+'
elif event.key == K_MINUS:
equation += '-'
elif event.key == K_SLASH:
equation += '/'
elif event.key == K_8 and mods & pygame.KMOD_SHIFT:
equation += '*'
elif event.key == K_5 and mods & pygame.KMOD_SHIFT:
equation += '%'
elif event.key == K_9 and mods & pygame.KMOD_SHIFT:
equation += '('
elif event.key == K_0 and mods & pygame.KMOD_SHIFT:
equation += ')'
elif event.key == K_EQUALS or event.key == K_RETURN:
equation = f"{eval(equation)}"
elif event.key == K_BACKSPACE:
equation = equation[:-1]
elif pygame.key.name(event.key) in keys:
equation += pygame.key.name(event.key)
SURF.fill(WHITE)
mouse = pygame.draw.rect(SURF, WHITE, Rect(mouse_pos, (1, 1)))
pygame.draw.rect(SURF, BLACK, screen, 0)
for button in buttons:
pygame.draw.rect(SURF, BLUE, buttons[button][rect], 0)
SURF.blit(buttons[button][face], buttons[button][text_rect])
equation_text = calc.render(equation, True, GREEN)
equation_rect = equation_text.get_rect()
equation_rect.centery = screen.centery
equation_rect.right = screen.right - 5
SURF.blit(equation_text, equation_rect)
clock.tick(FPS)
pygame.display.update()
except SyntaxError:
equation = 'ERROR'
except NameError:
equation = 'ERROR'
except ZeroDivisionError:
equation = 'ERROR'
from django import forms
from django.forms import ModelForm
from .models import Venue, Location
from django.forms.models import inlineformset_factory
class VenueForm(ModelForm):
    def clean(self):
        # Always call the parent clean() first so the base validation runs.
        super(VenueForm, self).clean()
        return self.cleaned_data
    full_address = forms.CharField(label='Full Address',
                                   help_text='Type a new address')
    latlng = forms.CharField(label='Lat/Lng',
                             help_text='Lat Long of the address')
    class Meta:
        model = Venue
        fields = '__all__'  # newer Django requires an explicit fields/exclude
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
import sys
from app import initialize
manager = Manager(initialize.web_app)
def register_migrate(manager):
from app import models
migrate = Migrate(initialize.web_app, models.db)
manager.add_command('db', MigrateCommand)
return migrate
if __name__ == '__main__':
if 'db' in sys.argv:
migrate = register_migrate(manager)
manager.run()
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
specs = (spec_phase0, spec_altair, spec_merge)
if __name__ == "__main__":
phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
altair_mods = phase_0_mods # No additional Altair specific finality tests
merge_mods = phase_0_mods # No additional Merge specific finality tests
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
        MERGE: merge_mods,
}
run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'detect.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.openimage = QtWidgets.QPushButton(self.centralwidget)
self.openimage.setGeometry(QtCore.QRect(20, 180, 75, 23))
self.openimage.setObjectName("openimage")
self.showimage = QtWidgets.QLabel(self.centralwidget)
self.showimage.setGeometry(QtCore.QRect(100, 20, 401, 451))
self.showimage.setObjectName("showimage")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.openimage.setText(_translate("MainWindow", "打开图片"))  # "打开图片" = "Open image"
self.showimage.setText(_translate("MainWindow", "TextLabel"))
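Generated Ui classes like this one are normally driven by a small launcher script; a minimal sketch (the launcher itself is an assumption, not part of the pyuic5 output):
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())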
import argparse
import math_lib as ml
parser = argparse.ArgumentParser(description='do math')
parser.add_argument('first_number', type=int, help='First Number')
parser.add_argument('second_number', type=int, help='Second Number')
args = parser.parse_args()
if __name__ == '__main__':
print(ml.add(args.first_number, args.second_number))
print(ml.div(args.first_number, args.second_number))
import cv2
import numpy as np
import utils
def outlineRect(image, rect, color):
    """Draw the outline of a rectangle on the image."""
    if rect is None:
        return
    x, y, w, h = [int(i) for i in rect]
    cv2.rectangle(image, (x, y), (x + w, y + h), color)
def copyRect(src, dst, srcRect, dstRect, mask=None,
interpolation=cv2.INTER_LINEAR):
"""Copy part of the source to part of the destination"""
x0, y0, w0, h0 = [int(i) for i in srcRect]
x1, y1, w1, h1 = [int(j) for j in dstRect]
# Resize the contents of the source sub-rectangle
# Put the result in the destination subrectangle
if mask is None:
dst[y1:y1 + h1, x1:x1 + w1] = cv2.resize(src[y0:y0 + h0, x0:x0 + w0], (w1, h1),
interpolation=interpolation)
else:
if not utils.isGray(src):
# Convert the mask to 3 channels, like the image.
mask = mask.repeat(3).reshape(h0, w0, 3)
# Perform the copy, with the mask applied.
dst[y1:y1 + h1, x1:x1 + w1] = np.where(cv2.resize(mask,
(w1, h1),
interpolation=cv2.INTER_LINEAR),
cv2.resize(src[y0:y0 + h0, x0:x0 + w0], (w1, h1),
interpolation=interpolation),
dst[y1:y1 + h1, x1:x1 + w1])
def swapRects(src, dst, rects, masks=None,
interpolation=cv2.INTER_LINEAR):
"""Copy the source with two or more sub-rectangles swapped."""
if dst is not src:
dst[:] = src
numRects = len(rects)
if numRects < 2:
return
if masks is None:
masks = [None] * numRects
# Copy the contents of last rectangle into temporary storage.
x, y, w, h = rects[numRects - 1]
temp = src[y:y + h, x:x + w].copy()
# Copy the contents of each rectangle into next
i = numRects - 2
while i >= 0:
copyRect(src, dst, rects[i], rects[i + 1], masks[i],
interpolation)
i -= 1
# Copy the temporarily stored content into the first rectangle
copyRect(temp, dst, (0, 0, w, h), rects[0], masks[numRects - 1],
interpolation)
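A quick sketch of how swapRects might be exercised (the image and rectangles below are made up for illustration):
# Hypothetical demo: swap two 50x50 regions of a random image.
img = np.random.randint(0, 255, (200, 200, 3), dtype=np.uint8)
out = img.copy()
swapRects(img, out, [(10, 10, 50, 50), (100, 100, 50, 50)])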
import pytest
from ssh2net.core.cisco_iosxr.driver import IOSXRDriver, PRIVS
from tests.unit.drivers.base_driver_unit_tests import BaseDriverUnitTest
class TestIOSXR(BaseDriverUnitTest):
def setup_method(self):
self.privs = PRIVS
self.driver = IOSXRDriver()
def test__determine_current_priv_exec(self):
pytest.skip("no privilege exec on iosxr")
import json, sys, io, os
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
def main():
parser = ArgumentParser()
parser.add_argument("-d", "--data", dest="data", help="path to dataset", metavar="DATA")
parser.add_argument("-i", "--index", dest="index", help="elasticsearch index", metavar="INDEX")
args = parser.parse_args()
if not args.data or not args.index:
parser.print_help()
return
es = Elasticsearch(['localhost'], port=9200)
    doc_id = 0  # renamed so the builtin id() is not shadowed
    dataset_path = os.path.normpath(args.data)
for entry in os.scandir(dataset_path):
filepath = entry.path
filename = entry.name
if filename.endswith(".json"):
print("Indexing "+filename)
with io.open(filepath, 'r', encoding="utf8") as file:
json_data = json.load(file)
json_data['filename'] = filename
                es.index(index=args.index, doc_type="_doc", id=doc_id, body=json_data)
                doc_id = doc_id + 1
else:
continue
if __name__ == "__main__":
main()
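Assuming the script is saved as index_dataset.py (the file name is hypothetical), it would be run against a local Elasticsearch like so:
#   python index_dataset.py --data ./dataset --index my-index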
"""
SC101 - Assignment3
Adapted from <NAME>'s Ghost assignment by
<NAME>.
-----------------------------------------------
This program combines a set of photos of the same scene into a single best photo.
"""
import os
import sys
from simpleimage import SimpleImage
def get_pixel_dist(pixel, red, green, blue):
"""
Returns the square of the color distance between pixel and mean RGB value
Input:
pixel (Pixel): pixel with RGB values to be compared
red (int): average red value across all images
green (int): average green value across all images
blue (int): average blue value across all images
Returns:
dist (int): squared distance between red, green, and blue pixel values
"""
    # squared distance from this pixel to the average pixel; the square root is
    # omitted because squaring is monotonic, so comparisons are unaffected
    color_distance = (red - pixel.red) ** 2 + (green - pixel.green) ** 2 + (blue - pixel.blue) ** 2
    return color_distance
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
Assumes you are returning in the order: [red, green, blue]
"""
    # calculate the average red, green and blue values
    pixel_red_sum = 0    # running sum of red values
    pixel_green_sum = 0  # running sum of green values
    pixel_blue_sum = 0   # running sum of blue values
n = 0
    # sum up the values of each color channel
    for pixel in pixels:
        pixel_red_sum += pixel.red
        pixel_green_sum += pixel.green
        pixel_blue_sum += pixel.blue
        n += 1  # count how many pixels there are
rgb = [int(pixel_red_sum / n), int(pixel_green_sum / n), int(pixel_blue_sum / n)] # calculate the average
return rgb
def get_best_pixel(pixels):
"""
Given a list of pixels, returns the pixel with the smallest
distance from the average red, green, and blue values across all pixels.
Input:
pixels (List[Pixel]): list of pixels to be averaged and compared
Returns:
best (Pixel): pixel closest to RGB averages
"""
    avg = get_average(pixels)  # average RGB values across the photos
    smallest = get_pixel_dist(pixels[0], avg[0], avg[1], avg[2])  # start with the first pixel as the best candidate
    best_pixel = pixels[0]  # replaced whenever a closer pixel is found
    for i in range(1, len(pixels)):
        a = get_pixel_dist(pixels[i], avg[0], avg[1], avg[2])
        if a < smallest:
            smallest = a
            best_pixel = pixels[i]
return best_pixel
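# Tiny worked example (hypothetical pixels, not part of the assignment):
# any object with .red/.green/.blue attributes works with the helpers above.
#   from collections import namedtuple
#   RGB = namedtuple('RGB', ['red', 'green', 'blue'])
#   get_best_pixel([RGB(255, 0, 0), RGB(120, 60, 60), RGB(0, 0, 255)])
#   -> RGB(120, 60, 60), the pixel closest to the average (125, 20, 105)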
def solve(images):
"""
Given a list of image objects, compute and display a Ghost solution image
based on these images. There will be at least 3 images and they will all
be the same size.
Input:
images (List[SimpleImage]): list of images to be processed
"""
width = images[0].width
height = images[0].height
result = SimpleImage.blank(width, height)
######## YOUR CODE STARTS HERE #########
# compare different photo at same position
for x in range(width):
for y in range(height):
            pixels = []  # list collecting the pixel at this position from every image
for image in images:
same_position_pixel = image.get_pixel(x, y)
pixels.append(same_position_pixel)
best_pixel = get_best_pixel(pixels) # choose the best pixel
result_pixel = result.get_pixel(x, y) # fill the best pixel into the result
result_pixel.red = best_pixel.red
result_pixel.green = best_pixel.green
result_pixel.blue = best_pixel.blue
######## YOUR CODE ENDS HERE ###########
print("Displaying image!")
result.show()
def jpgs_in_dir(dir):
"""
(provided, DO NOT MODIFY)
Given the name of a directory, returns a list of the .jpg filenames
within it.
Input:
dir (string): name of directory
Returns:
filenames(List[string]): names of jpg files in directory
"""
filenames = []
for filename in os.listdir(dir):
if filename.endswith('.jpg'):
filenames.append(os.path.join(dir, filename))
return filenames
def load_images(dir):
"""
(provided, DO NOT MODIFY)
Given a directory name, reads all the .jpg files within it into memory and
returns them in a list. Prints the filenames out as it goes.
Input:
dir (string): name of directory
Returns:
images (List[SimpleImages]): list of images in directory
"""
images = []
jpgs = jpgs_in_dir(dir)
for filename in jpgs:
print("Loading", filename)
image = SimpleImage(filename)
images.append(image)
return images
def main():
# (provided, DO NOT MODIFY)
args = sys.argv[1:]
# We just take 1 argument, the folder containing all the images.
# The load_images() capability is provided above.
images = load_images(args[0])
solve(images)
if __name__ == '__main__':
main()
from clean_architecture_basic_classes.basic_domain.basic_value import \
BasicValue
from marshmallow import Schema, fields
from uuid import uuid4
def missing_id():
return str(uuid4())
class BasicEntity(BasicValue):
def __init__(self, entity_id=None):
self.entity_id = entity_id or str(uuid4())
self.adapter = None
def set_adapter(self, adapter):
self.adapter = adapter
def save(self):
my_id = self.adapter.save(self.to_json())
return my_id
def update(self):
my_id = self.adapter.save(self.to_json())
return my_id
def delete(self):
self.adapter.delete(self.entity_id)
def __eq__(self, other):
return self.entity_id == other.entity_id
def __hash__(self):
return hash(self.entity_id)
    class Schema(Schema):  # the base name resolves to the module-level marshmallow Schema
entity_id = fields.String(required=False,
allow_none=True,
missing=missing_id)
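A minimal usage sketch (the Item subclass is hypothetical; no adapter is wired up, so only construction and equality are shown):
class Item(BasicEntity):
    pass
item = Item()                # entity_id defaults to a fresh uuid4 string
same = Item(item.entity_id)
assert item == same          # equality and hashing are based on entity_id only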
# Generated by Django 3.2.7 on 2021-10-21 21:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('file_manager', '0038_auto_20211021_1513'),
]
operations = [
migrations.AddField(
model_name='rawfile',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='contenttypes.contenttype'),
),
migrations.AddField(
model_name='rawfile',
name='object_id',
field=models.PositiveIntegerField(default=5),
),
migrations.AlterField(
model_name='rawfile',
name='note_file',
field=models.ManyToManyField(blank=True, to='file_manager.NoteFile'),
),
]
from __future__ import annotations
import time
from datetime import datetime, timezone
from lazy.models.timez import TimeCls
from .config import TimeZoneConfigz
from .base_imports import *
def _require_pytz():
pytz = Lib.import_lib('pytz')
Lib.reload_module(pytz)
def _require_dateparser():
dateparser = Lib.import_lib('dateparser')
Lib.reload_module(dateparser)
if _pytz_available:
import pytz
api_timezone = pytz.timezone(TimeZoneConfigz.desired)
utc_timezone = pytz.timezone("UTC")
dtime_now_tz = lambda: datetime.now(api_timezone)
else:
api_timezone = _require_pytz
utc_timezone = _require_pytz
dtime_now_tz = _require_pytz
if _dateparser_available:
import dateparser
dtime_now = lambda: datetime.now()
dtime_now_utc = lambda: datetime.now(timezone.utc)
def timer(s: float = None):
return time.time() if not s else (time.time() - s)
def dtime_parse(timeframe: str = '30 mins', future: bool = False):
if not _dateparser_available: _require_dateparser()
if future:
timeframe = 'in ' + timeframe
prefer = 'future'
else:
timeframe += ' ago'
prefer = 'past'
return dateparser.parse(timeframe, settings={'PREFER_DATES_FROM': prefer, 'TIMEZONE': 'UTC', 'RETURN_AS_TIMEZONE_AWARE': True})
def dtime_diff(dtime: datetime = None, timeframe: str = '30 mins', future: bool = False, secs_only: bool = False):
if not dtime: dtime = datetime.now(timezone.utc)
dtime_to_obj = dtime_parse(timeframe=timeframe, future=future)
    diff = (dtime_to_obj - dtime) if future else (dtime - dtime_to_obj)
    if secs_only: return diff.total_seconds()
    return diff
def get_dtime(dtime: datetime = None, start: datetime = None, timeframe: str = None, future: bool = False) -> datetime:
if dtime and start: return start - dtime
    if dtime and timeframe: return dtime_diff(dtime=dtime, timeframe=timeframe, future=future)
if timeframe: return dtime_parse(timeframe=timeframe, future=future)
return dtime_now_utc()
def get_dtime_str(dtime: datetime = None, start: datetime = None, timeframe: str = None, future: bool = False, tz_format: bool = True, dt_format: str = None):
dt = get_dtime(dtime=dtime, start=start, timeframe=timeframe, future=future)
if tz_format: return dt.strftime(TimeZoneConfigz.tz_format)
if dt_format: return dt.strftime(dt_format)
return dt.isoformat('T')
def get_dtime_iso(dtime_str: str, z_break: str = 'Z', z_repl: str = '.000000+00:00'):
"""Breaks the timestamp at z_break and replaces with z_repl if z_break is not empty. """
# We use central timezone for rancher clusters, so need to convert CST -> UTC
if z_break: return datetime.fromisoformat(dtime_str.replace(z_break, z_repl))
return datetime.fromisoformat(dtime_str).astimezone(utc_timezone)
def get_date(timeframe: str = None, future: bool = False):
if not timeframe: return dtime_now_utc()
return dtime_parse(timeframe=timeframe, future=future)
def get_dtime_secs(dtime: datetime = None, start: datetime = None, as_cls: bool = False):
if dtime and start: return (start - dtime).total_seconds()
if as_cls: return TimeCls((dtime_now() - dtime).total_seconds())
try: return (dtime_now_utc() - dtime).total_seconds()
except: return (dtime_now() - dtime).total_seconds()
dtime = get_dtime
dtstr = get_dtime_str
dtsecs = get_dtime_secs
dtnow = dtime_now
dtnow_utc = dtime_now_utc
get_timestamp = dtime_now
get_timestamp_tz = dtime_now_tz
get_timestamp_utc = dtime_now_utc
dtiso = get_dtime_iso
__all__ = [
'time',
'datetime',
'dtime_now',
'dtime_now_tz',
'dtime_now_utc',
'get_timestamp',
'get_timestamp_tz',
'get_timestamp_utc',
'api_timezone',
'utc_timezone',
'timezone_format',
'timer',
'dtime_parse',
'dtime_diff',
'get_dtime',
'get_dtime_str',
'get_dtime_iso',
'get_date',
'get_dtime_secs',
'TimeCls',
]
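A short sketch of the intended call patterns (assumes dateparser and pytz are installed so the optional branches above are active):
# Hypothetical usage of the helpers defined above.
cutoff = dtime_parse('30 mins')                            # aware datetime ~30 minutes in the past
elapsed = dtime_diff(timeframe='2 hours', secs_only=True)  # ~7200 seconds
print(dtstr(timeframe='1 day', future=True))               # formatted datetime one day ahead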
#!/usr/bin/env python3
# coding=utf-8
from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF
from rdflib.namespace import SKOS, XSD, OWL, DC
from rdflib.namespace import DCTERMS as DCT
from SPARQLWrapper import SPARQLWrapper, SPARQLExceptions
import socket
import time
from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array
from lxml import etree as ET
import shutil
import pickle
import os
import argparse
import hashlib
import unicodedata
from configparser import ConfigParser, ExtendedInterpolation
import sys
import logging
from datetime import datetime, date, timedelta
import subprocess
import urllib
from collections import namedtuple
from collections.abc import Sequence
from html.parser import HTMLParser
# global variables
CONVERSION_PROCESS = "Finto SKOS to MARC 1.03"
CONVERSION_URI = "https://www.kiwi.fi/x/XoK6B" # URI of the conversion API or another document describing the conversion
CREATOR_AGENCY = "FI-NL" # creator/owner of the record & cataloging organization, 040 fields
DEFAULTCREATIONDATE = "1980-01-01"
KEEPMODIFIEDAFTER = "ALL"
KEEPDEPRECATEDAFTER = "ALL"
ENDPOINT_ADDRESS = "http://api.dev.finto.fi/sparql"
ENDPOINTGRAPHS = [] # addresses of the endpoint graphs that are loaded alongside the other graphs to be traversed
IGNOREOTHERGRAPHWARNINGS = False # whether errors in graphs other than the one being processed are logged
NORMALIZATION_FORM = "NFD" # used when decoding UTF-8 characters
YSO=Namespace('http://www.yso.fi/onto/yso/')
YSOMETA=Namespace('http://www.yso.fi/onto/yso-meta/')
YSOPAIKATGRAPH=Namespace("http://www.yso.fi/onto/yso-paikat/")
YSA=Namespace('http://www.yso.fi/onto/ysa/')
YSAMETA=Namespace('http://www.yso.fi/onto/ysa-meta/')
ALLARS=Namespace('http://www.yso.fi/onto/allars/')
ALLARSMETA=Namespace("http://www.yso.fi/onto/allars-meta/")
KOKO=Namespace('http://www.yso.fi/onto/koko/')
LCSH=Namespace("http://id.loc.gov/authorities/subjects/")
LCGF=Namespace("http://id.loc.gov/authorities/genreForms/")
RDAU=Namespace('http://rdaregistry.info/Elements/u/')
ISOTHES=Namespace('http://purl.org/iso25964/skos-thes#')
SKOSEXT=Namespace('http://purl.org/finnonto/schema/skosext#')
SLM=Namespace("http://urn.fi/URN:NBN:fi:au:slm:")
UDC=Namespace("http://udcdata.info/")
WIKIDATA=Namespace("http://www.wikidata.org/entity/")
LANGUAGES = {
'fi': 'fin',
'sv': 'swe',
'en': 'eng',
'de': 'ger',
'et': 'est',
'fr': 'fre',
'it': 'ita',
'ru': 'rus',
    'sme': 'sme', # Northern Sami
    'sma': 'sma', # Southern Sami
    'smn': 'smn', # Inari Sami
    'sms': 'sms', # Skolt Sami
    'smj': 'smj', # Lule Sami
}
# LCSH-mapped 1xx fields
LCSH_1XX_FIELDS = ["100", "110", "111", "130", "147", "148", "150", "151", "155", "162", "180", "181", "182", "185"]
TRANSLATIONS = {
SKOSEXT.partOf: {
"fi": "osa kokonaisuutta/käsitettä",
"sv": "är en del av",
"en": "is part of"
},
"682iDEFAULT": {
"fi": "Käytöstä poistetun termin korvaava termi",
"sv": "Termen som ersättar den avlagda termen",
"en": "Term replacing the deprecated term"
},
"688aCREATED": {
"fi": "Luotu",
"sv": "Skapad",
"en": "Created"
},
"688aMODIFIED": {
"fi": "Viimeksi muokattu",
"sv": "Senast editerad",
"en": "Last modified"
}
}
# values go into the first character position of subfield $w
SEEALSOPROPS = {
SKOS.broader : 'g',
SKOS.narrower : 'h',
SKOS.related : 'n',
RDAU.P60683 : 'a',
RDAU.P60686 : 'b',
SKOSEXT.partOf : 'i',
ISOTHES.broaderPartitive : "g",
ISOTHES.narrowerPartitive : "h"
}
SORT_5XX_W_ORDER = {
'g': '001',
'h': '002',
'n': '003',
'i': '004',
'a': '005',
'b': '006'
}
# position 5: 'n' = new, 'c' = changed/corrected, 'd' = deleted (no successors), 'x' = 1 successor, 's' = >= 2 successors
LEADERNEW = '00000nz a2200000n 4500'
LEADERCHANGED = '00000cz a2200000n 4500'
LEADERDELETED0 = '00000dz a2200000n 4500'
LEADERDELETED1 = '00000xz a2200000n 4500'
LEADERDELETED2 = '00000sz a2200000n 4500'
CATALOGCODES = '|n|anznnbabn | ana '
CATALOGCODES_NA = '|n|enznnbbbn | ana '
GROUPINGCLASSES = [ISOTHES.ConceptGroup, ISOTHES.ThesaurusArray, SKOS.Collection, YSOMETA.Hierarchy]
# tuple to simplify handling of the values returned by the getValues helper
ValueProp = namedtuple("ValueProp", ['value', 'prop'])
# helper functions
def readCommandLineArguments():
parser = argparse.ArgumentParser(description="Program for converting Finto SKOS-vocabularies into MARC (.mrcx).")
parser.add_argument("-c", "--config",
help="Config file location. The key/value pairs defined in the config file are overwritten with possible CLI key/value pairs.")
parser.add_argument("-cs", "--config_section",
help="Config section identifier. Set if vocabulary code is different from section identifier.")
parser.add_argument("-e", "--endpoint", help="Endpoint address to be used for querying linked concepts.")
parser.add_argument("-eg", "--endpoint_graphs",
help="The graphs one wants to query from the endpoint, e.g., http://www.yso.fi/onto/yso/. In case of multiple, separate them with space.")
parser.add_argument("-ignoreOtherGraphWarnings", "--ignore_other_graph_warnings",
help="Do you want ignore warnings produced whilst processing other graphs? Set this flag only if you want to ignore.", action="store_true")
parser.add_argument("-i", "--input", help="Input file location, e.g., yso-skos.ttl")
parser.add_argument("-if", "--input_format", help="Input file format. Default: turtle")
parser.add_argument("-o", "--output", help="Output file name, e.g., yso.mrcx.")
parser.add_argument("-vocId", "--vocabulary_code", help="MARC code used in tag 040 subfield f.", required=True)
parser.add_argument("-lang", "--languages",
help="The RDF language tag of the language one is willing to convert. In case of multiple, separate them with space.")
parser.add_argument("-m", "--multilanguage_vocabulary", action='store_true',
help="Is the vocabulary using language specified vocabulary codes, e.g., yso/fin? Set this flag only if it is.")
parser.add_argument("-gc", "--grouping_classes",
help="Types of classes not meant for describing/cataloging items in the vocabulary, e.g, hierarchical ones. In case of multiple, seperate them with space.")
parser.add_argument("-log", "--log_file", help="Log file location.")
parser.add_argument("-locDir", "--loc_directory",
help="Library of Congress directory from which to look for and download to LoC marcxml files. One shall not set if one does not want LoC links.")
parser.add_argument("-pv", "--pickle_vocabulary",
help="File location for the vocabulary in Python's pickle format for faster execution. \
If file's modification date is earlier than today, the file is overwritten. Else the vocabulary is loaded from this file.")
parser.add_argument("-modificationDates", "--modification_dates",
help="File location for pickle file, which contains latest modification dates for concepts (e. g. {'concept uri': 'YYYY-MM-DD'}) \
The file is updated after new records are created, if keepModifiedAfter is left out of command line arguments")
parser.add_argument("-keepModifiedAfter", "--keep_modified_after",
help="Create separate batch of MARC21 files for concepts modified after the date given (set in YYYY-MM-DD format).")
parser.add_argument("-defaultCreationDate", "--default_creation_date",
help="Default creation date (set in YYYY-MM-DD format) for a concept if it has not been declared explicitly. Default: " + DEFAULTCREATIONDATE)
parser.add_argument("-keepDeprecatedAfter", "--keep_deprecated_after",
help="Keep deprecated concepts deprecated after (not inclusive) the date given (set in YYYY-MM-DD format). Set to 'ALL' for no limits and 'NONE' to discard all.")
parser.add_argument("-keepGroupingClasses", "--keep_grouping_classes",
help="Keep grouping classes defined in config file.")
args = parser.parse_args()
return args
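# Illustrative invocation (hypothetical script and file names; flags as defined above):
#   python finto-to-marc.py --input yso-skos.ttl --vocabulary_code yso --languages "fi sv" --output yso.mrcx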
def readEndpointGraphs(settings):
sparql = SPARQLWrapper(settings.get("endpoint"))
queryStart = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
CONSTRUCT {
?concept skos:prefLabel ?prefLabel .
?concept skos:inScheme ?inScheme .
?concept owl:deprecated ?deprecated .
?concept a ?types .
}"""
queryEnd = """
WHERE {
?concept a skos:Concept .
?concept skos:prefLabel ?prefLabel .
?concept a ?types .
OPTIONAL {?concept skos:inScheme ?inScheme .}
OPTIONAL {?concept owl:deprecated ?deprecated .}
}
"""
ret = Graph()
for endpointGraphIRI in settings.get("endpointGraphs").split(","):
sparql.setQuery(queryStart + "\nFROM <" + str(endpointGraphIRI) + ">" + queryEnd)
sparql.setMethod("GET")
sparql.setTimeout(600)
ret_length = len(ret)
try:
ret += sparql.query().convert()
if ret_length == len(ret):
logging.warning("Querying graph <" + str(endpointGraphIRI) +
"> from endpoint " + settings.get("endpoint") +
" returned 0 triples. Continuing.")
except (SPARQLExceptions.SPARQLWrapperException) as err:
logging.warning("Whilst querying endpoint " + settings.get("endpoint") +
" for graph <" + str(endpointGraphIRI) +
"> the following error occurred: " + err.__class__.__name__ + ": " + err.msg +
". Skipping the graph.")
except (urllib.error.HTTPError, urllib.error.URLError) as err:
logging.warning("SPARQL endpoint not found in url " + settings.get("endpoint") +
". Skipping querying linked concepts.")
break
except socket.timeout as e:
logging.warning("SPARQL endpoint now answering within timeout limit. " +
"Skipping querying linked concepts.")
return ret
# Helper for reading and tidying complex strings from configuration files
def readConfigVariable(string, separator=None):
if separator:
return [x.strip() for x in string.split(separator) if len(x.strip()) > 0]
else:
return string.strip()
# Converts decomposed åäöÅÄÖ letters back into composed UTF-8 characters (decomposed -> composed)
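# Illustrative example: decomposedÅÄÖtoUnicodeCharacters("Ha\u0308me") == "Häme"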
def decomposedÅÄÖtoUnicodeCharacters(string):
return (string.replace("A\u030a", "Å").replace("a\u030a", "å").
replace("A\u0308", "Ä").replace("a\u0308", "ä").
replace("O\u0308", "Ö").replace("o\u0308", "ö"))
def getValues(graph, target, props, language=None, literal_datatype=None):
"""Given a subject, get all values for a list of properties
in the order in which those properties were defined.
Args:
graph (Graph): The graph from which to search for the properties of the target.
target (URIRef|BNode): Concept.
props (URIRef|sequence(URIRef)): Property or list of properties to search for.
language (str, optional): Language of literals. Defaults to None (return all literals with languages).
Set to empty string ("") for empty lang tag.
literal_datatype (URIRef, optional): Datatype of datatyped literals. Defaults to None (return all literals with datatypes).
Returns:
list(TypeValue): List containing TypeValue namedtuples
prop (URIRef): Matched property
value (URIRef|BNode|Literal): For matched property, object value
Raises:
ValueError: If parameters do not respect the required types
"""
if isinstance(props, URIRef):
# cast to list in order to uniform code
props = [props]
if not (isinstance(target, URIRef) or isinstance(target, BNode)):
raise ValueError("Parameter 'target' must be of type URIRef or BNode.")
elif isinstance(props, str) or not isinstance(props, Sequence):
raise ValueError(
"Type of parameter 'props' must be a URIRef or sequence; got %s." % (type(props)))
elif language is not None and not isinstance(language, str):
raise ValueError("Parameter 'language' must be string if set.")
elif literal_datatype is not None and not isinstance(literal_datatype, URIRef):
raise ValueError("Parameter 'datatype' must be URIRef if set.")
v = []
# setup the language filtering
if language is not None:
if language == '': # we only want not language-tagged literals
langfilter = lambda l: l.language == None
else:
langfilter = lambda l: l.language == language
else: # we don't care about language tags
langfilter = lambda l: True
# setup the datatype filtering
if literal_datatype is not None:
typefilter = lambda l: l.datatype == literal_datatype
else:
typefilter = lambda l: True
for prop in props:
if not isinstance(prop, URIRef):
raise ValueError(
"Types of properties must be URIRefs; got %s from property '%s'." % (type(prop), str(prop)))
# values that pass restrictions are returned
values = [l for l in graph.objects(target, prop) if
(isinstance(l, URIRef) or isinstance(l, BNode)) or
(l.datatype == None and langfilter(l)) or
(l.datatype != None and typefilter(l))
]
# loop through the values and add them to the list
for val in values:
v.append(ValueProp(value=val, prop=prop))
return v
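# Illustrative usage of getValues (hypothetical graph contents):
#   getValues(g, concept, SKOS.prefLabel, language="fi")
#   -> [ValueProp(value=Literal("kissa", lang="fi"), prop=SKOS.prefLabel)]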
# Helper function for parsing URLs out of a sequence of words
# to be reconsidered if these are ever given more structure
def getURLs(string):
urls = []
for word in string:
if len(word) < 10:
continue
if word[0] in ["(", "["]:
word = word[1:-1]
res = urllib.parse.urlparse(word)
if res.scheme in ("http", "https") and \
len(res.netloc) > 3 and "." in res.netloc:
urls.append(word)
return urls
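# Illustrative example (hypothetical input):
#   getURLs("see (http://example.org/doc) for details".split()) -> ["http://example.org/doc"]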
class ConvertHTMLYSOATags(HTMLParser):
    '''
    Replaces potential yso links with the $a subfield marker so that the preferred
    term stays visible. The rest of the text goes into $i subfields. Used e.g. in field 680.
    TODO: error handling and handling of HTML special entities/comments
    '''
merkkijono = ["$i"]
in_a_yso = False
ended_a_yso = False
def initialize(self):
self.merkkijono = ["$i"]
self.in_a_yso = False
self.ended_a_yso = False
def handle_starttag(self, tag, attrs):
if tag == "a":
for attr in attrs:
if attr[0] == "href":
link = attr[1]
if link.startswith(YSO):
self.in_a_yso = True
self.merkkijono[-1] = self.merkkijono[-1].rstrip()
self.merkkijono.append("$a")
return
self.merkkijono.append("<" + tag)
for attr in attrs:
self.merkkijono.append(" " + attr[0] + "='" + attr[1] + "'")
self.merkkijono.append(">")
def handle_endtag(self, tag):
if tag == "a" and self.in_a_yso:
self.in_a_yso = False
self.ended_a_yso = True
else:
self.merkkijono.append("</" + tag + ">")
def handle_data(self, data):
if self.ended_a_yso:
self.merkkijono.append("$i")
self.ended_a_yso = False
        # fixes the handling of ordinary '<' characters found in the text
        # TODO: check whether a similar fix also needs to be made for the
        # functions defined below
if self.merkkijono[-1] != "$i" and self.merkkijono[-1] != "$a":
self.merkkijono[-1] += data
else:
            # normal case - just append the processed text as a new segment
self.merkkijono.append(data)
def handle_comment(self, data):
self.merkkijono.append(data)
def handle_entityref(self, name):
        # TODO: check what these example snippets do
#c = chr(name2codepoint[name])
self.merkkijono.append(name)
def handle_charref(self, name):
        # TODO: check what these example snippets do
#if name.startswith('x'):
# c = chr(int(name[1:], 16))
#else:
# c = chr(int(name))
self.merkkijono.append(name)
def handle_decl(self, data):
self.merkkijono.append(data)
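# Illustrative example (hypothetical note text): feeding
#   'katso <a href="http://www.yso.fi/onto/yso/p1">kissat</a>'
# yields merkkijono == ["$i", "katso", "$a", "kissat"], which the 680 handling
# in convert() pairs into the subfields ('i', 'katso') and ('a', 'kissat').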
# Main conversion function
def convert(cs, vocabulary_name, language, g, g2):
    # Converts the graph (g) in the given language into MARCXML according to the ConfigParser section (cs).
    # g2 contains the foreign graphs (excluding possible lcsh & lcgf references), from which
    # the terms used in the 7XX fields are looked up.
    # The vocabulary_name parameter is needed to recognize the YSO places ontology, for which 670 fields are created.
vocId = cs.get("vocabulary_code")
    # variable for somewhat complicated constants, cast/converted to appropriate types
helper_variables = {
"vocCode" : (cs.get("vocabulary_code") + "/" + LANGUAGES[language] \
if cs.getboolean("multilanguage", fallback=False) \
else vocId),
"groupingClasses" : [URIRef(x) for x in cs.get("groupingClasses", fallback=",".join(GROUPINGCLASSES)).split(",")],
"groupingClassesDefault" : [URIRef(x) for x in cs.parser.get("DEFAULT", "groupingClasses", fallback=",".join(GROUPINGCLASSES)).split(",")],
'modificationDates': cs.get("modificationDates", fallback=None),
'keepModified' : cs.get("keepModifiedAfter", fallback=None),
'keepDeprecated' : cs.get("keepDeprecatedAfter", fallback=KEEPDEPRECATEDAFTER).lower() != "none",
'keepGroupingClasses' : cs.getboolean("keepGroupingClasses", fallback=False),
'write688created' : cs.get("defaultCreationDate", fallback=None) != None,
'defaultOutputFileName' : "yso2marc-" + cs.name.lower() + "-" + language + ".mrcx"
}
if helper_variables['keepModified']:
helper_variables['keepModifiedLimit'] = False \
if cs.get("keepModifiedAfter", fallback=KEEPMODIFIEDAFTER).lower() == "all" \
else datetime.date(datetime.strptime(cs.get("keepModifiedAfter"), "%Y-%m-%d"))
if helper_variables['keepDeprecated']:
helper_variables['keepDeprecatedLimit'] = False \
if cs.get("keepDeprecatedAfter", fallback=KEEPDEPRECATEDAFTER).lower() == "all" \
else datetime.date(datetime.strptime(cs.get("keepDeprecatedAfter"), "%Y-%m-%d"))
if cs.get("output", fallback=None):
parts = cs.get("languages").split(",")
if len(parts) > 1:
output = cs.get("output")
if len(output.split(".")) > 1:
helper_variables["outputFileName"] = ".".join(output.split(".")[:-1]) + "-" + language + "." + output.split(".")[-1]
else:
helper_variables["outputFileName"] = output + "-" + language
if not "outputFileName" in helper_variables:
helper_variables["outputFileName"] = cs.get("output", fallback=helper_variables["defaultOutputFileName"])
    # modified_dates is a dict object keyed by record id, whose values are tuples
    # of the record's latest modification date and an MD5 digest of the record contents
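    # e.g. {"http://www.yso.fi/onto/yso/p1": (date(2021, 1, 15), "9e107d9d372bb6826bd81d3542a419d6")}
    # (illustrative key and values)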
if helper_variables['modificationDates']:
if os.path.isfile(helper_variables['modificationDates']):
with open(helper_variables['modificationDates'], 'rb') as pickle_file:
try:
modified_dates = pickle.load(pickle_file)
except EOFError:
logging.error("The file %s for modification dates is empty "%helper_variables['modificationDates'])
sys.exit(2)
else:
modified_dates = {}
logging.info("Processing vocabulary with vocabulary code '%s' in language '%s'" % (vocId, language))
incrementor = 0
deprecated_counter = 0
writer_records_counter = 0
ysoATagParser = ConvertHTMLYSOATags()
ET_namespaces = {"marcxml": "http://www.loc.gov/MARC21/slim",
"atom": "http://www.w3.org/2005/Atom"}
handle = open(cs.get("output", fallback=helper_variables["defaultOutputFileName"]), "wb")
writer = XMLWriter(handle)
pref_labels = set()
for conc in g.subjects(RDF.type, SKOS.Concept):
pref_label = g.preferredLabel(conc, lang=language)
if pref_label:
pref_labels.add(str(pref_label[0][1]).lower())
concs = []
    # fetch the Library of Congress updates from the last week,
    # unless the program has already been run earlier today
loc_update_dict = {}
update_loc_concepts = True
loc_update_file = os.path.join(cs.get("locDirectory"), "updates.pkl")
if os.path.exists(loc_update_file):
timestamp = os.path.getmtime(loc_update_file)
file_date = date.fromtimestamp(timestamp)
if file_date == date.today():
update_loc_concepts = False
with open(loc_update_file, 'rb') as input_file:
try:
loc_update_dict = pickle.load(input_file)
except EOFError:
logging.error("EOFError in "%loc_update_file)
limit_date = date.today() - timedelta(days=7)
lc_namespaces = [LCGF, LCSH]
feed_prefix = "feed/"
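    # The LoC Atom feeds are paged: <namespace>/feed/1 (as constructed below) is the
    # newest page; pages are walked until an entry older than limit_date is seen.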
for ns in lc_namespaces:
limit_reached = False
for idx in range(1,100):
if limit_reached:
break
file_path = os.path.join(str(ns), feed_prefix, str(idx))
try:
with urllib.request.urlopen(file_path, timeout=5) as atom_xml:
recordNode = ET.parse(atom_xml)
root = recordNode.getroot()
for entry in root.findall("atom:entry", ET_namespaces):
label = None
for updated in entry.findall("atom:updated", ET_namespaces):
updated = datetime.strptime(updated.text[:10], "%Y-%m-%d").date()
if updated >= limit_date:
for link in entry.findall("atom:link", ET_namespaces):
if not 'type' in link.attrib:
uri = link.attrib['href']
if uri in loc_update_dict:
if loc_update_dict[uri]['date'] < updated:
loc_update_dict[uri]['date'] = updated
loc_update_dict[uri]['updatable'] = True
else:
limit_reached = True
except ET.ParseError as e:
logging.warning("Failed to parse Library of Congress update feed")
if not limit_reached:
logging.warning("More than 10 000 updates in Library of Congress feed %s"%ns)
if helper_variables['keepModified']:
        # go through only the concepts that have changed
for uri in modified_dates:
if modified_dates[uri][0] >= helper_variables['keepModifiedLimit']:
concs.append(URIRef(uri))
else:
concs = g.subjects(RDF.type, SKOS.Concept)
for concept in sorted(concs):
incrementor += 1
if incrementor % 1000 == 0:
logging.info("Processing %sth concept" % (incrementor))
        # skip deprecated concepts if they are not wanted. If fetching modified concepts, all are output
if not helper_variables['keepModified'] and (concept, OWL.deprecated, Literal(True)) in g:
if not helper_variables['keepDeprecated']:
deprecated_counter += 1
continue
        # skip grouping classes
if not helper_variables['keepGroupingClasses']:
if any (conceptType in helper_variables["groupingClasses"] for conceptType in g.objects(concept, RDF.type)):
continue
rec = Record()
deprecatedString = ""
loc_concept_downloaded = False
        # dct:modified -> 005 NOT PRINTED, 688
        # check whether the concept has been changed or is original,
        # and choose the leader accordingly
mod = g.value(concept, DCT.modified, None)
if mod is None:
rec.leader = cs.get("leaderNew", fallback=LEADERNEW)
else:
rec.leader = cs.get("leaderChanged", fallback=LEADERCHANGED)
modified = mod.toPython() # datetime.date or datetime.datetime object
if not type(modified) in [date, datetime]:
logging.error("Modification date invalid in concept %s "%concept)
modified = None
# dct:created -> 008
crt = g.value(concept, DCT.created, None)
if crt is None:
created = datetime.date(datetime.strptime(cs.get("defaultCreationDate", fallback=DEFAULTCREATIONDATE), "%Y-%m-%d"))
else:
created = crt.toPython() # datetime.date or datetime.datetime object
if not type(created) in [date, datetime]:
logging.error("Creation date invalid in concept %s "%concept)
created = datetime.date(datetime.strptime(cs.get("defaultCreationDate", fallback=DEFAULTCREATIONDATE), "%Y-%m-%d"))
code = cs.get("catalogCodes", fallback=CATALOGCODES)
        # mark the concept as not-for-cataloging if its type is a grouping class
for conceptType in g.objects(concept, RDF.type):
if conceptType in helper_variables["groupingClasses"]:
code = cs.get("catalogCodes_na", fallback=CATALOGCODES_NA)
break
        # if this is a deleted concept, set the leaders and codes accordingly
if (concept, OWL.deprecated, Literal(True)) in g:
replacers = sorted(g.objects(concept, DCT.isReplacedBy))
if len(replacers) == 0:
rec.leader = cs.get("leaderDeleted0", fallback=LEADERDELETED0)
elif len(replacers) == 1:
rec.leader = cs.get("leaderDeleted1", fallback=LEADERDELETED1)
else:
rec.leader = cs.get("leaderDeleted2", fallback=LEADERDELETED2)
code = cs.get("catalogCodes_na", fallback=CATALOGCODES_NA)
            # if a date limit has also been set
            if helper_variables.get('keepDeprecatedLimit'):
                # if the scopeNote is missing, the deprecation is treated as new and considered
                # to "exceed" the set limit, i.e. the concept stays in the result set
for valueProp in sorted(getValues(g, concept, SKOS.scopeNote, language=""),
key=lambda o: str(o.value)):
if valueProp.value.startswith("deprecated on"):
deprecatedString = str(valueProp.value)
break
if deprecatedString:
deprecatedDateString = deprecatedString.split(" ")[-1]
try:
                        # try to parse the date in two different formats
deprecatedDate = datetime.date(datetime.strptime(deprecatedDateString, "%d.%m.%Y"))
if helper_variables['keepDeprecatedLimit'] > deprecatedDate:
deprecated_counter += 1
continue # skipataan ennen vanhentamisrajaa vanhennetut termit
except ValueError:
try:
deprecatedDate = datetime.date(datetime.strptime(deprecatedDateString, "%Y-%m-%d"))
if helper_variables['keepDeprecatedLimit'] > deprecatedDate:
deprecated_counter += 1
continue # skipataan ennen vanhentamisrajaa vanhennetut termit
except ValueError:
logging.warning("Converting deprecated date failed for concept %s. Proceeding." %
(concept))
if not created and not helper_variables["write688created"]:
logging.warning("No explicit creation date defined for concept %s. Using default value '%s' for character positions 00-05 in tag 008." % (
concept, datetime.date(datetime.strptime(DEFAULTCREATIONDATE, "%Y-%m-%d")).strftime('%y%m%d')))
rec.add_field(
Field(
tag='008',
data=created.strftime('%y%m%d') + code
)
)
        # 024 other standard identifiers - the concept URI is stored here
rec.add_field(
Field(
tag='024',
indicators = ['7', ' '],
subfields = [
'a', concept,
'2', "uri"
]
)
)
        # 034 coordinates of places - yso-places?
        # 035 yso record number?
        # 040 cataloging organization
rec.add_field(
Field(
tag='040',
indicators = [' ', ' '],
subfields = [
'a', cs.get("creatorAgency", fallback=CREATOR_AGENCY),
'b', LANGUAGES[language],
'f', helper_variables["vocCode"]
]
)
)
        # 043 - yso-places, should this be used?
        # http://marc21.kansalliskirjasto.fi/aukt/01X-09X.htm#043
        # 045 - yso time periods, should this be used?
        # http://marc21.kansalliskirjasto.fi/aukt/01X-09X.htm#045
        # 046 - special coded time periods?
        # 052 - geographic classification
        # 7#$a(480)$2udc$0http://udcdata.info/004604
        # if 151 is in use, should this be used? Jarmo: UDC classification, Finland "(480)"
        # ConceptGroup / skos:member -> 065 yso subject area code
        # only in the case where they are wanted in Asteri
        # if no class number is found, nothing is printed
        # this is done only if vocId == "yso"
if vocId == "yso":
for group in sorted(g.subjects(SKOS.member, concept)):
if not helper_variables['keepDeprecated'] and \
(group, OWL.deprecated, Literal(True)) in g:
continue # skip deprecated group concepts
if (group, RDF.type, ISOTHES.ConceptGroup) not in g:
continue
                # extracting the group identifier: try skos:notation first, otherwise fall back to the prefLabel
groupno = g.value(group, SKOS.notation, None)
if groupno is None:
valueProps = sorted(getValues(g, group, SKOS.prefLabel, language=language),
key=lambda o: o.value)
if len(valueProps) == 0:
logging.warning("Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s." %
(group, language, SKOS.member, concept))
continue
elif len(valueProps) != 1:
logging.warning("Multiple prefLabels detected for concept %s in language %s. Taking the first only." %
(concept, language))
groupname = str(valueProps[0].value)
try:
groupno = str(groupname[0:groupname.index(" ")])
groupname = str(groupname[len(groupno) + 1:])
except ValueError:
logging.warning("Tried to parse group number for group %s from concept %s in language %s but failed." %
(group, valueProps[0].value, language))
continue
rec.add_field(
Field(
tag='065',
indicators = [' ', ' '],
subfields = [
'a', groupno,
'c', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, groupname)),
#'c', groupname,
'0', group,
'2', vocId
]
)
)
        # 080 - UDC class. The UDC class associated with the subject term
        # 147 Event name. Not used?
        # 148 Term denoting time. To be investigated.
        # skos:prefLabel -> 150 topical term
valueProps = sorted(getValues(g, concept, SKOS.prefLabel, language=language),
key=lambda o: o.value)
if len(valueProps) == 0:
logging.warning("Could not find preflabel for concept %s in language %s. Skipping the whole concept." %
(concept, language))
continue
elif len(valueProps) != 1:
logging.warning("Multiple prefLabels detected for concept %s in language %s. Choosing the first." %
(concept, language))
        # identify the concept type (time, general, place, genre)
        # -> 148, 150, 151, 155, 162
        # currently supports ordinary subject terms (150), YSO places (151) & SLM (155)
tag = "150"
if (concept, SKOS.inScheme, YSO.places) in g:
tag = "151"
elif vocId == "slm":
tag = "155"
rec.add_field(
Field(
tag=tag,
indicators = [' ', ' '],
subfields=[
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProps[0].value)))
#'a', str(valueProps[0].value)
]
)
)
        # skos:altLabel -> 447, 448, 450, 451, 455
        # 450 see reference
        # remove duplicate skos:hiddenLabels
        # do not generate 45X fields that appear in another concept as 15X fields, except for altLabels
        # ASSUMPTION: the deleted concept's skos:prefLabel is NOT written into these fields
        # of its successors, since it is assumed to already be a skos:altLabel when a
        # reference with the old form is wanted
seen_values = set()
for valueProp in sorted(getValues(g, concept, [SKOS.altLabel, YSOMETA.singularPrefLabel,
YSOMETA.singularAltLabel, SKOS.hiddenLabel], language=language),
key=lambda o: str(o.value)):
            # singularPrefLabel, singularAltLabel and hiddenLabel are left out of the 45X fields
            # if, ignoring case, they appear in some 15X field
if valueProp.prop != SKOS.altLabel and str(valueProp.value.lower()) in pref_labels:
continue
if valueProp.prop == SKOS.hiddenLabel:
if str(valueProp.value) in seen_values:
continue
seen_values.add(str(valueProp.value))
tag = "450"
if (concept, SKOS.inScheme, YSO.places) in g:
tag = "451"
elif vocId == "slm":
tag = "455"
rec.add_field(
Field(
tag = tag,
indicators = [' ', ' '],
subfields = [
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProp.value)))
#'a', str(valueProp.value)
]
)
)
        # broader/narrower/related/successor/predecessor/skosext:partOf
        # -> 550 "see also" reference
        # NOTE: the objects are resources only
        # TODO: the inverse of the partOf relations will be added to yso later
        # TODO: several i subfields of different types do not work at the moment
fields = list()
for prop, wval in SEEALSOPROPS.items():
for target in sorted(g.objects(concept, prop)):
if not helper_variables['keepDeprecated'] and \
(target, OWL.deprecated, Literal(True)) in g:
continue # skip deprecated concepts
valueProps = getValues(g, target, SKOS.prefLabel, language=language)
if len(valueProps) == 0:
logging.warning("Could not find preflabel for target %s in language %s. Skipping property %s target for concept %s." %
(target, language, prop, concept))
continue
elif len(valueProps) != 1:
logging.warning("Multiple prefLabels detected for target %s in language %s. Choosing the first." %
(target, language))
label = valueProps[0].value
tag = "550" # alustetaan 550-arvoon
if (target, SKOS.inScheme, YSO.places) in g:
tag = "551"
elif vocId == "slm":
tag = "555"
subfields = []
                # TODO: possible SKOSEXT properties of YSO?
                # TODO: check this for the YSO data model reform
if wval == "i":
if (target, SKOS.inScheme, YSO.places) in g:
if prop == SKOSEXT.partOf:
subfields.extend(('w', 'g'))
elif prop == SKOSEXT.hasPart:
subfields.extend(('w', 'h'))
else:
subfields.extend(('w', wval,
"i", TRANSLATIONS[prop][language]
))
else:
subfields.extend(('w', wval,
"i", TRANSLATIONS[prop][language]
))
else:
subfields.extend(('w', wval))
subfields.extend(('a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(label)))
#str(label)
))
subfields.extend(('0', target))
                # yso-places contain both ISOTHES.broaderPartitive and
                # SKOS.broader redundantly,
                # likewise ISOTHES.narrowerPartitive - SKOS.narrower.
                # For now, to be safe, both are included in the conversion
                # and the duplicate fields are removed here
see_also_field = Field(
tag = tag,
indicators = [' ', ' '],
subfields = subfields
)
if not any(str(see_also_field) == str(f) for f in fields):
fields.append(see_also_field)
        # sort the 5XX fields and add them to the record
for sorted_field in sorted(fields, key=lambda o: (
o.tag,
SORT_5XX_W_ORDER[o.get_subfields("w")[0]] if o.get_subfields("w") else "999",
o.get_subfields('a')[0]
)):
rec.add_field(sorted_field)
        # TODO: JS: put the SLM concept schemes into field 667 for every concept
        # dc:source -> 670 source of the concept or description
        # only for yso-places is a url found in closeMatch printed
        # first look up the Maanmittauslaitos place name register type from the source data
        # and attach it to the geodata URI in closeMatch, if there is exactly one of each
if vocabulary_name == "YSO-PAIKAT":
subfield_list = []
subfield_b = None
geographical_types = set()
for valueProp in sorted(getValues(g, concept, DC.source, language=language), key=lambda o: str(o.value)):
if "Maanmittauslaitoksen paikannimirekisteri; " in valueProp.value:
geographical_type = valueProp.value.split("; ")
if len(geographical_type) > 1:
geographical_type = geographical_type[1]
geographical_types.add(geographical_type)
elif not any(substring in valueProp.value for substring in ["Wikidata",
"Sijaintitietojen lähde",
"Källa för positionsinformation"]):
                    # dc:source has also contained URLs; clean them out here
if not valueProp.value.startswith("http"):
subfield_list.append([
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, valueProp.value))
])
if len(geographical_types) == 1:
subfield_b = next(iter(geographical_types))
            # place name register links are not added to the Swedish vocabulary, because the Swedish explanation is missing
for valueProp in sorted(getValues(g, concept, SKOS.closeMatch, language=language), key=lambda o: str(o.value)):
if "http://paikkatiedot.fi" in valueProp.value:
if subfield_b:
subfield_list.append([
'a', 'Maanmittauslaitoksen paikannimirekisteri',
'b', subfield_b,
'u', valueProp.value
])
for subfields in subfield_list:
rec.add_field(
Field(
tag='670',
indicators = [' ', ' '],
subfields = subfields
)
)
        # skos:definition -> 677 note about the definition
        # the source of the definition can be recorded in subfield $v;
        # for that, a convention for marking this source must be agreed on so that
        # it can be separated from the text programmatically
        # JS suggests: if the text contains the string ". Lähde: ",
        # everything following it goes into subfield $v
        # what if there is a link after the source?
        # JS suggests: the link as the very last word
        # 4.5.2018 - we will come back to this later
        # 6.8.2018 - not yet handled
        # 5.9.2018 - the source of the definition follows the definition, separated by two hyphens (--)
        # leave as-is for now (13 items)
for valueProp in sorted(getValues(g, concept, SKOS.definition, language=language),
key=lambda o: str(o.value)):
subfields = [
'a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProp.value)))
#str(valueProp.value)
]
            # TODO: the encoding of links will be checked/done later
#urls = getURLs(valueProp.value)
#for url in urls:
# subfields.append("u")
# subfields.append(url)
rec.add_field(
Field(
tag='677',
indicators = [' ', ' '],
subfields = subfields
)
)
        # skos:note -> 680 general note, public
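        # ConvertHTMLYSOATags (see above) splits a note containing yso <a> links into an
        # alternating list of subfield markers and values, paired into $i/$a subfields below.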
for valueProp in sorted(getValues(g, concept, [SKOS.note, SKOS.scopeNote, SKOS.example], language=language),
key=lambda o: str(o.value)):
ysoATagParser.initialize()
ysoATagParser.feed(valueProp.value)
if len(ysoATagParser.merkkijono)%2 == 1:
logging.warning("Parsing the property %s for concept %s into seperate subfields failed. Continuing with complete value." % (valueProp.prop, concept))
subfieldCodeValuePair = ("i", valueProp.value.strip())
if len(subfieldCodeValuePair[1]) == 0:
subfieldCodeValuePair = []
else:
subfieldCodeValuePair = [[x[1], ysoATagParser.merkkijono[ind+1].strip()] for (ind,x) in enumerate(ysoATagParser.merkkijono) if ind%2 == 0]
                # remove the last i subfield if it is only one character long (trailing periods)
if subfieldCodeValuePair[-1][0] == "i" and len(subfieldCodeValuePair[-1][1]) <= 1 and len(subfieldCodeValuePair) > 1:
subfieldCodeValuePair[-2][1] = subfieldCodeValuePair[-2][1] + subfieldCodeValuePair[-1][1]
subfieldCodeValuePair = subfieldCodeValuePair[:-1]
subfield_values = []
for subfield in subfieldCodeValuePair:
subfield_values.extend(
(subfield[0], decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, subfield[1])))
#(subfield[0], subfield[1])
)
rec.add_field(
Field(
tag='680',
indicators = [' ', ' '],
subfields = subfield_values
)
)
        # a possible deprecation note is added separately
if deprecatedString:
rec.add_field(
Field(
tag='680',
indicators = [' ', ' '],
subfields = ['i', deprecatedString]
)
)
        # owl:deprecated -> 682 note about a deleted heading form (not repeatable)
        # reference to the new concept(s)
        # successor relation
        # subfield a: the successor's prefLabel, subfield 0: the URI, subfield i: an explanation
        # TODO: whether there is a successor or not; in addition, the type of the deprecated
        # concept (hierarchy etc.) must be considered. The deprecator's note text needs work
        # (the fields must be rethought - NOT skos:scopeNote as is now the case, 4.5.2018)
        # 2018-12-05 it was noticed that the field is not repeatable --> a problem with multiple replacers ($0)
        # the Library of Congress is still working on best practices for the repeatability of subfield $0 this year
        # it was decided to leave the $0 subfields out entirely at this stage
if (concept, OWL.deprecated, Literal(True)) in g:
target = None
labels = []
for target in sorted(g.objects(concept, DCT.isReplacedBy)):
if not helper_variables['keepDeprecated'] and \
(target, OWL.deprecated, Literal(True)) in g:
continue # skip deprecated concepts
valueProps = sorted(getValues(g, target, SKOS.prefLabel, language=language), key=lambda o: str(o.value))
replacedByURIRef = URIRef(target)
if len(valueProps) > 1:
logging.warning("Multiple prefLabels detected for target %s in language %s. Choosing the first." %
(target, language))
elif len(valueProps) == 0:
logging.warning("Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s." %
(target, language, DCT.isReplacedBy, concept))
continue
label = valueProps[0].value
labels.append(valueProps[0].value)
#rec.add_field(
# Field(
# tag = '682',
# indicators = [' ', ' '],
# subfields = [
# 'i', TRANSLATIONS["682iDEFAULT"][language],
# 'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(label))),
# #'a', str(label),
# '0', target
# ]
# )
#)
if len(labels) > 0:
subfield_values = ['i', TRANSLATIONS["682iDEFAULT"][language]]
for label in labels[:-1]:
subfield_values.extend(('a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(label) + ","))
#str(label)
))
subfield_values.extend(('a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(labels[-1])))
#str(label)
))
                #subfield_values.extend(('0', target)) #TODO: follow the Library of Congress' forthcoming guidelines
rec.add_field(
Field(
tag='682',
indicators = [' ', ' '],
subfields = subfield_values
)
)
if helper_variables["write688created"]:
rec.add_field(
Field(
tag = '688',
indicators = [' ', ' '],
subfields = [
'a', TRANSLATIONS["688aCREATED"][language] + ": " + created.strftime('%Y-%m-%d')
]
)
)
if mod and modified:
rec.add_field(
Field(
tag = '688',
indicators = [' ', ' '],
subfields = [
'a', TRANSLATIONS["688aMODIFIED"][language] + ": " + modified.strftime('%Y-%m-%d')
]
)
)
        # all skos:match*es -> 7XX link fields
        # links to all language versions are wanted
        # in addition, the same vocabulary's prefLabels in other languages go here
        # the graphs have been fetched into program memory beforehand at startup
        # 750 $a label, $4 relation type, $2 vocabulary source, $0 uri
        # what about $w? JS: leave it out entirely
        # in the 2.5.2018 meeting it was decided that DCT.spatial is not converted
        # into MARC format
        # 13.8.2018 LCSH/LCGF are handled separately; they have their own directory, stored
        # in the locDirectory variable. Missing loc links are fetched dynamically when
        # needed and added to the directory, from which they are then read into the program
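        # For instance (hypothetical id), a skos:exactMatch to an LCSH URI gets $4 EQ and
        # second indicator 0; the output tag itself is taken from the 1XX field of the
        # downloaded LoC record further below.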
valueProps = getValues(g, concept, [SKOS.prefLabel, SKOS.exactMatch, SKOS.closeMatch,
SKOS.broadMatch, SKOS.narrowMatch,
SKOS.relatedMatch])
        fields = list()  # collect the fields here; the list is sorted at the end
for valueProp in valueProps:
if valueProp.prop == SKOS.prefLabel:
                # filter out same-language labels, which already went into the 1xx fields
                # here valueProp.value exceptionally already contains the desired literal
                # (cf. the other cases, where it is a node)
if valueProp.value.language == language:
continue
matchURIRef = URIRef(concept)
else:
                # turn the match into a URIRef
matchURIRef = URIRef(valueProp.value)
#if not helper_variables['keepDeprecated'] and \
if (matchURIRef, OWL.deprecated, Literal(True)) in g2:
# skip deprecated matches
                # 19.12.2018 this was discussed - at this stage it was decided
                # not to follow dct:isReplacedBy relations and not to add these
                # to the TODO list?
                continue
            # 27.12.2018 should groupingClasses be checked for here as well?
            # Answer: No - these are marked as not-for-use in their own records
second_indicator = "7"
tag = "750"
loc_object = None
if (matchURIRef, SKOS.inScheme, YSO.places) in g2 or \
(matchURIRef, SKOS.inScheme, YSO.places) in g: #or matchType == DCT.spatial:
tag = "751"
            # TODO: named graphs, target the queries at them?
# Comment: if we want to direct queries to spesific graphs, one per vocab,
# that graph needs to be selected here based on the void:uriSpace
sub0 = concept
sub2 = ""
if matchURIRef.startswith(LCSH):
second_indicator = "0"
loc_object = {"prefix": str(LCSH), "id": matchURIRef.split("/")[-1]}
elif matchURIRef.startswith(LCGF):
sub2 = "lcgft"
loc_object = {"prefix": str(LCGF), "id": matchURIRef.split("/")[-1]}
elif matchURIRef.startswith(ALLARS):
if (matchURIRef, RDF.type, ALLARSMETA.GeographicalConcept) in g2: #or matchType == DCT.spatial:
tag = "751"
sub2 = "allars"
#continue
elif matchURIRef.startswith(KOKO):
continue # skip KOKO concepts
elif matchURIRef.startswith(SLM):
tag = "755"
sub2 = "slm"
elif matchURIRef.startswith(YSA):
if (matchURIRef, RDF.type, YSAMETA.GeographicalConcept) in g2: #or matchType == DCT.spatial:
tag = "751"
sub2 = "ysa"
#continue
elif matchURIRef.startswith(YSO):
sub2 = "yso"
else:
second_indicator = "4"
if not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Matched target %s did not belong to any known vocabulary" % (str(matchURIRef)))
# do not put subfield 2 in this case
if not ((matchURIRef, None, None) in g or
(matchURIRef, None, None) in g2):
if not loc_object and not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Matched target %s did not belong to any known vocabulary. Skipping." % (str(matchURIRef)))
continue
sub4 = ""
if valueProp.prop == SKOS.broadMatch:
sub4 = "BM"
elif valueProp.prop == SKOS.narrowMatch:
sub4 = "NM"
elif valueProp.prop == SKOS.exactMatch:
sub4 = "EQ"
elif valueProp.prop == SKOS.prefLabel:
sub4 = "EQ"
            # yso and slm hardcoded - otherwise they should live in some global variable
if sub2 == "yso" or sub2 == "slm" or cs.getboolean("multilanguage", fallback=False):
sub2 = sub2 + "/" + LANGUAGES[valueProp.value.language]
                # the English prefLabels of YSO places originate from Wikidata
if tag == "751" and LANGUAGES[valueProp.value.language] in ["en", "eng"]:
wdEntities = []
closeMatches = getValues(g, concept, [SKOS.closeMatch])
for closeMatch in closeMatches:
if closeMatch.value.startswith(WIKIDATA):
wdEntities.append(URIRef(closeMatch.value))
if len(wdEntities) == 1:
sub0 = wdEntities[0]
sub2 = "wikidata"
sub4 = "~EQ"
else:
sub2 = None
sub4 = None
if sub2 and sub4:
fields.append(
Field(
tag=tag,
indicators = [' ', second_indicator],
subfields = [
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProp.value))),
'4', sub4,
'2', sub2,
'0', sub0
]
)
)
continue
elif valueProp.prop == SKOS.closeMatch:
sub4 = "~EQ"
else:
sub4 = "RM"
            # Library of Congress references are handled separately
if loc_object:
if cs.get("locDirectory", fallback=None) == None:
continue
recordNode = None
local_loc_source = os.path.join(cs.get("locDirectory"), loc_object["id"] + ".marcxml.xml")
if matchURIRef not in loc_update_dict:
loc_update_dict[matchURIRef] = {'date': date.today()}
if os.path.exists(local_loc_source):
loc_update_dict[matchURIRef]['updatable'] = False
else:
loc_update_dict[matchURIRef]['updatable'] = True
if not loc_update_dict[matchURIRef]['updatable'] and os.path.exists(local_loc_source):
try:
with open(local_loc_source, encoding="utf-8") as f:
recordNode = ET.parse(f)
except ET.ParseError as e:
logging.warning("Failed to parse the following file: %s. Skipping the property for concept %s." %
(local_loc_source, concept))
elif update_loc_concepts:
try:
                    # the Library of Congress allows downloading 120 concepts per minute; a short delay ensures the limit is not exceeded
time.sleep(0.5)
with urllib.request.urlopen(loc_object["prefix"] + loc_object["id"] + ".marcxml.xml", timeout=5) as marcxml, \
open(local_loc_source, 'wb') as out_file:
shutil.copyfileobj(marcxml, out_file)
logging.info("Downloaded LCSH link to %s." %
(local_loc_source))
loc_concept_downloaded = True
loc_update_dict[matchURIRef]['updatable'] = False
except urllib.error.URLError as e:
logging.warning('Unable to load the marcxml for %s. Reason: %s. Skipping the property for concept %s.' %
(loc_object["id"], e.reason, concept))
except OSError as e:
logging.warning("Failed to create a file for %s under %s directory. Skipping the property for concept %s." %
(loc_object["id"], cs.get("locDirectory"), concept))
if loc_concept_downloaded:
try:
with open(local_loc_source, encoding="utf-8") as f:
recordNode = ET.parse(f)
except OSError as e:
logging.warning("Failed to read the file for %s under %s directory. Skipping the property for concept %s" %
(loc_object["id"], cs.get("locDirectory"), concept))
except ET.ParseError as e:
logging.warning("Failed to parse the following file: %s. Skipping the property for concept %s." %
(local_loc_source, concept))
if recordNode:
tagNode = None
for tagNumber in LCSH_1XX_FIELDS:
tagNode = recordNode.find("./marcxml:datafield[@tag='" + tagNumber + "']", ET_namespaces)
if tagNode is not None:
                        # take the first one
break
if tagNode is not None:
tag = "7" + tagNode.attrib["tag"][1:]
first_indicator = tagNode.attrib["ind1"]
subfields = []
for child in tagNode:
subfields.extend((child.attrib["code"],
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(child.text)))
#str(child.text)
))
subfields.extend(("4", sub4))
if second_indicator == "7":
subfields.extend(("2", sub2))
subfields.extend(("0", str(matchURIRef)))
fields.append(
Field(
tag = tag,
indicators = [first_indicator, second_indicator],
subfields = subfields
)
)
else:
logging.warning("Could not find any marcxml:datafield objects with a tag number in the following list: %s for the following record: %s. %s" %
(LCSH_1XX_FIELDS, loc_object["id"], "Skipping the property for concept " + concept + "."))
#continue
else:
                # handle all other vocabularies except lcsh & lcgf
prefLabel = None
multipleLanguages = False
languagesEncountered = set()
                sortedPrefLabels = sorted(g2.preferredLabel(matchURIRef,
                                    labelProperties=(SKOS.prefLabel,)))
for label in sortedPrefLabels:
languagesEncountered.add(label[1].language)
if len(languagesEncountered) > 1:
multipleLanguages = True
break
processedLanguages = set()
for type2, prefLabel in sortedPrefLabels:
prefLabelLanguage = prefLabel.language if prefLabel.language != None else ""
if prefLabelLanguage:
if LANGUAGES.get(prefLabelLanguage):
pass
else:
if not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("LANGUAGES dictionary has no key for language '%s' found from the skos:prefLabel %s of target %s. Skipping." %
(prefLabelLanguage, matchURIRef, concept))
continue
if prefLabelLanguage in processedLanguages:
if not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Multiple prefLabels detected for target %s in language %s. Skipping prefLabel %s." %
(matchURIRef, prefLabelLanguage, prefLabel))
continue
processedLanguages.add(prefLabelLanguage)
subfields = [
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(prefLabel))),
#'a', str(prefLabel),
'4', sub4
]
if prefLabelLanguage == "":
multipleLanguagesEnd = ""
else:
# kovakoodattu yso & slm tännekin
multipleLanguagesEnd = "/" + LANGUAGES[prefLabel.language] if sub2 in ["yso", "slm"] or multipleLanguages else ""
if second_indicator != "4":
subfields.extend(("2",
sub2 + multipleLanguagesEnd
))
subfields.extend(("0", str(matchURIRef)))
fields.append(
Field(
tag=tag,
indicators = [' ', second_indicator],
subfields = subfields
)
)
if not prefLabel and not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Could not find preflabel for target %s. Skipping property %s target for concept %s." %
(str(matchURIRef), str(valueProp.prop), concept))
#continue
# sort fields and add them
for sorted_field in sorted(fields, key=lambda o: (
o.tag,
o.value().lower()
)):
rec.add_field(sorted_field)
writer_records_counter += 1
writer.write(rec)
if helper_variables['modificationDates']:
md5 = hashlib.md5()
md5.update(str.encode(str(rec)))
hash = md5.hexdigest()
if str(concept) in modified_dates:
if not hash == modified_dates[str(concept)][1] or loc_concept_downloaded:
modified_dates[str(concept)] = (date.today(), hash)
else:
modified_dates[str(concept)] = (date.today(), hash)
if handle is not sys.stdout:
writer.close()
if helper_variables['modificationDates']:
with open(helper_variables['modificationDates'], 'wb') as output:
pickle.dump(modified_dates, output, pickle.HIGHEST_PROTOCOL)
with open(loc_update_file, 'wb') as output:
pickle.dump(loc_update_dict, output, pickle.HIGHEST_PROTOCOL)
    # finally, output the concepts in pretty-printed XML form
parser = ET.XMLParser(remove_blank_text=True,strip_cdata=False)
file_path = helper_variables["outputFileName"]
tree = ET.parse(file_path, parser)
e = tree.getroot()
handle = open(cs.get("output", fallback=helper_variables["defaultOutputFileName"]), "wb")
handle.write(ET.tostring(e, encoding='UTF-8', pretty_print=True, xml_declaration=True))
if handle is not sys.stdout:
handle.close()
    # log some information about the conversion
if helper_variables['keepDeprecated']:
logging.info(
"Processed %s concepts, from which %s were left out because of deprecation. Wrote %s MARCXML records." %
(incrementor, deprecated_counter, writer_records_counter)
)
else:
logging.info(
"Processed %s concepts. Wrote %s MARCXML records." %
(incrementor, writer_records_counter)
)
if cs.get("outputSpecified", fallback=None) == None:
outputChannel = sys.stdout.buffer
with open(cs.get("output", fallback=helper_variables['defaultOutputFileName']), "rb") as f:
shutil.copyfileobj(f, outputChannel)
if cs.get("outputSpecified", fallback=None) == None:
os.remove(cs.get("output", fallback=helper_variables['defaultOutputFileName']))
logging.info("Conversion completed: %s"%datetime.now().replace(microsecond=0).isoformat())
# MAIN
def main():
settings = ConfigParser(interpolation=ExtendedInterpolation())
args = readCommandLineArguments()
if args.config:
settings.read(args.config)
else:
settings.add_section(args.vocabulary_code.upper())
# for extracting meaningful leading/trailing spaces
# (removing double quotes around the string)
for sec in settings.sections():
for (key, val) in settings.items(sec):
if len(val) > 0 and val[-1] == '"' and val[0] == '"':
settings.set(sec, key, val[1:-1])
cs = args.vocabulary_code.upper() # default config section to vocabulary code
settings.set("DEFAULT", "vocabulary_code", cs.lower())
    # Used as the MARC code in tag 040 subfield f
    # and for the 7XX foreign-language prefLabels
graphi = Graph()
other_graphs = Graph()
if args.config_section:
# override default config section
cs = args.config_section.upper()
# prepare settings
# configure logging
loglevel = logging.INFO
logFormatter = logging.Formatter('%(levelname)s - %(message)s')
if args.log_file:
logging.basicConfig(filename=args.log_file, filemode="w")
logger = logging.getLogger()
logger.setLevel(loglevel)
logger.propagate = False
logging.info("Conversion started: %s"%datetime.now().replace(microsecond=0).isoformat())
if args.endpoint:
settings.set(cs, "endpoint", args.endpoint)
# normalize endpoint graphs
if args.endpoint_graphs:
settings.set(cs, "endpointGraphs", ",".join(readConfigVariable(args.endpoint_graphs, " ")))
elif settings.get(cs, "endpointGraphs", fallback=None) != None:
settings.set(cs, "endpointGraphs", ",".join(readConfigVariable(settings.get(cs, "endpointGraphs"), ",")))
else:
settings.set(cs, "endpointGraphs", ",".join(ENDPOINTGRAPHS))
if args.ignore_other_graph_warnings:
settings.set(cs, "ignoreOtherGraphWarnings", "true")
if args.grouping_classes:
settings.set(cs, "groupingClasses", ",".join(readConfigVariable(args.grouping_classes, " ")))
elif settings.get(cs, "groupingClasses", fallback=None) != None:
settings.set(cs, "groupingClasses", ",".join(readConfigVariable(settings.get(cs, "groupingClasses"), ",")))
else:
settings.set(cs, "groupingClasses", "")
if not args.input:
logging.error("Input is required.")
sys.exit(2)
if args.input_format:
settings.set(cs, "inputFormat", args.input_format)
graphi = Graph()
graph_loaded = False
if args.pickle_vocabulary:
pickleFile = args.pickle_vocabulary
else:
pickleFile = settings.get(cs, "pickleVocabulary", fallback=None)
if pickleFile:
if os.path.isfile(pickleFile):
timestamp = os.path.getmtime(pickleFile)
file_date = date.fromtimestamp(timestamp)
if file_date == date.today():
with open(pickleFile, 'rb') as input_file:
try:
graphi = pickle.load(input_file)
graph_loaded = True
except EOFError:
logging.error("EOFError in "%pickleFile)
if not graph_loaded:
graphi += Graph().parse(args.input, format=settings.get(cs, "inputFormat", fallback="turtle"))
if pickleFile:
with open(pickleFile, 'wb') as output:
pickle.dump(graphi, output, pickle.HIGHEST_PROTOCOL)
if args.output:
settings.set(cs, "output", args.output)
settings.set(cs, "outputSpecified", "true")
if args.languages != None:
settings.set(cs, "languages", ",".join(readConfigVariable(args.languages, " ")))
elif settings.get(cs, "languages", fallback=None) == None:
logging.error("Language is required. Set with --languages.")
sys.exit(2)
else:
settings.set(cs, "languages", ",".join(readConfigVariable(settings.get(cs, "languages"), ",")))
if args.multilanguage_vocabulary:
settings.set(cs, "multilanguage", "true")
if args.loc_directory:
settings.set(cs, "locDirectory", args.loc_directory)
if args.keep_modified_after and not args.modification_dates:
logging.error('Arguments required with --keep_modified_after: --modification_dates')
sys.exit(2)
if args.modification_dates:
settings.set(cs, "modificationDates", args.modification_dates)
if args.keep_modified_after:
settings.set(cs, "keepModifiedAfter", args.keep_modified_after)
modifiedLimit = settings.get(cs, "keepModifiedAfter")
if modifiedLimit.lower() == "all":
pass
elif modifiedLimit.lower() == "none":
pass
else:
try:
datetime.date(datetime.strptime(modifiedLimit, "%Y-%m-%d"))
except ValueError:
logging.error("Cannot interpret 'keepModifiedAfter' value set in configuration file or given as a CLI parameter. Possible values are 'ALL', 'NONE' and ISO 8601 format for dates.")
sys.exit(2)
if args.default_creation_date:
settings.set(cs, "defaultCreationDate", args.default_creation_date)
if settings.get(cs, "defaultCreationDate", fallback=None) != None:
try:
datetime.date(datetime.strptime(settings.get(cs, "defaultCreationDate"), "%Y-%m-%d"))
except ValueError:
logging.error("Cannot interpret 'defaultCreationDate' value set in configuration file or given as a CLI parameter. Possible values: ISO 8601 format for dates.")
sys.exit(2)
if args.keep_deprecated_after:
settings.set(cs, "keepDeprecatedAfter", args.keep_deprecated_after)
if settings.get(cs, "keepDeprecatedAfter", fallback=None) != None:
deprecationLimit = settings.get(cs, "keepDeprecatedAfter")
if deprecationLimit.lower() == "all":
pass
elif deprecationLimit.lower() == "none":
pass
else:
try:
datetime.date(datetime.strptime(deprecationLimit, "%Y-%m-%d"))
except ValueError:
logging.error("Cannot interpret 'keepDeprecatedAfter' value set in configuration file or given as a CLI parameter. Possible values are 'ALL', 'NONE' and ISO 8601 format for dates.")
sys.exit(2)
if settings.get(cs, "endpointGraphs"):
if settings.get(cs, "endpoint", fallback=None) == None:
logging.warning("No endpoint address for endpoint graphs (set with --endpoint). Skipping endpoint graphs.")
else:
other_graphs += readEndpointGraphs(settings[cs])
for lang in settings.get(cs, "languages").split(","):
convert(settings[cs], cs, lang, graphi, other_graphs)
if __name__ == "__main__":
try:
main()
except BaseException as e:
logging.exception(e)
|
[
"pickle.dump",
"argparse.ArgumentParser",
"pymarc.Field",
"logging.Formatter",
"os.path.isfile",
"pickle.load",
"urllib.parse.urlparse",
"unicodedata.normalize",
"logging.error",
"logging.warning",
"os.path.exists",
"urllib.request.urlopen",
"lxml.etree.XMLParser",
"datetime.timedelta",
"configparser.ExtendedInterpolation",
"lxml.etree.parse",
"lxml.etree.tostring",
"shutil.copyfileobj",
"datetime.datetime.now",
"rdflib.Graph",
"hashlib.md5",
"rdflib.Literal",
"datetime.date.today",
"rdflib.URIRef",
"rdflib.Namespace",
"time.sleep",
"datetime.datetime.strptime",
"pymarc.XMLWriter",
"sys.exit",
"logging.exception",
"logging.basicConfig",
"pymarc.Record",
"logging.info",
"collections.namedtuple",
"os.path.getmtime",
"datetime.date.fromtimestamp",
"logging.getLogger"
] |
[((1435, 1475), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/yso/"""'], {}), "('http://www.yso.fi/onto/yso/')\n", (1444, 1475), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1484, 1529), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/yso-meta/"""'], {}), "('http://www.yso.fi/onto/yso-meta/')\n", (1493, 1529), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1545, 1592), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/yso-paikat/"""'], {}), "('http://www.yso.fi/onto/yso-paikat/')\n", (1554, 1592), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1597, 1637), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/ysa/"""'], {}), "('http://www.yso.fi/onto/ysa/')\n", (1606, 1637), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1646, 1691), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/ysa-meta/"""'], {}), "('http://www.yso.fi/onto/ysa-meta/')\n", (1655, 1691), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1699, 1742), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/allars/"""'], {}), "('http://www.yso.fi/onto/allars/')\n", (1708, 1742), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1754, 1802), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/allars-meta/"""'], {}), "('http://www.yso.fi/onto/allars-meta/')\n", (1763, 1802), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1808, 1849), 'rdflib.Namespace', 'Namespace', (['"""http://www.yso.fi/onto/koko/"""'], {}), "('http://www.yso.fi/onto/koko/')\n", (1817, 1849), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1855, 1907), 'rdflib.Namespace', 'Namespace', (['"""http://id.loc.gov/authorities/subjects/"""'], {}), "('http://id.loc.gov/authorities/subjects/')\n", (1864, 1907), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1913, 1967), 'rdflib.Namespace', 'Namespace', (['"""http://id.loc.gov/authorities/genreForms/"""'], {}), "('http://id.loc.gov/authorities/genreForms/')\n", (1922, 1967), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((1973, 2021), 'rdflib.Namespace', 'Namespace', (['"""http://rdaregistry.info/Elements/u/"""'], {}), "('http://rdaregistry.info/Elements/u/')\n", (1982, 2021), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((2030, 2078), 'rdflib.Namespace', 'Namespace', (['"""http://purl.org/iso25964/skos-thes#"""'], {}), "('http://purl.org/iso25964/skos-thes#')\n", (2039, 2078), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((2087, 2140), 'rdflib.Namespace', 'Namespace', (['"""http://purl.org/finnonto/schema/skosext#"""'], {}), "('http://purl.org/finnonto/schema/skosext#')\n", (2096, 2140), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((2145, 2190), 'rdflib.Namespace', 'Namespace', (['"""http://urn.fi/URN:NBN:fi:au:slm:"""'], {}), "('http://urn.fi/URN:NBN:fi:au:slm:')\n", (2154, 2190), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((2195, 2228), 'rdflib.Namespace', 'Namespace', (['"""http://udcdata.info/"""'], {}), "('http://udcdata.info/')\n", (2204, 2228), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((2238, 2282), 'rdflib.Namespace', 'Namespace', 
(['"""http://www.wikidata.org/entity/"""'], {}), "('http://www.wikidata.org/entity/')\n", (2247, 2282), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((4331, 4373), 'collections.namedtuple', 'namedtuple', (['"""ValueProp"""', "['value', 'prop']"], {}), "('ValueProp', ['value', 'prop'])\n", (4341, 4373), False, 'from collections import namedtuple\n'), ((4435, 4544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Program for converting Finto SKOS-vocabularies into MARC (.mrcx)."""'}), "(description=\n 'Program for converting Finto SKOS-vocabularies into MARC (.mrcx).')\n", (4458, 4544), False, 'import argparse\n'), ((8842, 8849), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (8847, 8849), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((20179, 20289), 'logging.info', 'logging.info', (['("Processing vocabulary with vocabulary code \'%s\' in language \'%s\'" % (\n vocId, language))'], {}), '(\n "Processing vocabulary with vocabulary code \'%s\' in language \'%s\'" % (\n vocId, language))\n', (20191, 20289), False, 'import logging\n'), ((20634, 20651), 'pymarc.XMLWriter', 'XMLWriter', (['handle'], {}), '(handle)\n', (20643, 20651), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((21167, 21198), 'os.path.exists', 'os.path.exists', (['loc_update_file'], {}), '(loc_update_file)\n', (21181, 21198), False, 'import os\n'), ((67015, 67070), 'lxml.etree.XMLParser', 'ET.XMLParser', ([], {'remove_blank_text': '(True)', 'strip_cdata': '(False)'}), '(remove_blank_text=True, strip_cdata=False)\n', (67027, 67070), True, 'from lxml import etree as ET\n'), ((67132, 67159), 'lxml.etree.parse', 'ET.parse', (['file_path', 'parser'], {}), '(file_path, parser)\n', (67140, 67159), True, 'from lxml import etree as ET\n'), ((69199, 69206), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (69204, 69206), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((69226, 69233), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (69231, 69233), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((69449, 69497), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (69466, 69497), False, 'import logging\n'), ((69609, 69628), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (69626, 69628), False, 'import logging\n'), ((70958, 70965), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (70963, 70965), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((14082, 14109), 'urllib.parse.urlparse', 'urllib.parse.urlparse', (['word'], {}), '(word)\n', (14103, 14109), False, 'import urllib\n'), ((19721, 19774), 'os.path.isfile', 'os.path.isfile', (["helper_variables['modificationDates']"], {}), "(helper_variables['modificationDates'])\n", (19735, 19774), False, 'import os\n'), ((21220, 21253), 'os.path.getmtime', 'os.path.getmtime', (['loc_update_file'], {}), '(loc_update_file)\n', (21236, 21253), False, 'import os\n'), ((21274, 21303), 'datetime.date.fromtimestamp', 'date.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (21292, 21303), False, 'from datetime import datetime, date, timedelta\n'), ((21628, 21640), 'datetime.date.today', 'date.today', ([], {}), '()\n', (21638, 21640), False, 'from datetime import datetime, date, timedelta\n'), ((21643, 21660), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (21652, 21660), False, 'from datetime 
import datetime, date, timedelta\n'), ((24461, 24469), 'pymarc.Record', 'Record', ([], {}), '()\n', (24467, 24469), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((66873, 66934), 'pickle.dump', 'pickle.dump', (['loc_update_dict', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(loc_update_dict, output, pickle.HIGHEST_PROTOCOL)\n', (66884, 66934), False, 'import pickle\n'), ((67294, 67367), 'lxml.etree.tostring', 'ET.tostring', (['e'], {'encoding': '"""UTF-8"""', 'pretty_print': '(True)', 'xml_declaration': '(True)'}), "(e, encoding='UTF-8', pretty_print=True, xml_declaration=True)\n", (67305, 67367), True, 'from lxml import etree as ET\n'), ((67521, 67707), 'logging.info', 'logging.info', (["('Processed %s concepts, from which %s were left out because of deprecation. Wrote %s MARCXML records.'\n % (incrementor, deprecated_counter, writer_records_counter))"], {}), "(\n 'Processed %s concepts, from which %s were left out because of deprecation. Wrote %s MARCXML records.'\n % (incrementor, deprecated_counter, writer_records_counter))\n", (67533, 67707), False, 'import logging\n'), ((67750, 67859), 'logging.info', 'logging.info', (["('Processed %s concepts. Wrote %s MARCXML records.' % (incrementor,\n writer_records_counter))"], {}), "('Processed %s concepts. Wrote %s MARCXML records.' % (\n incrementor, writer_records_counter))\n", (67762, 67859), False, 'import logging\n'), ((69533, 69590), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'args.log_file', 'filemode': '"""w"""'}), "(filename=args.log_file, filemode='w')\n", (69552, 69590), False, 'import logging\n'), ((70802, 70837), 'logging.error', 'logging.error', (['"""Input is required."""'], {}), "('Input is required.')\n", (70815, 70837), False, 'import logging\n'), ((70846, 70857), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (70854, 70857), False, 'import sys\n'), ((71183, 71209), 'os.path.isfile', 'os.path.isfile', (['pickleFile'], {}), '(pickleFile)\n', (71197, 71209), False, 'import os\n'), ((72701, 72790), 'logging.error', 'logging.error', (['"""Arguments required with --keep_modified_after: --modification_dates"""'], {}), "(\n 'Arguments required with --keep_modified_after: --modification_dates')\n", (72714, 72790), False, 'import logging\n'), ((72794, 72805), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (72802, 72805), False, 'import sys\n'), ((17585, 17594), 'rdflib.URIRef', 'URIRef', (['x'], {}), '(x)\n', (17591, 17594), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((17715, 17724), 'rdflib.URIRef', 'URIRef', (['x'], {}), '(x)\n', (17721, 17724), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((21328, 21340), 'datetime.date.today', 'date.today', ([], {}), '()\n', (21338, 21340), False, 'from datetime import datetime, date, timedelta\n'), ((23275, 23354), 'logging.warning', 'logging.warning', (["('More than 10 000 updates in Library of Congress feed %s' % ns)"], {}), "('More than 10 000 updates in Library of Congress feed %s' % ns)\n", (23290, 23354), False, 'import logging\n'), ((23784, 23837), 'logging.info', 'logging.info', (["('Processing %sth concept' % incrementor)"], {}), "('Processing %sth concept' % incrementor)\n", (23796, 23837), False, 'import logging\n'), ((29190, 29267), 'pymarc.Field', 'Field', ([], {'tag': '"""024"""', 'indicators': "['7', ' ']", 'subfields': "['a', concept, '2', 'uri']"}), "(tag='024', indicators=['7', ' '], subfields=['a', concept, '2', 'uri'])\n", (29195, 29267), False, 
'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((33339, 33473), 'logging.warning', 'logging.warning', (["('Could not find preflabel for concept %s in language %s. Skipping the whole concept.'\n % (concept, language))"], {}), "(\n 'Could not find preflabel for concept %s in language %s. Skipping the whole concept.'\n % (concept, language))\n", (33354, 33473), False, 'import logging\n'), ((66166, 66179), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (66177, 66179), False, 'import hashlib\n'), ((66755, 66815), 'pickle.dump', 'pickle.dump', (['modified_dates', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(modified_dates, output, pickle.HIGHEST_PROTOCOL)\n', (66766, 66815), False, 'import pickle\n'), ((68101, 68137), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f', 'outputChannel'], {}), '(f, outputChannel)\n', (68119, 68137), False, 'import shutil\n'), ((68445, 68468), 'configparser.ExtendedInterpolation', 'ExtendedInterpolation', ([], {}), '()\n', (68466, 68468), False, 'from configparser import ConfigParser, ExtendedInterpolation\n'), ((71235, 71263), 'os.path.getmtime', 'os.path.getmtime', (['pickleFile'], {}), '(pickleFile)\n', (71251, 71263), False, 'import os\n'), ((71288, 71317), 'datetime.date.fromtimestamp', 'date.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (71306, 71317), False, 'from datetime import datetime, date, timedelta\n'), ((72245, 72305), 'logging.error', 'logging.error', (['"""Language is required. Set with --languages."""'], {}), "('Language is required. Set with --languages.')\n", (72258, 72305), False, 'import logging\n'), ((72314, 72325), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (72322, 72325), False, 'import sys\n'), ((74957, 75074), 'logging.warning', 'logging.warning', (['"""No endpoint address for endpoint graphs (set with --endpoint). Skipping endpoint graphs."""'], {}), "(\n 'No endpoint address for endpoint graphs (set with --endpoint). Skipping endpoint graphs.'\n )\n", (74972, 75074), False, 'import logging\n'), ((75377, 75397), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (75394, 75397), False, 'import logging\n'), ((10031, 10145), 'logging.warning', 'logging.warning', (["('SPARQL endpoint now answering within timeout limit. ' +\n 'Skipping querying linked concepts.')"], {}), "('SPARQL endpoint now answering within timeout limit. ' +\n 'Skipping querying linked concepts.')\n", (10046, 10145), False, 'import logging\n'), ((21495, 21518), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (21506, 21518), False, 'import pickle\n'), ((25078, 25145), 'logging.error', 'logging.error', (["('Modification date invalid in concept %s ' % concept)"], {}), "('Modification date invalid in concept %s ' % concept)\n", (25091, 25145), False, 'import logging\n'), ((25575, 25638), 'logging.error', 'logging.error', (["('Creation date invalid in concept %s ' % concept)"], {}), "('Creation date invalid in concept %s ' % concept)\n", (25588, 25638), False, 'import logging\n'), ((26273, 26286), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (26280, 26286), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((33546, 33676), 'logging.warning', 'logging.warning', (["('Multiple prefLabels detected for concept %s in language %s. Choosing the first.'\n % (concept, language))"], {}), "(\n 'Multiple prefLabels detected for concept %s in language %s. 
Choosing the first.'\n % (concept, language))\n", (33561, 33676), False, 'import logging\n'), ((39422, 39480), 'pymarc.Field', 'Field', ([], {'tag': 'tag', 'indicators': "[' ', ' ']", 'subfields': 'subfields'}), "(tag=tag, indicators=[' ', ' '], subfields=subfields)\n", (39427, 39480), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((43989, 44049), 'pymarc.Field', 'Field', ([], {'tag': '"""677"""', 'indicators': "[' ', ' ']", 'subfields': 'subfields'}), "(tag='677', indicators=[' ', ' '], subfields=subfields)\n", (43994, 44049), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((44570, 44729), 'logging.warning', 'logging.warning', (["('Parsing the property %s for concept %s into seperate subfields failed. Continuing with complete value.'\n % (valueProp.prop, concept))"], {}), "(\n 'Parsing the property %s for concept %s into seperate subfields failed. Continuing with complete value.'\n % (valueProp.prop, concept))\n", (44585, 44729), False, 'import logging\n'), ((45874, 45940), 'pymarc.Field', 'Field', ([], {'tag': '"""680"""', 'indicators': "[' ', ' ']", 'subfields': 'subfield_values'}), "(tag='680', indicators=[' ', ' '], subfields=subfield_values)\n", (45879, 45940), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((46166, 46240), 'pymarc.Field', 'Field', ([], {'tag': '"""680"""', 'indicators': "[' ', ' ']", 'subfields': "['i', deprecatedString]"}), "(tag='680', indicators=[' ', ' '], subfields=['i', deprecatedString])\n", (46171, 46240), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((47141, 47154), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (47148, 47154), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((47643, 47657), 'rdflib.URIRef', 'URIRef', (['target'], {}), '(target)\n', (47649, 47657), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((52031, 52046), 'rdflib.URIRef', 'URIRef', (['concept'], {}), '(concept)\n', (52037, 52046), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((52138, 52161), 'rdflib.URIRef', 'URIRef', (['valueProp.value'], {}), '(valueProp.value)\n', (52144, 52161), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((71346, 71358), 'datetime.date.today', 'date.today', ([], {}), '()\n', (71356, 71358), False, 'from datetime import datetime, date, timedelta\n'), ((71704, 71711), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (71709, 71711), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((71879, 71931), 'pickle.dump', 'pickle.dump', (['graphi', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(graphi, output, pickle.HIGHEST_PROTOCOL)\n', (71890, 71931), False, 'import pickle\n'), ((73898, 74068), 'logging.error', 'logging.error', (['"""Cannot interpret \'defaultCreationDate\' value set in configuration file or given as a CLI parameter. Possible values: ISO 8601 format for dates."""'], {}), '(\n "Cannot interpret \'defaultCreationDate\' value set in configuration file or given as a CLI parameter. 
Possible values: ISO 8601 format for dates."\n )\n', (73911, 74068), False, 'import logging\n'), ((74071, 74082), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (74079, 74082), False, 'import sys\n'), ((19918, 19942), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (19929, 19942), False, 'import pickle\n'), ((21564, 21611), 'logging.error', 'logging.error', (["('EOFError in ' % loc_update_file)"], {}), "('EOFError in ' % loc_update_file)\n", (21577, 21611), False, 'import logging\n'), ((21971, 22015), 'urllib.request.urlopen', 'urllib.request.urlopen', (['file_path'], {'timeout': '(5)'}), '(file_path, timeout=5)\n', (21993, 22015), False, 'import urllib\n'), ((22062, 22080), 'lxml.etree.parse', 'ET.parse', (['atom_xml'], {}), '(atom_xml)\n', (22070, 22080), True, 'from lxml import etree as ET\n'), ((23166, 23232), 'logging.warning', 'logging.warning', (['"""Failed to parse Library of Congress update feed"""'], {}), "('Failed to parse Library of Congress update feed')\n", (23181, 23232), False, 'import logging\n'), ((23598, 23609), 'rdflib.URIRef', 'URIRef', (['uri'], {}), '(uri)\n', (23604, 23609), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((24043, 24056), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (24050, 24056), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((37090, 37253), 'logging.warning', 'logging.warning', (["('Could not find preflabel for target %s in language %s. Skipping property %s target for concept %s.'\n % (target, language, prop, concept))"], {}), "(\n 'Could not find preflabel for target %s in language %s. Skipping property %s target for concept %s.'\n % (target, language, prop, concept))\n", (37105, 37253), False, 'import logging\n'), ((42459, 42519), 'pymarc.Field', 'Field', ([], {'tag': '"""670"""', 'indicators': "[' ', ' ']", 'subfields': 'subfields'}), "(tag='670', indicators=[' ', ' '], subfields=subfields)\n", (42464, 42519), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((47718, 47846), 'logging.warning', 'logging.warning', (["('Multiple prefLabels detected for target %s in language %s. Choosing the first.'\n % (target, language))"], {}), "(\n 'Multiple prefLabels detected for target %s in language %s. 
Choosing the first.'\n % (target, language))\n", (47733, 47846), False, 'import logging\n'), ((49638, 49704), 'pymarc.Field', 'Field', ([], {'tag': '"""682"""', 'indicators': "[' ', ' ']", 'subfields': 'subfield_values'}), "(tag='682', indicators=[' ', ' '], subfields=subfield_values)\n", (49643, 49704), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((57857, 57889), 'os.path.exists', 'os.path.exists', (['local_loc_source'], {}), '(local_loc_source)\n', (57871, 57889), False, 'import os\n'), ((58133, 58165), 'os.path.exists', 'os.path.exists', (['local_loc_source'], {}), '(local_loc_source)\n', (58147, 58165), False, 'import os\n'), ((66545, 66557), 'datetime.date.today', 'date.today', ([], {}), '()\n', (66555, 66557), False, 'from datetime import datetime, date, timedelta\n'), ((19996, 20101), 'logging.error', 'logging.error', (["('The file %s for modification dates is empty ' % helper_variables[\n 'modificationDates'])"], {}), "('The file %s for modification dates is empty ' %\n helper_variables['modificationDates'])\n", (20009, 20101), False, 'import logging\n'), ((20116, 20127), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (20124, 20127), False, 'import sys\n'), ((31411, 31581), 'logging.warning', 'logging.warning', (["('Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s.'\n % (group, language, SKOS.member, concept))"], {}), "(\n 'Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s.'\n % (group, language, SKOS.member, concept))\n", (31426, 31581), False, 'import logging\n'), ((37358, 37486), 'logging.warning', 'logging.warning', (["('Multiple prefLabels detected for target %s in language %s. Choosing the first.'\n % (target, language))"], {}), "(\n 'Multiple prefLabels detected for target %s in language %s. Choosing the first.'\n % (target, language))\n", (37373, 37486), False, 'import logging\n'), ((47923, 48099), 'logging.warning', 'logging.warning', (["('Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s.'\n % (target, language, DCT.isReplacedBy, concept))"], {}), "(\n 'Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s.'\n % (target, language, DCT.isReplacedBy, concept))\n", (47938, 48099), False, 'import logging\n'), ((52276, 52289), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (52283, 52289), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((57820, 57832), 'datetime.date.today', 'date.today', ([], {}), '()\n', (57830, 57832), False, 'from datetime import datetime, date, timedelta\n'), ((62103, 62350), 'logging.warning', 'logging.warning', (["('Could not find any marcxml:datafield objects with a tag number in the following list: %s for the following record: %s. %s'\n % (LCSH_1XX_FIELDS, loc_object['id'], \n 'Skipping the property for concept ' + concept + '.'))"], {}), "(\n 'Could not find any marcxml:datafield objects with a tag number in the following list: %s for the following record: %s. 
%s'\n % (LCSH_1XX_FIELDS, loc_object['id'], \n 'Skipping the property for concept ' + concept + '.'))\n", (62118, 62350), False, 'import logging\n'), ((65260, 65331), 'pymarc.Field', 'Field', ([], {'tag': 'tag', 'indicators': "[' ', second_indicator]", 'subfields': 'subfields'}), "(tag=tag, indicators=[' ', second_indicator], subfields=subfields)\n", (65265, 65331), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((66457, 66469), 'datetime.date.today', 'date.today', ([], {}), '()\n', (66467, 66469), False, 'from datetime import datetime, date, timedelta\n'), ((71483, 71506), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (71494, 71506), False, 'import pickle\n'), ((73264, 73308), 'datetime.datetime.strptime', 'datetime.strptime', (['modifiedLimit', '"""%Y-%m-%d"""'], {}), "(modifiedLimit, '%Y-%m-%d')\n", (73281, 73308), False, 'from datetime import datetime, date, timedelta\n'), ((73357, 73546), 'logging.error', 'logging.error', (['"""Cannot interpret \'keepModifiedAfter\' value set in configuration file or given as a CLI parameter. Possible values are \'ALL\', \'NONE\' and ISO 8601 format for dates."""'], {}), '(\n "Cannot interpret \'keepModifiedAfter\' value set in configuration file or given as a CLI parameter. Possible values are \'ALL\', \'NONE\' and ISO 8601 format for dates."\n )\n', (73370, 73546), False, 'import logging\n'), ((73553, 73564), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (73561, 73564), False, 'import sys\n'), ((74527, 74574), 'datetime.datetime.strptime', 'datetime.strptime', (['deprecationLimit', '"""%Y-%m-%d"""'], {}), "(deprecationLimit, '%Y-%m-%d')\n", (74544, 74574), False, 'from datetime import datetime, date, timedelta\n'), ((74623, 74814), 'logging.error', 'logging.error', (['"""Cannot interpret \'keepDeprecatedAfter\' value set in configuration file or given as a CLI parameter. Possible values are \'ALL\', \'NONE\' and ISO 8601 format for dates."""'], {}), '(\n "Cannot interpret \'keepDeprecatedAfter\' value set in configuration file or given as a CLI parameter. Possible values are \'ALL\', \'NONE\' and ISO 8601 format for dates."\n )\n', (74636, 74814), False, 'import logging\n'), ((74821, 74832), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (74829, 74832), False, 'import sys\n'), ((27693, 27744), 'datetime.datetime.strptime', 'datetime.strptime', (['deprecatedDateString', '"""%d.%m.%Y"""'], {}), "(deprecatedDateString, '%d.%m.%Y')\n", (27710, 27744), False, 'from datetime import datetime, date, timedelta\n'), ((30797, 30810), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (30804, 30810), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((31702, 31835), 'logging.warning', 'logging.warning', (["('Multiple prefLabels detected for concept %s in language %s. Taking the first only.'\n % (concept, language))"], {}), "(\n 'Multiple prefLabels detected for concept %s in language %s. 
Taking the first only.'\n % (concept, language))\n", (31717, 31835), False, 'import logging\n'), ((32141, 32294), 'logging.warning', 'logging.warning', (["('Tried to parse group number for group %s from concept %s in language %s but failed.'\n % (group, valueProps[0].value, language))"], {}), "(\n 'Tried to parse group number for group %s from concept %s in language %s but failed.'\n % (group, valueProps[0].value, language))\n", (32156, 32294), False, 'import logging\n'), ((36850, 36863), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (36857, 36863), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((45698, 45752), 'unicodedata.normalize', 'unicodedata.normalize', (['NORMALIZATION_FORM', 'subfield[1]'], {}), '(NORMALIZATION_FORM, subfield[1])\n', (45719, 45752), False, 'import unicodedata\n'), ((47389, 47402), 'rdflib.Literal', 'Literal', (['(True)'], {}), '(True)\n', (47396, 47402), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n'), ((58309, 58320), 'lxml.etree.parse', 'ET.parse', (['f'], {}), '(f)\n', (58317, 58320), True, 'from lxml import etree as ET\n'), ((58392, 58528), 'logging.warning', 'logging.warning', (["('Failed to parse the following file: %s. Skipping the property for concept %s.'\n % (local_loc_source, concept))"], {}), "(\n 'Failed to parse the following file: %s. Skipping the property for concept %s.'\n % (local_loc_source, concept))\n", (58407, 58528), False, 'import logging\n'), ((58773, 58788), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (58783, 58788), False, 'import time\n'), ((60029, 60040), 'lxml.etree.parse', 'ET.parse', (['f'], {}), '(f)\n', (60037, 60040), True, 'from lxml import etree as ET\n'), ((60366, 60502), 'logging.warning', 'logging.warning', (["('Failed to parse the following file: %s. Skipping the property for concept %s.'\n % (local_loc_source, concept))"], {}), "(\n 'Failed to parse the following file: %s. Skipping the property for concept %s.'\n % (local_loc_source, concept))\n", (60381, 60502), False, 'import logging\n'), ((61810, 61898), 'pymarc.Field', 'Field', ([], {'tag': 'tag', 'indicators': '[first_indicator, second_indicator]', 'subfields': 'subfields'}), '(tag=tag, indicators=[first_indicator, second_indicator], subfields=\n subfields)\n', (61815, 61898), False, 'from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array\n'), ((64004, 64160), 'logging.warning', 'logging.warning', (["('Multiple prefLabels detected for target %s in language %s. Skipping prefLabel %s.'\n % (matchURIRef, prefLabelLanguage, prefLabel))"], {}), "(\n 'Multiple prefLabels detected for target %s in language %s. 
Skipping prefLabel %s.'\n % (matchURIRef, prefLabelLanguage, prefLabel))\n", (64019, 64160), False, 'import logging\n'), ((68328, 68342), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (68340, 68342), False, 'from datetime import datetime, date, timedelta\n'), ((69730, 69744), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (69742, 69744), False, 'from datetime import datetime, date, timedelta\n'), ((71612, 71654), 'logging.error', 'logging.error', (["('EOFError in ' % pickleFile)"], {}), "('EOFError in ' % pickleFile)\n", (71625, 71654), False, 'import logging\n'), ((58818, 58913), 'urllib.request.urlopen', 'urllib.request.urlopen', (["(loc_object['prefix'] + loc_object['id'] + '.marcxml.xml')"], {'timeout': '(5)'}), "(loc_object['prefix'] + loc_object['id'] +\n '.marcxml.xml', timeout=5)\n", (58840, 58913), False, 'import urllib\n'), ((59022, 59059), 'shutil.copyfileobj', 'shutil.copyfileobj', (['marcxml', 'out_file'], {}), '(marcxml, out_file)\n', (59040, 59059), False, 'import shutil\n'), ((59088, 59150), 'logging.info', 'logging.info', (["('Downloaded LCSH link to %s.' % local_loc_source)"], {}), "('Downloaded LCSH link to %s.' % local_loc_source)\n", (59100, 59150), False, 'import logging\n'), ((59400, 59553), 'logging.warning', 'logging.warning', (["('Unable to load the marcxml for %s. Reason: %s. Skipping the property for concept %s.'\n % (loc_object['id'], e.reason, concept))"], {}), "(\n 'Unable to load the marcxml for %s. Reason: %s. Skipping the property for concept %s.'\n % (loc_object['id'], e.reason, concept))\n", (59415, 59553), False, 'import logging\n'), ((63539, 63718), 'logging.warning', 'logging.warning', (['("LANGUAGES dictionary has no key for language \'%s\' found from the skos:prefLabel %s of target %s. Skipping."\n % (prefLabelLanguage, matchURIRef, concept))'], {}), '(\n "LANGUAGES dictionary has no key for language \'%s\' found from the skos:prefLabel %s of target %s. Skipping."\n % (prefLabelLanguage, matchURIRef, concept))\n', (63554, 63718), False, 'import logging\n'), ((22365, 22413), 'datetime.datetime.strptime', 'datetime.strptime', (['updated.text[:10]', '"""%Y-%m-%d"""'], {}), "(updated.text[:10], '%Y-%m-%d')\n", (22382, 22413), False, 'from datetime import datetime, date, timedelta\n'), ((28101, 28152), 'datetime.datetime.strptime', 'datetime.strptime', (['deprecatedDateString', '"""%Y-%m-%d"""'], {}), "(deprecatedDateString, '%Y-%m-%d')\n", (28118, 28152), False, 'from datetime import datetime, date, timedelta\n'), ((28465, 28560), 'logging.warning', 'logging.warning', (["('Converting deprecated date failed for concept %s. Proceeding.' % concept)"], {}), "(\n 'Converting deprecated date failed for concept %s. Proceeding.' 
% concept)\n", (28480, 28560), False, 'import logging\n'), ((28851, 28901), 'datetime.datetime.strptime', 'datetime.strptime', (['DEFAULTCREATIONDATE', '"""%Y-%m-%d"""'], {}), "(DEFAULTCREATIONDATE, '%Y-%m-%d')\n", (28868, 28901), False, 'from datetime import datetime, date, timedelta\n'), ((32651, 32703), 'unicodedata.normalize', 'unicodedata.normalize', (['NORMALIZATION_FORM', 'groupname'], {}), '(NORMALIZATION_FORM, groupname)\n', (32672, 32703), False, 'import unicodedata\n'), ((41584, 41642), 'unicodedata.normalize', 'unicodedata.normalize', (['NORMALIZATION_FORM', 'valueProp.value'], {}), '(NORMALIZATION_FORM, valueProp.value)\n', (41605, 41642), False, 'import unicodedata\n'), ((56300, 56324), 'rdflib.URIRef', 'URIRef', (['closeMatch.value'], {}), '(closeMatch.value)\n', (56306, 56324), False, 'from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF\n')]
|
from django.conf.urls import url
from common import views
urlpatterns = [
url(r'^$', views.index, name='home'),
url(r'^disciplines/?$', views.disciplines_index, name='disciplines.index'),
url(r'^disciplines/new$',
views.DisciplineCreateView.as_view(), name='disciplines.new'),
url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<id>\d+)$',
views.discipline_detail, name='disciplines.detail'),
url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<pk>\d+)/edit$',
views.DisciplineUpdateView.as_view(), name='disciplines.edit'),
url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<pk>\d+)/delete$',
views.DisciplineDeleteView.as_view(), name='disciplines.delete'),
url(r'^performances/?$',
views.performances_index, name='performances.index'),
url(r'^performances/new$',
views.PerformanceCreateView.as_view(), name='performances.new'),
url(r'^performances/(?P<pk>\d+)$',
views.PerformanceDetailView.as_view(), name='performances.detail'),
url(r'^performances/(?P<pk>\d+)/edit$',
views.PerformanceUpdateView.as_view(), name='performances.edit'),
url(r'^performances/(?P<pk>\d+)/delete$',
views.PerformanceDeleteView.as_view(), name='performances.delete'),
]
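# The named routes above can be reversed elsewhere, e.g. (hypothetical call):
#   reverse('disciplines.detail', kwargs={'slug': 'history', 'id': 7})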
|
[
"common.views.PerformanceDetailView.as_view",
"common.views.DisciplineCreateView.as_view",
"common.views.PerformanceUpdateView.as_view",
"common.views.PerformanceDeleteView.as_view",
"common.views.DisciplineUpdateView.as_view",
"django.conf.urls.url",
"common.views.PerformanceCreateView.as_view",
"common.views.DisciplineDeleteView.as_view"
] |
[((80, 115), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""home"""'}), "('^$', views.index, name='home')\n", (83, 115), False, 'from django.conf.urls import url\n'), ((123, 196), 'django.conf.urls.url', 'url', (['"""^disciplines/?$"""', 'views.disciplines_index'], {'name': '"""disciplines.index"""'}), "('^disciplines/?$', views.disciplines_index, name='disciplines.index')\n", (126, 196), False, 'from django.conf.urls import url\n'), ((304, 415), 'django.conf.urls.url', 'url', (['"""^disciplines/(?P<slug>[-\\\\w\\\\d]*)-(?P<id>\\\\d+)$"""', 'views.discipline_detail'], {'name': '"""disciplines.detail"""'}), "('^disciplines/(?P<slug>[-\\\\w\\\\d]*)-(?P<id>\\\\d+)$', views.\n discipline_detail, name='disciplines.detail')\n", (307, 415), False, 'from django.conf.urls import url\n'), ((695, 771), 'django.conf.urls.url', 'url', (['"""^performances/?$"""', 'views.performances_index'], {'name': '"""performances.index"""'}), "('^performances/?$', views.performances_index, name='performances.index')\n", (698, 771), False, 'from django.conf.urls import url\n'), ((237, 273), 'common.views.DisciplineCreateView.as_view', 'views.DisciplineCreateView.as_view', ([], {}), '()\n', (271, 273), False, 'from common import views\n'), ((488, 524), 'common.views.DisciplineUpdateView.as_view', 'views.DisciplineUpdateView.as_view', ([], {}), '()\n', (522, 524), False, 'from common import views\n'), ((624, 660), 'common.views.DisciplineDeleteView.as_view', 'views.DisciplineDeleteView.as_view', ([], {}), '()\n', (658, 660), False, 'from common import views\n'), ((821, 858), 'common.views.PerformanceCreateView.as_view', 'views.PerformanceCreateView.as_view', ([], {}), '()\n', (856, 858), False, 'from common import views\n'), ((933, 970), 'common.views.PerformanceDetailView.as_view', 'views.PerformanceDetailView.as_view', ([], {}), '()\n', (968, 970), False, 'from common import views\n'), ((1053, 1090), 'common.views.PerformanceUpdateView.as_view', 'views.PerformanceUpdateView.as_view', ([], {}), '()\n', (1088, 1090), False, 'from common import views\n'), ((1173, 1210), 'common.views.PerformanceDeleteView.as_view', 'views.PerformanceDeleteView.as_view', ([], {}), '()\n', (1208, 1210), False, 'from common import views\n')]
|
"""
pylint-server
----
A small Flask application to keep track of pylint reports and ratings
on a per-repository basis.
"""
from setuptools import setup
setup(
name='pylint-server',
version='0.1',
url='https://github.com/drivet/pylint-server/',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
    description='A Flask application to keep track of pylint information',
long_description=__doc__,
py_modules=['pylint-server'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'TravisPy'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Version Control',
],
)
|
[
"setuptools.setup"
] |
[((161, 947), 'setuptools.setup', 'setup', ([], {'name': '"""pylint-server"""', 'version': '"""0.1"""', 'url': '"""https://github.com/drivet/pylint-server/"""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A Flask application to keep track of pylint information"""', 'long_description': '__doc__', 'py_modules': "['pylint-server']", 'zip_safe': '(False)', 'include_package_data': '(True)', 'platforms': '"""any"""', 'install_requires': "['Flask', 'TravisPy']", 'classifiers': "['Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Topic :: Software Development :: Version Control']"}), "(name='pylint-server', version='0.1', url=\n 'https://github.com/drivet/pylint-server/', license='MIT', author=\n '<NAME>', author_email='<EMAIL>', description=\n 'A Flask application to keep track of pylint information',\n long_description=__doc__, py_modules=['pylint-server'], zip_safe=False,\n include_package_data=True, platforms='any', install_requires=['Flask',\n 'TravisPy'], classifiers=['Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Topic :: Software Development :: Version Control'])\n", (166, 947), False, 'from setuptools import setup\n')]
|
""":mod:`sqlalchemy_imageattach.store` --- Image storage backend interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module declares a common interface for physically agnostic storage
backends. Whatever a way to implement a storage, it needs only common
operations of the interface. This consists of some basic operations
like writing, reading, deletion, and finding urls.
Modules that implement the storage interface inside
:mod:`sqlalchemy_imageattach.storages` package might help to implement
a new storage backend.
"""
import io
import numbers
import shutil
from .file import FileProxy, SeekableFileProxy
__all__ = 'Store',
class Store(object):
"""The interface of image storage backends. Every image storage
backend implementation has to implement this.
"""
def put_file(self, file, object_type, object_id, width, height, mimetype,
reproducible):
"""Puts the ``file`` of the image.
:param file: the image file to put
:type file: file-like object, :class:`file`
:param object_type: the object type of the image to put
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to put
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to put
:type width: :class:`numbers.Integral`
:param height: the height of the image to put
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to put
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
:param reproducible: :const:`True` only if it's reproducible by
computing e.g. resized thumbnails.
:const:`False` if it cannot be reproduced
e.g. original images
:type reproducible: :class:`bool`
.. note::
This is an abstract method which has to be implemented
(overridden) by subclasses.
It's not for consumers but implementations, so consumers
should use :meth:`store()` method instead of this.
"""
raise NotImplementedError('put_file() has to be implemented')
def delete_file(self, object_type, object_id, width, height, mimetype):
"""Deletes all reproducible files related to the image.
It doesn't raise any exception even if there's no such file.
:param object_type: the object type of the image to put
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to put
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to delete
:type width: :class:`numbers.Integral`
:param height: the height of the image to delete
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to delete
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
"""
raise NotImplementedError('delete_file() has to be implemented')
def get_file(self, object_type, object_id, width, height, mimetype):
"""Gets the file-like object of the given criteria.
:param object_type: the object type of the image to find
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to find
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to find
:type width: :class:`numbers.Integral`
:param height: the height of the image to find
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to find
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
:returns: the file of the image
:rtype: file-like object, :class:`file`
:raise IOError: when such file doesn't exist
.. note::
This is an abstract method which has to be implemented
(overridden) by subclasses.
It's not for consumers but implementations, so consumers
should use :meth:`open()` method instead of this.
"""
raise NotImplementedError('get_file() has to be implemented')
def get_url(self, object_type, object_id, width, height, mimetype):
"""Gets the file-like object of the given criteria.
:param object_type: the object type of the image to find
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to find
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to find
:type width: :class:`numbers.Integral`
:param height: the height of the image to find
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to find
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
:returns: the url locating the image
:rtype: :class:`str`
.. note::
This is an abstract method which has to be implemented
(overridden) by subclasses.
It's not for consumers but implementations, so consumers
should use :meth:`locate()` method instead of this.
"""
raise NotImplementedError('get_url() has to be implemented')
def store(self, image, file):
"""Stores the actual data ``file`` of the given ``image``.
::
with open(imagefile, 'rb') as f:
store.store(image, f)
:param image: the image to store its actual data file
:type image: :class:`sqlalchemy_imageattach.entity.Image`
:param file: the image file to put
:type file: file-like object, :class:`file`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
elif not callable(getattr(file, 'read', None)):
raise TypeError('file must be a readable file-like object that '
'implements read() method, not ' + repr(file))
self.put_file(file, image.object_type, image.object_id,
image.width, image.height, image.mimetype,
not image.original)
def delete(self, image):
"""Delete the file of the given ``image``.
:param image: the image to delete
:type image: :class:`sqlalchemy_imageattach.entity.Image`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
self.delete_file(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
def open(self, image, use_seek=False):
"""Opens the file-like object of the given ``image``.
Returned file-like object guarantees:
- context manager protocol
- :class:`collections.abc.Iterable` protocol
- :class:`collections.abc.Iterator` protocol
- :meth:`~io.RawIOBase.read()` method
- :meth:`~io.IOBase.readline()` method
- :meth:`~io.IOBase.readlines()` method
To sum up: you definitely can read the file, in :keyword:`with`
statement and :keyword:`for` loop.
Plus, if ``use_seek`` option is :const:`True`:
- :meth:`~io.IOBase.seek()` method
- :meth:`~io.IOBase.tell()` method
For example, if you want to make a local copy of
the image::
import shutil
with store.open(image) as src:
with open(filename, 'wb') as dst:
shutil.copyfileobj(src, dst)
:param image: the image to get its file
:type image: :class:`sqlalchemy_imageattach.entity.Image`
        :param use_seek: whether the file should be seekable.
if :const:`True` it maybe buffered in the memory.
default is :const:`False`
:type use_seek: :class:`bool`
:returns: the file-like object of the image, which is a context
manager (plus, also seekable only if ``use_seek``
is :const:`True`)
:rtype: :class:`file`, :class:`~sqlalchemy_imageattach.file.FileProxy`,
file-like object
:raise IOError: when such file doesn't exist
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
elif image.object_id is None:
raise TypeError('image.object_id must be set; it is currently '
'None however')
elif not isinstance(image.object_id, numbers.Integral):
raise TypeError('image.object_id must be integer, not ' +
repr(image.object_id))
f = self.get_file(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
for method in 'read', 'readline', 'readlines':
if not callable(getattr(f, method, None)):
raise TypeError(
'{0!r}.get_file() must return file-like object which '
'has {1}() method, not {2!r}'.format(self, method, f)
)
ctxt = (callable(getattr(f, '__enter__', None)) and
callable(getattr(f, '__exit__', None)))
if use_seek:
if not callable(getattr(f, 'seek', None)):
f2 = io.BytesIO()
shutil.copyfileobj(f, f2)
f2.seek(0)
return f2
if ctxt:
return f
return SeekableFileProxy(f)
if ctxt:
return f
return FileProxy(f)
def locate(self, image):
"""Gets the URL of the given ``image``.
:param image: the image to get its url
:type image: :class:`sqlalchemy_imageattach.entity.Image`
:returns: the url of the image
:rtype: :class:`str`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
url = self.get_url(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
if '?' in url:
fmt = '{0}&_ts={1}'
else:
fmt = '{0}?_ts={1}'
return fmt.format(url, image.created_at.strftime('%Y%m%d%H%M%S%f'))
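# A minimal in-memory backend sketch (illustrative only, not part of this
# module; the class and its dict layout below are assumptions) showing how
# the four abstract operations fit together:
#
#     class MemoryStore(Store):
#         def __init__(self):
#             self.files = {}
#         def put_file(self, file, object_type, object_id, width, height,
#                      mimetype, reproducible):
#             key = (object_type, object_id, width, height, mimetype)
#             self.files[key] = file.read()
#         def delete_file(self, object_type, object_id, width, height,
#                         mimetype):
#             self.files.pop(
#                 (object_type, object_id, width, height, mimetype), None)
#         def get_file(self, object_type, object_id, width, height, mimetype):
#             return io.BytesIO(
#                 self.files[(object_type, object_id, width, height, mimetype)])
#         def get_url(self, object_type, object_id, width, height, mimetype):
#             return 'memory://{0}/{1}'.format(object_type, object_id)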
|
[
"io.BytesIO",
"shutil.copyfileobj"
] |
[((10120, 10132), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10130, 10132), False, 'import io\n'), ((10149, 10174), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f', 'f2'], {}), '(f, f2)\n', (10167, 10174), False, 'import shutil\n')]
|
import tensorflow as tf
import numpy as np
import os
SCRIPT_PATH = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
MODEL_PATH = os.path.join(SCRIPT_DIR, "model/model.h5")
MODEL = None
INPUT_SIZE = 7 * 12
OUTPUT_SIZE = 1
def _load_model():
"""
Load the TensorFlow model if it is not loaded in the current context
Azure functions often preserve their contexts between executions
https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python#global-variables
"""
global MODEL
if MODEL is None:
MODEL = tf.keras.models.load_model(MODEL_PATH)
def normalize(costs):
return np.log(costs + 1)
def denormalize(costs):
return np.exp(costs) - 1
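# Note: normalize/denormalize are equivalent to np.log1p/np.expm1, which are
# more numerically stable for costs near zero.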
def make_subsequences(data, subsequence_size):
"""
Create subsequences of subsequence_size with the array
Example
-------
>>> make_subsequences(np.array([1, 2, 3, 4]), 2)
array([
[1, 2],
[2, 3],
[3, 4],
])
"""
number_of_subsequences = data.shape[0] - subsequence_size + 1
return np.array([data[index:subsequence_size+index] for index in range(number_of_subsequences)])
def predict_costs(actual_costs):
_load_model()
normalized_costs = normalize(np.array(actual_costs))
subsequences = make_subsequences(normalized_costs, INPUT_SIZE)
predictions = MODEL.predict(subsequences, subsequences.shape[0]).flatten()
predictions = denormalize(predictions)
return predictions.tolist()
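# Usage sketch (hypothetical input). INPUT_SIZE is 7 * 12, presumably
# 12 weeks of daily costs, so at least 84 values are needed:
#   predicted = predict_costs([10.0 + i * 0.1 for i in range(120)])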
|
[
"os.path.abspath",
"tensorflow.keras.models.load_model",
"numpy.log",
"os.path.dirname",
"numpy.array",
"numpy.exp",
"os.path.join"
] |
[((68, 93), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (83, 93), False, 'import os\n'), ((107, 135), 'os.path.dirname', 'os.path.dirname', (['SCRIPT_PATH'], {}), '(SCRIPT_PATH)\n', (122, 135), False, 'import os\n'), ((149, 191), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""model/model.h5"""'], {}), "(SCRIPT_DIR, 'model/model.h5')\n", (161, 191), False, 'import os\n'), ((654, 671), 'numpy.log', 'np.log', (['(costs + 1)'], {}), '(costs + 1)\n', (660, 671), True, 'import numpy as np\n'), ((580, 618), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (606, 618), True, 'import tensorflow as tf\n'), ((709, 722), 'numpy.exp', 'np.exp', (['costs'], {}), '(costs)\n', (715, 722), True, 'import numpy as np\n'), ((1251, 1273), 'numpy.array', 'np.array', (['actual_costs'], {}), '(actual_costs)\n', (1259, 1273), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
import json
import os
import shutil
import sys
import time
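# Expected config layout, inferred from the keys read below (sketch; all
# values are placeholders):
# {"countdownInMinutesNotFloatingpoint": "5",
#  "serviceRestartCmd": "systemctl restart some-service",
#  "keeping": [{"og": "/path/to/original", "newfile": "/path/to/copy"}]}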
def main(config_file="/etc/ogkeeper/config.json"):
with open(config_file, 'r') as f:
content = ''.join(f.readlines())
config = json.loads(content)
time.sleep(int(config["countdownInMinutesNotFloatingpoint"])*60)
for i in config["keeping"]:
with open(i["og"], 'rb') as fsrc, open(i["newfile"], 'wb+') as fdst:
fdst.truncate()
shutil.copyfileobj(fsrc, fdst)
os.system(str(config["serviceRestartCmd"]))
if __name__ == '__main__':
assert len(sys.argv) > 1
main(sys.argv[1])
|
[
"shutil.copyfileobj",
"json.loads"
] |
[((223, 242), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (233, 242), False, 'import json\n'), ((463, 493), 'shutil.copyfileobj', 'shutil.copyfileobj', (['fsrc', 'fdst'], {}), '(fsrc, fdst)\n', (481, 493), False, 'import shutil\n')]
|
import os
import cv2
import numpy as np
from tensorflow.keras.layers import *
import tensorflow as tf
from tensorflow.keras.layers import add
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing.image import load_img,img_to_array
from tensorflow.keras.optimizers import Nadam,Adam
from tensorflow.keras.initializers import Initializer
def conv_global(x,t,stride=False):
    # Build the conv block once, with the requested stride (default 1).
    # The original created two layers with the duplicate name "convg_<t>",
    # which Keras rejects when the model is built.
    strides = stride if stride else (1,1)
    xin = Conv2D(64,3,padding="same",strides=strides,name="convg_"+str(t))(x)
    xin = BatchNormalization(axis=-1)(xin)
    xin = Activation("relu")(xin)
    return xin
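# RDBlocks builds a Residual Dense Block: `count` densely connected 3x3 convs
# with growth rate `g`, a 1x1 "local" conv to fuse the concatenated features
# back to 64 channels, and a residual connection to the block input.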
def RDBlocks(x,name , count = 6 , g=32):
li = [x]
pas = Convolution2D(filters=g, kernel_size=(3,3), strides=(1, 1), padding='same' , activation='relu' , name = name+'_conv1')(x)
for i in range(2 , count+1):
li.append(pas)
        out = Concatenate(axis = -1)(li)  # concatenated output
pas = Convolution2D(filters=g, kernel_size=(3,3), strides=(1, 1), padding='same' , activation='relu', name = name+'_conv'+str(i))(out)
# feature extractor from the dense net
li.append(pas)
out = Concatenate(axis = -1)(li)
feat = Convolution2D(filters=64, kernel_size=(1,1), strides=(1, 1), padding='same',activation='relu' , name = name+'_Local_Conv')(out)
feat = Add()([feat , x])
return feat
def tensor_depth_to_space(imag,block_size,names):
x = tf.depth_to_space(imag,block_size,name=names)
return x
def tf_subpixel_conv(tensor,block_size,filters):
x = Conv2D(filters,3,strides=(1,1),padding="same")(tensor)
x = Lambda(lambda x : tensor_depth_to_space(x,block_size,names="subpixel_conv"))(x)
x = PReLU(shared_axes=[1, 2])(x)
return x
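# Notes: depth_to_space requires `filters` to be a multiple of block_size**2
# (e.g. filters=256 with block_size=2 yields 64 output channels); in TF2 the
# op lives at tf.nn.depth_to_space rather than tf.depth_to_space.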
|
[
"tensorflow.depth_to_space"
] |
[((1576, 1623), 'tensorflow.depth_to_space', 'tf.depth_to_space', (['imag', 'block_size'], {'name': 'names'}), '(imag, block_size, name=names)\n', (1593, 1623), True, 'import tensorflow as tf\n')]
|
# Generated by command write_metadata_files version 1
from metadata.models import Xrt
from .base_metadata import BaseMetadataResource
__all__ = ['XrtResource']
class XrtResource(BaseMetadataResource):
'''RESTful resource for model Xrt'''
class Meta(BaseMetadataResource.Meta):
abstract = False
queryset = Xrt.objects.all()
resource_name = 'metadata_xrt'
|
[
"metadata.models.Xrt.objects.all"
] |
[((316, 333), 'metadata.models.Xrt.objects.all', 'Xrt.objects.all', ([], {}), '()\n', (331, 333), False, 'from metadata.models import Xrt\n')]
|
"""webOS Smart TV trigger dispatcher."""
from __future__ import annotations
from typing import cast
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .triggers import TriggersPlatformModule, turn_on
TRIGGERS = {
"turn_on": turn_on,
}
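# Example of a trigger config this module dispatches (assuming the webostv
# domain; any extra keys are validated by the platform's TRIGGER_SCHEMA):
#   {CONF_PLATFORM: "webostv.turn_on", "entity_id": "media_player.tv"}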
def _get_trigger_platform(config: ConfigType) -> TriggersPlatformModule:
"""Return trigger platform."""
platform_split = config[CONF_PLATFORM].split(".", maxsplit=1)
if len(platform_split) < 2 or platform_split[1] not in TRIGGERS:
raise ValueError(
f"Unknown webOS Smart TV trigger platform {config[CONF_PLATFORM]}"
)
return cast(TriggersPlatformModule, TRIGGERS[platform_split[1]])
async def async_validate_trigger_config(
hass: HomeAssistant, config: ConfigType
) -> ConfigType:
"""Validate config."""
platform = _get_trigger_platform(config)
return cast(ConfigType, platform.TRIGGER_SCHEMA(config))
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Attach trigger of specified platform."""
platform = _get_trigger_platform(config)
assert hasattr(platform, "async_attach_trigger")
return cast(
CALLBACK_TYPE,
await getattr(platform, "async_attach_trigger")(
hass, config, action, automation_info
),
)
|
[
"typing.cast"
] |
[((831, 888), 'typing.cast', 'cast', (['TriggersPlatformModule', 'TRIGGERS[platform_split[1]]'], {}), '(TriggersPlatformModule, TRIGGERS[platform_split[1]])\n', (835, 888), False, 'from typing import cast\n')]
|
"""
ModelFit.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon May 12 14:01:29 MDT 2014
Description:
"""
import signal
import numpy as np
from ..util.PrintInfo import print_fit
from ..util.Pickling import write_pickle_file
from ..physics.Constants import nu_0_mhz
import gc, os, sys, copy, types, time, re
from .ModelFit import ModelFit, LogLikelihood, FitBase
from ..simulations import Global21cm as simG21
from ..analysis import Global21cm as anlGlobal21cm
from ..simulations import Global21cm as simGlobal21cm
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
def_kwargs = {'verbose': False, 'progress_bar': False}
class loglikelihood(LogLikelihood):
def __init__(self, xdata, ydata, error, turning_points):
"""
Computes log-likelihood at given step in MCMC chain.
Parameters
----------
"""
LogLikelihood.__init__(self, xdata, ydata, error)
self.turning_points = turning_points
def __call__(self, sim):
"""
Compute log-likelihood for model generated via input parameters.
Returns
-------
Tuple: (log likelihood, blobs)
"""
# Compute the likelihood if we've made it this far
if self.turning_points:
tps = sim.turning_points
try:
nu = [nu_0_mhz / (1. + tps[tp][0]) \
for tp in self.turning_points]
T = [tps[tp][1] for tp in self.turning_points]
except KeyError:
return -np.inf
yarr = np.array(nu + T)
assert len(yarr) == len(self.ydata)
else:
yarr = np.interp(self.xdata, sim.history['nu'], sim.history['dTb'])
if np.any(np.isnan(yarr)):
return -np.inf
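        # Gaussian log-likelihood: -0.5 * sum((y - d)**2 / sigma**2
        # + ln(2 * pi * sigma**2)); const_term is supplied by the base class.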
lnL = -0.5 * (np.sum((yarr - self.ydata)**2 \
/ self.error**2 + np.log(2. * np.pi * self.error**2)))
return lnL + self.const_term
class FitGlobal21cm(FitBase):
@property
def loglikelihood(self):
if not hasattr(self, '_loglikelihood'):
self._loglikelihood = loglikelihood(self.xdata, self.ydata,
self.error, self.turning_points)
return self._loglikelihood
@property
def turning_points(self):
if not hasattr(self, '_turning_points'):
self._turning_points = False
return self._turning_points
@turning_points.setter
def turning_points(self, value):
if type(value) == bool:
if value:
self._turning_points = list('BCD')
else:
self._turning_points = False
elif type(value) == tuple:
self._turning_points = list(value)
elif type(value) == list:
self._turning_points = value
elif isinstance(value, basestring):
if len(value) == 1:
self._turning_points = [value]
else:
self._turning_points = list(value)
@property
def frequencies(self):
if not hasattr(self, '_frequencies'):
raise AttributeError('Must supply frequencies by hand!')
return self._frequencies
@frequencies.setter
def frequencies(self, value):
self._frequencies = value
@property
def data(self):
if not hasattr(self, '_data'):
raise AttributeError('Must set data by hand!')
return self._data
@data.setter
def data(self, value):
"""
Set x and ydata at the same time, either by passing in
a simulation instance, a dictionary of parameters, or a
sequence of brightness temperatures corresponding to the
frequencies defined in self.frequencies (self.xdata).
"""
if type(value) == dict:
kwargs = value.copy()
kwargs.update(def_kwargs)
sim = simGlobal21cm(**kwargs)
sim.run()
self.sim = sim
elif isinstance(value, simGlobal21cm) or \
isinstance(value, anlGlobal21cm):
sim = self.sim = value
elif type(value) in [list, tuple]:
sim = None
else:
assert len(value) == len(self.frequencies)
assert not self.turning_points
self.xdata = self.frequencies
self.ydata = value
return
if self.turning_points is not None:
self.xdata = None
if sim is not None:
z = [sim.turning_points[tp][0] for tp in self.turning_points]
T = [sim.turning_points[tp][1] for tp in self.turning_points]
nu = nu_0_mhz / (1. + np.array(z))
self.ydata = np.array(list(nu) + T)
else:
assert len(value) == 2 * len(self.turning_points)
self.ydata = value
else:
self.xdata = self.frequencies
if hasattr(self, 'sim'):
nu = self.sim.history['nu']
dTb = self.sim.history['dTb']
self.ydata = np.interp(self.xdata, nu, dTb).copy() \
+ self.noise
@property
def noise(self):
if not hasattr(self, '_noise'):
self._noise = np.zeros_like(self.xdata)
return self._noise
@noise.setter
def noise(self, value):
self._noise = np.random.normal(0., value, size=len(self.frequencies))
@property
def error(self):
if not hasattr(self, '_error'):
raise AttributeError('Must set errors by hand!')
return self._error
@error.setter
def error(self, value):
if type(value) is dict:
nu = [value[tp][0] for tp in self.turning_points]
T = [value[tp][1] for tp in self.turning_points]
self._error = np.array(nu + T)
else:
if hasattr(self, '_data'):
assert len(value) == len(self.data), \
"Data and errors must have same shape!"
self._error = value
def _check_for_conflicts(self):
"""
Hacky at the moment. Preventative measure against is_log=True for
spectrum_logN. Could generalize.
"""
for i, element in enumerate(self.parameters):
if re.search('spectrum_logN', element):
if self.is_log[i]:
raise ValueError('spectrum_logN is already logarithmic!')
|
[
"numpy.zeros_like",
"numpy.log",
"numpy.isnan",
"numpy.array",
"numpy.interp",
"re.search"
] |
[((1966, 1982), 'numpy.array', 'np.array', (['(nu + T)'], {}), '(nu + T)\n', (1974, 1982), True, 'import numpy as np\n'), ((2066, 2126), 'numpy.interp', 'np.interp', (['self.xdata', "sim.history['nu']", "sim.history['dTb']"], {}), "(self.xdata, sim.history['nu'], sim.history['dTb'])\n", (2075, 2126), True, 'import numpy as np\n'), ((2146, 2160), 'numpy.isnan', 'np.isnan', (['yarr'], {}), '(yarr)\n', (2154, 2160), True, 'import numpy as np\n'), ((5892, 5917), 'numpy.zeros_like', 'np.zeros_like', (['self.xdata'], {}), '(self.xdata)\n', (5905, 5917), True, 'import numpy as np\n'), ((6524, 6540), 'numpy.array', 'np.array', (['(nu + T)'], {}), '(nu + T)\n', (6532, 6540), True, 'import numpy as np\n'), ((7026, 7061), 're.search', 're.search', (['"""spectrum_logN"""', 'element'], {}), "('spectrum_logN', element)\n", (7035, 7061), False, 'import gc, os, sys, copy, types, time, re\n'), ((2275, 2312), 'numpy.log', 'np.log', (['(2.0 * np.pi * self.error ** 2)'], {}), '(2.0 * np.pi * self.error ** 2)\n', (2281, 2312), True, 'import numpy as np\n'), ((5274, 5285), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (5282, 5285), True, 'import numpy as np\n'), ((5717, 5747), 'numpy.interp', 'np.interp', (['self.xdata', 'nu', 'dTb'], {}), '(self.xdata, nu, dTb)\n', (5726, 5747), True, 'import numpy as np\n')]
|
"""Functions to manage Abaqus objects and actions internally.
Intended to be used by the Abaqus CAE Python interpreter.
Developed by <NAME>.
https://github.com/rodrigo1392
"""
from __future__ import print_function  # the Abaqus kernel embeds Python 2.x
from abaqus import *
from abaqusConstants import *
from driverUtils import *
import odbAccess
# Stay compatible with Abaqus Viewer, which does not ship caeModules.
try:
    from caeModules import *
except ImportError:
pass
import ast
import os
import sys
from tools_submodule import filesystem_tools as ft
# Abaqus mesh-stats keywords.
ELEMENT_TYPES = ['numLineElems', 'numMeshedRegions', 'numNodes',
'numPointElems', 'numPyramidElems', 'numQuadElems',
'numTetBoundaryElems', 'numTetElems', 'numTriElems',
'numWedgeElems']
def assign_2d_parts_properties(model_name, section_name,
first_letters=None):
"""Assign section properties to parts in a current database model.
Parameters
----------
model_name : str
Name of model of interest.
section_name : str
Name of section properties object.
first_letters : str, optional
If given, filter parts by initial substring.
Returns
-------
None
"""
# Get list of model parts objects.
parts_list = [i for i in mdb.models[model_name].parts.values()]
# Optionally, filter by first letters.
if first_letters:
parts_list = [i for i in parts_list if
i.name.startswith(first_letters)]
# Iterate over parts objects list and assign properties.
for part in parts_list:
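        # The mask '[#1 ]' is a bitmask selecting the face with index 0.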
faces = part.faces.getSequenceFromMask(mask=('[#1 ]',), )
region = part.Set(faces=faces, name='BODY')
part.SectionAssignment(region=region, sectionName=section_name,
offset=0.0, offsetType=MIDDLE_SURFACE,
offsetField='', thicknessAssignment=FROM_SECTION)
def clean_parts_properties(model_name, first_letters=None):
"""Deletes section properties assignments of parts in a model.
Parameters
----------
model_name : str
Name of model of interest.
first_letters : str, optional
If given, filter parts by initial substring.
Returns
-------
None
"""
# Get list of model parts objects.
parts_list = [i for i in mdb.models[model_name].parts.values()]
# Optionally, filter by first letters.
if first_letters:
parts_list = [i for i in parts_list if i.name.startswith(first_letters)]
# Iterate over parts objects list and delete all properties.
for part in parts_list:
assignments_number = len(part.sectionAssignments)
for i in range(0, assignments_number):
del part.sectionAssignments[0]
def export_parts_iges(model_name, output_path, first_letters=None):
"""Export parts in a model as iges files.
Parameters
----------
model_name : str
Name of model of interest.
output_path : Path-like str
Path to export iges parts to.
first_letters : str, optional
If given, filter parts by initial substring.
Returns
-------
None
"""
# Get list of model parts objects.
parts_list = [i for i in mdb.models[model_name].parts.values()]
# Optionally, filter by first letters.
if first_letters:
parts_list = [i for i in parts_list if i.name.startswith(first_letters)]
# Iterate over parts objects list and export them.
for part in parts_list:
        part.writeIgesFile(fileName=os.path.join(output_path, part.name + '.igs'),
                           flavor=STANDARD)
def extract_set_mesh_nodes(odb, set_name):
"""Get mesh nodes labels and coordinates of a set of points.
    The method builds a dictionary with tuples (set name, instance name)
    as keys and lists of tuples (mesh node label, node coordinates)
    as values, for all the points corresponding to `set_name`.
Parameters
----------
odb : Odb object
To read from.
set_name : str
Name of set of points of interest.
Returns
-------
Dict
        (set name, instance name) : [(node label, node coordinates)].
"""
# Normalize input to Odb object
print('Extracting nodes...')
odb = normalize_odb_object(odb)
# Get nodes set, instance names and build output dict.
node_set = odb.rootAssembly.nodeSets[set_name]
instances_names_list = [i for i in node_set.instanceNames]
output = {(set_name, instance_name):
[(node.label, node.coordinates) for node in node_set.nodes[num]]
for num, instance_name in enumerate(instances_names_list)}
return output
def get_folder_calc_time(odbs_folder, show=True, recursive=False,
close_odbs=True):
"""Get job calculation time from all Odb objects in folder.
Parameters
----------
odbs_folder : Path
Folder containing Odb objects.
show : bool, optional
If True, print Odb calculation time.
recursive : bool, optional
If True, list Odb files recursively, including subfolders.
close_odbs : bool, optional
If True, close current session Odb objects at the end of script.
Returns
-------
dict
        Odb file paths as keys, calculation-time dicts as values.
"""
# List odb full paths.
odb_list = ft.list_files_with_extension(root_path=odbs_folder,
extension='.odb',
full_path=True,
recursively=recursive)
print(len(odb_list), 'Odb objects found')
    # Iterate through Odb files, open them and extract calculation time.
output = {}
for job_key in odb_list:
odb = normalize_odb_object(job_key)
output[job_key] = get_odb_calc_time(odb, show)
# Optionally, close all opened Odb objects in current session.
if close_odbs:
from abaqusMacros import close_all_odbs
close_all_odbs()
return output
def get_odb_calc_time(odb, show=True):
"""Get job calculation time from a Odb object.
Parameters
----------
odb : Odb object
To read data from.
show : bool, optional
If True, print Odb calculation time.
Returns
-------
dict
Seconds as values and systemTime, userTime and wallclockTime
objects as keys.
"""
# Normalize input to Odb object.
odb = normalize_odb_object(odb)
calc_time = odb.diagnosticData.jobTime
# Get calculation time and convert time to dict.
output = ast.literal_eval(str(calc_time)[1:-1])
# Print calculation time.
if show:
odb_name = (os.path.splitext(os.path.basename(odb.name))[0])
print(odb_name, ': ', str(calc_time))
return output
def log_message(input_string):
"""Set an output message to pass to Popen subprocess stdout method.
Parameters
----------
input_string : str
Message to be output.
Returns
-------
None
"""
    print(input_string, file=sys.__stdout__)
def normalize_odb_object(odb_ish):
"""Return a odb object from current session.
    If the input is already an Odb object, return it. If it is a string,
    look for the corresponding object within the opened Odbs; if none is
    found, try to open it.
Parameters
----------
odb_ish : Odb object or string-like Path
Odb object identifier.
Returns
-------
Odb object
Opened Odb object from current session.
"""
    # Verify whether the odb is already opened; if not, try to open it.
if isinstance(odb_ish, str):
try:
odb = session.odbs[odb_ish]
except KeyError:
odb = session.openOdb(odb_ish, readOnly=False)
# If neither could be done, just return its reference.
else:
odb = odb_ish
return odb
def print_model_mesh_stats(model_name, total_stats=False):
"""Show amount of mesh elements, instance by instance.
Parameters
----------
model_name : str
Name of model of interest.
total_stats : bool, optional
If True, show whole model stats in addition to instances stats.
Returns
-------
None
"""
    # Iterate through model instances and show mesh stats by
    # element type.
print('*****', '\nModel:', model_name, '\n')
model = mdb.models[model_name]
for inst_key, instance in model.rootAssembly.instances.items():
print('\nInstance:', inst_key)
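        # NOTE: 'stats' (and 'tot_stats' further below) are assumed to hold
        # mesh statistics objects; their computation is not shown in this
        # snippet.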
try:
for element_type in ELEMENT_TYPES:
                number_of_elements = getattr(stats, element_type)
if number_of_elements:
print(element_type, ':', number_of_elements)
except AttributeError:
pass
# Optionally, show whole model mesh stats.
if total_stats:
print('\n--- TOTAL ---')
try:
for element_type in ELEMENT_TYPES:
                number_of_elements = getattr(tot_stats, element_type)
if number_of_elements:
print(element_type, ':', number_of_elements)
except AttributeError:
pass
def rename_model(model_name, new_name):
"""Assign a new name to a model in current database.
Parameters
----------
model_name : str
Current model name.
new_name : str
New name for model.
Returns
-------
None
"""
mdb.models.changeKey(fromName=model_name, toName=new_name)
def rename_set(set_name, new_set_name):
"""Rename a set of points in all models from current database.
Parameters
----------
set_name : str
Current set name.
new_set_name : str
New name for set.
Returns
-------
None
"""
    # Iterate through models and change the set name.
for model_key, model in mdb.models.items():
model.rootAssembly.sets.changeKey(fromName=set_name,
toName=new_set_name)
return
def retrieve_odb_name(number, show_all=False):
"""Get Odb name from session Odbs list, depending on its position.
This function orders session Odbs list alphabetically, and retrieves
the name of an Odb object, depending on its position in that list.
    Useful when an Odb name is not known, but its position is.
Parameters
----------
number : int
Position of Odb object in session Odbs list.
show_all : bool, optional
If True, print name of all opened Odbs.
Returns
-------
str
Name of opened Odb object from current session.
"""
# Get list of opened Odbs keys, sort it and select one by position.
keys = session.odbs.keys()
keys = sorted(keys)
selected_key = keys[number]
# Print list of opened Odbs.
if show_all:
print('Currently opened Odbs', keys)
return selected_key
def retrieve_odb_set_name(odb, number, show_all=False):
"""Get set name from a Odb object, depending on its position.
This function orders Odb sets list alphabetically, and retrieves
the name of a set object, depending on its position in that list.
    Useful when a set name is not known, but its position is.
Parameters
----------
odb : Odb object or string.like Path
Odb object identifier.
number : int
Position of set object in Odb sets list.
show_all : bool, optional
If True, print name of all Odb sets name.
Returns
-------
str
Name of set name in a Odb object from current session.
"""
# Get list of Odb sets keys, sort it and select one by position.
odb = normalize_odb_object(odb)
keys = odb.rootAssembly.nodeSets.keys()
keys = sorted(keys)
selected_key = keys[number]
# Print list of available node sets.
if show_all:
print('Available node sets', keys)
return selected_key
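# Usage sketch: pick the third node set of the second opened Odb (positions
# are zero-based and follow alphabetical order):
#   odb_name = retrieve_odb_name(1, show_all=True)
#   set_name = retrieve_odb_set_name(odb_name, 2)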
def upgrade_odbs_folder(odbs_folder, recursive=False, print_every=1):
"""Upgrade version of all Odb objects in a folder.
Parameters
----------
odbs_folder : Path
Folder containing Odb objects.
recursive : bool, optional
If True, list Odb files recursively, including subfolders.
print_every : int, optional
If given, reduces printing reports frequency.
Returns
-------
None
"""
    # List Odb paths, keep only the old-versioned ones and report.
    odb_list = ft.list_files_with_extension(odbs_folder, '.odb', 1, recursive)
    upgradable_odb_list = [i for i in odb_list
                           if odbAccess.isUpgradeRequiredForOdb(i)]
print(len(odb_list), 'Odb objects found', len(upgradable_odb_list),
'require upgrade')
# Set temporary names and iterate over old versioned Odbs.
temp_name = os.path.join(odbs_folder, 'temp_odb_name.odb')
for job_number, job_key in enumerate(upgradable_odb_list):
# Optionally, report less times.
if divmod(job_number, print_every)[1] == 0:
print('Processing', job_key,
job_number + 1, 'of', len(upgradable_odb_list))
# Upgrade and rename new and old Odb files.
new_name = job_key
old_name = job_key.replace('.odb', '-old.odb')
session.upgradeOdb(job_key, temp_name)
os.rename(job_key, old_name)
os.rename(temp_name, new_name)
print('DONE')
return
|
[
"tools_submodule.filesystem_tools.list_files_with_extension",
"abaqusMacros.close_all_odbs",
"os.path.basename",
"os.rename",
"odbAccess.isUpgradeRequiredForOdb",
"os.path.join"
] |
[((5329, 5441), 'tools_submodule.filesystem_tools.list_files_with_extension', 'ft.list_files_with_extension', ([], {'root_path': 'odbs_folder', 'extension': '""".odb"""', 'full_path': '(True)', 'recursively': 'recursive'}), "(root_path=odbs_folder, extension='.odb',\n full_path=True, recursively=recursive)\n", (5357, 5441), True, 'from tools_submodule import filesystem_tools as ft\n'), ((12382, 12445), 'tools_submodule.filesystem_tools.list_files_with_extension', 'ft.list_files_with_extension', (['odbs_folder', '""".odb"""', '(1)', 'recursive'], {}), "(odbs_folder, '.odb', 1, recursive)\n", (12410, 12445), True, 'from tools_submodule import filesystem_tools as ft\n'), ((12742, 12788), 'os.path.join', 'os.path.join', (['odbs_folder', '"""temp_odb_name.odb"""'], {}), "(odbs_folder, 'temp_odb_name.odb')\n", (12754, 12788), False, 'import os\n'), ((5976, 5992), 'abaqusMacros.close_all_odbs', 'close_all_odbs', ([], {}), '()\n', (5990, 5992), False, 'from abaqusMacros import close_all_odbs\n'), ((13243, 13271), 'os.rename', 'os.rename', (['job_key', 'old_name'], {}), '(job_key, old_name)\n', (13252, 13271), False, 'import os\n'), ((13280, 13310), 'os.rename', 'os.rename', (['temp_name', 'new_name'], {}), '(temp_name, new_name)\n', (13289, 13310), False, 'import os\n'), ((12523, 12559), 'odbAccess.isUpgradeRequiredForOdb', 'odbAccess.isUpgradeRequiredForOdb', (['i'], {}), '(i)\n', (12556, 12559), False, 'import odbAccess\n'), ((6692, 6718), 'os.path.basename', 'os.path.basename', (['odb.name'], {}), '(odb.name)\n', (6708, 6718), False, 'import os\n')]
|
from util import Singleton
import datetime as dt
import logging
import pyglet
from ascension.settings import AscensionConf
LOG = logging.getLogger(__name__)
SLOW_FRAME_MESSAGE = (
"Last execution of '{profiler_label}' lasted {time_passed}, which is over the target "
"{target_time} by {time_over}!"
)
REPORT_MESSAGES = [
"PROFILER REPORT: {name}",
" {time_passed} passed since last report",
" {run_count} times ran",
" {max_time} max",
" {min_time} min",
" {average_time} average",
" {total_time} total",
" {share} share of time passed",
]
TIME_FORMAT = "{}s {:>3}ms {:>3}\xb5s"
def get_time_string(t):
if not t:
return "NO_VALUE"
    return TIME_FORMAT.format(t.seconds, t.microseconds // 1000, t.microseconds % 1000)
class ProfilerBlock(object):
def __init__(self, name, targets=None, report_every=5):
self.name = name
self.targets = targets or []
self.report_every = report_every
self.start_time = None
self.schedule_report()
def reset_metrics(self):
self.report_start = dt.datetime.now()
self.count = 0
self.maximum = None
self.minimum = None
def schedule_report(self):
self.reset_metrics()
pyglet.clock.schedule_interval(self.report, self.report_every)
def report(self, *args):
report_end = dt.datetime.now()
time_passed_num = report_end - self.report_start
time_passed = get_time_string(time_passed_num)
max_time = get_time_string(self.maximum)
min_time = get_time_string(self.minimum)
average_time = get_time_string(self.total / self.count)
total_time = get_time_string(self.total)
share = "{0:.0f}%".format(self.total.total_seconds()*100 / time_passed_num.total_seconds())
for message in REPORT_MESSAGES:
LOG.info(message.format(
name=self.name, time_passed=time_passed, run_count=self.count,
max_time=max_time, min_time=min_time, average_time=average_time,
share=share, total_time=total_time
))
self.report_start = dt.datetime.now()
self.reset_metrics()
def start(self):
if self.start_time:
raise KeyError(
"Cannot start profiler '{}', it was not stopped since last call".format(self.name)
)
self.start_time = dt.datetime.now()
def stop(self):
if not self.start_time:
raise KeyError(
"Cannot start profiler '{}', it was not stopped since last call".format(self.name)
)
stop_time = dt.datetime.now()
time_passed = stop_time - self.start_time
self.start_time = None
self.count += 1
if self.count == 1:
self.maximum = time_passed
self.minimum = time_passed
self.total = time_passed
else:
            self.maximum = max(self.maximum, time_passed)
            self.minimum = min(self.minimum, time_passed)
self.total += time_passed
for log_level, target_time in self.targets:
if not target_time or time_passed > target_time:
time_over = time_passed - target_time
getattr(LOG, log_level.lower())(SLOW_FRAME_MESSAGE.format(
profiler_label=self.name, time_passed=get_time_string(time_passed),
time_over=get_time_string(time_over), target_time=get_time_string(target_time),
))
break
class ProfilerManager(object):
__metaclass__ = Singleton
def __init__(self):
self.profilers = {}
def start_draw(self):
loopend = dt.datetime.now()
if hasattr(self, "loopstart"):
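            # NOTE: record_loop is assumed to be defined elsewhere on this
            # class; its definition is not shown in this snippet.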
self.record_loop()
self.loopstart = loopend
    def add_profiler(self, profiler):
        if profiler.name in self.profilers:
            raise KeyError(
                "ProfilerManager already has a profiler named '{}'".format(profiler.name)
            )
        self.profilers[profiler.name] = profiler
def start(self, name, targets=None, report_every=5):
if name in AscensionConf.disabled_profilers:
return
if name not in self.profilers:
self.profilers[name] = ProfilerBlock(name, targets=targets, report_every=report_every)
self.profilers[name].start()
def stop(self, name):
if name in AscensionConf.disabled_profilers:
return
if name not in self.profilers:
raise KeyError("No such profiler '{}' to stop".format(name))
self.profilers[name].stop()
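# A minimal usage sketch (the profiler name and target below are illustrative):
#   manager = ProfilerManager()
#   manager.start("frame", targets=[("WARNING", dt.timedelta(seconds=1.0 / 60))])
#   ...  # the work to be measured
#   manager.stop("frame")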
|
[
"datetime.datetime.now",
"logging.getLogger",
"pyglet.clock.schedule_interval"
] |
[((131, 158), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (148, 158), False, 'import logging\n'), ((1098, 1115), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1113, 1115), True, 'import datetime as dt\n'), ((1264, 1326), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['self.report', 'self.report_every'], {}), '(self.report, self.report_every)\n', (1294, 1326), False, 'import pyglet\n'), ((1379, 1396), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1394, 1396), True, 'import datetime as dt\n'), ((2151, 2168), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2166, 2168), True, 'import datetime as dt\n'), ((2415, 2432), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2430, 2432), True, 'import datetime as dt\n'), ((2647, 2664), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2662, 2664), True, 'import datetime as dt\n'), ((3769, 3786), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (3784, 3786), True, 'import datetime as dt\n')]
|
import math as m
def is_power_of_2(num):
return num != 0 and ((num & (num - 1)) == 0)
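# Golomb coding with parameter `mod`: a value is written as a unary quotient
# (num // mod ones terminated by a zero) followed by a binary remainder. When
# `mod` is a power of two this reduces to Rice coding with a fixed
# log2(mod)-bit remainder; otherwise a truncated binary code is used, which
# spends one bit less on the first (1 << ceil(log2(mod))) - mod remainder
# values.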
def code(num, mod):
    unary = num // mod
    remainder = num % mod
    # Unary-coded quotient: `unary` ones terminated by a single zero.
    code = '1' * unary + '0'
    if is_power_of_2(mod):
        # Rice case: the remainder always fits in exactly log2(mod) bits.
        bits_needed = int(m.log(mod, 2))
        add_bits = bin(remainder)[2:]
        add_bits = '0' * (bits_needed - len(add_bits)) + add_bits
    else:
        # General Golomb case: truncated binary encoding of the remainder.
        bits_needed = int(m.ceil(m.log(mod, 2)))
        cutoff = (1 << bits_needed) - mod
        if remainder < cutoff:
            add_bits = bin(remainder)[2:]
            add_bits = '0' * (bits_needed - len(add_bits) - 1) + add_bits
        else:
            add_bits = bin(remainder + cutoff)[2:]
            add_bits = '0' * (bits_needed - len(add_bits)) + add_bits
    code = code + add_bits
return code
def decode(code, mod):
idx = code.find('0')
num = mod * idx
rem_part = code[idx+1:]
if is_power_of_2(mod):
num = num + int(rem_part, 2)
else:
bits_needed = int(m.ceil(m.log(mod, 2)))
cutoff = (1 << bits_needed) - mod
if len(rem_part) == bits_needed:
num = num + (int(rem_part, 2) - cutoff)
else:
num = num + int(rem_part, 2)
return num
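# Worked example (Rice case, mod = 16): 42 // 16 = 2 gives the unary prefix
# '110', and 42 % 16 = 10 gives the 4-bit remainder '1010'.
assert code(42, 16) == '1101010'
assert decode('1101010', 16) == 42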
N, N2, M1, M2 = 42, 47, 10, 16
c1 = code(N, M2)
c2 = code(N2, M2)
c3 = code(N, M1)
c4 = code(N2, M1)
dc1 = decode(c1, M2)
dc2 = decode(c2, M2)
dc3 = decode(c3, M1)
dc4 = decode(c4, M1)
print(N, c1, dc1)
print(N2, c2, dc2)
print(N, c3, dc3)
print(N2, c4, dc4)
|
[
"math.log"
] |
[((255, 268), 'math.log', 'm.log', (['mod', '(2)'], {}), '(mod, 2)\n', (260, 268), True, 'import math as m\n'), ((416, 429), 'math.log', 'm.log', (['mod', '(2)'], {}), '(mod, 2)\n', (421, 429), True, 'import math as m\n'), ((1000, 1013), 'math.log', 'm.log', (['mod', '(2)'], {}), '(mod, 2)\n', (1005, 1013), True, 'import math as m\n')]
|
"""
Tools for extracting snapshots and structures from core22 FAH trajectories.
Limitations:
* The reference structure (`natoms_reference`) must share the same atom ordering as the first `natoms_reference` atoms of the trajectory.
For now, this means that the SpruceTK prepared structure (`Mpro-x10789_0_bound-protein-thiolate.pdb`) is used
Dependencies:
* mdtraj >= 1.9.4 (conda-forge)
"""
from functools import partial
import logging
import pathlib
import multiprocessing
import os
import tempfile
from typing import Dict, List, Optional
from pydantic import BaseModel, Field
import joblib
import mdtraj as md
from ..schema import TransformationAnalysis, AnalysisConfig
class SnapshotArtifactory(BaseModel):
"""Structural snapshot creator."""
config: AnalysisConfig
project_dir: pathlib.Path = Field(
description="Path to project directory (e.g. '/home/server/server2/projects/13422')"
)
project_data_dir: pathlib.Path = Field(
description="Path to project data directory (e.g. '/home/server/server2/data/SVR314342810/PROJ13422')"
)
cache_dir: pathlib.Path = Field(
None,
description="If specified, cache relevant parts of 'htf.npz' file in a local directory of this name",
)
@staticmethod
def _transformation_to_file_mapping(output_dir, run_id, ligand):
fnames = [
f"{ligand}_protein.pdb",
f"{ligand}_complex.pdb",
f"{ligand}_ligand.sdf",
]
outfiles = [
os.path.join(output_dir, f"RUN{run_id}", f"{fname}") for fname in fnames
]
return outfiles
@staticmethod
def load_trajectory(
project_dir: str, project_data_dir: str, run: int, clone: int, gen: int
) -> md.Trajectory:
"""
Load the trajectory from the specified PRCG.
Parameters
----------
project_dir : str
Path to project directory (e.g. '/home/server/server2/projects/13422')
project_data_dir : str
Path to project data directory (e.g. '/home/server/server2/data/SVR314342810/PROJ13422')
run : int
Run (e.g. 0)
clone : int
Clone (e.g. 0)
gen : int
Gen (e.g. 0)
Returns
-------
trajectory : mdtraj.Trajectory
The trajectory
"""
# Load trajectory
pdbfile_path = os.path.join(
project_dir, "RUNS", f"RUN{run}", "hybrid_complex.pdb"
)
# TODO: Reuse path logic from fah_xchem.lib
trajectory_path = os.path.join(
project_data_dir,
f"RUN{run}",
f"CLONE{clone}",
f"results{gen}",
"positions.xtc",
)
try:
pdbfile = md.load(pdbfile_path)
except OSError as e:
raise ValueError(f"Failed to load PDB file: {e}")
try:
trajectory = md.load(trajectory_path, top=pdbfile.top)
except OSError as e:
raise ValueError(f"Failed to load trajectory: {e}")
return trajectory
@staticmethod
def load_fragment(
structure_path: pathlib.Path,
target_name: str,
fragment_id: str,
annotations: str,
component: str,
) -> md.Trajectory:
"""
Load the reference fragment structure
Parameters
----------
structure_path : pathlib.Path
Path to reference structure directory.
target_name : str
Name of target (e.g. 'Mpro').
fragment_id : str
Fragment ID (e.g. 'x10789').
annotations : str
Additional characters in the reference file name (e.g. '_0A_bound').
component : str
Component of the system the reference corresponds to (e.g. 'protein')
Returns
-------
fragment : mdtraj.Trajectory
The fragment structure
"""
# several components here: path, target name, fragment id, annotations (e.g. "0A_bound"), and component (e.g. "protein", "ligand")
# separated by hyphens
# TODO: Put this in the covid-moonshot path, or generalize to an arbitrary file
# fragment = md.load(
# f"/home/server/server2/projects/available/covid-moonshot/receptors/monomer/Mpro-{fragment_id}_0A_bound-protein.pdb"
# )
fragment = md.load(
f"{structure_path}/{target_name}-{fragment_id}{annotations}-{component}.pdb"
)
return fragment
def _mdtraj_to_oemol(self, snapshot: md.Trajectory):
"""
Create an OEMol from an MDTraj file by writing and reading
NOTE: This uses terrible heuristics
Parameters
----------
snapshot : mdtraj.Trajectory
MDTraj Trajectory with a single snapshot
Returns
-------
oemol : openeye.oechem.OEMol
The OEMol
"""
from openeye import oechem
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, "tmp.pdb")
# Write the PDB file
snapshot.save(filename)
# Read it with OpenEye
with oechem.oemolistream(filename) as ifs:
for mol in ifs.GetOEGraphMols():
return mol
def extract_snapshot(
self,
project_dir: str,
project_data_dir: str,
run: int,
clone: int,
gen: int,
frame: int,
fragment_id: str,
):
"""
Extract the specified snapshot, align it to the reference fragment, and write protein and ligands to separate PDB files
Parameters
----------
project_dir : str
Path to project directory (e.g. '/home/server/server2/projects/13422')
run : str or int
Run (e.g. '0')
clone : str or int
Clone (e.g. '0')
gen : str or int
Gen (e.g. '0')
frame : int
fragment_id : str
Fragment ID (e.g. 'x10789')
Returns
-------
sliced_snapshot : dict of str : mdtraj.Trajectory
sliced_snapshot[name] is the Trajectory for name in ['protein', 'old_ligand', 'new_ligand', 'old_complex', 'new_complex']
components : dict of str : oechem.OEMol
components[name] is the OEMol for name in ['protein', 'old_ligand', 'new_ligand']
"""
# Load the trajectory
trajectory = self.load_trajectory(
project_dir, project_data_dir, run, clone, gen
)
# Load the fragment
fragment = self.load_fragment(structure_path=self.config.structure_path,
target_name=self.config.target_name,
fragment_id=fragment_id,
annotations=self.config.annotations,
component=self.config.component)
# Align the trajectory to the fragment (in place)
# trajectory.image_molecules(inplace=True) # No need to image molecules anymore now that perses adds zero-energy bonds between protein and ligand!
# trajectory.superpose(fragment, atom_indices=fragment.top.select("name CA"))
# TODO: fix this hardcode for *MPro*!
trajectory.superpose(
fragment,
atom_indices=fragment.top.select(
"(name CA) and (residue 145 or residue 41 or residue 164 or residue 165 or residue 142 or residue 163)"
),
) # DEBUG : Mpro active site only
# Extract the snapshot
snapshot = trajectory[frame]
# Slice out old or new state
sliced_snapshot = self.slice_snapshot(
snapshot, project_dir, run, self.cache_dir
)
# Convert to OEMol
# NOTE: This uses heuristics, and should be replaced once we start storing actual chemical information
components = dict()
for name in ["protein", "old_ligand", "new_ligand"]:
components[name] = self._mdtraj_to_oemol(sliced_snapshot[name])
return sliced_snapshot, components
@staticmethod
def _get_stored_atom_indices(project_dir: str, run: int):
"""
Load hybrid topology file and return relevant atom indices.
"""
import numpy as np
path = os.path.join(project_dir, "RUNS", f"RUN{run}")
htf = np.load(os.path.join(path, "htf.npz"), allow_pickle=True)[
"arr_0"
].tolist()
# Determine mapping between hybrid topology and stored atoms in the positions.xtc
# <xtcAtoms v="solute"/> eliminates waters
nonwater_atom_indices = htf.hybrid_topology.select("not water")
hybrid_to_stored_map = {
nonwater_atom_indices[index]: index
for index in range(len(nonwater_atom_indices))
}
# Get all atom indices from the hybrid system
# Omit hydrogens
protein_atom_indices = htf.hybrid_topology.select("protein and (mass > 1.1)")
hybrid_ligand_atom_indices = htf.hybrid_topology.select(
"resn MOL and (mass > 1.1)"
)
# Identify atom index subsets for the old and new ligands from the hybrid system
old_ligand_atom_indices = [
index
for index in hybrid_ligand_atom_indices
if index in htf._old_to_hybrid_map.values()
]
new_ligand_atom_indices = [
index
for index in hybrid_ligand_atom_indices
if index in htf._new_to_hybrid_map.values()
]
# Compute sliced atom indices using atom indices within positions.xtc
return {
"protein": [hybrid_to_stored_map[index] for index in protein_atom_indices],
"old_ligand": [
hybrid_to_stored_map[index] for index in old_ligand_atom_indices
],
"new_ligand": [
hybrid_to_stored_map[index] for index in new_ligand_atom_indices
],
"old_complex": [
hybrid_to_stored_map[index]
for index in list(protein_atom_indices) + list(old_ligand_atom_indices)
],
"new_complex": [
hybrid_to_stored_map[index]
for index in list(protein_atom_indices) + list(new_ligand_atom_indices)
],
}
def slice_snapshot(
self,
snapshot: md.Trajectory,
project_dir: str,
run: int,
cache_dir: Optional[str],
) -> Dict[str, md.Trajectory]:
"""
        Slice the snapshot into per-component sub-trajectories.
.. TODO ::
The htf.npz file is very slow to load.
Replace this with a JSON file containing relevant ligand indices only
Parameters
----------
snapshot : mdtraj.Trajectory
Snapshot to slice
project_dir : str
Path to project directory (e.g. '/home/server/server2/projects/13422')
run : int
Run (e.g. '0')
cache_dir : str or None
If specified, cache relevant parts of "htf.npz" file in a local directory of this name
Returns
-------
sliced_snapshot : dict of str : mdtraj.Trajectory
sliced_snapshot[x] where x is one of ['protein', 'old_ligand', 'new_ligand', 'old_complex', 'new_complex']
"""
get_stored_atom_indices_cached = (
self._get_stored_atom_indices
if cache_dir is None
else joblib.Memory(cachedir=cache_dir, verbose=0).cache(
self._get_stored_atom_indices
)
)
stored_atom_indices = get_stored_atom_indices_cached(project_dir, run)
sliced_snapshot = dict()
for key, atom_indices in stored_atom_indices.items():
sliced_snapshot[key] = md.Trajectory(
snapshot.xyz[:, atom_indices, :], snapshot.topology.subset(atom_indices)
)
return sliced_snapshot
def generate_representative_snapshot(
self,
transformation: TransformationAnalysis,
output_dir: str,
overwrite: bool = False,
) -> None:
r"""
Generate representative snapshots for old and new ligands.
Illustration of frames:
old ---[0]\ /[3]
\ /
new \[1]---[2]/
Parameters
----------
transformation: TransformationAnalysis
The transformation record to operate on.
output_dir : str
Path where snapshots will be written.
overwrite : bool
If `True`, write over existing output files if present.
Otherwise, skip writing output files for a given transformation when already present.
Assumes that for a given `run_id` the output files do not ever change;
does *no* checking that files wouldn't be different if inputs for a given `run_id` have changed.
Returns
-------
None
"""
max_binding_free_energy = self.config.max_binding_free_energy
# create output directory if not present
run_id = transformation.transformation.run_id
os.makedirs(os.path.join(output_dir, f"RUN{run_id}"), exist_ok=True)
# TODO: Cache results and only update RUNs for which we have received new data
if (
max_binding_free_energy is not None
and transformation.binding_free_energy.point > max_binding_free_energy
):
logging.info(
"Skipping snapshot for RUN %d. Binding free energy estimate %g exceeds threshold %g",
transformation.transformation.run_id,
transformation.binding_free_energy.point,
max_binding_free_energy,
)
return None
gen_works = [
(gen, work)
for gen in transformation.complex_phase.gens
for work in gen.works
]
for ligand in ["old", "new"]:
# check if output files all exist; if so, skip unless we are told not to
if not overwrite:
outfiles = self._transformation_to_file_mapping(
output_dir, run_id, ligand
)
if all(map(os.path.exists, outfiles)):
continue
if ligand == "old":
gen_work = min(gen_works, key=lambda gen_work: gen_work[1].reverse)
frame = 3 # TODO: Magic numbers
else:
gen_work = min(gen_works, key=lambda gen_work: gen_work[1].forward)
frame = 1 # TODO: Magic numbers
gen_analysis, workpair = gen_work
# Extract representative snapshot
try:
sliced_snapshots, components = self.extract_snapshot(
project_dir=self.project_dir,
project_data_dir=self.project_data_dir,
run=run_id,
clone=workpair.clone,
gen=gen_analysis.gen,
frame=frame,
fragment_id=transformation.transformation.xchem_fragment_id,
)
# Write protein PDB
name = f"{ligand}_protein"
sliced_snapshots["protein"].save(
os.path.join(output_dir, f"RUN{run_id}", f"{name}.pdb")
)
# Write old and new complex PDBs
name = f"{ligand}_complex"
sliced_snapshots[name].save(
os.path.join(output_dir, f"RUN{run_id}", f"{name}.pdb")
)
# Write ligand SDFs
from openeye import oechem
name = f"{ligand}_ligand"
with oechem.oemolostream(
os.path.join(output_dir, f"RUN{run_id}", f"{name}.sdf")
) as ofs:
oechem.OEWriteMolecule(ofs, components[name])
except Exception as e:
print(
f"\nException occurred extracting snapshot from {self.project_dir} data {self.project_data_dir} run {run_id} clone {gen_work[1].clone} gen {gen_work[0].gen}"
)
print(e)
def generate_representative_snapshots(
self,
transformations: List[TransformationAnalysis],
output_dir: str,
num_procs: Optional[int],
overwrite: bool = False,
) -> None:
from rich.progress import track
with multiprocessing.Pool(num_procs) as pool:
result_iter = pool.imap_unordered(
partial(
self.generate_representative_snapshot,
output_dir=output_dir,
overwrite=overwrite,
),
transformations,
)
for _ in track(
result_iter,
total=len(transformations),
description="Generating representative snapshots",
):
pass
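# A hedged usage sketch (the config object and inputs are illustrative; the
# example paths are taken from the docstrings above):
#   factory = SnapshotArtifactory(
#       config=analysis_config,
#       project_dir="/home/server/server2/projects/13422",
#       project_data_dir="/home/server/server2/data/SVR314342810/PROJ13422",
#   )
#   factory.generate_representative_snapshots(
#       transformations, output_dir="snapshots", num_procs=4
#   )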
|
[
"functools.partial",
"tempfile.TemporaryDirectory",
"mdtraj.load",
"openeye.oechem.oemolistream",
"logging.info",
"pydantic.Field",
"openeye.oechem.OEWriteMolecule",
"multiprocessing.Pool",
"os.path.join",
"joblib.Memory"
] |
[((819, 915), 'pydantic.Field', 'Field', ([], {'description': '"""Path to project directory (e.g. \'/home/server/server2/projects/13422\')"""'}), '(description=\n    "Path to project directory (e.g. \'/home/server/server2/projects/13422\')")\n', (824, 915), False, 'from pydantic import BaseModel, Field\n'), ((962, 1081), 'pydantic.Field', 'Field', ([], {'description': '"""Path to project data directory (e.g. \'/home/server/server2/data/SVR314342810/PROJ13422\')"""'}), '(description=\n    "Path to project data directory (e.g. \'/home/server/server2/data/SVR314342810/PROJ13422\')"\n    )\n', (967, 1081), False, 'from pydantic import BaseModel, Field\n'), ((1116, 1239), 'pydantic.Field', 'Field', (['None'], {'description': '"""If specified, cache relevant parts of \'htf.npz\' file in a local directory of this name"""'}), '(None, description=\n    "If specified, cache relevant parts of \'htf.npz\' file in a local directory of this name"\n    )\n', (1121, 1239), False, 'from pydantic import BaseModel, Field\n'), ((2408, 2476), 'os.path.join', 'os.path.join', (['project_dir', '"""RUNS"""', 'f"""RUN{run}"""', '"""hybrid_complex.pdb"""'], {}), "(project_dir, 'RUNS', f'RUN{run}', 'hybrid_complex.pdb')\n", (2420, 2476), False, 'import os\n'), ((2578, 2676), 'os.path.join', 'os.path.join', (['project_data_dir', 'f"""RUN{run}"""', 'f"""CLONE{clone}"""', 'f"""results{gen}"""', '"""positions.xtc"""'], {}), "(project_data_dir, f'RUN{run}', f'CLONE{clone}',\n    f'results{gen}', 'positions.xtc')\n", (2590, 2676), False, 'import os\n'), ((4400, 4495), 'mdtraj.load', 'md.load', (['f"""{structure_path}/{target_name}-{fragment_id}{annotations}-{component}.pdb"""'], {}), "(\n    f'{structure_path}/{target_name}-{fragment_id}{annotations}-{component}.pdb'\n    )\n", (4407, 4495), True, 'import mdtraj as md\n'), ((8393, 8439), 'os.path.join', 'os.path.join', (['project_dir', '"""RUNS"""', 'f"""RUN{run}"""'], {}), "(project_dir, 'RUNS', f'RUN{run}')\n", (8405, 8439), False, 'import os\n'), ((1514, 1566), 'os.path.join', 'os.path.join', (['output_dir', 'f"""RUN{run_id}"""', 'f"""{fname}"""'], {}), "(output_dir, f'RUN{run_id}', f'{fname}')\n", (1526, 1566), False, 'import os\n'), ((2779, 2800), 'mdtraj.load', 'md.load', (['pdbfile_path'], {}), '(pdbfile_path)\n', (2786, 2800), True, 'import mdtraj as md\n'), ((2931, 2972), 'mdtraj.load', 'md.load', (['trajectory_path'], {'top': 'pdbfile.top'}), '(trajectory_path, top=pdbfile.top)\n', (2938, 2972), True, 'import mdtraj as md\n'), ((4998, 5027), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5025, 5027), False, 'import tempfile\n'), ((5062, 5093), 'os.path.join', 'os.path.join', (['tmpdir', '"""tmp.pdb"""'], {}), "(tmpdir, 'tmp.pdb')\n", (5074, 5093), False, 'import os\n'), ((13296, 13336), 'os.path.join', 'os.path.join', (['output_dir', 'f"""RUN{run_id}"""'], {}), "(output_dir, f'RUN{run_id}')\n", (13308, 13336), False, 'import os\n'), ((13608, 13826), 'logging.info', 'logging.info', (['"""Skipping snapshot for RUN %d. Binding free energy estimate %g exceeds threshold %g"""', 'transformation.transformation.run_id', 'transformation.binding_free_energy.point', 'max_binding_free_energy'], {}), "(\n    'Skipping snapshot for RUN %d. Binding free energy estimate %g exceeds threshold %g'\n    , transformation.transformation.run_id, transformation.\n    binding_free_energy.point, max_binding_free_energy)\n", (13620, 13826), False, 'import logging\n'), ((16631, 16662), 'multiprocessing.Pool', 'multiprocessing.Pool', (['num_procs'], {}), '(num_procs)\n', (16651, 16662), False, 'import multiprocessing\n'), ((16735, 16829), 'functools.partial', 'partial', (['self.generate_representative_snapshot'], {'output_dir': 'output_dir', 'overwrite': 'overwrite'}), '(self.generate_representative_snapshot, output_dir=output_dir,\n    overwrite=overwrite)\n', (16742, 16829), False, 'from functools import partial\n'), ((11569, 11613), 'joblib.Memory', 'joblib.Memory', ([], {'cachedir': 'cache_dir', 'verbose': '(0)'}), '(cachedir=cache_dir, verbose=0)\n', (11582, 11613), False, 'import joblib\n'), ((15439, 15494), 'os.path.join', 'os.path.join', (['output_dir', 'f"""RUN{run_id}"""', 'f"""{name}.pdb"""'], {}), "(output_dir, f'RUN{run_id}', f'{name}.pdb')\n", (15451, 15494), False, 'import os\n'), ((15671, 15726), 'os.path.join', 'os.path.join', (['output_dir', 'f"""RUN{run_id}"""', 'f"""{name}.pdb"""'], {}), "(output_dir, f'RUN{run_id}', f'{name}.pdb')\n", (15683, 15726), False, 'import os\n'), ((16032, 16077), 'openeye.oechem.OEWriteMolecule', 'oechem.OEWriteMolecule', (['ofs', 'components[name]'], {}), '(ofs, components[name])\n', (16054, 16077), False, 'from openeye import oechem\n'), ((8462, 8491), 'os.path.join', 'os.path.join', (['path', '"""htf.npz"""'], {}), "(path, 'htf.npz')\n", (8474, 8491), False, 'import os\n'), ((15930, 15985), 'os.path.join', 'os.path.join', (['output_dir', 'f"""RUN{run_id}"""', 'f"""{name}.sdf"""'], {}), "(output_dir, f'RUN{run_id}', f'{name}.sdf')\n", (15942, 15985), False, 'import os\n')]
|
"""Implements a fully blocking kernel client.
Useful for test suites and blocking terminal interfaces.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.utils.traitlets import Type
from IPython.kernel.client import KernelClient
from .channels import (
BlockingIOPubChannel, BlockingHBChannel,
BlockingShellChannel, BlockingStdInChannel
)
#-----------------------------------------------------------------------------
# Blocking kernel manager
#-----------------------------------------------------------------------------
class BlockingKernelClient(KernelClient):
# The classes to use for the various channels
shell_channel_class = Type(BlockingShellChannel)
iopub_channel_class = Type(BlockingIOPubChannel)
stdin_channel_class = Type(BlockingStdInChannel)
hb_channel_class = Type(BlockingHBChannel)
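# Typical usage sketch (the connection file name is illustrative; assumes a
# running IPython kernel):
#   client = BlockingKernelClient(connection_file='kernel-1234.json')
#   client.load_connection_file()
#   client.start_channels()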
|
[
"IPython.utils.traitlets.Type"
] |
[((1139, 1165), 'IPython.utils.traitlets.Type', 'Type', (['BlockingShellChannel'], {}), '(BlockingShellChannel)\n', (1143, 1165), False, 'from IPython.utils.traitlets import Type\n'), ((1192, 1218), 'IPython.utils.traitlets.Type', 'Type', (['BlockingIOPubChannel'], {}), '(BlockingIOPubChannel)\n', (1196, 1218), False, 'from IPython.utils.traitlets import Type\n'), ((1245, 1271), 'IPython.utils.traitlets.Type', 'Type', (['BlockingStdInChannel'], {}), '(BlockingStdInChannel)\n', (1249, 1271), False, 'from IPython.utils.traitlets import Type\n'), ((1295, 1318), 'IPython.utils.traitlets.Type', 'Type', (['BlockingHBChannel'], {}), '(BlockingHBChannel)\n', (1299, 1318), False, 'from IPython.utils.traitlets import Type\n')]
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
#! [auto_compilation]
import openvino.runtime as ov
compiled_model = ov.compile_model("model.xml")
#! [auto_compilation]
#! [properties_example]
core = ov.Core()
input_a = ov.opset8.parameter([8])
res = ov.opset8.absolute(input_a)
model = ov.Model(res, [input_a])
compiled = core.compile_model(model, "CPU")
print(model.inputs)
print(model.outputs)
print(compiled.inputs)
print(compiled.outputs)
#! [properties_example]
#! [tensor_basics]
data_float64 = np.ones(shape=(2,8))
tensor = ov.Tensor(data_float64)
assert tensor.element_type == ov.Type.f64
data_int32 = np.ones(shape=(2,8), dtype=np.int32)
tensor = ov.Tensor(data_int32)
assert tensor.element_type == ov.Type.i32
#! [tensor_basics]
#! [tensor_shared_mode]
data_to_share = np.ones(shape=(2,8))
shared_tensor = ov.Tensor(data_to_share, shared_memory=True)
# Editing of the numpy array affects Tensor's data
data_to_share[0][2] = 6.0
assert shared_tensor.data[0][2] == 6.0
# Editing of Tensor's data affects the numpy array
shared_tensor.data[0][2] = 0.6
assert data_to_share[0][2] == 0.6
#! [tensor_shared_mode]
infer_request = compiled.create_infer_request()
data = np.random.randint(-5, 3 + 1, size=(8))
#! [passing_numpy_array]
# Passing inputs data in form of a dictionary
infer_request.infer(inputs={0: data})
# Passing inputs data in form of a list
infer_request.infer(inputs=[data])
#! [passing_numpy_array]
#! [getting_results]
# Get output tensor
results = infer_request.get_output_tensor().data
# Get tensor with CompiledModel's output node
results = infer_request.get_tensor(compiled.outputs[0]).data
# Get all results with special helper property
results = list(infer_request.results.values())
#! [getting_results]
#! [sync_infer]
# Simple call to InferRequest
results = infer_request.infer(inputs={0: data})
# Extra feature: calling CompiledModel directly
results = compiled_model(inputs={0: data})
#! [sync_infer]
#! [asyncinferqueue]
core = ov.Core()
# Simple model that adds two inputs together
input_a = ov.opset8.parameter([8])
input_b = ov.opset8.parameter([8])
res = ov.opset8.add(input_a, input_b)
model = ov.Model(res, [input_a, input_b])
compiled = core.compile_model(model, "CPU")
# Number of InferRequests that AsyncInferQueue holds
jobs = 4
infer_queue = ov.AsyncInferQueue(compiled, jobs)
# Create data
data = [np.array([i] * 8, dtype=np.float32) for i in range(jobs)]
# Run all jobs
for i in range(len(data)):
infer_queue.start_async({0: data[i], 1: data[i]})
infer_queue.wait_all()
#! [asyncinferqueue]
#! [asyncinferqueue_access]
results = infer_queue[3].get_output_tensor().data
#! [asyncinferqueue_access]
#! [asyncinferqueue_set_callback]
data_done = [False for _ in range(jobs)]
def f(request, userdata):
print(f"Done! Result: {request.get_output_tensor().data}")
data_done[userdata] = True
infer_queue.set_callback(f)
for i in range(len(data)):
infer_queue.start_async({0: data[i], 1: data[i]}, userdata=i)
infer_queue.wait_all()
assert all(data_done)
#! [asyncinferqueue_set_callback]
unt8_data = np.ones([100])
#! [packing_data]
from openvino.helpers import pack_data
packed_buffer = pack_data(unt8_data, ov.Type.u4)
# Create tensor with shape in element types
t = ov.Tensor(packed_buffer, [1, 128], ov.Type.u4)
#! [packing_data]
#! [unpacking]
from openvino.helpers import unpack_data
unpacked_data = unpack_data(t.data, t.element_type, t.shape)
assert np.array_equal(unpacked_data, unt8_data)
#! [unpacking]
#! [releasing_gil]
import openvino.runtime as ov
import cv2 as cv
from threading import Thread
input_data = []
# Processing input data will be done in a separate thread
# while compilation of the model and creation of the infer request
# is going to be executed in the main thread.
def prepare_data(input, image_path):
image = cv.imread(image_path)
h, w = list(input.shape)[-2:]
image = cv.resize(image, (h, w))
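    # NOTE: cv.resize expects dsize as (width, height); passing (h, w) is only
    # equivalent when the model input is square (h == w).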
image = image.transpose((2, 0, 1))
image = np.expand_dims(image, 0)
input_data.append(image)
core = ov.Core()
model = core.read_model("model.xml")
# Create thread with prepare_data function as target and start it
thread = Thread(target=prepare_data, args=[model.input(), "path/to/image"])
thread.start()
# The GIL will be released in compile_model.
# It allows a thread above to start the job,
# while main thread is running in the background.
compiled = core.compile_model(model, "GPU")
# After returning from compile_model, the main thread acquires the GIL
# and starts create_infer_request which releases it once again.
request = compiled.create_infer_request()
# Join the thread to make sure the input_data is ready
thread.join()
# running the inference
request.infer(input_data)
#! [releasing_gil]
|
[
"openvino.runtime.opset8.parameter",
"openvino.runtime.Core",
"numpy.array_equal",
"openvino.runtime.opset8.absolute",
"openvino.runtime.Model",
"openvino.runtime.opset8.add",
"numpy.ones",
"numpy.expand_dims",
"openvino.runtime.AsyncInferQueue",
"openvino.runtime.compile_model",
"cv2.imread",
"numpy.random.randint",
"numpy.array",
"openvino.helpers.pack_data",
"openvino.runtime.Tensor",
"openvino.helpers.unpack_data",
"cv2.resize"
] |
[((173, 202), 'openvino.runtime.compile_model', 'ov.compile_model', (['"""model.xml"""'], {}), "('model.xml')\n", (189, 202), True, 'import openvino.runtime as ov\n'), ((257, 266), 'openvino.runtime.Core', 'ov.Core', ([], {}), '()\n', (264, 266), True, 'import openvino.runtime as ov\n'), ((278, 302), 'openvino.runtime.opset8.parameter', 'ov.opset8.parameter', (['[8]'], {}), '([8])\n', (297, 302), True, 'import openvino.runtime as ov\n'), ((309, 336), 'openvino.runtime.opset8.absolute', 'ov.opset8.absolute', (['input_a'], {}), '(input_a)\n', (327, 336), True, 'import openvino.runtime as ov\n'), ((345, 369), 'openvino.runtime.Model', 'ov.Model', (['res', '[input_a]'], {}), '(res, [input_a])\n', (353, 369), True, 'import openvino.runtime as ov\n'), ((563, 584), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 8)'}), '(shape=(2, 8))\n', (570, 584), True, 'import numpy as np\n'), ((594, 617), 'openvino.runtime.Tensor', 'ov.Tensor', (['data_float64'], {}), '(data_float64)\n', (603, 617), True, 'import openvino.runtime as ov\n'), ((674, 711), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 8)', 'dtype': 'np.int32'}), '(shape=(2, 8), dtype=np.int32)\n', (681, 711), True, 'import numpy as np\n'), ((721, 742), 'openvino.runtime.Tensor', 'ov.Tensor', (['data_int32'], {}), '(data_int32)\n', (730, 742), True, 'import openvino.runtime as ov\n'), ((845, 866), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 8)'}), '(shape=(2, 8))\n', (852, 866), True, 'import numpy as np\n'), ((883, 927), 'openvino.runtime.Tensor', 'ov.Tensor', (['data_to_share'], {'shared_memory': '(True)'}), '(data_to_share, shared_memory=True)\n', (892, 927), True, 'import openvino.runtime as ov\n'), ((1242, 1278), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(3 + 1)'], {'size': '(8)'}), '(-5, 3 + 1, size=8)\n', (1259, 1278), True, 'import numpy as np\n'), ((2037, 2046), 'openvino.runtime.Core', 'ov.Core', ([], {}), '()\n', (2044, 2046), True, 'import openvino.runtime as ov\n'), ((2103, 2127), 'openvino.runtime.opset8.parameter', 'ov.opset8.parameter', (['[8]'], {}), '([8])\n', (2122, 2127), True, 'import openvino.runtime as ov\n'), ((2138, 2162), 'openvino.runtime.opset8.parameter', 'ov.opset8.parameter', (['[8]'], {}), '([8])\n', (2157, 2162), True, 'import openvino.runtime as ov\n'), ((2169, 2200), 'openvino.runtime.opset8.add', 'ov.opset8.add', (['input_a', 'input_b'], {}), '(input_a, input_b)\n', (2182, 2200), True, 'import openvino.runtime as ov\n'), ((2209, 2242), 'openvino.runtime.Model', 'ov.Model', (['res', '[input_a, input_b]'], {}), '(res, [input_a, input_b])\n', (2217, 2242), True, 'import openvino.runtime as ov\n'), ((2364, 2398), 'openvino.runtime.AsyncInferQueue', 'ov.AsyncInferQueue', (['compiled', 'jobs'], {}), '(compiled, jobs)\n', (2382, 2398), True, 'import openvino.runtime as ov\n'), ((3141, 3155), 'numpy.ones', 'np.ones', (['[100]'], {}), '([100])\n', (3148, 3155), True, 'import numpy as np\n'), ((3231, 3263), 'openvino.helpers.pack_data', 'pack_data', (['unt8_data', 'ov.Type.u4'], {}), '(unt8_data, ov.Type.u4)\n', (3240, 3263), False, 'from openvino.helpers import pack_data\n'), ((3312, 3358), 'openvino.runtime.Tensor', 'ov.Tensor', (['packed_buffer', '[1, 128]', 'ov.Type.u4'], {}), '(packed_buffer, [1, 128], ov.Type.u4)\n', (3321, 3358), True, 'import openvino.runtime as ov\n'), ((3451, 3495), 'openvino.helpers.unpack_data', 'unpack_data', (['t.data', 't.element_type', 't.shape'], {}), '(t.data, t.element_type, t.shape)\n', (3462, 3495), False, 'from openvino.helpers import unpack_data\n'), ((3503, 3543), 'numpy.array_equal', 'np.array_equal', (['unpacked_data', 'unt8_data'], {}), '(unpacked_data, unt8_data)\n', (3517, 3543), True, 'import numpy as np\n'), ((4100, 4109), 'openvino.runtime.Core', 'ov.Core', ([], {}), '()\n', (4107, 4109), True, 'import openvino.runtime as ov\n'), ((2422, 2457), 'numpy.array', 'np.array', (['([i] * 8)'], {'dtype': 'np.float32'}), '([i] * 8, dtype=np.float32)\n', (2430, 2457), True, 'import numpy as np\n'), ((3894, 3915), 'cv2.imread', 'cv.imread', (['image_path'], {}), '(image_path)\n', (3903, 3915), True, 'import cv2 as cv\n'), ((3962, 3986), 'cv2.resize', 'cv.resize', (['image', '(h, w)'], {}), '(image, (h, w))\n', (3971, 3986), True, 'import cv2 as cv\n'), ((4038, 4062), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4052, 4062), True, 'import numpy as np\n')]
|
from fly_tello import FlyTello
my_tellos = list()
'''
// scenario specific to the MPG lecture hall
// initial position: 1 2 3 4 (all drones facing 12 o'clock)
// separated by 1 m
// TO DO: to be finished
'''
#
# MAIN FLIGHT CONTROL LOGIC
#
# Define the Tello's we're using, in the order we want them numbered
my_tellos.append('0TQDG2KEDB4FH3') # number 1 == DC5CE0
my_tellos.append('0TQDG2KEDBWK3X') # number 2 == DC5F6C
my_tellos.append('0TQDFCHEDB3F86') # number 3 == D3FCE4
my_tellos.append('0TQDG2KEDB04T1') # number 4 == DC5CF3
#my_tellos.append('0TQDFCHEDBY3H0') # number 5 == D3F926
#my_tellos.append('0TQDG2KEDBPE19') # number 6 == DC5F05
# Control the flight
with FlyTello(my_tellos, get_status=True) as fly:
# TO DO : battery_check 20
fly.get_battery()
fly.print_status(sync=True)
    # all take off 2 seconds apart
for i in range(1,5):
fly.takeoff(i)
fly.pause(2)
fly.print_status(sync=True)
    # all climb 1 m
    fly.up(100)
    # 180° rotation for all
    fly.rotate_cw(180)
    # lateral move
fly.left(100,1)
fly.left(100,2)
fly.right(100,3)
fly.right(100,4)
    # staircase move
fly.up(80,1)
fly.up(20,2)
fly.up(20,3)
fly.up(80,4)
#fly.up(40,5)
#fly.up(60,6)
    # forward move of 400
    #fly.forward(100)
    # 90° rotation
fly.rotate_cw(90,1)
fly.rotate_cw(90,2)
fly.rotate_ccw(90,3)
fly.rotate_ccw(90,4)
    # forward move of 200
fly.right(50,2)
fly.right(50,3)
fly.forward(200,1)
fly.forward(400,2)
fly.forward(400,3)
fly.forward(200,4)
    # 90° rotation
fly.rotate_cw(90,1)
fly.rotate_cw(90,2)
fly.rotate_ccw(90,3)
fly.rotate_ccw(90,4)
    # lateral move of 400
    #fly.forward(400)
    # 180° rotation
fly.rotate_ccw(180)
fly.forward(50,2)
fly.down(2)
    # front flip
for i in range(1,5):
fly.flip("forward",i)
fly.pause(2)
#fly.flip("forward")
#fly.print_status(sync=True)
    # landing
    # lateral move
fly.left(50,1)
fly.left(350,2)
fly.right(250,3)
fly.right(100,4)
fly.land()
fly.get_battery(sync=True)
fly.get_sn()
|
[
"fly_tello.FlyTello"
] |
[((663, 699), 'fly_tello.FlyTello', 'FlyTello', (['my_tellos'], {'get_status': '(True)'}), '(my_tellos, get_status=True)\n', (671, 699), False, 'from fly_tello import FlyTello\n')]
|
from importlib import import_module, invalidate_caches
from inspect import getmembers, isfunction
from aiogram import Dispatcher, Bot
from os.path import abspath
from ujson import loads
from os import environ
import logging
logging.basicConfig(level=logging.INFO, filename=".log", filemode="w+")
_langs = loads(open('source/langs.json', 'r').read())
conf = environ
bot = Bot(conf['API_TOKEN'], parse_mode='HTML')
dp = Dispatcher(bot)
plugins = loads(open('source/plugins.json').read())["plugins"]
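# Expected shape of source/plugins.json, inferred from the fields read below
# (all values are illustrative):
# {"plugins": [{"module_path": "plugins.ping", "func": "ping_handler",
#               "commands": ["ping"], "doc": "Replies with pong.",
#               "regex": "^/ping"}]}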
helper = []
for plugin in plugins:
_tmp = import_module(plugin['module_path'])
for member in getmembers(_tmp, isfunction):
if member[0] == plugin['func']:
try:
helper.append({
"commands": plugin["commands"],
"doc": plugin['doc'],
})
dp.register_message_handler(member[1], commands=plugin["commands"])
            except KeyError:
                # No "commands" entry for this plugin: register by regex instead.
                dp.register_message_handler(member[1], regexp=plugin["regex"])
invalidate_caches()
|
[
"importlib.invalidate_caches",
"importlib.import_module",
"logging.basicConfig",
"aiogram.Dispatcher",
"aiogram.Bot",
"inspect.getmembers"
] |
[((225, 296), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '""".log"""', 'filemode': '"""w+"""'}), "(level=logging.INFO, filename='.log', filemode='w+')\n", (244, 296), False, 'import logging\n'), ((372, 413), 'aiogram.Bot', 'Bot', (["conf['API_TOKEN']"], {'parse_mode': '"""HTML"""'}), "(conf['API_TOKEN'], parse_mode='HTML')\n", (375, 413), False, 'from aiogram import Dispatcher, Bot\n'), ((419, 434), 'aiogram.Dispatcher', 'Dispatcher', (['bot'], {}), '(bot)\n', (429, 434), False, 'from aiogram import Dispatcher, Bot\n'), ((546, 582), 'importlib.import_module', 'import_module', (["plugin['module_path']"], {}), "(plugin['module_path'])\n", (559, 582), False, 'from importlib import import_module, invalidate_caches\n'), ((601, 629), 'inspect.getmembers', 'getmembers', (['_tmp', 'isfunction'], {}), '(_tmp, isfunction)\n', (611, 629), False, 'from inspect import getmembers, isfunction\n'), ((1016, 1035), 'importlib.invalidate_caches', 'invalidate_caches', ([], {}), '()\n', (1033, 1035), False, 'from importlib import import_module, invalidate_caches\n')]
|
from GamesKeeper.db import BaseModel
from peewee import (BigIntegerField, IntegerField, TextField, BooleanField,
DoesNotExist)
from playhouse.postgres_ext import BinaryJSONField, ArrayField
@BaseModel.register
class Guild(BaseModel):
guild_id = BigIntegerField(primary_key=True)
owner_id = BigIntegerField(null=False)
prefix = TextField(default="+", null=False)
games_category = BigIntegerField(null=True)
spectator_roles = ArrayField(BigIntegerField, null=True, index=False)
enabled_games = IntegerField()
referee_role = BigIntegerField(null=True)
role_allow_startgames = BigIntegerField(null=True)
booster_perks = BooleanField(default=False)
commands_disabled_channels = ArrayField(
BigIntegerField, null=True, index=False
)
logs_enabled = BooleanField(default=True)
log_channel = BigIntegerField(null=True)
class Meta:
db_table = 'guilds'
@classmethod
def get_settings(cls, guild_id):
try:
return Guild.get(guild_id=guild_id)
except Guild.DoesNotExist:
return
@classmethod
def using_id(cls, guild_id):
return Guild.get(guild_id=guild_id)
def enabled_games_emotes(self):
game_types = {
1 << 0: "<:uno:594231154098438153>", # Uno
1 << 1: "<:connectfour:594231155172179985>", # Connect4
1 << 2: "<:tictactoe:594231153830133761>", # TicTacToe
1 << 3: "<:hangman:594231153914019840>", # Hangman
# 1 << 4: "2048", #2048
# 1 << 5: "<:trivia:594231155012665354>", #Trivia
}
if self.enabled_games == 0:
return ['`None`']
games = []
        for i in range(len(game_types)):
if self.enabled_games & 1 << i:
games.append(game_types[1 << i])
return games
def disabled_games_emotes(self):
game_types = {
1 << 0: "<:uno:594231154098438153>", # Uno
1 << 1: "<:connectfour:594231155172179985>", # Connect4
1 << 2: "<:tictactoe:594231153830133761>", # TicTacToe
1 << 3: "<:hangman:594231153914019840>", # Hangman
# 1 << 4: "2048", #2048
# 1 << 5: "<:trivia:594231155012665354>", #Trivia
}
games = []
for i in range(len(game_types)):
if not self.enabled_games & 1 << i:
games.append(game_types[1 << i])
return games
def enabled_games_strings(self):
game_types = {
1 << 0: "Uno",
1 << 1: "Connect4",
1 << 2: "TicTacToe",
1 << 3: "HangMan",
# 1 << 4: "2048",
# 1 << 5: "Trivia",
}
games = []
        for i in range(len(game_types)):
if self.enabled_games & 1 << i:
games.append(game_types[1 << i])
return games
def check_if_listed(self, game, check_type):
game_types = {
"uno": 1 << 0, # Uno
'c4': 1 << 1, # Connect4
'ttt': 1 << 2, # TicTacToe
'hm': 1 << 3, # Hangman
# '2048': 1 << 4, #2048
# 'trivia': 1 << 5, #Trivia
}
if check_type == 'enabled':
if self.enabled_games & game_types[game]:
return True
else:
return False
if check_type == 'disabled':
if not self.enabled_games & game_types[game]:
return True
else:
return False
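# Example: a guild with Uno and Hangman enabled stores
# enabled_games = (1 << 0) | (1 << 3) == 9, so
# check_if_listed('hm', 'enabled') evaluates 9 & (1 << 3) != 0 -> True.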
|
[
"peewee.IntegerField",
"playhouse.postgres_ext.ArrayField",
"peewee.TextField",
"peewee.BooleanField",
"peewee.BigIntegerField"
] |
[((271, 304), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (286, 304), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((320, 347), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(False)'}), '(null=False)\n', (335, 347), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((361, 395), 'peewee.TextField', 'TextField', ([], {'default': '"""+"""', 'null': '(False)'}), "(default='+', null=False)\n", (370, 395), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((417, 443), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (432, 443), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((466, 517), 'playhouse.postgres_ext.ArrayField', 'ArrayField', (['BigIntegerField'], {'null': '(True)', 'index': '(False)'}), '(BigIntegerField, null=True, index=False)\n', (476, 517), False, 'from playhouse.postgres_ext import BinaryJSONField, ArrayField\n'), ((538, 552), 'peewee.IntegerField', 'IntegerField', ([], {}), '()\n', (550, 552), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((572, 598), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (587, 598), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((627, 653), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (642, 653), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((674, 701), 'peewee.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (686, 701), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((735, 786), 'playhouse.postgres_ext.ArrayField', 'ArrayField', (['BigIntegerField'], {'null': '(True)', 'index': '(False)'}), '(BigIntegerField, null=True, index=False)\n', (745, 786), False, 'from playhouse.postgres_ext import BinaryJSONField, ArrayField\n'), ((820, 846), 'peewee.BooleanField', 'BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (832, 846), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n'), ((865, 891), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (880, 891), False, 'from peewee import BigIntegerField, IntegerField, TextField, BooleanField, DoesNotExist\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2021 CERN.
# Copyright (C) 2019-2021 Northwestern University.
# Copyright (C) 2021 <NAME>.
#
# Invenio App RDM is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Routes for record-related pages provided by Invenio-App-RDM."""
from flask import current_app, g, render_template
from flask_login import login_required
from invenio_i18n.ext import current_i18n
from invenio_rdm_records.proxies import current_rdm_records
from invenio_rdm_records.resources.config import RDMDraftFilesResourceConfig
from invenio_rdm_records.resources.serializers import UIJSONSerializer
from invenio_rdm_records.services.schemas import RDMRecordSchema
from invenio_rdm_records.services.schemas.utils import dump_empty
from invenio_rdm_records.vocabularies import Vocabularies
from ..utils import set_default_value
from .decorators import pass_draft, service
#
# Helpers
#
def get_form_config(**kwargs):
"""Get the react form configration."""
return dict(
vocabularies=Vocabularies.dump(),
current_locale=str(current_i18n.locale),
**kwargs
)
def get_search_url():
"""Get the search URL."""
# TODO: this should not be used
return current_app.config["APP_RDM_ROUTES"]["record_search"]
def new_record():
"""Create an empty record with default values."""
record = dump_empty(RDMRecordSchema)
defaults = current_app.config.get("APP_RDM_DEPOSIT_FORM_DEFAULTS") or {}
for key, value in defaults.items():
set_default_value(record, value, key)
return record
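# APP_RDM_DEPOSIT_FORM_DEFAULTS is assumed to map field paths to default
# values, each applied to the empty record via set_default_value(); a purely
# hypothetical entry: {"metadata.publisher": "CERN"}.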
#
# Views
#
@login_required
def deposit_search():
"""List of user deposits page."""
return render_template(
"invenio_app_rdm/records/search_deposit.html",
searchbar_config=dict(searchUrl=get_search_url()),
)
@login_required
def deposit_create():
"""Create a new deposit."""
return render_template(
"invenio_app_rdm/records/deposit.html",
forms_config=get_form_config(createUrl=("/api/records")),
searchbar_config=dict(searchUrl=get_search_url()),
record=new_record(),
files=dict(
default_preview=None, enabled=True, entries=[], links={}
),
)
@login_required
@pass_draft
def deposit_edit(draft=None, pid_value=None):
"""Edit an existing deposit."""
files_list = current_rdm_records.draft_files_service.list_files(
id_=pid_value,
identity=g.identity,
links_config=RDMDraftFilesResourceConfig.links_config,
)
serializer = UIJSONSerializer()
record = serializer.serialize_object_to_dict(draft.to_dict())
return render_template(
"invenio_app_rdm/records/deposit.html",
forms_config=get_form_config(apiUrl=f"/api/records/{pid_value}/draft"),
record=record,
files=files_list.to_dict(),
searchbar_config=dict(searchUrl=get_search_url()),
permissions=draft.has_permissions_to(['new_version'])
)
|
[
"invenio_rdm_records.services.schemas.utils.dump_empty",
"flask.current_app.config.get",
"invenio_rdm_records.resources.serializers.UIJSONSerializer",
"invenio_rdm_records.vocabularies.Vocabularies.dump",
"invenio_rdm_records.proxies.current_rdm_records.draft_files_service.list_files"
] |
[((1438, 1465), 'invenio_rdm_records.services.schemas.utils.dump_empty', 'dump_empty', (['RDMRecordSchema'], {}), '(RDMRecordSchema)\n', (1448, 1465), False, 'from invenio_rdm_records.services.schemas.utils import dump_empty\n'), ((2422, 2568), 'invenio_rdm_records.proxies.current_rdm_records.draft_files_service.list_files', 'current_rdm_records.draft_files_service.list_files', ([], {'id_': 'pid_value', 'identity': 'g.identity', 'links_config': 'RDMDraftFilesResourceConfig.links_config'}), '(id_=pid_value, identity=\n g.identity, links_config=RDMDraftFilesResourceConfig.links_config)\n', (2472, 2568), False, 'from invenio_rdm_records.proxies import current_rdm_records\n'), ((2613, 2631), 'invenio_rdm_records.resources.serializers.UIJSONSerializer', 'UIJSONSerializer', ([], {}), '()\n', (2629, 2631), False, 'from invenio_rdm_records.resources.serializers import UIJSONSerializer\n'), ((1481, 1536), 'flask.current_app.config.get', 'current_app.config.get', (['"""APP_RDM_DEPOSIT_FORM_DEFAULTS"""'], {}), "('APP_RDM_DEPOSIT_FORM_DEFAULTS')\n", (1503, 1536), False, 'from flask import current_app, g, render_template\n'), ((1103, 1122), 'invenio_rdm_records.vocabularies.Vocabularies.dump', 'Vocabularies.dump', ([], {}), '()\n', (1120, 1122), False, 'from invenio_rdm_records.vocabularies import Vocabularies\n')]
|
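A minimal sketch of how the new_record() defaults above are consumed. The config key appears in the snippet; the dotted-path key format shown here is an assumption inferred from how set_default_value(record, value, key) is called:

# Hypothetical Flask config (assumed dotted-path keys, not from the source):
APP_RDM_DEPOSIT_FORM_DEFAULTS = {
    "metadata.publisher": "My Institution",  # assumed key name, for illustration
}
# new_record() builds an empty skeleton with dump_empty(RDMRecordSchema) and
# then fills in each (key, value) pair via set_default_value(record, value, key).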
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
#
# Nonparametric Permutation Test
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from itertools import combinations
from math import factorial
try:
from nose.tools import nottest
except ImportError:
# Use a no-op decorator if nose is not available
def nottest(f):
return f
# decorator to prevent nose from considering
# this as a unit test due to "test" in the name
@nottest
def permutation_test(x, y, func='x_mean != y_mean', method='exact',
num_rounds=1000, seed=None):
"""
Nonparametric permutation test
Parameters
-------------
x : list or numpy array with shape (n_datapoints,)
A list or 1D numpy array of the first sample
(e.g., the treatment group).
y : list or numpy array with shape (n_datapoints,)
A list or 1D numpy array of the second sample
(e.g., the control group).
func : custom function or str (default: 'x_mean != y_mean')
function to compute the statistic for the permutation test.
- If 'x_mean != y_mean', uses
          `func=lambda x, y: np.abs(np.mean(x) - np.mean(y))`
for a two-sided test.
- If 'x_mean > y_mean', uses
          `func=lambda x, y: np.mean(x) - np.mean(y)`
for a one-sided test.
- If 'x_mean < y_mean', uses
          `func=lambda x, y: np.mean(y) - np.mean(x)`
for a one-sided test.
method : 'approximate' or 'exact' (default: 'exact')
If 'exact' (default), all possible permutations are considered.
If 'approximate' the number of drawn samples is
given by `num_rounds`.
Note that 'exact' is typically not feasible unless the dataset
size is relatively small.
num_rounds : int (default: 1000)
The number of permutation samples if `method='approximate'`.
seed : int or None (default: None)
The random seed for generating permutation samples if
`method='approximate'`.
Returns
----------
p-value under the null hypothesis
Examples
-----------
For usage examples, please see
http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/
"""
if method not in ('approximate', 'exact'):
raise AttributeError('method must be "approximate"'
' or "exact", got %s' % method)
if isinstance(func, str):
if func not in (
'x_mean != y_mean', 'x_mean > y_mean', 'x_mean < y_mean'):
raise AttributeError('Provide a custom function'
' lambda x,y: ... or a string'
' in ("x_mean != y_mean", '
'"x_mean > y_mean", "x_mean < y_mean")')
elif func == 'x_mean != y_mean':
def func(x, y):
return np.abs(np.mean(x) - np.mean(y))
elif func == 'x_mean > y_mean':
def func(x, y):
return np.mean(x) - np.mean(y)
else:
def func(x, y):
return np.mean(y) - np.mean(x)
rng = np.random.RandomState(seed)
m, n = len(x), len(y)
combined = np.hstack((x, y))
more_extreme = 0.
reference_stat = func(x, y)
# Note that whether we compute the combinations or permutations
# does not affect the results, since the number of permutations
# n_A specific objects in A and n_B specific objects in B is the
# same for all combinations in x_1, ... x_{n_A} and
# x_{n_{A+1}}, ... x_{n_A + n_B}
# In other words, for any given number of combinations, we get
    # n_A! x n_B! times as many permutations; however, the computed
    # value of those permutations that are merely re-arranged combinations
    # does not change. Hence, since we divide by the number of combinations
    # or permutations respectively, the result is the same: the permutations
    # simply carry "n_A! x n_B!" as a scaling factor in both the numerator
    # and the denominator, and using combinations instead of permutations
    # simply saves computational time.
if method == 'exact':
for indices_x in combinations(range(m + n), m):
indices_y = [i for i in range(m + n) if i not in indices_x]
diff = func(combined[list(indices_x)], combined[indices_y])
if diff > reference_stat:
more_extreme += 1.
num_rounds = factorial(m + n) / (factorial(m)*factorial(n))
else:
for i in range(num_rounds):
rng.shuffle(combined)
if func(combined[:m], combined[m:]) > reference_stat:
more_extreme += 1.
return more_extreme / num_rounds
|
[
"numpy.mean",
"math.factorial",
"numpy.random.RandomState",
"numpy.hstack"
] |
[((3160, 3187), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3181, 3187), True, 'import numpy as np\n'), ((3230, 3247), 'numpy.hstack', 'np.hstack', (['(x, y)'], {}), '((x, y))\n', (3239, 3247), True, 'import numpy as np\n'), ((4453, 4469), 'math.factorial', 'factorial', (['(m + n)'], {}), '(m + n)\n', (4462, 4469), False, 'from math import factorial\n'), ((4473, 4485), 'math.factorial', 'factorial', (['m'], {}), '(m)\n', (4482, 4485), False, 'from math import factorial\n'), ((4486, 4498), 'math.factorial', 'factorial', (['n'], {}), '(n)\n', (4495, 4498), False, 'from math import factorial\n'), ((2918, 2928), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2925, 2928), True, 'import numpy as np\n'), ((2931, 2941), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (2938, 2941), True, 'import numpy as np\n'), ((3035, 3045), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3042, 3045), True, 'import numpy as np\n'), ((3048, 3058), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3055, 3058), True, 'import numpy as np\n'), ((3125, 3135), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3132, 3135), True, 'import numpy as np\n'), ((3138, 3148), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3145, 3148), True, 'import numpy as np\n')]
|
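A usage sketch for the permutation_test function above; it assumes the function from the snippet is in scope, and the sample values are illustrative, not from the source:

# Two small made-up samples (e.g., a treatment and a control group).
treatment = [28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21]
control = [33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 31.45]
# Two-sided approximate test: 10,000 random shuffles with a fixed seed.
p_value = permutation_test(treatment, control,
                          method='approximate', num_rounds=10000, seed=0)
print(p_value)  # a small p-value suggests the group means differ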
#!/usr/bin/env python
"""
Simple application that logs on to the APIC and displays all
EPGs.
"""
import socket
import yaml
import sys
from acitoolkit import Credentials, Session, Tenant, AppProfile, EPG, Endpoint
def main():
"""
Main show EPGs routine
:return: None
"""
# Login to APIC
description = ('Simple application that logs on to the APIC'
' and displays all of the EPGs.')
creds = Credentials('apic', description)
args = creds.get()
session = Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
return
# Download all of the tenants, app profiles, and EPGs
# and store the names as tuples in a list
tenants = Tenant.get_deep(session)
tenants_list = []
for tenant in tenants:
tenants_dict = {}
tenants_dict['name'] = tenant.name
if tenant.descr:
tenants_dict['description'] = tenant.descr
tenants_dict['app-profiles'] = []
for app in tenant.get_children(AppProfile):
app_profiles = {'name': app.name}
if app.descr:
app_profiles['description'] = app.descr
app_profiles['epgs'] = []
for epg in app.get_children(EPG):
epgs_info = {'name': epg.name}
if epg.descr:
epgs_info['description'] = epg.descr
epgs_info['endpoints'] = []
for endpoint in epg.get_children(Endpoint):
endpoint_info = {'name': endpoint.name}
if endpoint.ip != '0.0.0.0':
endpoint_info['ip'] = endpoint.ip
try:
hostname = socket.gethostbyaddr(endpoint.ip)[0]
except socket.error:
hostname = None
if hostname:
endpoint_info['hostname'] = hostname
if endpoint.descr:
endpoint_info['description'] = endpoint.descr
epgs_info['endpoints'].append(endpoint_info)
app_profiles['epgs'].append(epgs_info)
tenants_dict['app-profiles'].append(app_profiles)
tenants_list.append(tenants_dict)
tenants_info = {'tenants': tenants_list}
print(yaml.safe_dump(tenants_info, sys.stdout,
indent=4, default_flow_style=False))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
[
"acitoolkit.Session",
"acitoolkit.Tenant.get_deep",
"yaml.safe_dump",
"acitoolkit.Credentials",
"socket.gethostbyaddr"
] |
[((439, 471), 'acitoolkit.Credentials', 'Credentials', (['"""apic"""', 'description'], {}), "('apic', description)\n", (450, 471), False, 'from acitoolkit import Credentials, Session, Tenant, AppProfile, EPG, Endpoint\n'), ((509, 553), 'acitoolkit.Session', 'Session', (['args.url', 'args.login', 'args.password'], {}), '(args.url, args.login, args.password)\n', (516, 553), False, 'from acitoolkit import Credentials, Session, Tenant, AppProfile, EPG, Endpoint\n'), ((779, 803), 'acitoolkit.Tenant.get_deep', 'Tenant.get_deep', (['session'], {}), '(session)\n', (794, 803), False, 'from acitoolkit import Credentials, Session, Tenant, AppProfile, EPG, Endpoint\n'), ((2403, 2479), 'yaml.safe_dump', 'yaml.safe_dump', (['tenants_info', 'sys.stdout'], {'indent': '(4)', 'default_flow_style': '(False)'}), '(tenants_info, sys.stdout, indent=4, default_flow_style=False)\n', (2417, 2479), False, 'import yaml\n'), ((1785, 1818), 'socket.gethostbyaddr', 'socket.gethostbyaddr', (['endpoint.ip'], {}), '(endpoint.ip)\n', (1805, 1818), False, 'import socket\n')]
|
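For reference, the tenants_info structure serialized above has the nested shape below; the key names come from the code, while the concrete values are invented for illustration:

# Shape of tenants_info as built by main() (illustrative values only):
tenants_info = {
    'tenants': [{
        'name': 'ExampleTenant',
        'app-profiles': [{
            'name': 'web-app',
            'epgs': [{
                'name': 'frontend',
                'endpoints': [{'name': '00:50:56:A1:B2:C3', 'ip': '10.0.0.12'}],
            }],
        }],
    }],
}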
# OLD USAGE
# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
# import the necessary packages
from imutils.face_utils import FaceAligner
from PIL import Image
import numpy as np
# import argparse
import imutils
import dlib
import cv2
# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("--shape-predictor", help="path to facial landmark predictor", default='shape_predictor_68_face_landmarks.dat')
# ap.add_argument("--input", help="path to input images", default='input_raw')
# ap.add_argument("--output", help="path to input images", default='input_aligned')
# args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(predictor, desiredFaceWidth=256,
desiredLeftEye=(0.371, 0.480))
# Input: numpy array for image with RGB channels
# Output: (numpy array, face_found)
def align_face(img):
img = img[:, :, ::-1] # Convert from RGB to BGR format
img = imutils.resize(img, width=800)
# detect faces in the grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 2)
if len(rects) > 0:
# align the face using facial landmarks
align_img = fa.align(img, gray, rects[0])[:, :, ::-1]
align_img = np.array(Image.fromarray(align_img).convert('RGB'))
return align_img, True
else:
# No face found
return None, False
# Input: img_path
# Output: aligned_img if face_found, else None
def align(img_path):
img = Image.open(img_path)
img = img.convert('RGB') # if image is RGBA or Grayscale etc
img = np.array(img)
x, face_found = align_face(img)
return x
|
[
"cv2.cvtColor",
"PIL.Image.open",
"PIL.Image.fromarray",
"numpy.array",
"dlib.get_frontal_face_detector",
"imutils.resize",
"dlib.shape_predictor",
"imutils.face_utils.FaceAligner"
] |
[((836, 868), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (866, 868), False, 'import dlib\n'), ((881, 942), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (901, 942), False, 'import dlib\n'), ((948, 1022), 'imutils.face_utils.FaceAligner', 'FaceAligner', (['predictor'], {'desiredFaceWidth': '(256)', 'desiredLeftEye': '(0.371, 0.48)'}), '(predictor, desiredFaceWidth=256, desiredLeftEye=(0.371, 0.48))\n', (959, 1022), False, 'from imutils.face_utils import FaceAligner\n'), ((1219, 1249), 'imutils.resize', 'imutils.resize', (['img'], {'width': '(800)'}), '(img, width=800)\n', (1233, 1249), False, 'import imutils\n'), ((1304, 1341), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1316, 1341), False, 'import cv2\n'), ((1767, 1787), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1777, 1787), False, 'from PIL import Image\n'), ((1864, 1877), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1872, 1877), True, 'import numpy as np\n'), ((1535, 1561), 'PIL.Image.fromarray', 'Image.fromarray', (['align_img'], {}), '(align_img)\n', (1550, 1561), False, 'from PIL import Image\n')]
|
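A usage sketch for the helpers above, assuming align() from the snippet is importable, the dlib landmark file is present, and 'face.jpg' is a hypothetical input path:

from PIL import Image

aligned = align('face.jpg')  # 256x256 RGB numpy array, or None if no face found
if aligned is not None:
    Image.fromarray(aligned).save('face_aligned.jpg')
else:
    print('No face detected')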
from tkinter import *
from tkinter.font import Font
from tkinter.ttk import *
from tkinter.messagebox import *
#import tkinter.filedialog as tkFileDialog
#import tkinter.simpledialog as tkSimpleDialog #askstring()
from node.ui.access import access
from node.ui.trade import trade
from node.ui.post import post
from node.ui.charge import charge
from node.ui.watch import watch
from node.ui.show import show
class Ui(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master.title('Self-coin')
self.master.geometry('1200x1000')
self.createWidgets()
def createWidgets(self):
self.top = self.winfo_toplevel()
# self.style = Style()
## info
self.info_notebook = Notebook(self.top)
self.info_notebook.place(relx=0.0, rely=0.0, relwidth=0.5, relheight=1.0)
# self.info_notebook.grid(row = 0, column = 0, sticky=N+S+E+W)
show(self)
## operate
self.operate_notebook = Notebook(self.top)
self.operate_notebook.place(relx=0.5, rely=0.0, relwidth=0.5, relheight=1.0)
# self.operate_notebook.grid(row = 0, column = 1, sticky=N+S+E+W)
# tab 0 (access)
self.tab_access = Frame(self.operate_notebook)
access(self)
self.operate_notebook.add(self.tab_access, text='Access')
# tab 1 (post)
self.tab_post = Frame(self.operate_notebook)
post(self)
self.operate_notebook.add(self.tab_post, text='Post')
# tab 2 (charge)
self.tab_charge = Frame(self.operate_notebook)
charge(self)
self.operate_notebook.add(self.tab_charge, text='Charge')
# tab 3 (trade)
self.tab_trade = Frame(self.operate_notebook)
trade(self)
self.operate_notebook.add(self.tab_trade, text='Trade')
# tab 4 (watch)
self.tab_watch = Frame(self.operate_notebook)
watch(self)
self.operate_notebook.add(self.tab_watch, text='Watch')
|
[
"node.ui.watch.watch",
"node.ui.trade.trade",
"node.ui.post.post",
"node.ui.access.access",
"node.ui.charge.charge",
"node.ui.show.show"
] |
[((971, 981), 'node.ui.show.show', 'show', (['self'], {}), '(self)\n', (975, 981), False, 'from node.ui.show import show\n'), ((1312, 1324), 'node.ui.access.access', 'access', (['self'], {}), '(self)\n', (1318, 1324), False, 'from node.ui.access import access\n'), ((1482, 1492), 'node.ui.post.post', 'post', (['self'], {}), '(self)\n', (1486, 1492), False, 'from node.ui.post import post\n'), ((1650, 1662), 'node.ui.charge.charge', 'charge', (['self'], {}), '(self)\n', (1656, 1662), False, 'from node.ui.charge import charge\n'), ((1837, 1848), 'node.ui.trade.trade', 'trade', (['self'], {}), '(self)\n', (1842, 1848), False, 'from node.ui.trade import trade\n'), ((2005, 2016), 'node.ui.watch.watch', 'watch', (['self'], {}), '(self)\n', (2010, 2016), False, 'from node.ui.watch import watch\n')]
|
#! /usr/bin/env python
# encoding: utf-8
"""
This script is intended to be used to automate the process of
clearing configurations and reloading, via a console connection,
for devices that support Cisco's IOx platform.
The outcome is a device that has no startup configuration, and which has
been booted from rommon-2 for a given image. This is the software equivalent
of using the reset button.
It assumes that there are one or more devices connected, typically via a mini-USB
cable, to the machine upon which this script is running. For example, a MacBookPro
with three such devices connected with a USB hub.
The script uses pyserial to connect to the serial ports and carry out a set of
interactions. The style is very reminiscent of "expect". That also means, though,
that things don't always work as expected because the CLI wasn't really meant for
automation like this, hence the copious logging.
Note that these drivers will likely be required for pyserial:
https://www.silabs.com/products/mcu/Pages/USBtoUARTBridgeVCPDrivers.aspx#mac
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import serial
import time
import sys
import logging
from logging.config import fileConfig
import os
from pyserial_util.cli_utils import *
usb_port_base = "cu.SLAB_USBtoUART"
enable_password = "<PASSWORD>"
def main(argv=None):
boot_image = "ir800-universalk9_npe-mz.SPA.156-2.T"
device_serial_ports = get_console_ports(usb_port_base)
logger.info("About to start on these serial ports and devices:\n")
for dev_ser_port in device_serial_ports:
logger.info("Port = " + str(dev_ser_port.serial_port) + " device type = " +
str(dev_ser_port.device_type) + "\n")
summary = []
for dev_ser_port in device_serial_ports:
logger.info("Working with a " + dev_ser_port.device_type + " at " + dev_ser_port.serial_port.port
+ " to clear startup configuration and reload.")
if enable(dev_ser_port.serial_port, enable_password) == 0:
dev_ser_port.serial_port.write("clear start\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if "[confirm]" in response:
dev_ser_port.serial_port.write("\r")
time.sleep(1)
dev_ser_port.serial_port.write("\r")
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if response.endswith("#"):
dev_ser_port.serial_port.write("reload\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if "Do you want to reload the internal AP ? [yes/no]:" in response:
dev_ser_port.serial_port.write("yes\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if "Do you want to save the configuration of the AP? [yes/no]" in response:
dev_ser_port.serial_port.write("no\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if "System configuration has been modified. Save? [yes/no]" in response:
dev_ser_port.serial_port.write("no\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if "Proceed with reload? [confirm]" in response:
dev_ser_port.serial_port.write("\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
summary.append("Cleared and reloaded a " + dev_ser_port.device_type + " at "
+ dev_ser_port.serial_port.port + ".\n")
time.sleep(60)
for dev_ser_port in device_serial_ports:
logger.info("Working with a " + dev_ser_port.device_type + " at " + dev_ser_port.serial_port.port
+ " to boot from rommon-2.")
while True:
dev_ser_port.serial_port.write("\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
if "rommon-2>" in response:
dev_ser_port.serial_port.write("boot flash:/" + boot_image + "\r")
time.sleep(1)
response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
logger.debug(response)
break
summary.append("Booted from rommon-2 a " + dev_ser_port.device_type + " at "
+ dev_ser_port.serial_port.port + ".\n")
logger.info("The summary is:\n")
for result in summary:
logger.info(str(result) + "\n")
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"time.sleep"
] |
[((5029, 5043), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (5039, 5043), False, 'import time\n'), ((2695, 2708), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2705, 2708), False, 'import time\n'), ((5360, 5373), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5370, 5373), False, 'import time\n'), ((2969, 2982), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2979, 2982), False, 'import time\n'), ((3334, 3347), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3344, 3347), False, 'import time\n'), ((3671, 3684), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3681, 3684), False, 'import time\n'), ((4020, 4033), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4030, 4033), False, 'import time\n'), ((4366, 4379), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4376, 4379), False, 'import time\n'), ((4682, 4695), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4692, 4695), False, 'import time\n'), ((5664, 5677), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5674, 5677), False, 'import time\n')]
|
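The script above repeats one expect-style primitive: write a command, pause, then drain and inspect whatever the console echoed. A generic helper capturing that pattern might look like this (an illustrative refactor; strip_cr_nl and the pyserial_util helpers are not reproduced here):

import time

def send_and_drain(port, command, delay=1):
    """Write a command to a serial port, wait, then return the echoed bytes."""
    port.write(command)
    time.sleep(delay)
    # inWaiting() reports how many bytes are buffered; read them all.
    return port.read(port.inWaiting())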
from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
class TLClassifier(object):
def __init__(self):
pass
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
img_blur = cv2.medianBlur(image,3)
img_hsv = cv2.cvtColor(img_blur,cv2.COLOR_BGR2HSV)
        red_lower_range = cv2.inRange(img_hsv, np.array([0, 100, 100], np.uint8), np.array([10, 255, 255], np.uint8))
        red_upper_range = cv2.inRange(img_hsv, np.array([160, 100, 100], np.uint8), np.array([179, 255, 255], np.uint8))
        yellow_range = cv2.inRange(img_hsv, np.array([28, 120, 120], np.uint8), np.array([47, 255, 255], np.uint8))
if cv2.countNonZero(red_lower_range) + cv2.countNonZero(red_upper_range) > 48 or cv2.countNonZero(yellow_range) > 48:
return TrafficLight.RED
else:
return TrafficLight.GREEN
# return TrafficLight.UNKNOWN
|
[
"cv2.cvtColor",
"cv2.countNonZero",
"numpy.array",
"cv2.medianBlur"
] |
[((453, 477), 'cv2.medianBlur', 'cv2.medianBlur', (['image', '(3)'], {}), '(image, 3)\n', (467, 477), False, 'import cv2\n'), ((495, 536), 'cv2.cvtColor', 'cv2.cvtColor', (['img_blur', 'cv2.COLOR_BGR2HSV'], {}), '(img_blur, cv2.COLOR_BGR2HSV)\n', (507, 536), False, 'import cv2\n'), ((586, 619), 'numpy.array', 'np.array', (['[0, 100, 100]', 'np.uint8'], {}), '([0, 100, 100], np.uint8)\n', (594, 619), True, 'import numpy as np\n'), ((620, 654), 'numpy.array', 'np.array', (['[10, 255, 255]', 'np.uint8'], {}), '([10, 255, 255], np.uint8)\n', (628, 654), True, 'import numpy as np\n'), ((704, 739), 'numpy.array', 'np.array', (['[160, 100, 100]', 'np.uint8'], {}), '([160, 100, 100], np.uint8)\n', (712, 739), True, 'import numpy as np\n'), ((740, 775), 'numpy.array', 'np.array', (['[179, 255, 255]', 'np.uint8'], {}), '([179, 255, 255], np.uint8)\n', (748, 775), True, 'import numpy as np\n'), ((822, 856), 'numpy.array', 'np.array', (['[28, 120, 120]', 'np.uint8'], {}), '([28, 120, 120], np.uint8)\n', (830, 856), True, 'import numpy as np\n'), ((857, 891), 'numpy.array', 'np.array', (['[47, 255, 255]', 'np.uint8'], {}), '([47, 255, 255], np.uint8)\n', (865, 891), True, 'import numpy as np\n'), ((982, 1012), 'cv2.countNonZero', 'cv2.countNonZero', (['yellow_range'], {}), '(yellow_range)\n', (998, 1012), False, 'import cv2\n'), ((904, 937), 'cv2.countNonZero', 'cv2.countNonZero', (['red_lower_range'], {}), '(red_lower_range)\n', (920, 937), False, 'import cv2\n'), ((940, 973), 'cv2.countNonZero', 'cv2.countNonZero', (['red_upper_range'], {}), '(red_upper_range)\n', (956, 973), False, 'import cv2\n')]
|
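A quick sanity check for the red HSV thresholds above (illustrative; standalone of the class): a solid red BGR image should light up every pixel of the lower-red mask.

import cv2
import numpy as np

img = np.zeros((64, 64, 3), np.uint8)
img[:, :] = (0, 0, 255)  # pure red in BGR
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([0, 100, 100], np.uint8),
                 np.array([10, 255, 255], np.uint8))
print(cv2.countNonZero(mask))  # 4096 == 64 * 64: every pixel matches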
import pytest
from botx import (
ChatCreatedEvent,
InternalBotNotificationEvent,
InternalBotNotificationPayload,
Message,
MessageBuilder,
UserKinds,
)
from botx.models.events import UserInChatCreated
@pytest.fixture()
def incoming_message(host, bot_id):
builder = MessageBuilder()
builder.bot_id = bot_id
builder.user.host = host
return builder.message
@pytest.fixture()
def message(incoming_message, bot):
return Message.from_dict(incoming_message.dict(), bot)
@pytest.fixture()
def chat_created_message(host, bot_id):
builder = MessageBuilder()
builder.bot_id = bot_id
builder.command_data = ChatCreatedEvent(
group_chat_id=builder.user.group_chat_id,
chat_type=builder.user.chat_type,
name="chat",
creator=builder.user.user_huid,
members=[
UserInChatCreated(
huid=builder.user.user_huid,
user_kind=UserKinds.user,
name=builder.user.username,
admin=True,
),
UserInChatCreated(
huid=builder.bot_id,
user_kind=UserKinds.bot,
name="bot",
admin=False,
),
],
)
builder.user.user_huid = None
builder.user.ad_login = None
builder.user.ad_domain = None
builder.user.username = None
builder.body = "system:chat_created"
builder.system_command = True
return builder.message
@pytest.fixture()
def internal_bot_notification_message(host, bot_id, bot):
builder = MessageBuilder()
builder.bot_id = bot_id
builder.command_data = InternalBotNotificationEvent(
data=InternalBotNotificationPayload(message="ping"), # noqa: WPS110
opts={},
)
builder.body = "system:internal_bot_notification"
builder.system_command = True
return Message.from_dict(builder.message.dict(), bot)
|
[
"botx.models.events.UserInChatCreated",
"botx.MessageBuilder",
"pytest.fixture",
"botx.InternalBotNotificationPayload"
] |
[((228, 244), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (242, 244), False, 'import pytest\n'), ((399, 415), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (413, 415), False, 'import pytest\n'), ((514, 530), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (528, 530), False, 'import pytest\n'), ((1490, 1506), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1504, 1506), False, 'import pytest\n'), ((295, 311), 'botx.MessageBuilder', 'MessageBuilder', ([], {}), '()\n', (309, 311), False, 'from botx import ChatCreatedEvent, InternalBotNotificationEvent, InternalBotNotificationPayload, Message, MessageBuilder, UserKinds\n'), ((585, 601), 'botx.MessageBuilder', 'MessageBuilder', ([], {}), '()\n', (599, 601), False, 'from botx import ChatCreatedEvent, InternalBotNotificationEvent, InternalBotNotificationPayload, Message, MessageBuilder, UserKinds\n'), ((1579, 1595), 'botx.MessageBuilder', 'MessageBuilder', ([], {}), '()\n', (1593, 1595), False, 'from botx import ChatCreatedEvent, InternalBotNotificationEvent, InternalBotNotificationPayload, Message, MessageBuilder, UserKinds\n'), ((1694, 1740), 'botx.InternalBotNotificationPayload', 'InternalBotNotificationPayload', ([], {'message': '"""ping"""'}), "(message='ping')\n", (1724, 1740), False, 'from botx import ChatCreatedEvent, InternalBotNotificationEvent, InternalBotNotificationPayload, Message, MessageBuilder, UserKinds\n'), ((858, 974), 'botx.models.events.UserInChatCreated', 'UserInChatCreated', ([], {'huid': 'builder.user.user_huid', 'user_kind': 'UserKinds.user', 'name': 'builder.user.username', 'admin': '(True)'}), '(huid=builder.user.user_huid, user_kind=UserKinds.user,\n name=builder.user.username, admin=True)\n', (875, 974), False, 'from botx.models.events import UserInChatCreated\n'), ((1063, 1155), 'botx.models.events.UserInChatCreated', 'UserInChatCreated', ([], {'huid': 'builder.bot_id', 'user_kind': 'UserKinds.bot', 'name': '"""bot"""', 'admin': '(False)'}), "(huid=builder.bot_id, user_kind=UserKinds.bot, name='bot',\n admin=False)\n", (1080, 1155), False, 'from botx.models.events import UserInChatCreated\n')]
|
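A hypothetical test built on the fixtures above; the `.body` attribute access is an assumption about the botx Message model, so treat this as a sketch rather than the library's documented contract:

def test_internal_bot_notification_body(internal_bot_notification_message):
    # Assumes botx's Message exposes the command body as `.body`.
    assert internal_bot_notification_message.body == "system:internal_bot_notification"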
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
from kafka import KafkaClient
from kafka import SimpleConsumer
from data_pipeline.config import get_config
from data_pipeline.message import create_from_offset_and_message
_ONE_MEGABYTE = 1024 * 1024
logger = get_config().logger
@contextmanager
def capture_new_data_pipeline_messages(topic):
"""contextmanager that moves to the tail of the given topic, and waits to
receive new messages, returning a function that can be called zero or more
times which will retrieve decoded data pipeline messages from the topic.
Returns:
Callable[[int], List[Message]]: Function that takes a single
optional argument, count, and returns up to count decoded data pipeline
messages. This function does not block, and will return however many
messages are available immediately. Default count is 100.
"""
with capture_new_messages(topic) as get_kafka_messages:
def get_data_pipeline_messages(count=100):
kafka_messages = get_kafka_messages(count)
return [
create_from_offset_and_message(kafka_message)
for kafka_message in kafka_messages
]
yield get_data_pipeline_messages
@contextmanager
def capture_new_messages(topic):
"""Seeks to the tail of the topic then returns a function that can
consume messages from that point.
"""
with setup_capture_new_messages_consumer(topic) as consumer:
def get_messages(count=100):
return consumer.get_messages(count=count)
yield get_messages
@contextmanager
def setup_capture_new_messages_consumer(topic):
"""Seeks to the tail of the topic then returns a function that can
consume messages from that point.
"""
kafka = KafkaClient(get_config().cluster_config.broker_list)
group = str('data_pipeline_clientlib_test')
consumer = SimpleConsumer(kafka, group, topic, max_buffer_size=_ONE_MEGABYTE)
consumer.seek(0, 2) # seek to tail, 0 is the offset, and 2 is the tail
yield consumer
kafka.close()
|
[
"kafka.SimpleConsumer",
"data_pipeline.message.create_from_offset_and_message",
"data_pipeline.config.get_config"
] |
[((929, 941), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (939, 941), False, 'from data_pipeline.config import get_config\n'), ((2596, 2662), 'kafka.SimpleConsumer', 'SimpleConsumer', (['kafka', 'group', 'topic'], {'max_buffer_size': '_ONE_MEGABYTE'}), '(kafka, group, topic, max_buffer_size=_ONE_MEGABYTE)\n', (2610, 2662), False, 'from kafka import SimpleConsumer\n'), ((1779, 1824), 'data_pipeline.message.create_from_offset_and_message', 'create_from_offset_and_message', (['kafka_message'], {}), '(kafka_message)\n', (1809, 1824), False, 'from data_pipeline.message import create_from_offset_and_message\n'), ((2492, 2504), 'data_pipeline.config.get_config', 'get_config', ([], {}), '()\n', (2502, 2504), False, 'from data_pipeline.config import get_config\n')]
|
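A usage sketch for the capture helpers above; the topic name and the producer call are hypothetical:

with capture_new_data_pipeline_messages('example.topic') as get_messages:
    publish_test_messages()            # hypothetical producer call
    messages = get_messages(count=10)  # non-blocking: returns what is available
    for message in messages:
        print(message)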
import jpype
jpype.addClassPath('./lib/NeqSim.jar')
if not jpype.isJVMStarted():
    jpype.startJVM(convertStrings=True)
neqsim = jpype.JPackage('neqsim')
|
[
"jpype.startJVM",
"jpype.isJVMStarted",
"jpype.addClassPath",
"jpype.JPackage"
] |
[((13, 51), 'jpype.addClassPath', 'jpype.addClassPath', (['"""./lib/NeqSim.jar"""'], {}), "('./lib/NeqSim.jar')\n", (31, 51), False, 'import jpype\n'), ((133, 157), 'jpype.JPackage', 'jpype.JPackage', (['"""neqsim"""'], {}), "('neqsim')\n", (147, 157), False, 'import jpype\n'), ((59, 79), 'jpype.isJVMStarted', 'jpype.isJVMStarted', ([], {}), '()\n', (77, 79), False, 'import jpype\n'), ((86, 121), 'jpype.startJVM', 'jpype.startJVM', ([], {'convertStrings': '(True)'}), '(convertStrings=True)\n', (100, 121), False, 'import jpype\n')]
|
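Once the JVM is running, the JPackage proxy above resolves Java classes lazily on attribute access. For example (the class path below is an assumption about the NeqSim jar, not taken from the source):

# Hypothetical: instantiate a thermodynamic system from the NeqSim package.
fluid = neqsim.thermo.system.SystemSrkEos(298.15, 10.0)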
# Generated by Django 4.0.1 on 2022-05-09 17:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=200)),
('is_active', models.BooleanField()),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='enfants', to='sales.category')),
],
),
migrations.CreateModel(
name='DeliveryZoneInfo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('zone', models.CharField(max_length=100)),
('delivery_charges', models.DecimalField(decimal_places=2, max_digits=15)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=200)),
('description', models.TextField()),
('qte_stock', models.IntegerField(default=100)),
('principal_image', models.ImageField(null=True, upload_to='products')),
('is_variant', models.BooleanField(default=False)),
('variant_value', models.CharField(blank=True, max_length=200, null=True)),
('price', models.CharField(max_length=50)),
('promo_price', models.CharField(blank=True, max_length=50)),
('is_active', models.BooleanField(default=True)),
('pub_date', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.category')),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='sales.product')),
],
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='VariantType',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='products_video')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='videos', to='sales.product')),
],
),
migrations.AddField(
model_name='product',
name='variant_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sales.varianttype'),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_date', models.DateTimeField(auto_now=True)),
('total', models.DecimalField(decimal_places=2, max_digits=15)),
('country', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('first_name', models.CharField(max_length=100)),
('address', models.CharField(max_length=255)),
('phone_number', models.CharField(max_length=100)),
('town', models.CharField(max_length=100)),
('delivery_charges', models.DecimalField(decimal_places=2, max_digits=15)),
('postal_code', models.CharField(max_length=20, null=True)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('payment_date', models.DateTimeField(null=True)),
('is_paid', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to='products')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='sales.product')),
],
),
migrations.CreateModel(
name='DeliveryAddress',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=100)),
('address', models.CharField(max_length=255)),
('phone_number', models.CharField(max_length=100)),
('town', models.CharField(max_length=100)),
('postal_code', models.CharField(max_length=20)),
('additional_informations', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.DecimalField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((3512, 3613), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""sales.varianttype"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='sales.varianttype')\n", (3529, 3613), False, 'from django.db import migrations, models\n'), ((437, 533), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (456, 533), False, 'from django.db import migrations, models\n'), ((558, 590), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (574, 590), False, 'from django.db import migrations, models\n'), ((623, 644), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (642, 644), False, 'from django.db import migrations, models\n'), ((674, 796), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""enfants"""', 'to': '"""sales.category"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='enfants', to='sales.category')\n", (691, 796), False, 'from django.db import migrations, models\n'), ((934, 1030), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (953, 1030), False, 'from django.db import migrations, models\n'), ((1054, 1086), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1070, 1086), False, 'from django.db import migrations, models\n'), ((1126, 1178), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(15)'}), '(decimal_places=2, max_digits=15)\n', (1145, 1178), False, 'from django.db import migrations, models\n'), ((1311, 1407), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1330, 1407), False, 'from django.db import migrations, models\n'), ((1432, 1464), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1448, 1464), False, 'from django.db import migrations, models\n'), ((1499, 1517), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1515, 1517), False, 'from django.db import migrations, models\n'), ((1550, 1582), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(100)'}), '(default=100)\n', (1569, 1582), False, 'from django.db import migrations, models\n'), ((1621, 1671), 'django.db.models.ImageField', 'models.ImageField', ([], {'null': '(True)', 'upload_to': '"""products"""'}), "(null=True, upload_to='products')\n", (1638, 1671), False, 'from django.db import migrations, models\n'), ((1705, 1739), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1724, 1739), False, 'from django.db import migrations, models\n'), ((1776, 1831), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (1792, 1831), False, 'from django.db import migrations, models\n'), ((1860, 1891), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1876, 1891), False, 'from django.db import migrations, models\n'), ((1926, 1969), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)'}), '(blank=True, max_length=50)\n', (1942, 1969), False, 'from django.db import migrations, models\n'), ((2002, 2035), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2021, 2035), False, 'from django.db import migrations, models\n'), ((2067, 2102), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2087, 2102), False, 'from django.db import migrations, models\n'), ((2134, 2222), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""sales.category"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'sales.category')\n", (2151, 2222), False, 'from django.db import migrations, models\n'), ((2247, 2369), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""variants"""', 'to': '"""sales.product"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='variants', to='sales.product')\n", (2264, 2369), False, 'from django.db import migrations, models\n'), ((2495, 2591), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2514, 2591), False, 'from django.db import migrations, models\n'), ((2616, 2648), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2632, 2648), False, 'from django.db import migrations, models\n'), ((2785, 2881), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2804, 2881), False, 'from django.db import migrations, models\n'), ((2906, 2938), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2922, 2938), False, 'from django.db import migrations, models\n'), ((3069, 3165), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3088, 3165), False, 'from django.db import migrations, models\n'), ((3189, 3233), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""products_video"""'}), "(upload_to='products_video')\n", (3205, 3233), False, 'from django.db import migrations, models\n'), ((3264, 3374), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""videos"""', 'to': '"""sales.product"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='videos', to='sales.product')\n", (3281, 3374), False, 'from django.db import migrations, models\n'), ((3724, 3820), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3743, 3820), False, 'from django.db import migrations, models\n'), ((3850, 3885), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3870, 3885), False, 'from django.db import migrations, models\n'), ((3914, 3966), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(15)'}), '(decimal_places=2, max_digits=15)\n', (3933, 3966), False, 'from django.db import migrations, models\n'), ((3997, 4029), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4013, 4029), False, 'from django.db import migrations, models\n'), ((4062, 4094), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4078, 4094), False, 'from django.db import migrations, models\n'), ((4128, 4160), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4144, 4160), False, 'from django.db import migrations, models\n'), ((4191, 4223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (4207, 4223), False, 'from django.db import migrations, models\n'), ((4259, 4291), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4275, 4291), False, 'from django.db import migrations, models\n'), ((4319, 4351), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4335, 4351), False, 'from django.db import migrations, models\n'), ((4391, 4443), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(15)'}), '(decimal_places=2, max_digits=15)\n', (4410, 4443), False, 'from django.db import migrations, models\n'), ((4478, 4520), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)'}), '(max_length=20, null=True)\n', (4494, 4520), False, 'from django.db import migrations, models\n'), ((4557, 4596), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4577, 4596), False, 'from django.db import migrations, models\n'), ((4632, 4663), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (4652, 4663), False, 'from django.db import migrations, models\n'), ((4694, 4728), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4713, 4728), False, 'from django.db import migrations, models\n'), ((4756, 4852), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (4773, 4852), False, 'from django.db import migrations, models\n'), ((4978, 5074), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4997, 5074), False, 'from django.db import migrations, models\n'), ((5099, 5138), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""products"""'}), "(upload_to='products')\n", (5116, 5138), False, 'from django.db import migrations, models\n'), ((5169, 5279), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""images"""', 'to': '"""sales.product"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='images', to='sales.product')\n", (5186, 5279), False, 'from django.db import migrations, models\n'), ((5415, 5511), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (5434, 5511), False, 'from django.db import migrations, models\n'), ((5538, 5570), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (5554, 5570), False, 'from django.db import migrations, models\n'), ((5601, 5633), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5617, 5633), False, 'from django.db import migrations, models\n'), ((5669, 5701), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (5685, 5701), False, 'from django.db import migrations, models\n'), ((5729, 5761), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (5745, 5761), False, 'from django.db import migrations, models\n'), ((5796, 5827), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (5812, 5827), False, 'from django.db import migrations, models\n'), ((5874, 5892), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (5890, 5892), False, 'from django.db import migrations, models\n'), ((5920, 6016), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (5937, 6016), False, 'from django.db import migrations, models\n')]
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from yahoo_finance import Share
class Customer(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=200)
cust_number = models.IntegerField(blank=False, null=False)
city = models.CharField(max_length=50)
state = models.CharField(max_length=50)
zipcode = models.CharField(max_length=10)
email = models.EmailField(max_length=200)
cell_phone = models.CharField(max_length=50)
created_date = models.DateTimeField(
default=timezone.now)
updated_date = models.DateTimeField(auto_now_add=True)
def created(self):
self.created_date = timezone.now()
self.save()
def updated(self):
self.updated_date = timezone.now()
self.save()
def __str__(self):
return str(self.cust_number)
class Investment(models.Model):
customer = models.ForeignKey(Customer, related_name='investments')
category = models.CharField(max_length=50)
description = models.CharField(max_length=200)
acquired_value = models.DecimalField(max_digits=10, decimal_places=2)
acquired_date = models.DateField(default=timezone.now)
recent_value = models.DecimalField(max_digits=10, decimal_places=2)
recent_date = models.DateField(default=timezone.now, blank=True, null=True)
def created(self):
self.acquired_date = timezone.now()
self.save()
def updated(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def results_by_investment(self):
return self.recent_value - self.acquired_value
class Stock(models.Model):
customer = models.ForeignKey(Customer, related_name='stocks')
symbol = models.CharField(max_length=10)
name = models.CharField(max_length=50)
shares = models.DecimalField(max_digits=10, decimal_places=2)
    share_value = models.DecimalField(max_digits=10, decimal_places=2)
purchase_price = models.DecimalField(max_digits=10, decimal_places=2)
purchase_date = models.DateField(default=timezone.now)
def created(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def initial_stock_value(self):
return self.shares * self.purchase_price
def current_stock_price(self):
symbol_f = self.symbol
data = Share(symbol_f)
share_value = (data.get_open())
return share_value
def current_stock_value(self):
symbol_f = self.symbol
data = Share(symbol_f)
share_value = (data.get_open())
if share_value is None:
return float(self.shares)
else:
return '{0:.2f}'.format(float(share_value) * float(self.shares))
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"django.db.models.EmailField",
"django.db.models.IntegerField",
"django.db.models.DecimalField",
"yahoo_finance.Share",
"django.db.models.DateField",
"django.db.models.DateTimeField"
] |
[((181, 212), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (197, 212), False, 'from django.db import models\n'), ((227, 259), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (243, 259), False, 'from django.db import models\n'), ((278, 322), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(False)', 'null': '(False)'}), '(blank=False, null=False)\n', (297, 322), False, 'from django.db import models\n'), ((334, 365), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (350, 365), False, 'from django.db import models\n'), ((378, 409), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (394, 409), False, 'from django.db import models\n'), ((424, 455), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (440, 455), False, 'from django.db import models\n'), ((468, 501), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (485, 501), False, 'from django.db import models\n'), ((519, 550), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (535, 550), False, 'from django.db import models\n'), ((570, 612), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (590, 612), False, 'from django.db import models\n'), ((641, 680), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (661, 680), False, 'from django.db import models\n'), ((966, 1021), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'related_name': '"""investments"""'}), "(Customer, related_name='investments')\n", (983, 1021), False, 'from django.db import models\n'), ((1037, 1068), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1053, 1068), False, 'from django.db import models\n'), ((1087, 1119), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1103, 1119), False, 'from django.db import models\n'), ((1141, 1193), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)'}), '(max_digits=10, decimal_places=2)\n', (1160, 1193), False, 'from django.db import models\n'), ((1214, 1252), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (1230, 1252), False, 'from django.db import models\n'), ((1272, 1324), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)'}), '(max_digits=10, decimal_places=2)\n', (1291, 1324), False, 'from django.db import models\n'), ((1343, 1404), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'timezone.now', 'blank': '(True)', 'null': '(True)'}), '(default=timezone.now, blank=True, null=True)\n', (1359, 1404), False, 'from django.db import models\n'), ((1773, 1823), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'related_name': '"""stocks"""'}), "(Customer, related_name='stocks')\n", (1790, 1823), False, 'from django.db import models\n'), ((1837, 1868), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1853, 1868), False, 'from django.db import models\n'), ((1880, 1911), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1896, 1911), False, 'from django.db import models\n'), ((1925, 1977), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)'}), '(max_digits=10, decimal_places=2)\n', (1944, 1977), False, 'from django.db import models\n'), ((1996, 2048), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)'}), '(max_digits=10, decimal_places=2)\n', (2015, 2048), False, 'from django.db import models\n'), ((2071, 2123), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)'}), '(max_digits=10, decimal_places=2)\n', (2090, 2123), False, 'from django.db import models\n'), ((2144, 2182), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (2160, 2182), False, 'from django.db import models\n'), ((734, 748), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (746, 748), False, 'from django.utils import timezone\n'), ((821, 835), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (833, 835), False, 'from django.utils import timezone\n'), ((1458, 1472), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1470, 1472), False, 'from django.utils import timezone\n'), ((1544, 1558), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1556, 1558), False, 'from django.utils import timezone\n'), ((2234, 2248), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2246, 2248), False, 'from django.utils import timezone\n'), ((2494, 2509), 'yahoo_finance.Share', 'Share', (['symbol_f'], {}), '(symbol_f)\n', (2499, 2509), False, 'from yahoo_finance import Share\n'), ((2659, 2674), 'yahoo_finance.Share', 'Share', (['symbol_f'], {}), '(symbol_f)\n', (2664, 2674), False, 'from yahoo_finance import Share\n')]
|
#-------------------------------------------------------------------------------
# Author: <NAME> <<EMAIL>>
# Date: 22.01.2018
#
# Licensed under the 3-Clause BSD License, see the LICENSE file for details.
#-------------------------------------------------------------------------------
import requests
import urllib3
from scrapy_do.client import ClientException
from requests.auth import HTTPDigestAuth
#-------------------------------------------------------------------------------
def request(method, url, payload=None, auth=None, ssl_verify=True):
"""
    Send a request to the server and retrieve the response.
:param method: request method ('POST' or 'GET')
:param url: url to be queried
:param payload: parameters of the request
:param auth: tuple containing the authorization
information
:param ssl_verify: SSL verification flag
    :raises scrapy_do.client.ClientException: raised when the request fails or the server returns an error
:return: parsed JSON response of the server
or raw data
"""
    payload = payload if payload is not None else {}
    assert method in ('POST', 'GET')
if not ssl_verify:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if auth is not None:
auth = HTTPDigestAuth(*auth)
try:
if method == 'POST':
r = requests.post(url, data=payload, auth=auth, verify=ssl_verify)
else:
r = requests.get(url, params=payload, auth=auth, verify=ssl_verify)
except Exception as e:
raise ClientException(str(e))
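    # Decode the body according to the response's declared content type.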
if r.headers['Content-Type'] == 'application/json':
data = r.json()
else:
data = r.text
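    # A non-200 status carries an error message; surface it as a ClientException.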
if r.status_code != 200:
if r.headers['Content-Type'] == 'application/json':
raise ClientException(data['msg'])
else:
raise ClientException(data)
return data
|
[
"requests.auth.HTTPDigestAuth",
"requests.get",
"requests.post",
"scrapy_do.client.ClientException",
"urllib3.disable_warnings"
] |
[((1329, 1396), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (1353, 1396), False, 'import urllib3\n'), ((1438, 1459), 'requests.auth.HTTPDigestAuth', 'HTTPDigestAuth', (['*auth'], {}), '(*auth)\n', (1452, 1459), False, 'from requests.auth import HTTPDigestAuth\n'), ((1515, 1577), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'auth': 'auth', 'verify': 'ssl_verify'}), '(url, data=payload, auth=auth, verify=ssl_verify)\n', (1528, 1577), False, 'import requests\n'), ((1608, 1671), 'requests.get', 'requests.get', (['url'], {'params': 'payload', 'auth': 'auth', 'verify': 'ssl_verify'}), '(url, params=payload, auth=auth, verify=ssl_verify)\n', (1620, 1671), False, 'import requests\n'), ((1958, 1986), 'scrapy_do.client.ClientException', 'ClientException', (["data['msg']"], {}), "(data['msg'])\n", (1973, 1986), False, 'from scrapy_do.client import ClientException\n'), ((2019, 2040), 'scrapy_do.client.ClientException', 'ClientException', (['data'], {}), '(data)\n', (2034, 2040), False, 'from scrapy_do.client import ClientException\n')]
|
"""
Script to compute dci score of learned representation.
"""
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
from absl import flags, app
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from disentanglement_lib.evaluation.metrics import dci
from disentanglement_lib.visualize import visualize_scores
import os
FLAGS = flags.FLAGS
flags.DEFINE_string('c_path', '', 'File path for underlying factors c')
flags.DEFINE_string('assign_mat_path', 'data/hirid/assign_mats/hirid_assign_mat.npy', 'Path for assignment matrix')
flags.DEFINE_string('model_name', '', 'Name of model directory to get learned latent code')
flags.DEFINE_enum('data_type_dci', 'dsprites', ['hmnist', 'physionet', 'hirid', 'sprites', 'dsprites', 'smallnorb', 'cars3d', 'shapes3d'], 'Type of data and how to evaluate')
flags.DEFINE_list('score_factors', [], 'Underlying factors to consider in DCI score calculation')
flags.DEFINE_enum('rescaling', 'linear', ['linear', 'standard'], 'Rescaling of ground truth factors')
flags.DEFINE_bool('shuffle', False, 'Whether or not to shuffle evaluation data.')
flags.DEFINE_integer('dci_seed', 42, 'Random seed.')
flags.DEFINE_bool('visualize_score', False, 'Whether or not to visualize score')
flags.DEFINE_bool('save_score', False, 'Whether or not to save calculated score')
def load_z_c(c_path, z_path):
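    # A .npz archive stores the factors under 'factors_test'; a plain .npy raises IndexError and is loaded whole.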
try:
c_full = np.load(c_path)['factors_test']
except IndexError:
c_full = np.load(c_path)
z = np.load(z_path)
c = c_full
return c, z
def main(argv, model_dir=None):
del argv # Unused
if model_dir is None:
out_dir = FLAGS.model_name
else:
out_dir = model_dir
z_path = '{}/z_mean.npy'.format(out_dir)
if FLAGS.c_path == '':
if FLAGS.data_type_dci != 'hirid':
c_path = os.path.join(F'/data/{FLAGS.data_type_dci}', F'factors_{FLAGS.data_type_dci}.npz')
else:
c_path = os.path.join(F'/data/{FLAGS.data_type_dci}', F'{FLAGS.data_type_dci}.npz')
else:
c_path = FLAGS.c_path
if FLAGS.data_type_dci == "physionet":
# Use imputed values as ground truth for physionet data
c, z = load_z_c('{}/imputed.npy'.format(out_dir), z_path)
c = np.transpose(c, (0,2,1))
elif FLAGS.data_type_dci == "hirid":
c = np.load(c_path)['x_test_miss']
c = np.transpose(c, (0, 2, 1))
c = c.astype(int)
z = np.load(z_path)
else:
c, z = load_z_c(c_path, z_path)
z_shape = z.shape
c_shape = c.shape
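    # Collapse the time axis: (N, D, T) -> transpose to (N, T, D) -> reshape to (N*T, D), one sample per timestep.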
z_reshape = np.reshape(np.transpose(z, (0,2,1)),(z_shape[0]*z_shape[2],z_shape[1]))
c_reshape = np.reshape(np.transpose(c, (0,2,1)),(c_shape[0]*c_shape[2],c_shape[1]))
c_reshape = c_reshape[:z_reshape.shape[0], ...]
# Experimental physionet rescaling
if FLAGS.data_type_dci == 'physionet':
if FLAGS.rescaling == 'linear':
# linear rescaling
c_rescale = 10 * c_reshape
c_reshape = c_rescale.astype(int)
elif FLAGS.rescaling == 'standard':
# standardizing
scaler = StandardScaler()
c_rescale = scaler.fit_transform(c_reshape)
c_reshape = (10*c_rescale).astype(int)
else:
raise ValueError("Rescaling must be 'linear' or 'standard'")
# Include all factors in score calculation, if not specified otherwise
if not FLAGS.score_factors:
FLAGS.score_factors = np.arange(c_shape[1]).astype(str)
    # Check whether a ground-truth factor never changes and drop it if so
mask = np.ones(c_reshape.shape[1], dtype=bool)
for i in range(c_reshape.shape[1]):
c_change = np.sum(abs(np.diff(c_reshape[:8000,i])))
if (not c_change) or (F"{i}" not in FLAGS.score_factors):
mask[i] = False
c_reshape = c_reshape[:,mask]
print(F'C shape: {c_reshape.shape}')
print(F'Z shape: {z_reshape.shape}')
print(F'Shuffle: {FLAGS.shuffle}')
c_train, c_test, z_train, z_test = train_test_split(c_reshape, z_reshape, test_size=0.2, shuffle=FLAGS.shuffle, random_state=FLAGS.dci_seed)
if FLAGS.data_type_dci == "hirid":
n_train = 20000
n_test = 5000
else:
n_train = 8000
n_test = 2000
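    # Gradient-boosted trees predict each factor from the latents; the importance matrix is (num_latents, num_factors).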
importance_matrix, i_train, i_test = dci.compute_importance_gbt(
z_train[:n_train, :].transpose(),
c_train[:n_train, :].transpose().astype(int),
z_test[:n_test, :].transpose(), c_test[:n_test, :].transpose().astype(int))
# Calculate scores
d = dci.disentanglement(importance_matrix)
c = dci.completeness(importance_matrix)
print(F'D: {d}')
print(F'C: {c}')
print(F'I: {i_test}')
if FLAGS.data_type_dci in ['hirid', 'physionet']:
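        # Re-insert zero columns for the factors dropped above so the matrix lines up with the assignment matrix.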
miss_idxs = np.nonzero(np.invert(mask))[0]
for idx in miss_idxs:
importance_matrix = np.insert(importance_matrix,
idx,
0, axis=1)
assign_mat = np.load(FLAGS.assign_mat_path)
impt_mat_assign = np.matmul(importance_matrix, assign_mat)
impt_mat_assign_norm = np.nan_to_num(
impt_mat_assign / np.sum(impt_mat_assign, axis=0))
d_assign = dci.disentanglement(impt_mat_assign_norm)
c_assign = dci.completeness(impt_mat_assign_norm)
print(F'D assign: {d_assign}')
print(F'C assign: {c_assign}')
if FLAGS.save_score:
if FLAGS.data_type_dci in ['hirid', 'physionet']:
np.savez(F'{out_dir}/dci_assign_2_{FLAGS.dci_seed}', informativeness_train=i_train, informativeness_test=i_test,
disentanglement=d, completeness=c,
disentanglement_assign=d_assign, completeness_assign=c_assign)
else:
np.savez(F'{out_dir}/dci_{FLAGS.dci_seed}', informativeness_train=i_train, informativeness_test=i_test,
disentanglement=d, completeness=c)
# Visualization
if FLAGS.visualize_score:
if FLAGS.data_type_dci == 'hirid':
# Visualize
visualize_scores.heat_square(np.transpose(importance_matrix), out_dir,
F"dci_matrix_{FLAGS.dci_seed}",
"feature", "latent dim")
visualize_scores.heat_square(np.transpose(impt_mat_assign_norm), out_dir,
F"dci_matrix_assign_{FLAGS.dci_seed}",
"feature", "latent_dim")
# Save importance matrices
if FLAGS.save_score:
np.save(F"{out_dir}/impt_matrix_{FLAGS.dci_seed}", importance_matrix)
np.save(F"{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}", impt_mat_assign_norm)
else:
# Visualize
visualize_scores.heat_square(importance_matrix, out_dir,
F"dci_matrix_{FLAGS.dci_seed}",
"x_axis", "y_axis")
# Save importance matrices
np.save(F"{out_dir}/impt_matrix_{FLAGS.dci_seed}", importance_matrix)
print("Evaluation finished")
if __name__ == '__main__':
app.run(main)
|
[
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.invert",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"numpy.arange",
"absl.flags.DEFINE_list",
"os.path.join",
"warnings.simplefilter",
"disentanglement_lib.visualize.visualize_scores.heat_square",
"absl.flags.DEFINE_bool",
"numpy.transpose",
"numpy.insert",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_enum",
"numpy.save",
"numpy.savez",
"disentanglement_lib.evaluation.metrics.dci.completeness",
"absl.flags.DEFINE_string",
"absl.app.run",
"numpy.diff",
"numpy.matmul",
"disentanglement_lib.evaluation.metrics.dci.disentanglement"
] |
[((79, 141), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (100, 141), False, 'import warnings\n'), ((438, 509), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""c_path"""', '""""""', '"""File path for underlying factors c"""'], {}), "('c_path', '', 'File path for underlying factors c')\n", (457, 509), False, 'from absl import flags, app\n'), ((510, 634), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""assign_mat_path"""', '"""data/hirid/assign_mats/hirid_assign_mat.npy"""', '"""Path for assignment matrix"""'], {}), "('assign_mat_path',\n 'data/hirid/assign_mats/hirid_assign_mat.npy', 'Path for assignment matrix'\n )\n", (529, 634), False, 'from absl import flags, app\n'), ((626, 721), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_name"""', '""""""', '"""Name of model directory to get learned latent code"""'], {}), "('model_name', '',\n 'Name of model directory to get learned latent code')\n", (645, 721), False, 'from absl import flags, app\n'), ((718, 900), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""data_type_dci"""', '"""dsprites"""', "['hmnist', 'physionet', 'hirid', 'sprites', 'dsprites', 'smallnorb',\n 'cars3d', 'shapes3d']", '"""Type of data and how to evaluate"""'], {}), "('data_type_dci', 'dsprites', ['hmnist', 'physionet',\n 'hirid', 'sprites', 'dsprites', 'smallnorb', 'cars3d', 'shapes3d'],\n 'Type of data and how to evaluate')\n", (735, 900), False, 'from absl import flags, app\n'), ((893, 994), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""score_factors"""', '[]', '"""Underlying factors to consider in DCI score calculation"""'], {}), "('score_factors', [],\n 'Underlying factors to consider in DCI score calculation')\n", (910, 994), False, 'from absl import flags, app\n'), ((991, 1096), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""rescaling"""', '"""linear"""', "['linear', 'standard']", '"""Rescaling of ground truth factors"""'], {}), "('rescaling', 'linear', ['linear', 'standard'],\n 'Rescaling of ground truth factors')\n", (1008, 1096), False, 'from absl import flags, app\n'), ((1093, 1178), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""shuffle"""', '(False)', '"""Whether or not to shuffle evaluation data."""'], {}), "('shuffle', False,\n 'Whether or not to shuffle evaluation data.')\n", (1110, 1178), False, 'from absl import flags, app\n'), ((1175, 1227), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""dci_seed"""', '(42)', '"""Random seed."""'], {}), "('dci_seed', 42, 'Random seed.')\n", (1195, 1227), False, 'from absl import flags, app\n'), ((1228, 1313), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""visualize_score"""', '(False)', '"""Whether or not to visualize score"""'], {}), "('visualize_score', False, 'Whether or not to visualize score'\n )\n", (1245, 1313), False, 'from absl import flags, app\n'), ((1309, 1394), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""save_score"""', '(False)', '"""Whether or not to save calculated score"""'], {}), "('save_score', False,\n 'Whether or not to save calculated score')\n", (1326, 1394), False, 'from absl import flags, app\n'), ((1544, 1559), 'numpy.load', 'np.load', (['z_path'], {}), '(z_path)\n', (1551, 1559), True, 'import numpy as np\n'), ((3634, 3673), 'numpy.ones', 'np.ones', (['c_reshape.shape[1]'], {'dtype': 'bool'}), '(c_reshape.shape[1], dtype=bool)\n', (3641, 3673), True, 'import numpy as np\n'), ((4063, 4172), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['c_reshape', 'z_reshape'], {'test_size': '(0.2)', 'shuffle': 'FLAGS.shuffle', 'random_state': 'FLAGS.dci_seed'}), '(c_reshape, z_reshape, test_size=0.2, shuffle=FLAGS.shuffle,\n random_state=FLAGS.dci_seed)\n', (4079, 4172), False, 'from sklearn.model_selection import train_test_split\n'), ((4591, 4629), 'disentanglement_lib.evaluation.metrics.dci.disentanglement', 'dci.disentanglement', (['importance_matrix'], {}), '(importance_matrix)\n', (4610, 4629), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((4638, 4673), 'disentanglement_lib.evaluation.metrics.dci.completeness', 'dci.completeness', (['importance_matrix'], {}), '(importance_matrix)\n', (4654, 4673), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((7256, 7269), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (7263, 7269), False, 'from absl import flags, app\n'), ((2304, 2330), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (2316, 2330), True, 'import numpy as np\n'), ((2629, 2655), 'numpy.transpose', 'np.transpose', (['z', '(0, 2, 1)'], {}), '(z, (0, 2, 1))\n', (2641, 2655), True, 'import numpy as np\n'), ((2717, 2743), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (2729, 2743), True, 'import numpy as np\n'), ((5060, 5090), 'numpy.load', 'np.load', (['FLAGS.assign_mat_path'], {}), '(FLAGS.assign_mat_path)\n', (5067, 5090), True, 'import numpy as np\n'), ((5117, 5157), 'numpy.matmul', 'np.matmul', (['importance_matrix', 'assign_mat'], {}), '(importance_matrix, assign_mat)\n', (5126, 5157), True, 'import numpy as np\n'), ((5286, 5327), 'disentanglement_lib.evaluation.metrics.dci.disentanglement', 'dci.disentanglement', (['impt_mat_assign_norm'], {}), '(impt_mat_assign_norm)\n', (5305, 5327), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((5347, 5385), 'disentanglement_lib.evaluation.metrics.dci.completeness', 'dci.completeness', (['impt_mat_assign_norm'], {}), '(impt_mat_assign_norm)\n', (5363, 5385), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((1448, 1463), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (1455, 1463), True, 'import numpy as np\n'), ((1520, 1535), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (1527, 1535), True, 'import numpy as np\n'), ((1885, 1971), 'os.path.join', 'os.path.join', (['f"""/data/{FLAGS.data_type_dci}"""', 'f"""factors_{FLAGS.data_type_dci}.npz"""'], {}), "(f'/data/{FLAGS.data_type_dci}',\n f'factors_{FLAGS.data_type_dci}.npz')\n", (1897, 1971), False, 'import os\n'), ((2003, 2077), 'os.path.join', 'os.path.join', (['f"""/data/{FLAGS.data_type_dci}"""', 'f"""{FLAGS.data_type_dci}.npz"""'], {}), "(f'/data/{FLAGS.data_type_dci}', f'{FLAGS.data_type_dci}.npz')\n", (2015, 2077), False, 'import os\n'), ((2425, 2451), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (2437, 2451), True, 'import numpy as np\n'), ((2490, 2505), 'numpy.load', 'np.load', (['z_path'], {}), '(z_path)\n', (2497, 2505), True, 'import numpy as np\n'), ((4910, 4954), 'numpy.insert', 'np.insert', (['importance_matrix', 'idx', '(0)'], {'axis': '(1)'}), '(importance_matrix, idx, 0, axis=1)\n', (4919, 4954), True, 'import numpy as np\n'), ((5560, 5779), 'numpy.savez', 'np.savez', (['f"""{out_dir}/dci_assign_2_{FLAGS.dci_seed}"""'], {'informativeness_train': 'i_train', 'informativeness_test': 'i_test', 'disentanglement': 'd', 'completeness': 'c', 
'disentanglement_assign': 'd_assign', 'completeness_assign': 'c_assign'}), "(f'{out_dir}/dci_assign_2_{FLAGS.dci_seed}', informativeness_train=\n i_train, informativeness_test=i_test, disentanglement=d, completeness=c,\n disentanglement_assign=d_assign, completeness_assign=c_assign)\n", (5568, 5779), True, 'import numpy as np\n'), ((5839, 5981), 'numpy.savez', 'np.savez', (['f"""{out_dir}/dci_{FLAGS.dci_seed}"""'], {'informativeness_train': 'i_train', 'informativeness_test': 'i_test', 'disentanglement': 'd', 'completeness': 'c'}), "(f'{out_dir}/dci_{FLAGS.dci_seed}', informativeness_train=i_train,\n informativeness_test=i_test, disentanglement=d, completeness=c)\n", (5847, 5981), True, 'import numpy as np\n'), ((6877, 6989), 'disentanglement_lib.visualize.visualize_scores.heat_square', 'visualize_scores.heat_square', (['importance_matrix', 'out_dir', 'f"""dci_matrix_{FLAGS.dci_seed}"""', '"""x_axis"""', '"""y_axis"""'], {}), "(importance_matrix, out_dir,\n f'dci_matrix_{FLAGS.dci_seed}', 'x_axis', 'y_axis')\n", (6905, 6989), False, 'from disentanglement_lib.visualize import visualize_scores\n'), ((7119, 7188), 'numpy.save', 'np.save', (['f"""{out_dir}/impt_matrix_{FLAGS.dci_seed}"""', 'importance_matrix'], {}), "(f'{out_dir}/impt_matrix_{FLAGS.dci_seed}', importance_matrix)\n", (7126, 7188), True, 'import numpy as np\n'), ((2382, 2397), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (2389, 2397), True, 'import numpy as np\n'), ((3162, 3178), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3176, 3178), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3512, 3533), 'numpy.arange', 'np.arange', (['c_shape[1]'], {}), '(c_shape[1])\n', (3521, 3533), True, 'import numpy as np\n'), ((3744, 3772), 'numpy.diff', 'np.diff', (['c_reshape[:8000, i]'], {}), '(c_reshape[:8000, i])\n', (3751, 3772), True, 'import numpy as np\n'), ((4828, 4843), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (4837, 4843), True, 'import numpy as np\n'), ((5234, 5265), 'numpy.sum', 'np.sum', (['impt_mat_assign'], {'axis': '(0)'}), '(impt_mat_assign, axis=0)\n', (5240, 5265), True, 'import numpy as np\n'), ((6158, 6189), 'numpy.transpose', 'np.transpose', (['importance_matrix'], {}), '(importance_matrix)\n', (6170, 6189), True, 'import numpy as np\n'), ((6380, 6414), 'numpy.transpose', 'np.transpose', (['impt_mat_assign_norm'], {}), '(impt_mat_assign_norm)\n', (6392, 6414), True, 'import numpy as np\n'), ((6660, 6729), 'numpy.save', 'np.save', (['f"""{out_dir}/impt_matrix_{FLAGS.dci_seed}"""', 'importance_matrix'], {}), "(f'{out_dir}/impt_matrix_{FLAGS.dci_seed}', importance_matrix)\n", (6667, 6729), True, 'import numpy as np\n'), ((6746, 6825), 'numpy.save', 'np.save', (['f"""{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}"""', 'impt_mat_assign_norm'], {}), "(f'{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}', impt_mat_assign_norm)\n", (6753, 6825), True, 'import numpy as np\n')]
|
import json
import os
import requests
def test_server_loaded_notebooks(notebook_server):
name = "/example-jupyter-notebooks.git/01-MPI-monte-carlo-pi.ipynb"
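    # List the available template names first, then fetch the template body by name.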
resp = requests.get(
"{}/templates/names".format(notebook_server),
)
assert resp.status_code == 200, resp.content
assert resp.json() == {"example-jupyter-notebooks.git": [{"name": name}]}
resp2 = requests.get(
"{}/templates/get".format(notebook_server), params={"template": name}
)
    assert resp2.status_code == 200, resp2.content
data = resp2.json()
assert data["name"] == name
assert data["dirname"] == "/example-jupyter-notebooks.git"
assert json.loads(data["content"]) != {}
    template_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "git-templates", name[1:])
    )
    assert data["path"] == template_dir
|
[
"os.path.dirname",
"json.loads"
] |
[((668, 695), 'json.loads', 'json.loads', (["data['content']"], {}), "(data['content'])\n", (678, 695), False, 'import json\n'), ((751, 776), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (766, 776), False, 'import os\n')]
|
# coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations.
OpenAPI spec version: 2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class XapiActivityDefinition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, description=None, type=None, more_info=None, interaction_type=None, correct_responses_pattern=None, choices=None, scale=None, source=None, target=None, steps=None, extensions=None):
"""
XapiActivityDefinition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'dict(str, str)',
'description': 'dict(str, str)',
'type': 'str',
'more_info': 'str',
'interaction_type': 'str',
'correct_responses_pattern': 'list[str]',
'choices': 'list[XapiInteractionComponent]',
'scale': 'list[XapiInteractionComponent]',
'source': 'list[XapiInteractionComponent]',
'target': 'list[XapiInteractionComponent]',
'steps': 'list[XapiInteractionComponent]',
'extensions': 'dict(str, object)'
}
self.attribute_map = {
'name': 'name',
'description': 'description',
'type': 'type',
'more_info': 'moreInfo',
'interaction_type': 'interactionType',
'correct_responses_pattern': 'correctResponsesPattern',
'choices': 'choices',
'scale': 'scale',
'source': 'source',
'target': 'target',
'steps': 'steps',
'extensions': 'extensions'
}
self._name = name
self._description = description
self._type = type
self._more_info = more_info
self._interaction_type = interaction_type
self._correct_responses_pattern = correct_responses_pattern
self._choices = choices
self._scale = scale
self._source = source
self._target = target
self._steps = steps
self._extensions = extensions
@property
def name(self):
"""
Gets the name of this XapiActivityDefinition.
:return: The name of this XapiActivityDefinition.
:rtype: dict(str, str)
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this XapiActivityDefinition.
:param name: The name of this XapiActivityDefinition.
:type: dict(str, str)
"""
self._name = name
@property
def description(self):
"""
Gets the description of this XapiActivityDefinition.
:return: The description of this XapiActivityDefinition.
:rtype: dict(str, str)
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this XapiActivityDefinition.
:param description: The description of this XapiActivityDefinition.
:type: dict(str, str)
"""
self._description = description
@property
def type(self):
"""
Gets the type of this XapiActivityDefinition.
:return: The type of this XapiActivityDefinition.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this XapiActivityDefinition.
:param type: The type of this XapiActivityDefinition.
:type: str
"""
self._type = type
@property
def more_info(self):
"""
Gets the more_info of this XapiActivityDefinition.
:return: The more_info of this XapiActivityDefinition.
:rtype: str
"""
return self._more_info
@more_info.setter
def more_info(self, more_info):
"""
Sets the more_info of this XapiActivityDefinition.
:param more_info: The more_info of this XapiActivityDefinition.
:type: str
"""
self._more_info = more_info
@property
def interaction_type(self):
"""
Gets the interaction_type of this XapiActivityDefinition.
:return: The interaction_type of this XapiActivityDefinition.
:rtype: str
"""
return self._interaction_type
@interaction_type.setter
def interaction_type(self, interaction_type):
"""
Sets the interaction_type of this XapiActivityDefinition.
:param interaction_type: The interaction_type of this XapiActivityDefinition.
:type: str
"""
self._interaction_type = interaction_type
@property
def correct_responses_pattern(self):
"""
Gets the correct_responses_pattern of this XapiActivityDefinition.
:return: The correct_responses_pattern of this XapiActivityDefinition.
:rtype: list[str]
"""
return self._correct_responses_pattern
@correct_responses_pattern.setter
def correct_responses_pattern(self, correct_responses_pattern):
"""
Sets the correct_responses_pattern of this XapiActivityDefinition.
:param correct_responses_pattern: The correct_responses_pattern of this XapiActivityDefinition.
:type: list[str]
"""
self._correct_responses_pattern = correct_responses_pattern
@property
def choices(self):
"""
Gets the choices of this XapiActivityDefinition.
:return: The choices of this XapiActivityDefinition.
:rtype: list[XapiInteractionComponent]
"""
return self._choices
@choices.setter
def choices(self, choices):
"""
Sets the choices of this XapiActivityDefinition.
:param choices: The choices of this XapiActivityDefinition.
:type: list[XapiInteractionComponent]
"""
self._choices = choices
@property
def scale(self):
"""
Gets the scale of this XapiActivityDefinition.
:return: The scale of this XapiActivityDefinition.
:rtype: list[XapiInteractionComponent]
"""
return self._scale
@scale.setter
def scale(self, scale):
"""
Sets the scale of this XapiActivityDefinition.
:param scale: The scale of this XapiActivityDefinition.
:type: list[XapiInteractionComponent]
"""
self._scale = scale
@property
def source(self):
"""
Gets the source of this XapiActivityDefinition.
:return: The source of this XapiActivityDefinition.
:rtype: list[XapiInteractionComponent]
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this XapiActivityDefinition.
:param source: The source of this XapiActivityDefinition.
:type: list[XapiInteractionComponent]
"""
self._source = source
@property
def target(self):
"""
Gets the target of this XapiActivityDefinition.
:return: The target of this XapiActivityDefinition.
:rtype: list[XapiInteractionComponent]
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this XapiActivityDefinition.
:param target: The target of this XapiActivityDefinition.
:type: list[XapiInteractionComponent]
"""
self._target = target
@property
def steps(self):
"""
Gets the steps of this XapiActivityDefinition.
:return: The steps of this XapiActivityDefinition.
:rtype: list[XapiInteractionComponent]
"""
return self._steps
@steps.setter
def steps(self, steps):
"""
Sets the steps of this XapiActivityDefinition.
:param steps: The steps of this XapiActivityDefinition.
:type: list[XapiInteractionComponent]
"""
self._steps = steps
@property
def extensions(self):
"""
Gets the extensions of this XapiActivityDefinition.
:return: The extensions of this XapiActivityDefinition.
:rtype: dict(str, object)
"""
return self._extensions
@extensions.setter
def extensions(self, extensions):
"""
Sets the extensions of this XapiActivityDefinition.
:param extensions: The extensions of this XapiActivityDefinition.
:type: dict(str, object)
"""
self._extensions = extensions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, XapiActivityDefinition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"six.iteritems"
] |
[((9146, 9175), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (9155, 9175), False, 'from six import iteritems\n')]
|
import io
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
from dcspy.dcs_g13 import __version__
here = abspath(dirname(__file__))
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with io.open(join(here, 'requirements.txt'), encoding='utf-8') as f:
requires = f.read().splitlines()
setup(name='dcspy', # Required
version=__version__, # Required
description='Software for integrating DCS: F/A-18C, F-16C and Ka-50 with Logitech G13', # Required
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/emcek/dcspy', # Optional
author='<NAME>', # Optional
license='MIT License',
entry_points={'console_scripts': ['dcs_g13 = dcspy.dcs_g13:run']},
data_files=[('dcspy_data', ['images/dcspy.ico'])],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: Microsoft :: Windows',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Topic :: Games/Entertainment',
'Topic :: Games/Entertainment :: Simulation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Hardware',
'Topic :: Utilities'],
keywords='logitech logitech-sdk logitech-keyboards logitech-gaming logitech-gaming-keyboard dcs-world dcs g13',
# packages=find_packages(exclude=['tests']), # Required
packages=find_packages(), # Required
install_requires=requires, # Optional
platforms=['win32', 'nt', 'Windows'],
# extras_require={'testing': ['pytest']},
project_urls={'Bug Reports': 'https://github.com/emcek/dcspy/issues',
'Source': 'https://github.com/emcek/dcspy'})
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((153, 170), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (160, 170), False, 'from os.path import abspath, dirname, join\n'), ((186, 209), 'os.path.join', 'join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (190, 209), False, 'from os.path import abspath, dirname, join\n'), ((281, 311), 'os.path.join', 'join', (['here', '"""requirements.txt"""'], {}), "(here, 'requirements.txt')\n", (285, 311), False, 'from os.path import abspath, dirname, join\n'), ((1903, 1918), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1916, 1918), False, 'from setuptools import setup, find_packages\n')]
|
import os
from data_reader import DataReader
def merge_results(path):
sorted_files = sorted(os.listdir(path + 'left'))
dr = DataReader()
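    # Each frame exists as matching left/center/right CSVs; stitch the three together per file.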
for file_name in sorted_files:
print('')
l_img = dr.load_pandas(path + 'left/' + file_name)
c_img = dr.load_pandas(path + 'center/' + file_name)
r_img = dr.load_pandas(path + 'right/' + file_name)
print(type(l_img), type(c_img), type(r_img))
img = l_img.append(c_img).append(r_img)
print(type(img))
dump_name = 'dump.' + file_name
img.to_csv(
path + dump_name,
sep =' ',
header = True,
quotechar = ' ',
index = False)
result_name = 'result.' + file_name
        # Context managers ensure both files are closed even if a write fails.
        with open(path + dump_name, 'r') as dump, open(path + result_name, 'w') as result:
            header = "# " + dump.readline()
            body = dump.read()
            result.write(header)
            result.write(body)
|
[
"data_reader.DataReader",
"os.listdir"
] |
[((182, 194), 'data_reader.DataReader', 'DataReader', ([], {}), '()\n', (192, 194), False, 'from data_reader import DataReader\n'), ((127, 152), 'os.listdir', 'os.listdir', (["(path + 'left')"], {}), "(path + 'left')\n", (137, 152), False, 'import os\n')]
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from apache_ranger.model.ranger_base import RangerBase
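# Plain data holders mirroring Ranger's REST JSON payloads; __repr__ serializes them as indented JSON.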
class RangerPolicyResource:
def __init__(self, values=None, isExcludes=None, isRecursive=None):
self.values = values if values is not None else []
self.isExcludes = isExcludes if isExcludes is not None else False
self.isRecursive = isRecursive if isRecursive is not None else False
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItemCondition:
def __init__(self, type=None, values=None):
self.type = type
self.values = values if values is not None else []
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItem:
def __init__(self, accesses=None, users=None, groups=None, roles=None, conditions=None, delegateAdmin=None):
self.accesses = accesses if accesses is not None else []
self.users = users if users is not None else []
self.groups = groups if groups is not None else []
self.roles = roles if roles is not None else []
self.conditions = conditions if conditions is not None else []
self.delegateAdmin = delegateAdmin if delegateAdmin is not None else False
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItemAccess:
def __init__(self, type=None, isAllowed=None):
self.type = type
self.isAllowed = isAllowed if isAllowed is not None else True
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItemDataMaskInfo:
def __init__(self, dataMaskType=None, conditionExpr=None, valueExpr=None):
self.dataMaskType = dataMaskType
self.conditionExpr = conditionExpr
self.valueExpr = valueExpr
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerDataMaskPolicyItem(RangerPolicyItem):
def __init__(self, dataMaskInfo=None, accesses=None, users=None, groups=None, roles=None, conditions=None, delegateAdmin=None):
super().__init__(accesses, users, groups, roles, conditions, delegateAdmin)
self.dataMaskInfo = dataMaskInfo if dataMaskInfo is not None else RangerPolicyItemDataMaskInfo()
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerRowFilterPolicyItem(RangerPolicyItem):
def __init__(self, rowFilterInfo=None, accesses=None, users=None, groups=None, roles=None, conditions=None, delegateAdmin=None):
super().__init__(accesses, users, groups, roles, conditions, delegateAdmin)
self.rowFilterInfo = rowFilterInfo
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicy(RangerBase):
def __init__(self, id=None, guid=None, createdBy=None, updatedBy=None, createTime=None, updateTime=None,
service=None, name=None, description=None, isEnabled=True, isAuditEnabled=None, resources=None,
policyItems=None, dataMaskPolicyItems=None, rowFilterPolicyItems=None, serviceType=None, options=None,
policyLabels=None, zoneName=None, isDenyAllElse=None, validitySchedules=None, version=None,
denyPolicyItems=None, denyExceptions=None, allowExceptions=None, resourceSignature=None,
policyType=None, policyPriority=None, conditions=None):
super().__init__(id, guid, createdBy, updatedBy, createTime, updateTime, version, isEnabled)
self.service = service
self.name = name
self.policyType = policyType
self.policyPriority = policyPriority if policyPriority is not None else 0
self.description = description
self.resourceSignature = resourceSignature
self.isAuditEnabled = isAuditEnabled if isAuditEnabled is not None else True
self.resources = resources if resources is not None else {}
self.policyItems = policyItems if policyItems is not None else []
self.denyPolicyItems = denyPolicyItems if denyPolicyItems is not None else []
self.allowExceptions = allowExceptions if allowExceptions is not None else []
self.denyExceptions = denyExceptions if denyExceptions is not None else []
self.dataMaskPolicyItems = dataMaskPolicyItems if dataMaskPolicyItems is not None else []
self.rowFilterPolicyItems = rowFilterPolicyItems if rowFilterPolicyItems is not None else []
self.serviceType = serviceType
self.options = options if options is not None else {}
self.validitySchedules = validitySchedules if validitySchedules is not None else []
self.policyLabels = policyLabels if policyLabels is not None else []
self.zoneName = zoneName
self.conditions = conditions
self.isDenyAllElse = isDenyAllElse if isDenyAllElse is not None else False
def __repr__(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
|
[
"json.dumps"
] |
[((1229, 1301), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (1239, 1301), False, 'import json\n'), ((1511, 1583), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (1521, 1583), False, 'import json\n'), ((2184, 2256), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (2194, 2256), False, 'import json\n'), ((2480, 2552), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (2490, 2552), False, 'import json\n'), ((2834, 2906), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (2844, 2906), False, 'import json\n'), ((3321, 3393), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (3331, 3393), False, 'import json\n'), ((3748, 3820), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (3758, 3820), False, 'import json\n'), ((6158, 6230), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda x: x.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n', (6168, 6230), False, 'import json\n')]
|
import numpy as np
import math as math
import cv2
def get_ideal_low_pass_filter( shape, cutoff,width):
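    # Despite the name, this builds an ideal band-reject mask: zero inside [cutoff - width/2, cutoff + width/2], one elsewhere.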
[h, w] = shape
mask_image = np.zeros((h, w))
for i in range(h):
for j in range(w):
distance = math.sqrt((i - (h / 2)) * (i - (h / 2)) + (j - (w / 2)) * (j - (w / 2)))
if distance >= cutoff-(width/2) and distance <= cutoff+(width/2):
mask_image[i][j] = 0
else:
mask_image[i][j] = 1
return mask_image
def get_ideal_high_pass_filter( shape, cutoff,width):
mask_image = 1 - get_ideal_low_pass_filter(shape, cutoff,width)
return mask_image
def get_butterworth_low_pass_filter( shape, cutoff,order,width):
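    # Despite the name, this is a Butterworth band-reject: H(d) = 1 / (1 + (d*width / (d^2 - cutoff^2))^(2*order)).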
[h, w] = shape
mask_image = np.zeros((h, w))
for i in range(h):
for j in range(w):
distance = math.sqrt((i - (h / 2)) ** 2 + ((j - (w / 2)) ** 2))
if distance ** 2 - cutoff ** 2 == 0:
mask_image[i][j] = 0
else:
mask_image[i][j] = 1 / (1 + (((distance * width) / (distance ** 2 - cutoff ** 2)) ** (2 * order)))
return mask_image
def get_butterworth_high_pass_filter( shape, cutoff,order,width):
mask_image = 1 - get_butterworth_low_pass_filter(shape, cutoff,order,width)
return mask_image
def get_gaussian_low_pass_filter(shape, cutoff,width):
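    # Despite the name, this is a Gaussian band-reject: H(d) = 1 - exp(-((d^2 - cutoff^2) / (d*width))^2).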
[h, w] = shape
mask_image = np.zeros((h, w))
for i in range(h):
for j in range(w):
distance = math.sqrt((i - (h / 2)) ** 2 + ((j - (w / 2)) ** 2))
if (distance == 0):
mask_image[i][j] = 0
else:
mask_image[i][j] = 1 - math.exp(-(((distance ** 2 - cutoff ** 2) / (distance * width)) ** 2))
return mask_image
def get_gaussian_high_pass_filter(shape, cutoff,width):
mask_image = 1 - get_gaussian_low_pass_filter(shape, cutoff,width)
return mask_image
def post_process_image(image):
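    # Full contrast stretch: linearly map [min, max] onto the 8-bit range [0, 255].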
c_min = np.min(image)
c_max = np.max(image)
new_min = 0
new_max = 255
stretch_image = np.zeros((np.shape(image)), dtype=np.uint8)
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
stretch_image[i][j] = (image[i][j] - c_min) * ((new_max - new_min) / (c_max - c_min)) + new_min
return stretch_image
def filtering_band_filter(image,cutoff,width,filtertype):
s = image.shape
fft_image = np.fft.fft2(image)
shift_image = np.fft.fftshift(fft_image)
dft_image = np.uint8(np.log(np.absolute(shift_image)) * 10)
if filtertype=='Ideal Low Pass' :
mask = get_ideal_low_pass_filter(s, cutoff,width)
elif filtertype=='Ideal High Pass' :
mask=get_ideal_high_pass_filter(s, cutoff,width)
    elif filtertype=='Gaussian Low Pass':
mask=get_gaussian_low_pass_filter(s,cutoff,width)
elif filtertype=='Gaussian High Pass':
mask=get_gaussian_high_pass_filter(s,cutoff,width)
elif filtertype=='Butterworth Low Pass':
mask=get_butterworth_low_pass_filter(s,cutoff,width,order=2)
else:
mask=0
filter_image = shift_image * mask
filter_finalimg =np.uint8(np.log(np.absolute(filter_image))*10)
ishift_image = np.fft.ifftshift(filter_image)
ifft_image = np.fft.ifft2(ishift_image)
mag_image = np.absolute(ifft_image)
f = post_process_image(mag_image)
return [f,filter_finalimg]
def filtering_band_filter_order(image,cutoff,order,width,filtertype):
s = image.shape
fft_image = np.fft.fft2(image)
shift_image = np.fft.fftshift(fft_image)
dft_image = np.uint8(np.log(np.absolute(shift_image)) * 10)
if filtertype=='Butterworth Low Pass':
mask=get_butterworth_low_pass_filter(s,cutoff,order,width)
else:
mask=get_butterworth_high_pass_filter(s,cutoff,order,width)
filter_image = shift_image * (mask*200)
# filter_finalimg1= np.log(np.absolute(filter_image)) * 10
filter_finalimg = np.uint8(np.log(np.absolute(filter_image)) * 10)
# cv2.imshow("ButterLow",filter_finalimg)
#cv2.waitKey(0)
ishift_image = np.fft.ifftshift(filter_image)
ifft_image = np.fft.ifft2(ishift_image)
mag_image = np.absolute(ifft_image)
f = post_process_image(mag_image)
return [f,filter_finalimg]
|
[
"numpy.fft.ifftshift",
"numpy.absolute",
"math.exp",
"math.sqrt",
"numpy.zeros",
"numpy.shape",
"numpy.min",
"numpy.max",
"numpy.fft.fftshift",
"numpy.fft.fft2",
"numpy.fft.ifft2"
] |
[((143, 159), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (151, 159), True, 'import numpy as np\n'), ((747, 763), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (755, 763), True, 'import numpy as np\n'), ((1395, 1411), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (1403, 1411), True, 'import numpy as np\n'), ((1952, 1965), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (1958, 1965), True, 'import numpy as np\n'), ((1978, 1991), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1984, 1991), True, 'import numpy as np\n'), ((2402, 2420), 'numpy.fft.fft2', 'np.fft.fft2', (['image'], {}), '(image)\n', (2413, 2420), True, 'import numpy as np\n'), ((2439, 2465), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_image'], {}), '(fft_image)\n', (2454, 2465), True, 'import numpy as np\n'), ((3192, 3222), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['filter_image'], {}), '(filter_image)\n', (3208, 3222), True, 'import numpy as np\n'), ((3240, 3266), 'numpy.fft.ifft2', 'np.fft.ifft2', (['ishift_image'], {}), '(ishift_image)\n', (3252, 3266), True, 'import numpy as np\n'), ((3283, 3306), 'numpy.absolute', 'np.absolute', (['ifft_image'], {}), '(ifft_image)\n', (3294, 3306), True, 'import numpy as np\n'), ((3484, 3502), 'numpy.fft.fft2', 'np.fft.fft2', (['image'], {}), '(image)\n', (3495, 3502), True, 'import numpy as np\n'), ((3521, 3547), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_image'], {}), '(fft_image)\n', (3536, 3547), True, 'import numpy as np\n'), ((4064, 4094), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['filter_image'], {}), '(filter_image)\n', (4080, 4094), True, 'import numpy as np\n'), ((4112, 4138), 'numpy.fft.ifft2', 'np.fft.ifft2', (['ishift_image'], {}), '(ishift_image)\n', (4124, 4138), True, 'import numpy as np\n'), ((4155, 4178), 'numpy.absolute', 'np.absolute', (['ifft_image'], {}), '(ifft_image)\n', (4166, 4178), True, 'import numpy as np\n'), ((2056, 2071), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2064, 2071), True, 'import numpy as np\n'), ((233, 297), 'math.sqrt', 'math.sqrt', (['((i - h / 2) * (i - h / 2) + (j - w / 2) * (j - w / 2))'], {}), '((i - h / 2) * (i - h / 2) + (j - w / 2) * (j - w / 2))\n', (242, 297), True, 'import math as math\n'), ((837, 883), 'math.sqrt', 'math.sqrt', (['((i - h / 2) ** 2 + (j - w / 2) ** 2)'], {}), '((i - h / 2) ** 2 + (j - w / 2) ** 2)\n', (846, 883), True, 'import math as math\n'), ((1485, 1531), 'math.sqrt', 'math.sqrt', (['((i - h / 2) ** 2 + (j - w / 2) ** 2)'], {}), '((i - h / 2) ** 2 + (j - w / 2) ** 2)\n', (1494, 1531), True, 'import math as math\n'), ((2498, 2522), 'numpy.absolute', 'np.absolute', (['shift_image'], {}), '(shift_image)\n', (2509, 2522), True, 'import numpy as np\n'), ((3141, 3166), 'numpy.absolute', 'np.absolute', (['filter_image'], {}), '(filter_image)\n', (3152, 3166), True, 'import numpy as np\n'), ((3580, 3604), 'numpy.absolute', 'np.absolute', (['shift_image'], {}), '(shift_image)\n', (3591, 3604), True, 'import numpy as np\n'), ((3945, 3970), 'numpy.absolute', 'np.absolute', (['filter_image'], {}), '(filter_image)\n', (3956, 3970), True, 'import numpy as np\n'), ((1664, 1732), 'math.exp', 'math.exp', (['(-((distance ** 2 - cutoff ** 2) / (distance * width)) ** 2)'], {}), '(-((distance ** 2 - cutoff ** 2) / (distance * width)) ** 2)\n', (1672, 1732), True, 'import math as math\n')]
|
from algoliasearch_django import AlgoliaIndex
from algoliasearch_django.decorators import register
from .models import Product
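# Registers the Product model with Algolia; the listed fields are synced to the search index.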
@register(Product)
class ProductIndex(AlgoliaIndex):
fields = ('product_name', 'model_no', 'product_category',
'product_details', 'slug', 'main_product_image')
|
[
"algoliasearch_django.decorators.register"
] |
[((135, 152), 'algoliasearch_django.decorators.register', 'register', (['Product'], {}), '(Product)\n', (143, 152), False, 'from algoliasearch_django.decorators import register\n')]
|
from setuptools import setup
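# NOTE: 'requires' is informational metadata only; pip installs what is listed in 'install_requires'.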
setup(name='pyffe',
version='0.1',
description='Tools and utils for PyCaffe',
# url='http://github.com/fabiocarrara/pyffe',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['pyffe', 'pyffe.models'],
zip_safe=False,
requires=['functools32', 'tqdm', 'pandas', 'lmdb', 'caffe']
)
|
[
"setuptools.setup"
] |
[((30, 289), 'setuptools.setup', 'setup', ([], {'name': '"""pyffe"""', 'version': '"""0.1"""', 'description': '"""Tools and utils for PyCaffe"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['pyffe', 'pyffe.models']", 'zip_safe': '(False)', 'requires': "['functools32', 'tqdm', 'pandas', 'lmdb', 'caffe']"}), "(name='pyffe', version='0.1', description=\n 'Tools and utils for PyCaffe', author='<NAME>', author_email='<EMAIL>',\n license='MIT', packages=['pyffe', 'pyffe.models'], zip_safe=False,\n requires=['functools32', 'tqdm', 'pandas', 'lmdb', 'caffe'])\n", (35, 289), False, 'from setuptools import setup\n')]
|
"""
dronekit-sitl copter --home=-38.7460967,-72.6154299,0,180
mavproxy.py --master tcp:127.0.0.1:5760 --out udp:127.0.0.1:14550 --out udp:127.0.0.1:14551
Mission Planner on port udp:14550
DroneKit on port udp:14551
https://github.com/ArduPilot/MAVProxy/issues/543
"""
# from pymavlink import mavutil
"""
------------------------------
IMPORTS AND INITIALIZATION
------------------------------
"""
from dronekit import connect, VehicleMode
from time import sleep
from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto
from utils import agregarDistanciaMetros
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--connect', default='udp:192.168.8.120:14551')
args = parser.parse_args()
print('Connecting to vehicle on: %s' % args.connect)
vehicle = connect(args.connect, baud=57600, wait_ready=True)
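# wait_ready=True blocks until the vehicle has populated its core attributes before returning.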
"""
------------------------------
MAIN CODE
------------------------------
"""
def prueba_default():
    print("------ STARTING THE FIRST TEST ------")
despegarVehiculo(vehicle, 3)
sleep(8)
aterrizarVehiculo(vehicle)
sleep(1)
vehicle.close()
def prueba_movimiento():
    print("------ STARTING THE SECOND TEST ------")
despegarVehiculo(vehicle, 5)
goto(vehicle, 2.5, 2.5)
sleep(3)
goto(vehicle, 2.5, 2.5)
sleep(3)
aterrizarVehiculo(vehicle)
sleep(1)
vehicle.close()
def prueba_final():
    print("------ STARTING THE THIRD TEST ------")
despegarVehiculo(vehicle, 30)
goto(vehicle, 5, 0)
sleep(1)
goto(vehicle, -1.5, 3.5)
sleep(1)
goto(vehicle, -3.5, 1.5)
sleep(1)
goto(vehicle, -3.5, -1.5)
sleep(1)
goto(vehicle, -1.5, -3.5)
sleep(1)
goto(vehicle, 1.5, -3.5)
sleep(1)
goto(vehicle, 3.5, -1.5)
sleep(1)
goto(vehicle, 3.5, 1.5)
sleep(1)
goto(vehicle, 1.5, 3.5)
sleep(1)
goto(vehicle, -5, 0)
sleep(1)
aterrizarVehiculo(vehicle)
vehicle.close()
prueba_movimiento()
|
[
"vuelo.aterrizarVehiculo",
"argparse.ArgumentParser",
"vuelo.despegarVehiculo",
"time.sleep",
"dronekit.connect",
"vuelo.goto"
] |
[((613, 638), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (636, 638), False, 'import argparse\n'), ((798, 848), 'dronekit.connect', 'connect', (['args.connect'], {'baud': '(57600)', 'wait_ready': '(True)'}), '(args.connect, baud=57600, wait_ready=True)\n', (805, 848), False, 'from dronekit import connect, VehicleMode\n'), ((1029, 1057), 'vuelo.despegarVehiculo', 'despegarVehiculo', (['vehicle', '(3)'], {}), '(vehicle, 3)\n', (1045, 1057), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1062, 1070), 'time.sleep', 'sleep', (['(8)'], {}), '(8)\n', (1067, 1070), False, 'from time import sleep\n'), ((1075, 1101), 'vuelo.aterrizarVehiculo', 'aterrizarVehiculo', (['vehicle'], {}), '(vehicle)\n', (1092, 1101), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1106, 1114), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1111, 1114), False, 'from time import sleep\n'), ((1221, 1249), 'vuelo.despegarVehiculo', 'despegarVehiculo', (['vehicle', '(5)'], {}), '(vehicle, 5)\n', (1237, 1249), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1254, 1277), 'vuelo.goto', 'goto', (['vehicle', '(2.5)', '(2.5)'], {}), '(vehicle, 2.5, 2.5)\n', (1258, 1277), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1282, 1290), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (1287, 1290), False, 'from time import sleep\n'), ((1295, 1318), 'vuelo.goto', 'goto', (['vehicle', '(2.5)', '(2.5)'], {}), '(vehicle, 2.5, 2.5)\n', (1299, 1318), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1323, 1331), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (1328, 1331), False, 'from time import sleep\n'), ((1336, 1362), 'vuelo.aterrizarVehiculo', 'aterrizarVehiculo', (['vehicle'], {}), '(vehicle)\n', (1353, 1362), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1367, 1375), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1372, 1375), False, 'from time import sleep\n'), ((1477, 1506), 'vuelo.despegarVehiculo', 'despegarVehiculo', (['vehicle', '(30)'], {}), '(vehicle, 30)\n', (1493, 1506), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1511, 1530), 'vuelo.goto', 'goto', (['vehicle', '(5)', '(0)'], {}), '(vehicle, 5, 0)\n', (1515, 1530), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1535, 1543), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1540, 1543), False, 'from time import sleep\n'), ((1548, 1572), 'vuelo.goto', 'goto', (['vehicle', '(-1.5)', '(3.5)'], {}), '(vehicle, -1.5, 3.5)\n', (1552, 1572), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1577, 1585), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1582, 1585), False, 'from time import sleep\n'), ((1590, 1614), 'vuelo.goto', 'goto', (['vehicle', '(-3.5)', '(1.5)'], {}), '(vehicle, -3.5, 1.5)\n', (1594, 1614), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1619, 1627), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1624, 1627), False, 'from time import sleep\n'), ((1632, 1657), 'vuelo.goto', 'goto', (['vehicle', '(-3.5)', '(-1.5)'], {}), '(vehicle, -3.5, -1.5)\n', (1636, 1657), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1662, 1670), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1667, 1670), False, 'from 
time import sleep\n'), ((1675, 1700), 'vuelo.goto', 'goto', (['vehicle', '(-1.5)', '(-3.5)'], {}), '(vehicle, -1.5, -3.5)\n', (1679, 1700), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1705, 1713), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1710, 1713), False, 'from time import sleep\n'), ((1718, 1742), 'vuelo.goto', 'goto', (['vehicle', '(1.5)', '(-3.5)'], {}), '(vehicle, 1.5, -3.5)\n', (1722, 1742), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1747, 1755), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1752, 1755), False, 'from time import sleep\n'), ((1760, 1784), 'vuelo.goto', 'goto', (['vehicle', '(3.5)', '(-1.5)'], {}), '(vehicle, 3.5, -1.5)\n', (1764, 1784), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1789, 1797), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1794, 1797), False, 'from time import sleep\n'), ((1802, 1825), 'vuelo.goto', 'goto', (['vehicle', '(3.5)', '(1.5)'], {}), '(vehicle, 3.5, 1.5)\n', (1806, 1825), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1830, 1838), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1835, 1838), False, 'from time import sleep\n'), ((1843, 1866), 'vuelo.goto', 'goto', (['vehicle', '(1.5)', '(3.5)'], {}), '(vehicle, 1.5, 3.5)\n', (1847, 1866), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1871, 1879), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1876, 1879), False, 'from time import sleep\n'), ((1884, 1904), 'vuelo.goto', 'goto', (['vehicle', '(-5)', '(0)'], {}), '(vehicle, -5, 0)\n', (1888, 1904), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n'), ((1909, 1917), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1914, 1917), False, 'from time import sleep\n'), ((1922, 1948), 'vuelo.aterrizarVehiculo', 'aterrizarVehiculo', (['vehicle'], {}), '(vehicle)\n', (1939, 1948), False, 'from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-09-02
"""Step_simulate.py
:description : simulate iHL622 growth on five experimental media and compare the predictions with measured OD600
"""
import os
import cobra
import matplotlib.pyplot as plt
import numpy as np
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
print('----- loading model -----')
iHL622 = cobra.io.load_json_model('../../ModelFiles/iHL622.json')
# %% <biomass vs od >
print('----- change medium -----')
iHL622.objective = "BIOMASS"
experiment_group = ['A', 'B', 'C', 'D', 'E']
experiment_result = [1.38, 1.88, 1.92, 1.92, 1.90]
experiment_result_err = [0.66, 0.35, 0.69, 0.37, 0.47]
experiment_medium = {
'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.],
'EX_lac__L_e': [18.215, 21.334, 20.882, 17.881, 16.577],
'EX_ac_e': [17.058, 18.301, 18.285, 19.703, 19.643],
'EX_etoh_e': [5.135, 4.623, 4.312, 2.558, 2.230],
}
# for k in experiment_medium.keys(): # g/L --> mM
# temp = np.array(experiment_medium[k])*1000/iHL622.metabolites.get_by_id(k.replace('EX_','')).formula_weight
# experiment_medium[k] = temp
predict_result = []
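# Exchange-flux sign convention in COBRA: negative rates are uptake (applied
# below as lower bounds), positive rates are secretion (applied as upper bounds).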
for i in range(0, len(experiment_result)):
model = iHL622.copy()
for rea in experiment_medium.keys():
bound = experiment_medium[rea][i]
if bound <= 0:
model.reactions.get_by_id(rea).bounds = (bound, 0)
elif bound >= 0:
model.reactions.get_by_id(rea).bounds = (0, bound)
sol = model.optimize()
predict_result.append(round(sol.objective_value, 3))
print('Experiment Biomass:', experiment_result)
print('iHL622 Biomass:', predict_result)
# %% <vitamin B12 > NOTE: error
# experiment_medium = {
# 'BIOMASS': predict_result,
# 'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
# 'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
# 'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.], }
#
# predict_result_b12 = []
# for i in range(0, len(experiment_result)):
# model = iHL622.copy()
# rea = cobra.Reaction('EX_adeadocbl_c')
# model.add_reaction(rea)
# model.reactions.get_by_id('EX_adeadocbl_c').reaction = 'adeadocbl_c --> '
# model.objective = 'EX_adeadocbl_c'
# # model.reactions.get_by_id('EX_ade_e').bounds = (0,0)
# for rea in experiment_medium.keys():
# bound = experiment_medium[rea][i]
# if rea == 'BIOMASS':
# model.reactions.get_by_id(rea).bounds = (bound, bound)
#
# elif bound <= 0:
# model.reactions.get_by_id(rea).bounds = (bound, 0)
# elif bound >= 0:
# model.reactions.get_by_id(rea).bounds = (0, bound)
# predict_result_b12.append(
# round(model.optimize().objective_value * 1355.365, 3)) # Cobalamin: Molar mass: 1,355.365 g/mol
# print('iHL622 b12:', predict_result_b12)
# %% <draw>
import brewer2mpl
fig, ax = plt.subplots(figsize=(6, 4))
ax2 = ax.twinx()
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors
# plt.ylim((0.0, 1.0))
x = np.arange(0, 5)
width = 0.25 # the width of the bars
rects2 = ax.bar(x + width / 2, predict_result, width, label='Model Growth rate', color=colors[0]) # ,
rects1 = ax2.bar(x - width / 2, experiment_result, width, yerr=experiment_result_err, label='Experiment OD600',
color=colors[1]) #
rects1_ = ax2.bar(0, 0, label='Model Growth rate', color=colors[0], )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax2.set_ylabel("OD600", fontsize=16)
ax.set_ylabel('Growth rate (mmol/gDW/h)', fontsize=16) # color = 'tab:blue'
# ax.tick_params(axis='y') # , labelcolor='tab:blue'
ax2.set_ylim((0, 3.2))
ax.set_ylim((0, 2.2))
ax.set_title('Growth rate simulation', fontsize=18)
labels = [''] + experiment_group
ax2.set_xticklabels(labels, fontsize=16)
ax2.legend(loc='best', fontsize=11)
# ax2.legend(loc='best', fontsize=14)
fig.tight_layout()
plt.show()
fig.savefig('Growth rate simulation case2_1.png')
|
[
"matplotlib.pyplot.show",
"numpy.arange",
"brewer2mpl.get_map",
"cobra.io.load_json_model",
"matplotlib.pyplot.subplots",
"os.chdir"
] |
[((234, 293), 'os.chdir', 'os.chdir', (['"""../../ComplementaryData/Step_03_Compare_Refine/"""'], {}), "('../../ComplementaryData/Step_03_Compare_Refine/')\n", (242, 293), False, 'import os\n'), ((338, 394), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""../../ModelFiles/iHL622.json"""'], {}), "('../../ModelFiles/iHL622.json')\n", (362, 394), False, 'import cobra\n'), ((2913, 2941), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2925, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2966, 3010), 'brewer2mpl.get_map', 'brewer2mpl.get_map', (['"""Set2"""', '"""qualitative"""', '(7)'], {}), "('Set2', 'qualitative', 7)\n", (2984, 3010), False, 'import brewer2mpl\n'), ((3064, 3079), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (3073, 3079), True, 'import numpy as np\n'), ((3946, 3956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3954, 3956), True, 'import matplotlib.pyplot as plt\n')]
|
import select
import socketserver
import threading
from queue import Queue
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
"""A threaded tcp request handler
"""
def handle(self):
""" The handle function.
Reads data from the client as long as the server is not requested to close.
Sends the read data back to the client and stores it in a queue.
"""
while self.server.instance.keep_alive:
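            # The 1-second select timeout lets the loop re-check keep_alive
            # regularly, so stop_server() can shut the handler down cleanly.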
ready_read, ready_write, exceptional = select.select([self.request], [], [], 1)
for sock in ready_read:
if sock == self.request:
recv_msg = sock.recv(self.server.instance.receive_bytes)
if recv_msg is not None:
self.request.sendall(recv_msg)
self.server.instance._add(recv_msg)
class EchoServer:
    """A threaded TCP server.
    Attributes
    ----------
    ip : str
        The IP address of the TCP server.
    port : int
        The port of the TCP server.
    receive_bytes : int, default 4096
        Number of bytes to read from the socket per recv; fewer bytes may be returned if fewer are available.
    """
    # Allow the listening address to be reused immediately after a restart.
    socketserver.TCPServer.allow_reuse_address = True
def __init__(self, ip, port, receive_bytes=4096):
self.server = ThreadedTCPServer((ip, port), ThreadedTCPRequestHandler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server.socket.setblocking(False)
self.server.instance = self
self.keep_alive = False
self.receive_bytes = receive_bytes
self._last_received = Queue(maxsize=1)
@property
def last_received(self):
"""bytes: Returns the last received message."""
return self._last_received.get()
def start_server(self):
""" Starts the tcp server.
"""
self.keep_alive = True
self.server_thread.start()
def stop_server(self):
""" Stops the tcp server.
"""
self.keep_alive = False
self.server.shutdown()
self.server.server_close()
def _add(self, message):
if not self._last_received.full():
self._last_received.put(message)
else:
self._last_received.get_nowait()
self._last_received.put(message)
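# --- Hedged usage sketch (not part of the original module); the host/port are
# --- illustrative and the client side uses only the standard socket library.
if __name__ == "__main__":
    import socket
    server = EchoServer("127.0.0.1", 9000)
    server.start_server()
    with socket.create_connection(("127.0.0.1", 9000)) as client:
        client.sendall(b"hello")
        print(client.recv(4096))     # echoed back by the handler
        print(server.last_received)  # b"hello", pulled from the internal queue
    server.stop_server()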
|
[
"threading.Thread",
"select.select",
"queue.Queue"
] |
[((1532, 1582), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.server.serve_forever'}), '(target=self.server.serve_forever)\n', (1548, 1582), False, 'import threading\n'), ((1811, 1827), 'queue.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (1816, 1827), False, 'from queue import Queue\n'), ((611, 651), 'select.select', 'select.select', (['[self.request]', '[]', '[]', '(1)'], {}), '([self.request], [], [], 1)\n', (624, 651), False, 'import select\n')]
|
import threading
import errors
import vs
import logging
import gc
from blinker import signal
from utils import performance
from output.output import Output
# We need to be able to load (not run) vs_server on windows to generate the documentation.
# So we're skipping non-windows imports
try:
import psutil
except ImportError:
pass
PROFILING_STITCH_FORMAT = vs.NV12
class ProfilingOutput(Output):
"""Profiling output
"""
def __init__(self, stitcher, name="profiling", critical=False, preserved=False):
super(ProfilingOutput, self).__init__(stitcher, name, critical, preserved)
self.writer = None
self.pid = psutil.Process()
def reset(self):
self._transition_check()
self.pid.cpu_percent(interval=None)
vs.Output_reset(self.writer.object())
def _start(self, profiling_time=0, preserve=False):
# Todo I don't like that it's created differently from other outputs here, but for now I left it like this
panorama = self.stitcher.project_manager.panorama
self.writer = vs.Output_profiling(self.name,
panorama.width,
panorama.height,
self.stitcher.project_manager.controller.getFrameRateFromInputController(),
PROFILING_STITCH_FORMAT)
if self.writer is None:
raise errors.InternalError()
self.shared_writer = vs.writerSharedPtr(self.writer.object())
self.shared_video = vs.videoWriterSharedPtr(self.shared_writer)
self.has_audio = False
if self.shared_video is not None and not self.stitcher.stitch_output.addWriter(self.shared_video):
raise errors.InternalError("Cannot add profiling writer to stitcher")
if profiling_time > 0:
threading.Timer(profiling_time, self.t_stop).start()
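        # psutil's cpu_percent(interval=None) reports usage since the previous
        # call, so prime it here and read the accumulated value in _stop().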
self.pid.cpu_percent(interval=None)
        # jump automatically from starting state to started state
self.t_writer_ok()
def _stop(self):
self.fps = vs.Output_getFps(self.writer.release())
self.writer = None
logging.info("fps is %f:" % self.fps)
logging.info("cpu_util is %d" % self.pid.cpu_percent(interval=None))
cuda = performance.getCudaInfo()
logging.info("gpu_util is %d" % int(cuda['utilization.gpu']))
logging.info("enc_util is %s" % cuda['utilization.enc'])
success = self.stitcher.stitch_output.removeWriterNoGIL(self.name)
signal("profiling_stopping").send()
if not success:
raise errors.InternalError("Cannot remove writer")
self.shared_video = None
self.shared_writer = None
gc.collect()
        # jump automatically from stopping state to stopped state
self.t_writer_completed()
def get_statistics(self):
cuda = performance.getCudaInfo()
self._transition_check()
if self.writer is not None:
self.fps = vs.Output_getFps(self.writer.object())
return {"fps": self.fps,
"cpu": self.pid.cpu_percent(interval=None),
"gpu": float(cuda['utilization.gpu']),
"enc": float(cuda['utilization.enc'])}
|
[
"psutil.Process",
"threading.Timer",
"errors.InternalError",
"blinker.signal",
"vs.videoWriterSharedPtr",
"gc.collect",
"logging.info",
"utils.performance.getCudaInfo"
] |
[((659, 675), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (673, 675), False, 'import psutil\n'), ((1579, 1622), 'vs.videoWriterSharedPtr', 'vs.videoWriterSharedPtr', (['self.shared_writer'], {}), '(self.shared_writer)\n', (1602, 1622), False, 'import vs\n'), ((2191, 2228), 'logging.info', 'logging.info', (["('fps is %f:' % self.fps)"], {}), "('fps is %f:' % self.fps)\n", (2203, 2228), False, 'import logging\n'), ((2321, 2346), 'utils.performance.getCudaInfo', 'performance.getCudaInfo', ([], {}), '()\n', (2344, 2346), False, 'from utils import performance\n'), ((2425, 2481), 'logging.info', 'logging.info', (["('enc_util is %s' % cuda['utilization.enc'])"], {}), "('enc_util is %s' % cuda['utilization.enc'])\n", (2437, 2481), False, 'import logging\n'), ((2763, 2775), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2773, 2775), False, 'import gc\n'), ((2922, 2947), 'utils.performance.getCudaInfo', 'performance.getCudaInfo', ([], {}), '()\n', (2945, 2947), False, 'from utils import performance\n'), ((1456, 1478), 'errors.InternalError', 'errors.InternalError', ([], {}), '()\n', (1476, 1478), False, 'import errors\n'), ((1779, 1842), 'errors.InternalError', 'errors.InternalError', (['"""Cannot add profiling writer to stitcher"""'], {}), "('Cannot add profiling writer to stitcher')\n", (1799, 1842), False, 'import errors\n'), ((2643, 2687), 'errors.InternalError', 'errors.InternalError', (['"""Cannot remove writer"""'], {}), "('Cannot remove writer')\n", (2663, 2687), False, 'import errors\n'), ((2565, 2593), 'blinker.signal', 'signal', (['"""profiling_stopping"""'], {}), "('profiling_stopping')\n", (2571, 2593), False, 'from blinker import signal\n'), ((1886, 1930), 'threading.Timer', 'threading.Timer', (['profiling_time', 'self.t_stop'], {}), '(profiling_time, self.t_stop)\n', (1901, 1930), False, 'import threading\n')]
|
import miniml
import numpy as np
# Adapted from:
# https://lucidar.me/en/neural-networks/curve-fitting-nonlinear-regression/
# init data
np.random.seed(3)
X = np.linspace(-10, 10, num=1000)
Y = 0.1*X*np.cos(X) + 0.1*np.random.normal(size=1000)
X = X.reshape((len(X), 1))
Y = Y.reshape((len(Y), 1))
# create model
model = miniml.Model()
model.dense(1, None, 'plain')
model.dense(64, 'relu', 'he')
model.dense(32, 'relu', 'he')
model.dense(1, None, 'plain')
# init params
rate = 0.01
epochs = 1000
# train model
optimizer = miniml.Adam(
cost = 'mse',
epochs = epochs,
init_seed = 48,
store = 10,
verbose = 200)
costs = optimizer.train(model, X, Y, rate)
# plot results
miniml.plot_costs(epochs, costs=costs)
miniml.plot_regression(model, X, Y)
|
[
"miniml.Model",
"numpy.random.seed",
"miniml.plot_costs",
"miniml.plot_regression",
"numpy.linspace",
"numpy.cos",
"numpy.random.normal",
"miniml.Adam"
] |
[((139, 156), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (153, 156), True, 'import numpy as np\n'), ((161, 191), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(1000)'}), '(-10, 10, num=1000)\n', (172, 191), True, 'import numpy as np\n'), ((325, 339), 'miniml.Model', 'miniml.Model', ([], {}), '()\n', (337, 339), False, 'import miniml\n'), ((528, 603), 'miniml.Adam', 'miniml.Adam', ([], {'cost': '"""mse"""', 'epochs': 'epochs', 'init_seed': '(48)', 'store': '(10)', 'verbose': '(200)'}), "(cost='mse', epochs=epochs, init_seed=48, store=10, verbose=200)\n", (539, 603), False, 'import miniml\n'), ((695, 733), 'miniml.plot_costs', 'miniml.plot_costs', (['epochs'], {'costs': 'costs'}), '(epochs, costs=costs)\n', (712, 733), False, 'import miniml\n'), ((734, 769), 'miniml.plot_regression', 'miniml.plot_regression', (['model', 'X', 'Y'], {}), '(model, X, Y)\n', (756, 769), False, 'import miniml\n'), ((202, 211), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (208, 211), True, 'import numpy as np\n'), ((218, 245), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (234, 245), True, 'import numpy as np\n')]
|
# Library imports
from bs4 import BeautifulSoup
import requests
# Generic web parser providing basic functionality to load a webpage
# Use this as a base class for more specific webpage parsers
class WebpageParser:
# WebpageParser version
version = '0.1'
# Used to request web page
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}
def __init__(self, url = None):
# Store current URL
self.current_url = url
# Store full page content
self.pageContent = None
self.pageLoaded = False
# Load page if URL provided
        if url is not None:
# setUrl also calls loadCurrentPage
self.setUrl(url)
def getVersion(self):
return self.version
# Set URL and attempt to load page
# Return the return value of 'loadCurrentPage()
def setUrl(self, url):
self.current_url = url
self.pageContent = None
self.pageLoaded = False
return self.loadCurrentPage()
def getUrl(self):
# Check if URL has not been specified yet
        if self.current_url is None:
print("Note: A valid URL has not been specified!")
return ""
# Return URL
else:
return self.current_url
# Attempt to load current page; Return True if successful
def loadCurrentPage(self):
# Check if a URL has been specified
        if self.current_url is None:
print("Error: Cannot retrieve current page! A URL has not been specified!")
self.pageLoaded = False
return self.pageLoaded
# Check if page for current URL has already been loaded
elif self.pageLoaded:
print("Note: Page has already been loaded for current URL.")
# Assume that page for current URL has not been loaded
else:
# Try to load page
try:
req = requests.get(self.current_url, headers=self.headers)
self.pageContent = BeautifulSoup(req.content, "lxml")
self.pageLoaded = True
except:
print("Warning: Could not load page: %s" % self.current_url)
self.pageContent = None
self.pageLoaded = False
        # Return Boolean indicating success or failure
return self.pageLoaded
# Get text from the html section specified by tag and attributes
# tag is a string and attributes is a dictionary of attribute/value pairs
def getTextFromSection(self, tag, attributes):
text = ""
        # Return if no page content is available (no URL set or page not loaded)
        if self.pageContent is None:
            print("Warning: Cannot get text from specified html, because no page has been loaded!")
            return text
        # If this point is reached, page content is available
html = self.pageContent.find(tag, attributes)
if not (html == None):
text = html.text
else:
print("Warning: Cannot get text from specified html! Returning empty string.")
text = ""
return text
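# --- Hedged usage sketch (not part of the original module); the URL and the
# --- tag/attributes pair are illustrative only.
if __name__ == "__main__":
    page = WebpageParser("https://example.com")
    if page.pageLoaded:
        # e.g. grab the text of the first <h1> element on the page
        print(page.getTextFromSection("h1", {}))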
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((2025, 2077), 'requests.get', 'requests.get', (['self.current_url'], {'headers': 'self.headers'}), '(self.current_url, headers=self.headers)\n', (2037, 2077), False, 'import requests\n'), ((2113, 2147), 'bs4.BeautifulSoup', 'BeautifulSoup', (['req.content', '"""lxml"""'], {}), "(req.content, 'lxml')\n", (2126, 2147), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python
import pathlib, sys
file_path = pathlib.Path(__file__).parent.absolute()
from argparse import ArgumentParser
import numpy as np
import time
import pressiodemoapps as pda
def schemeStringToSchemeEnum(s):
if s=="FirstOrder":
return pda.InviscidFluxReconstruction.FirstOrder
elif s=="Weno3":
return pda.InviscidFluxReconstruction.Weno3
elif s=="Weno5":
return pda.InviscidFluxReconstruction.Weno5
# ---------------------------
if __name__ == '__main__':
# ---------------------------
parser = ArgumentParser()
parser.add_argument("-m, --mesh", dest="meshDir", default="empty")
parser.add_argument("-n", dest="loopCount", default=10, type=int)
parser.add_argument("-s", dest="scheme");
args = parser.parse_args()
start = time.time()
meshPath = str(args.meshDir)
meshObj = pda.load_cellcentered_uniform_mesh(meshPath)
schemeEnum = schemeStringToSchemeEnum(args.scheme)
probId = pda.Euler2d.PeriodicSmooth
appObj = pda.create_problem(meshObj, probId, schemeEnum)
yn = appObj.initialCondition()
V = appObj.createVelocity()
B = np.ones((len(yn), 25), order='F')
AJ = appObj.createApplyJacobianResult(B)
# warmup
appObj.applyJacobian(yn, 0., B, AJ)
#appObj.velocity(yn, 0., V)
print("starting loop")
start = time.time()
for i in range(args.loopCount):
#appObj.velocity(yn, 0., V)
appObj.applyJacobian(yn, 0., B, AJ)
end = time.time()
print("elapsed ", end - start)
|
[
"pressiodemoapps.load_cellcentered_uniform_mesh",
"argparse.ArgumentParser",
"time.time",
"pathlib.Path",
"pressiodemoapps.create_problem"
] |
[((536, 552), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (550, 552), False, 'from argparse import ArgumentParser\n'), ((774, 785), 'time.time', 'time.time', ([], {}), '()\n', (783, 785), False, 'import time\n'), ((830, 874), 'pressiodemoapps.load_cellcentered_uniform_mesh', 'pda.load_cellcentered_uniform_mesh', (['meshPath'], {}), '(meshPath)\n', (864, 874), True, 'import pressiodemoapps as pda\n'), ((985, 1032), 'pressiodemoapps.create_problem', 'pda.create_problem', (['meshObj', 'probId', 'schemeEnum'], {}), '(meshObj, probId, schemeEnum)\n', (1003, 1032), True, 'import pressiodemoapps as pda\n'), ((1295, 1306), 'time.time', 'time.time', ([], {}), '()\n', (1304, 1306), False, 'import time\n'), ((1422, 1433), 'time.time', 'time.time', ([], {}), '()\n', (1431, 1433), False, 'import time\n'), ((55, 77), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (67, 77), False, 'import pathlib, sys\n')]
|
import panaetius
def test_set_config(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_logging")
# act
config = panaetius.Config(header, config_path)
panaetius.set_config(config, "some_top_string")
# assert
assert getattr(config, "some_top_string") == "some_top_value"
|
[
"panaetius.set_config",
"panaetius.Config"
] |
[((160, 197), 'panaetius.Config', 'panaetius.Config', (['header', 'config_path'], {}), '(header, config_path)\n', (176, 197), False, 'import panaetius\n'), ((202, 249), 'panaetius.set_config', 'panaetius.set_config', (['config', '"""some_top_string"""'], {}), "(config, 'some_top_string')\n", (222, 249), False, 'import panaetius\n')]
|
from copy import deepcopy
from .helpers import set_n_jobs, replace_with_in_params
from sklearn.ensemble import (StackingRegressor, StackingClassifier,
VotingClassifier, VotingRegressor)
from joblib import Parallel, delayed
from sklearn.base import clone, is_classifier
from sklearn.utils import Bunch
from sklearn.model_selection import check_cv, cross_val_predict
import numpy as np
import pandas as pd
from .base import _fit_single_estimator, _get_est_fit_params
from ..main.CV import BPtCV
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import available_if, if_delegate_has_method
from sklearn.preprocessing import LabelEncoder
from .helpers import (get_mean_fis, get_concat_fis, get_concat_fis_len,
check_for_nested_loader, get_nested_final_estimator)
def _fit_all_estimators(self, X, y, sample_weight=None, mapping=None,
fit_index=None):
# Validate
names, all_estimators = self._validate_estimators()
# Fit all estimators
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(clone(est), X, y, sample_weight,
mapping, fit_index)
for est in all_estimators if est != 'drop'
)
self.named_estimators_ = Bunch()
est_fitted_idx = 0
for name_est, org_est in zip(names, all_estimators):
if org_est != 'drop':
self.named_estimators_[name_est] = self.estimators_[
est_fitted_idx]
est_fitted_idx += 1
else:
self.named_estimators_[name_est] = 'drop'
return names, all_estimators
def voting_fit(self, X, y, sample_weight=None, mapping=None,
fit_index=None, **kwargs):
# Fit self.estimators_ on all data
self._fit_all_estimators(
X, y, sample_weight=sample_weight, mapping=mapping,
fit_index=fit_index)
return self
def _get_cv_inds(self, index):
# If BPtCV call get_cv
if isinstance(self.cv, BPtCV):
random_state = None
if hasattr(self, 'random_state'):
random_state = self.random_state
return self.cv.get_cv(fit_index=index,
random_state=random_state,
return_index=True)
# Otherwise treat as sklearn arg directly
return self.cv
def stacking_fit(self, X, y, sample_weight=None, mapping=None,
fit_index=None, **kwargs):
# Validate final estimator
self._validate_final_estimator()
# Fit self.estimators_ on all data
names, all_estimators = self._fit_all_estimators(
X, y, sample_weight=sample_weight, mapping=mapping,
fit_index=fit_index)
    # To train the meta-classifier using as much data as possible, we use
    # a cross-validation to obtain the output of the stacked estimators.
# Get cv inds w/ handle cases for BPtCV
cv_inds = self._get_cv_inds(fit_index)
# To ensure that the data provided to each estimator are the same, we
# need to set the random state of the cv if there is one and we need to
# take a copy.
cv = check_cv(cv_inds, y=y, classifier=is_classifier(self))
if hasattr(cv, 'random_state') and cv.random_state is None:
cv.random_state = np.random.RandomState()
# Proc stack method
stack_method = [self.stack_method] * len(all_estimators)
self.stack_method_ = [
self._method_name(name, est, meth)
for name, est, meth in zip(names, all_estimators, stack_method)
]
# Base fit params for sample weight
sample_weight_params = ({"sample_weight": sample_weight}
if sample_weight is not None else None)
# Get the fit params for each indv estimator
all_fit_params = [_get_est_fit_params(est, mapping=mapping,
fit_index=fit_index,
other_params=sample_weight_params)
for est in all_estimators]
# Catch rare error - TODO come up with fix
if X.shape[0] == X.shape[1]:
        raise RuntimeError('Same numbers of data points and '
                           'features can lead to error.')
# Make the cross validated internal predictions to train
# the final_estimator
predictions = Parallel(n_jobs=self.n_jobs)(
delayed(cross_val_predict)(clone(est), X, y, cv=deepcopy(cv),
method=meth, n_jobs=self.n_jobs,
fit_params=fit_params,
verbose=self.verbose)
for est, meth, fit_params in zip(all_estimators,
self.stack_method_,
all_fit_params) if est != 'drop'
)
# Only not None or not 'drop' estimators will be used in transform.
# Remove the None from the method as well.
self.stack_method_ = [
meth for (meth, est) in zip(self.stack_method_, all_estimators)
if est != 'drop'
]
# @TODO make sure train data index is concatenated correctly
X_meta = self._concatenate_predictions(X, predictions)
_fit_single_estimator(self.final_estimator_, X_meta, y,
sample_weight=sample_weight,
mapping=None,
fit_index=fit_index)
return self
def ensemble_classifier_fit(self, X, y,
sample_weight=None, mapping=None,
fit_index=None, **kwargs):
check_classification_targets(y)
# To make compatible with each Voting and Stacking ...
self._le = LabelEncoder().fit(y)
self.le_ = self._le
self.classes_ = self._le.classes_
transformed_y = self._le.transform(y)
return self.bpt_fit(X, transformed_y,
sample_weight=sample_weight,
mapping=mapping,
fit_index=fit_index,
**kwargs)
def _base_transform_feat_names(self, X_df, encoders=None, nested_model=False):
    '''This base function works under the assumption of calculating
    mean coefs.'''
# Check each sub estimator for the method
# transform feat names
all_feat_names = []
for est in self.estimators_:
if hasattr(est, 'transform_feat_names'):
feat_names = est.transform_feat_names(X_df, encoders=encoders,
nested_model=nested_model)
all_feat_names.append(feat_names)
# If None found
if len(all_feat_names) == 0:
return list(X_df)
# If some found, only return updated if all the same
# So check if all same as first
# if any not the same, return base
for fn in all_feat_names[1:]:
if fn != all_feat_names[0]:
return list(X_df)
# Otherwise, return first
return all_feat_names[0]
def _loader_transform_feat_names(self, X_df, encoders=None, nested_model=False):
# Check each estimator
all_feat_names = []
for est in self.estimators_:
if hasattr(est, 'transform_feat_names'):
feat_names = est.transform_feat_names(X_df, encoders=encoders,
nested_model=nested_model)
all_feat_names.append(feat_names)
# If none found
if len(all_feat_names) == 0:
return list(X_df)
# Get concat list
all_concat = list(np.concatenate(all_feat_names))
# If all unique, return concat
if len(set(all_concat)) == len(all_concat):
return all_concat
# Otherwise, append unique identifier
all_concat = []
for i, fn in enumerate(all_feat_names):
all_concat += [str(i) + '_' + str(name) for name in fn]
return all_concat
def _transform_feat_names(self, X_df, encoders=None, nested_model=False):
if self.has_nested_loader():
return self._loader_transform_feat_names(X_df, encoders=encoders, nested_model=nested_model)
else:
return self._base_transform_feat_names(X_df, encoders=encoders, nested_model=nested_model)
def _get_fis_lens(self):
'''This method is used in loader version of voting ensembles'''
# If already stored as attribute, use that
if hasattr(self, 'concat_est_lens_'):
return getattr(self, 'concat_est_lens_')
# Try coef
fi_len = get_concat_fis_len(self.estimators_, 'coef_')
if fi_len is not None:
return fi_len
# Then feature importances
fi_len = get_concat_fis_len(self.estimators_, 'feature_importances_')
if fi_len is not None:
return fi_len
# TODO - could do a search in each base estimator to try and determine
# the final n features in ?
return None
def base_inverse_transform_fis(self, fis, avg_method):
# If not loader, return as is
if not self.has_nested_loader():
return fis
# Get underlying lengths
concat_fi_lens_ = self._get_fis_lens()
if concat_fi_lens_ is None:
return fis
# Go through and inverse transform each chunk
fi_chunks, ind = [], 0
for est, l in zip(self.estimators_, concat_fi_lens_):
# If any don't have it, return passed original
if not hasattr(est, 'inverse_transform_fis'):
return fis
# Append the inverse transformed chunk
fi_chunks.append(est.inverse_transform_fis(fis.iloc[ind:ind+l]))
ind += l
# Combine together in DataFrame
fi_df = pd.DataFrame(fi_chunks)
avg = avg_method(fi_df)
# Put back together in series, and return that
return pd.Series(avg, index=list(fi_df))
def voting_inverse_transform_fis(self, fis):
def mean_avg(fi_df):
return np.mean(np.array(fi_df), axis=0)
return self.base_inverse_transform_fis(fis, mean_avg)
def _get_estimator_fi_weights(estimator):
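    """Return np.abs of a fitted estimator's coef_ (first row if 2-D) or
    feature_importances_, or None if the estimator exposes neither.
    Hedged example (illustrative, not from the original source)::
        from sklearn.linear_model import Ridge
        import numpy as np
        X, y = np.random.rand(50, 3), np.random.rand(50)
        _get_estimator_fi_weights(Ridge().fit(X, y))  # -> np.abs(coef_), shape (3,)
    """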
weights = None
if hasattr(estimator, 'coef_'):
weights = getattr(estimator, 'coef_')
if weights is None and hasattr(estimator, 'feature_importances_'):
weights = getattr(estimator, 'feature_importances_')
if weights is None:
return None
# Set to absolute
weights = np.abs(weights)
# Shape if not 1D is (1, n_features) or (n_classes, n_features)
# TODO handle multiclass
if len(np.shape(weights)) > 1:
weights = weights[0]
return weights
def stacking_inverse_transform_fis(self, fis):
def stacked_avg(fi_df):
# First assumption we need to make is that we
# are only interested in absolute values
fis = np.abs(np.array(fi_df))
# Use coef / feat importance from estimator as weights
weights = _get_estimator_fi_weights(self.final_estimator_)
if weights is None:
return None
# Return weighted average
try:
return np.average(fis, axis=0, weights=weights)
except ZeroDivisionError:
return np.average(fis, axis=0)
return self.base_inverse_transform_fis(fis, stacked_avg)
def has_nested_loader(self):
# If not already set, set
if not hasattr(self, 'nested_loader_'):
setattr(self, 'nested_loader_',
check_for_nested_loader(self.estimators_))
return getattr(self, 'nested_loader_')
def ensemble_transform(self, X):
# If nested model case, return concatenation of transforms
if self.has_nested_loader():
# Init
Xts, self.concat_est_lens_ = [], []
for estimator in self.estimators_:
# Get transformed X, passing along nested model True
Xt = estimator.transform(X, nested_model=True)
# Keep track of transformed + length
Xts.append(Xt)
self.concat_est_lens_.append(Xt.shape[-1])
# Return concat along axis 1
return np.concatenate(Xts, axis=1)
# TODO - non nested loader case, but still nested model case
else:
raise RuntimeError('Not implemented.')
def _get_estimators_pred_chunks(self, X, method='predict'):
# Convert method to list if not
if not isinstance(method, list):
method = [method for _ in range(len(self.estimators_))]
# Go through each estimator, to make predictions
# on just the chunk of transformed input relevant for each.
pred_chunks, ind = [], 0
for estimator, l, m in zip(self.estimators_, self.concat_est_lens_, method):
# Get the corresponding final estimator
final_estimator = get_nested_final_estimator(estimator)
# Get predictions
pred_chunk = getattr(final_estimator, m)(X[:, ind:ind+l])
# Append predictions
pred_chunks.append(pred_chunk)
# Increment index
ind += l
return np.asarray(pred_chunks)
def _stacked_classifier_predict(self, X, method, **predict_params):
check_is_fitted(self)
# Nested loader case
if self.has_nested_loader():
# Get predict probas from each
predict_probas = self._get_estimators_pred_chunks(X, method=self.stack_method_)
concat_preds = self._concatenate_predictions(X, predict_probas)
# Make preds with final estimator on concat preds
y_pred = getattr(self.final_estimator_, method)(concat_preds)
# If predict, cast to inverse transform
if method == 'predict':
y_pred = self._le.inverse_transform(y_pred)
return y_pred
# TODO finish other case for stacked classifier
raise RuntimeError('Not Implemented')
class BPtStackingRegressor(StackingRegressor):
_needs_mapping = True
_needs_fit_index = True
_fit_all_estimators = _fit_all_estimators
fit = stacking_fit
_get_cv_inds = _get_cv_inds
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = stacking_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
_get_estimators_pred_chunks = _get_estimators_pred_chunks
ensemble_transform = ensemble_transform
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
# TODO - average according to stacked ...
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
# TODO - average according to stacked ...
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
def predict(self, X):
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict(X)
check_is_fitted(self)
# Nested loader case
if self.has_nested_loader():
# If nested loader, then the expectation is that this
# predict is receiving the concat fully model nested transformed
# output from each of the self.estimators_
pred_chunks = self._get_estimators_pred_chunks(X, method='predict').T
# Return predictions from final estimator
return self.final_estimator_.predict(pred_chunks)
# TODO fill in other case?
raise RuntimeError('Not Implemented')
class BPtStackingClassifier(StackingClassifier):
_needs_mapping = True
_needs_fit_index = True
_fit_all_estimators = _fit_all_estimators
bpt_fit = stacking_fit
fit = ensemble_classifier_fit
_get_cv_inds = _get_cv_inds
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = stacking_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
_get_estimators_pred_chunks = _get_estimators_pred_chunks
ensemble_transform = ensemble_transform
_stacked_classifier_predict = _stacked_classifier_predict
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
# TODO - average according to stacked ...
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
# TODO - average according to stacked ...
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
@if_delegate_has_method(delegate="final_estimator_")
def predict(self, X, **predict_params):
# Base case
if X.shape[-1] == self.n_features_in_:
return super().predict(X, **predict_params)
# Other case
return self._stacked_classifier_predict(X, method='predict', **predict_params)
@if_delegate_has_method(delegate="final_estimator_")
def predict_proba(self, X):
# Base case
if X.shape[-1] == self.n_features_in_:
return super().predict_proba(X)
# Other case
return self._stacked_classifier_predict(X, method='predict_proba')
@if_delegate_has_method(delegate="final_estimator_")
def decision_function(self, X):
# Base case
if X.shape[-1] == self.n_features_in_:
return super().decision_function(X)
# Other case
return self._stacked_classifier_predict(X, method='decision_function')
class BPtVotingRegressor(VotingRegressor):
# Set tags
_needs_mapping = True
_needs_fit_index = True
# Override / set methods
_fit_all_estimators = _fit_all_estimators
fit = voting_fit
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = voting_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
ensemble_transform = ensemble_transform
_get_estimators_pred_chunks = _get_estimators_pred_chunks
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
return get_mean_fis(self.estimators_, 'feature_importances_')
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
return get_mean_fis(self.estimators_, 'coef_')
def predict(self, X):
# Make sure fitted
check_is_fitted(self)
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict(X)
# Otherwise, two cases, nested loader or not
if self.has_nested_loader():
# If nested loader, then the expectation is that this
# predict is receiving the concat fully model nested transformed
# output from each of the self.estimators_
pred_chunks = self._get_estimators_pred_chunks(X, method='predict')
# The voting ensemble just uses the mean from each
mean_preds = np.mean(pred_chunks, axis=0)
return mean_preds
# TODO fill in other case?
raise RuntimeError('Not Implemented')
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
class BPtVotingClassifier(VotingClassifier):
_needs_mapping = True
_needs_fit_index = True
_fit_all_estimators = _fit_all_estimators
bpt_fit = voting_fit
fit = ensemble_classifier_fit
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = voting_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
ensemble_transform = ensemble_transform
_get_estimators_pred_chunks = _get_estimators_pred_chunks
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
return get_mean_fis(self.estimators_, 'feature_importances_')
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
return get_mean_fis(self.estimators_, 'coef_')
def _check_voting(self):
if self.voting == "hard":
raise AttributeError(
f"predict_proba is not available when voting={repr(self.voting)}"
)
return True
def predict(self, X):
# Make sure fitted
check_is_fitted(self)
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict(X)
# If loader based
if self.has_nested_loader():
# If nested loader, then the expectation is that this
# predict is receiving the concat fully model nested transformed
# output from each of the self.estimators_
# If soft voting, can use predict proba instead
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
# Hard voting, use base pred
else:
# Get predictions with special nested
predictions = self._get_estimators_pred_chunks(X, method='predict')
# Get majority vote w/
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
axis=1,
arr=predictions,
)
# Use label encoder to inverse transform before returning
maj = self.le_.inverse_transform(maj)
return maj
# TODO fill in other case?
raise RuntimeError('Not Implemented')
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
@available_if(_check_voting)
def predict_proba(self, X):
check_is_fitted(self)
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict_proba(X)
# Otherwise, two cases, nested loader or not
if self.has_nested_loader():
# Get predict probas from each
predict_probas = self._get_estimators_pred_chunks(X, method='predict_proba')
# Calculate average
avg = np.average(predict_probas, axis=0, weights=self._weights_not_none)
# And return
return avg
# TODO fill in other case?
raise RuntimeError('Not Implemented')
class EnsembleWrapper():
def __init__(self, model_params, ensemble_params,
_get_ensembler, n_jobs, random_state):
self.model_params = model_params
self.ensemble_params = ensemble_params
self._get_ensembler = _get_ensembler
self.n_jobs = n_jobs
self.random_state = random_state
def _update_params(self, p_name, to_add):
# Get existing
params = getattr(self, p_name)
# Fill in new
new_params = {}
for key in params:
new_params[to_add + '__' + key] = params[key]
# Update
setattr(self, p_name, new_params)
def _update_model_ensemble_params(self, to_add, model=True, ensemble=True):
if model:
self._update_params('model_params', to_add)
if ensemble:
self._update_params('ensemble_params', to_add)
def _basic_ensemble(self, models, name, ensemble=False):
if len(models) == 1:
return models
else:
basic_ensemble = self._get_ensembler(models)
self._update_model_ensemble_params(name, ensemble=ensemble)
return [(name, basic_ensemble)]
def get_updated_params(self):
self.model_params.update(self.ensemble_params)
return self.model_params
def wrap_ensemble(self, models, ensemble, ensemble_params,
final_estimator=None,
final_estimator_params=None):
# If no ensemble is passed, return either the 1 model,
# or a voting wrapper
if ensemble is None or len(ensemble) == 0:
return self._basic_ensemble(models=models,
name='Default Voting',
ensemble=True)
# Otherwise special ensembles
else:
# If needs a single estimator, but multiple models passed,
# wrap in ensemble!
if ensemble_params.single_estimator:
se_ensemb_name = 'Single-Estimator Compatible Ensemble'
models = self._basic_ensemble(models,
se_ensemb_name,
ensemble=False)
# If no split and single estimator
if ensemble_params.single_estimator:
return self._wrap_single(models, ensemble,
ensemble_params.n_jobs_type)
# Last case is, no split/DES ensemble and also
# not single estimator based
# e.g., in case of stacking regressor.
else:
return self._wrap_multiple(models, ensemble,
final_estimator,
final_estimator_params,
ensemble_params.n_jobs_type,
ensemble_params.cv)
def _wrap_single(self, models, ensemble_info, n_jobs_type):
'''If passed single_estimator flag'''
# Unpack ensemble info
ensemble_name = ensemble_info[0]
ensemble_obj = ensemble_info[1][0]
ensemble_extra_params = ensemble_info[1][1]
        # Models here, since single estimator is assumed,
        # is just a list with one tuple:
        # [(model or ensemble name, model or ensemble)]
base_estimator = models[0][1]
# Set n jobs based on passed type
if n_jobs_type == 'ensemble':
model_n_jobs = 1
ensemble_n_jobs = self.n_jobs
else:
model_n_jobs = self.n_jobs
ensemble_n_jobs = 1
# Set model / base_estimator n_jobs
set_n_jobs(base_estimator, model_n_jobs)
# Make sure random_state is set (should be already)
if hasattr(base_estimator, 'random_state'):
setattr(base_estimator, 'random_state', self.random_state)
# Create the ensemble object
ensemble = ensemble_obj(base_estimator=base_estimator,
**ensemble_extra_params)
# Set ensemble n_jobs
set_n_jobs(ensemble, ensemble_n_jobs)
# Set random state
if hasattr(ensemble, 'random_state'):
setattr(ensemble, 'random_state', self.random_state)
# Wrap as object
new_ensemble = [(ensemble_name, ensemble)]
# Have to change model name to base_estimator
self.model_params =\
replace_with_in_params(self.model_params, models[0][0],
'base_estimator')
# Append ensemble name to all model params
self._update_model_ensemble_params(ensemble_name,
ensemble=False)
return new_ensemble
def _wrap_multiple(self, models, ensemble_info,
final_estimator, final_estimator_params,
n_jobs_type, cv):
'''In case of no split/DES ensemble, and not single estimator based.'''
# Unpack ensemble info
ensemble_name = ensemble_info[0]
ensemble_obj = ensemble_info[1][0]
ensemble_extra_params = ensemble_info[1][1]
        # Models here is just self.models, a list of tuples
        # of all models.
# So, ensemble_extra_params should contain the
# final estimator + other params
# Set model_n_jobs and ensemble n_jobs based on type
if n_jobs_type == 'ensemble':
model_n_jobs = 1
ensemble_n_jobs = self.n_jobs
else:
model_n_jobs = self.n_jobs
ensemble_n_jobs = 1
# Set the model jobs
set_n_jobs(models, model_n_jobs)
        # Make sure random state is propagated
for model in models:
if hasattr(model[1], 'random_state'):
setattr(model[1], 'random_state', self.random_state)
# Determine the parameters to init the ensemble
pass_params = ensemble_extra_params
pass_params['estimators'] = models
# Process final_estimator if passed
if final_estimator is not None:
# Replace name of final estimator w/ final_estimator in params
final_estimator_params =\
replace_with_in_params(params=final_estimator_params,
original=final_estimator[0][0],
replace='final_estimator')
# Add final estimator params to model_params - once name changed
# to avoid potential overlap.
self.model_params.update(final_estimator_params)
# Unpack actual model obj
final_estimator_obj = final_estimator[0][1]
# Set final estimator n_jobs to model n_jobs
set_n_jobs(final_estimator_obj, model_n_jobs)
# Redundant random state check
if hasattr(final_estimator_obj, 'random_state'):
setattr(final_estimator_obj, 'random_state', self.random_state)
# Add to pass params
pass_params['final_estimator'] = final_estimator_obj
# Check if cv passed
if cv is not None:
pass_params['cv'] = cv
# Init the ensemble object
ensemble = ensemble_obj(**pass_params)
# Set ensemble n_jobs
set_n_jobs(ensemble, ensemble_n_jobs)
# Set random state
if hasattr(ensemble, 'random_state'):
setattr(ensemble, 'random_state', self.random_state)
# Wrap as pipeline compatible object
new_ensemble = [(ensemble_name, ensemble)]
# Append ensemble name to all model params
self._update_model_ensemble_params(ensemble_name,
ensemble=False)
return new_ensemble
|
[
"numpy.abs",
"numpy.shape",
"numpy.mean",
"sklearn.base.clone",
"pandas.DataFrame",
"sklearn.utils.Bunch",
"numpy.random.RandomState",
"sklearn.preprocessing.LabelEncoder",
"sklearn.base.is_classifier",
"sklearn.utils.metaestimators.available_if",
"numpy.bincount",
"copy.deepcopy",
"numpy.average",
"numpy.asarray",
"sklearn.utils.metaestimators.if_delegate_has_method",
"numpy.concatenate",
"sklearn.utils.validation.check_is_fitted",
"numpy.array",
"joblib.Parallel",
"joblib.delayed",
"sklearn.utils.multiclass.check_classification_targets"
] |
[((1400, 1407), 'sklearn.utils.Bunch', 'Bunch', ([], {}), '()\n', (1405, 1407), False, 'from sklearn.utils import Bunch\n'), ((5663, 5694), 'sklearn.utils.multiclass.check_classification_targets', 'check_classification_targets', (['y'], {}), '(y)\n', (5691, 5694), False, 'from sklearn.utils.multiclass import check_classification_targets\n'), ((9608, 9631), 'pandas.DataFrame', 'pd.DataFrame', (['fi_chunks'], {}), '(fi_chunks)\n', (9620, 9631), True, 'import pandas as pd\n'), ((10299, 10314), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (10305, 10314), True, 'import numpy as np\n'), ((12871, 12894), 'numpy.asarray', 'np.asarray', (['pred_chunks'], {}), '(pred_chunks)\n', (12881, 12894), True, 'import numpy as np\n'), ((12970, 12991), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (12985, 12991), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((17162, 17213), 'sklearn.utils.metaestimators.if_delegate_has_method', 'if_delegate_has_method', ([], {'delegate': '"""final_estimator_"""'}), "(delegate='final_estimator_')\n", (17184, 17213), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((17497, 17548), 'sklearn.utils.metaestimators.if_delegate_has_method', 'if_delegate_has_method', ([], {'delegate': '"""final_estimator_"""'}), "(delegate='final_estimator_')\n", (17519, 17548), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((17796, 17847), 'sklearn.utils.metaestimators.if_delegate_has_method', 'if_delegate_has_method', ([], {'delegate': '"""final_estimator_"""'}), "(delegate='final_estimator_')\n", (17818, 17847), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((23233, 23260), 'sklearn.utils.metaestimators.available_if', 'available_if', (['_check_voting'], {}), '(_check_voting)\n', (23245, 23260), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((1152, 1180), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (1160, 1180), False, 'from joblib import Parallel, delayed\n'), ((3384, 3407), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3405, 3407), True, 'import numpy as np\n'), ((4424, 4452), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (4432, 4452), False, 'from joblib import Parallel, delayed\n'), ((7573, 7603), 'numpy.concatenate', 'np.concatenate', (['all_feat_names'], {}), '(all_feat_names)\n', (7587, 7603), True, 'import numpy as np\n'), ((11953, 11980), 'numpy.concatenate', 'np.concatenate', (['Xts'], {'axis': '(1)'}), '(Xts, axis=1)\n', (11967, 11980), True, 'import numpy as np\n'), ((15155, 15176), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (15170, 15176), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((19308, 19329), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (19323, 19329), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((21714, 21735), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (21729, 21735), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((23302, 23323), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (23317, 23323), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((3273, 3292), 'sklearn.base.is_classifier', 'is_classifier', (['self'], {}), '(self)\n', (3286, 3292), False, 'from sklearn.base import clone, is_classifier\n'), ((5770, 5784), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5782, 5784), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((9854, 9869), 'numpy.array', 'np.array', (['fi_df'], {}), '(fi_df)\n', (9862, 9869), True, 'import numpy as np\n'), ((10424, 10441), 'numpy.shape', 'np.shape', (['weights'], {}), '(weights)\n', (10432, 10441), True, 'import numpy as np\n'), ((10704, 10719), 'numpy.array', 'np.array', (['fi_df'], {}), '(fi_df)\n', (10712, 10719), True, 'import numpy as np\n'), ((10979, 11019), 'numpy.average', 'np.average', (['fis'], {'axis': '(0)', 'weights': 'weights'}), '(fis, axis=0, weights=weights)\n', (10989, 11019), True, 'import numpy as np\n'), ((19950, 19978), 'numpy.mean', 'np.mean', (['pred_chunks'], {'axis': '(0)'}), '(pred_chunks, axis=0)\n', (19957, 19978), True, 'import numpy as np\n'), ((23766, 23832), 'numpy.average', 'np.average', (['predict_probas'], {'axis': '(0)', 'weights': 'self._weights_not_none'}), '(predict_probas, axis=0, weights=self._weights_not_none)\n', (23776, 23832), True, 'import numpy as np\n'), ((1190, 1220), 'joblib.delayed', 'delayed', (['_fit_single_estimator'], {}), '(_fit_single_estimator)\n', (1197, 1220), False, 'from joblib import Parallel, delayed\n'), ((1221, 1231), 'sklearn.base.clone', 'clone', (['est'], {}), '(est)\n', (1226, 1231), False, 'from sklearn.base import clone, is_classifier\n'), ((4462, 4488), 'joblib.delayed', 'delayed', (['cross_val_predict'], {}), '(cross_val_predict)\n', (4469, 4488), False, 'from joblib import Parallel, delayed\n'), ((4489, 4499), 'sklearn.base.clone', 'clone', (['est'], {}), '(est)\n', (4494, 4499), False, 'from sklearn.base import clone, is_classifier\n'), ((11073, 11096), 'numpy.average', 'np.average', (['fis'], {'axis': '(0)'}), '(fis, axis=0)\n', (11083, 11096), True, 'import numpy as np\n'), ((4510, 4522), 'copy.deepcopy', 'deepcopy', (['cv'], {}), '(cv)\n', (4518, 4522), False, 'from copy import deepcopy\n'), ((22656, 22702), 'numpy.bincount', 'np.bincount', (['x'], {'weights': 'self._weights_not_none'}), '(x, weights=self._weights_not_none)\n', (22667, 22702), True, 'import numpy as np\n')]
|
import requests  # import the requests package
import json
import records
import configparser
import time
from lxml import etree
import xml.sax
# sz000001
db = records.Database("mysql://root:123456@localhost:3306/gupiao?charset=utf8")
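# records.Database takes a standard SQLAlchemy connection URL.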
# data = [
# {'name': 'Jiji', 'age': 23},
# {'name': 'Mini', 'age': 22}
# ]
# db.bulk_query("insert names(name, age) values(:name, :age)", data)
class myXmlHandler(xml.sax.ContentHandler):
def __init__(self):
self.CurrentName = ""
self.CurrentData = ""
    # Called when an element starts
def startElement(self, tag, attributes):
self.CurrentName = tag
    # Called when an element ends
def endElement(self, tag):
pass
    # Called when character data is read
def characters(self, content):
self.CurrentData = content
class stork():
def __init__(self,storkid):
self.storkid = storkid
self.dayData = {}
self.yearData = {}
        # XPath lookup table for the stock's summary page; keys are the
        # Chinese metric names shown on eniu.com (glossed in English below).
        self._dataList = {}
        self._dataList["价格"] = "//*[@id='changyong']/p[1]/a"   # price
        self._dataList["市盈率"] = "//*[@id='changyong']/p[2]/a"  # P/E ratio
        self._dataList["市净率"] = "//*[@id='changyong']/p[3]/a"  # P/B ratio
        self._dataList["股息率"] = "//*[@id='changyong']/p[4]/a"  # dividend yield
        self._dataList["市值"] = "//*[@id='changyong']/p[5]/a"   # market cap
        self._dataList["roe"] = "//*[@id='changyong']/p[6]/a"  # return on equity
        self._dataList["净利润"] = "//*[@id='caiwu']/p[2]/a"      # net profit
        self._dataList["营收"] = "//*[@id='caiwu']/p[3]/a"       # revenue
        self._dataList["利润扣非"] = "//*[@id='caiwu']/p[4]/a"     # profit excl. non-recurring items
        self._dataList["负债率"] = "//*[@id='caiwu']/p[5]/a"      # debt-to-asset ratio
        self._dataList["现金流"] = "//*[@id='caiwu']/p[6]/a"      # cash flow
        self._dataList["毛利率"] = "//*[@id='caiwu']/p[7]/a"      # gross margin
        self._dataList["每股收益"] = "//*[@id='caiwu']/p[8]/a"     # earnings per share
def getData(self):
url = 'https://eniu.com/gu/'+self.storkid
        strhtml = requests.get(url)  # fetch the page data via GET
html = etree.HTML(strhtml.text)
for key,value in self._dataList.items():
html_data = html.xpath(value)
print(key+":"+html_data[0].text)
pass
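    # The get*Data methods below all follow one pattern: fetch a JSON time
    # series from eniu.com, then merge each metric into self.dayData (daily)
    # or self.yearData (quarterly/annual), keyed by date string. (A hedged
    # refactor could hoist this into a single helper taking the URL template,
    # JSON field name, and target dict.)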
def getPriceData(self):
"https://eniu.com/chart/pricea/sz000001/t/all"
url = "https://eniu.com/chart/pricea/{}/t/all".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'price'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['price'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'price':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getPEAData(self):
"https://eniu.com/chart/pea/sz000001/t/all"
url = "https://eniu.com/chart/pea/{}/t/all".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'pe_ttm'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['pea'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'pea':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getPbData(self):
"https://eniu.com/chart/pba/sz000001/t/all"
url = "https://eniu.com/chart/pba/{}/t/all".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'pb'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['pb'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'pb':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getguxiData(self):
"https://eniu.com/chart/dva/sz000001/t/all"
url = "https://eniu.com/chart/dva/{}/t/all".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'dv'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['guxi'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'guxi':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getvalueData(self):
"https://eniu.com/chart/marketvaluea/sz000001"
url = "https://eniu.com/chart/marketvaluea/{}".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'market_value'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['value'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'value':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getRoeaData(self):
"https://eniu.com/chart/roea/sz000001/q/4"
url = "https://eniu.com/chart/roea/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'roe'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['roe'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'roe':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
price_in = 'roa'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['roa'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'roa':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getProfitData(self):
"https://eniu.com/chart/profita/sz000001/q/4"
url = "https://eniu.com/chart/profita/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the page data via GET
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'profit'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['profit'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'profit':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getIncomeData(self):
"https://eniu.com/chart/incomea/sz000001/q/4"
url = "https://eniu.com/chart/incomea/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the JSON payload via HTTP GET
        try:
            params_json = json.loads(strhtml.text)
        except Exception:
            return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'income'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['income'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'income':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getprofitkfData(self):
"https://eniu.com/chart/profitkfa/sz000001/q/4"
url = "https://eniu.com/chart/profitkfa/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the JSON payload via HTTP GET
        try:
            params_json = json.loads(strhtml.text)
        except Exception:
            return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'profit_kf'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['profit_kf'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'profit_kf':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getdebtratioData(self):
"https://eniu.com/chart/debtratioa/sz000001/q/4"
url = "https://eniu.com/chart/debtratioa/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the JSON payload via HTTP GET
        try:
            params_json = json.loads(strhtml.text)
        except Exception:
            return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'asset'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['debtratio'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'debtratio':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getcashflowData(self):
"https://eniu.com/chart/cashflowa/sz000001/q/4"
url = "https://eniu.com/chart/cashflowa/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the JSON payload via HTTP GET
        try:
            params_json = json.loads(strhtml.text)
        except Exception:
            return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'cash_flow'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['cashflow'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'cashflow':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getgrossprofitData(self):
"https://eniu.com/chart/grossprofitmargina/sz000001/q/4"
url = "https://eniu.com/chart/grossprofitmargina/{}/q/4".format(self.storkid)
        strhtml = requests.get(url)  # fetch the JSON payload via HTTP GET
        try:
            params_json = json.loads(strhtml.text)
        except Exception:
            return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'net_profit_margin'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['grossprofit'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'grossprofit':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getCawuData(self):
"https://eniu.com/table/cwzba/sz000001/q/4"
url = 'https://eniu.com/table/cwzba/'+self.storkid+"/q/4"
        strhtml = requests.get(url)  # fetch the JSON payload via HTTP GET
        # html = etree.HTML(strhtml.text)
        try:
            params_json = json.loads(strhtml.text)
        except Exception:
            return
# with open("{}财务.txt".format(self.storkid),'w') as f:
# json.dump(strhtml.text,f)
if len(params_json) == 0:
return
data = []
for p in params_json:
if p['keyName'].find('<')!=-1:
continue
for k,v in p.items():
if k == 'keyName':
continue
v_temp = v
                if isinstance(v, str) and v.find('<') != -1:
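                    # the value is an HTML/XML fragment; run it through SAX to recover the plain text content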
saxParse = xml.sax.make_parser()
                    saxParse.setFeature(xml.sax.handler.feature_namespaces, 0)  # disable namespace processing
Handler = myXmlHandler()
xml.sax.parseString(v,Handler)
v_temp = Handler.CurrentData
data.append({'id':0 , 'date':str(k),
'value':str(v_temp),
'stokeid':str(self.storkid),
'caiwutype':str(p['keyName'])})
if len(data)>0:
db.bulk_query("insert stoke_caiwu_history(id,date, value,stokeid,caiwutype) \
values(:id, :date, :value,:stokeid,:caiwutype)", data)
# CREATE TABLE `gupiao`.`stoke_day_history` (
# `id` INT NOT NULL AUTO_INCREMENT,
# `day` VARCHAR(45) NULL,
# `guxi` VARCHAR(45) NULL,
# `pea` VARCHAR(45) NULL,
# `price` VARCHAR(45) NULL,
# `value` VARCHAR(45) NULL,
# `pb` VARCHAR(45) NULL,
# PRIMARY KEY (`id`));
def insertDayData(self):
data = []
for k,v in self.dayData.items():
guxi=''
pea=''
price=''
value=''
pb=''
if 'guxi' in v:
guxi = v['guxi']
if 'pea'in v:
pea = v['pea']
if 'price' in v:
price = v['price']
if 'value' in v:
value = v['value']
if 'pb' in v:
pb = v['pb']
data.append({'id':0,'day':k, 'guxi':guxi,'pea':pea,
'price':price,'value':value,'pb':pb,'stoke_id':self.storkid})
if len(data)>0:
db.bulk_query("insert stoke_day_history(id,day, guxi,pea,price,value,pb,stoke_id) \
values(:id, :day, :guxi,:pea,:price,:value,:pb,:stoke_id)", data)
pass
# CREATE TABLE `gupiao`.`stoke_year_history` (
# `id` INT NOT NULL AUTO_INCREMENT,
# `years` VARCHAR(45) NULL,
# `cashflow` VARCHAR(45) NULL,
# `debtratio` VARCHAR(45) NULL,
# `grossprofit` VARCHAR(45) NULL,
# `income` VARCHAR(45) NULL,
# `profit` VARCHAR(45) NULL,
# `profit_kf` VARCHAR(45) NULL,
# `roa` VARCHAR(45) NULL,
# `roe` VARCHAR(45) NULL,
# PRIMARY KEY (`id`));
def insertYearData(self):
data = []
for k,v in self.yearData.items():
cashflow=''
debtratio=''
grossprofit=''
income=''
profit=''
profit_kf =''
roa=''
roe = ''
if 'cashflow' in v:
cashflow = v['cashflow']
if 'debtratio' in v:
debtratio = v['debtratio']
if 'grossprofit' in v:
grossprofit = v['grossprofit']
if 'income' in v:
income = v['income']
if 'profit' in v:
profit = v['profit']
if 'profit_kf' in v:
profit_kf = v['profit_kf']
if 'roa' in v:
roa = v['roa']
if 'roe' in v:
roe = v['roe']
data.append({'id':0,'years':k, 'cashflow':cashflow,'debtratio':debtratio,
'grossprofit':grossprofit,'income':income,'profit':profit,
'profit_kf':profit_kf,'roa':roa,'roe':roe,'stoke_id':self.storkid})
if len(data)>0:
db.bulk_query("insert stoke_year_history(id,years, cashflow,debtratio,grossprofit,income,profit,profit_kf,roa,roe,stoke_id) \
values(:id,:years,:cashflow,:debtratio,:grossprofit,:income,:profit,:profit_kf,:roa,:roe,:stoke_id)", data)
def done(self):
insertSql = "UPDATE main_list set done = 'ok' where stock_id='{}'"\
.format(str(self.storkid))
try:
db.query(insertSql)
except Exception as e:
print(e)
pass
def getAlldata(stoke_id):
s = stork(stoke_id)
print('stoke_id:'+ stoke_id)
# s.getData()
# print('getPriceData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getPriceData()
print('getPEAData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getPEAData()
print('getPbData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getPbData()
print('getguxiData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getguxiData()
print('getvalueData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getvalueData()
print('getRoeaData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getRoeaData()
print('getProfitData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getProfitData()
print('getIncomeData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getIncomeData()
print('getprofitkfData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getprofitkfData()
print('getdebtratioData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getdebtratioData()
print('getcashflowData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getcashflowData()
print('getgrossprofitData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.getgrossprofitData()
print('getCawuData:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.insertDayData()
s.insertYearData()
s.getCawuData()
print('end:'+ time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) )
s.done()
# time.sleep(2)
def getStoke():
Sql = "select * from main_list where done!='ok'"
try:
dataList = db.query(Sql)
except Exception as e:
print(e)
# print(len(dataList))
dd = dataList.as_dict()
# print(dd[0])
# {'id': None, 'stock_abbr': 'pfyh', 'stock_id': 'sh600000', 'stock_name': '浦发银行', 'stock_number': '600000',
# 'stock_pinyin': 'pufayinhang', 'dt': datetime.datetime(2021, 4, 23, 8, 21, 30)}
for d in dd:
getAlldata(d['stock_id'])
getStoke()
# getAlldata('hk01626')
|
[
"json.loads",
"records.Database",
"requests.get",
"lxml.etree.HTML",
"time.localtime"
] |
[((173, 247), 'records.Database', 'records.Database', (['"""mysql://root:123456@localhost:3306/gupiao?charset=utf8"""'], {}), "('mysql://root:123456@localhost:3306/gupiao?charset=utf8')\n", (189, 247), False, 'import records\n'), ((1837, 1854), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1849, 1854), False, 'import requests\n'), ((1891, 1915), 'lxml.etree.HTML', 'etree.HTML', (['strhtml.text'], {}), '(strhtml.text)\n', (1901, 1915), False, 'from lxml import etree\n'), ((2252, 2269), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2264, 2269), False, 'import requests\n'), ((3029, 3046), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3041, 3046), False, 'import requests\n'), ((3798, 3815), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3810, 3815), False, 'import requests\n'), ((4561, 4578), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4573, 4578), False, 'import requests\n'), ((5347, 5364), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5359, 5364), False, 'import requests\n'), ((6120, 6137), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6132, 6137), False, 'import requests\n'), ((7238, 7255), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (7250, 7255), False, 'import requests\n'), ((8020, 8037), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (8032, 8037), False, 'import requests\n'), ((8809, 8826), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (8821, 8826), False, 'import requests\n'), ((9609, 9626), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (9621, 9626), False, 'import requests\n'), ((10402, 10419), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (10414, 10419), False, 'import requests\n'), ((11216, 11233), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (11228, 11233), False, 'import requests\n'), ((12010, 12027), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (12022, 12027), False, 'import requests\n'), ((2331, 2355), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (2341, 2355), False, 'import json\n'), ((3108, 3132), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (3118, 3132), False, 'import json\n'), ((3877, 3901), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (3887, 3901), False, 'import json\n'), ((4640, 4664), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (4650, 4664), False, 'import json\n'), ((5426, 5450), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (5436, 5450), False, 'import json\n'), ((6199, 6223), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (6209, 6223), False, 'import json\n'), ((7317, 7341), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (7327, 7341), False, 'import json\n'), ((8099, 8123), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (8109, 8123), False, 'import json\n'), ((8888, 8912), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (8898, 8912), False, 'import json\n'), ((9688, 9712), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (9698, 9712), False, 'import json\n'), ((10481, 10505), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (10491, 10505), False, 'import json\n'), ((11295, 11319), 'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (11305, 11319), False, 'import json\n'), ((12132, 12156), 
'json.loads', 'json.loads', (['strhtml.text'], {}), '(strhtml.text)\n', (12142, 12156), False, 'import json\n'), ((16865, 16881), 'time.localtime', 'time.localtime', ([], {}), '()\n', (16879, 16881), False, 'import time\n'), ((16965, 16981), 'time.localtime', 'time.localtime', ([], {}), '()\n', (16979, 16981), False, 'import time\n'), ((17066, 17082), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17080, 17082), False, 'import time\n'), ((17170, 17186), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17184, 17186), False, 'import time\n'), ((17274, 17290), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17288, 17290), False, 'import time\n'), ((17379, 17395), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17393, 17395), False, 'import time\n'), ((17486, 17502), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17500, 17502), False, 'import time\n'), ((17595, 17611), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17609, 17611), False, 'import time\n'), ((17707, 17723), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17721, 17723), False, 'import time\n'), ((17819, 17835), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17833, 17835), False, 'import time\n'), ((17933, 17949), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17947, 17949), False, 'import time\n'), ((18043, 18059), 'time.localtime', 'time.localtime', ([], {}), '()\n', (18057, 18059), False, 'import time\n'), ((18189, 18205), 'time.localtime', 'time.localtime', ([], {}), '()\n', (18203, 18205), False, 'import time\n')]
|
# @l2g 326 python3
# [326] Power of Three
# Difficulty: Easy
# https://leetcode.com/problems/power-of-three
#
# Given an integer n, return true if it is a power of three. Otherwise, return false.
# An integer n is a power of three, if there exists an integer x such that n == 3x.
#
# Example 1:
# Input: n = 27
# Output: true
# Example 2:
# Input: n = 0
# Output: false
# Example 3:
# Input: n = 9
# Output: true
# Example 4:
# Input: n = 45
# Output: false
#
#
# Constraints:
#
# -2^31 <= n <= 2^31 - 1
#
#
# Follow up: Could you solve it without loops/recursion?
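# (Loop-free approach for the follow-up: return n > 0 and 1162261467 % n == 0,
#  since 3**19 == 1162261467 is the largest power of three that fits in a
#  signed 32-bit integer.)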
class Solution:
def isPowerOfThree(self, n: int) -> bool:
if n <= 0:
return False
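        # strip factors of 3; n is a power of three iff the loop ends at exactly 1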
while n > 1:
div, rem = divmod(n, 3)
if rem != 0:
return False
n = div
        return n == 1
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_326.py")])
|
[
"os.path.join"
] |
[((925, 961), 'os.path.join', 'os.path.join', (['"""tests"""', '"""test_326.py"""'], {}), "('tests', 'test_326.py')\n", (937, 961), False, 'import os\n')]
|
import sys
from collections import deque
n, k = map(int, sys.stdin.readline().split())
circle = deque(list(range(1, n+1)))
order = []
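# rotate k-1 people to the back of the deque, then remove the k-th: this yields the Josephus order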
for _ in range(n):
    for j in range(k):
        if k - 1 != j:
            circle.append(circle.popleft())
        else:
            order.append(circle.popleft())
print("<" + ", ".join(str(x) for x in order) + ">")
|
[
"sys.stdin.readline"
] |
[((62, 82), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (80, 82), False, 'import sys\n')]
|
import pytest
from case_style_changer.case_style import CamelCase
from case_style_changer.case_style import CapitalCase
from case_style_changer.case_style import Case
from case_style_changer.case_style import ConstantCase
from case_style_changer.case_style import KebabCase
from case_style_changer.case_style import PascalCase
from case_style_changer.case_style import SentenceCase
from case_style_changer.case_style import SnakeCase
@pytest.mark.parametrize(
"string, expected",
[
("camel", CamelCase),
("pascal", PascalCase),
("snake", SnakeCase),
("constant", ConstantCase),
("kebab", KebabCase),
("sentence", SentenceCase),
("capital", CapitalCase),
],
)
def test_from_string(string, expected):
case = Case.from_string(string)
assert case == expected
def test_from_string_gives_error():
with pytest.raises(Exception):
Case.from_string("")
def test_no_duplicates_in_the_available_list():
available_list = Case.available_list()
expected_length = len(available_list)
length = len(set(available_list))
assert length == expected_length
|
[
"pytest.mark.parametrize",
"pytest.raises",
"case_style_changer.case_style.Case.from_string",
"case_style_changer.case_style.Case.available_list"
] |
[((438, 669), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""string, expected"""', "[('camel', CamelCase), ('pascal', PascalCase), ('snake', SnakeCase), (\n 'constant', ConstantCase), ('kebab', KebabCase), ('sentence',\n SentenceCase), ('capital', CapitalCase)]"], {}), "('string, expected', [('camel', CamelCase), (\n 'pascal', PascalCase), ('snake', SnakeCase), ('constant', ConstantCase),\n ('kebab', KebabCase), ('sentence', SentenceCase), ('capital', CapitalCase)]\n )\n", (461, 669), False, 'import pytest\n'), ((781, 805), 'case_style_changer.case_style.Case.from_string', 'Case.from_string', (['string'], {}), '(string)\n', (797, 805), False, 'from case_style_changer.case_style import Case\n'), ((1008, 1029), 'case_style_changer.case_style.Case.available_list', 'Case.available_list', ([], {}), '()\n', (1027, 1029), False, 'from case_style_changer.case_style import Case\n'), ((882, 906), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (895, 906), False, 'import pytest\n'), ((916, 936), 'case_style_changer.case_style.Case.from_string', 'Case.from_string', (['""""""'], {}), "('')\n", (932, 936), False, 'from case_style_changer.case_style import Case\n')]
|
import unittest
class Issue8TestCase(unittest.TestCase):
def test_none_as_string_instance(self):
from pyjsg.jsglib import JSGString, JSGPattern
class S(JSGString):
pattern = JSGPattern(r'[a-zA-Z]+')
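        # JSGString's metaclass customizes isinstance(), so plain strings are matched against the class pattern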
self.assertTrue(isinstance('abc', S))
self.assertFalse(isinstance('abc1', S))
self.assertFalse(isinstance('', S))
self.assertFalse(isinstance(None, S))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pyjsg.jsglib.JSGPattern"
] |
[((454, 469), 'unittest.main', 'unittest.main', ([], {}), '()\n', (467, 469), False, 'import unittest\n'), ((211, 234), 'pyjsg.jsglib.JSGPattern', 'JSGPattern', (['"""[a-zA-Z]+"""'], {}), "('[a-zA-Z]+')\n", (221, 234), False, 'from pyjsg.jsglib import JSGString, JSGPattern\n')]
|
from django import forms
class TopicSearchForm(forms.Form):
topic = forms.CharField(max_length=50, required=False, label='')
class ProjectOrderForm(forms.Form):
ORDER_CHOICES = (
('relevance-desc', 'Relevance'),
('publish_datetime-desc', 'Latest'),
('publish_datetime-asc', 'Oldest'),
('title-asc', 'Title (Asc.)'),
('title-desc', 'Title (Desc.)'),
('main_storage_size-asc', 'Size (Asc.)'),
('main_storage_size-desc', 'Size (Desc.)'),
)
orderby = forms.ChoiceField(choices=ORDER_CHOICES, label='')
    def clean_orderby(self):  # Django invokes clean_<fieldname>(); the field is named "orderby"
        pass
class ProjectTypeForm(forms.Form):
PROJECT_TYPES = (
(0, 'Data'),
(1, 'Software'),
(2, 'Challenge'),
(3, 'Model'),
)
types = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
choices=PROJECT_TYPES, label='')
|
[
"django.forms.MultipleChoiceField",
"django.forms.CharField",
"django.forms.ChoiceField"
] |
[((73, 129), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(50)', 'required': '(False)', 'label': '""""""'}), "(max_length=50, required=False, label='')\n", (88, 129), False, 'from django import forms\n'), ((523, 573), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'ORDER_CHOICES', 'label': '""""""'}), "(choices=ORDER_CHOICES, label='')\n", (540, 573), False, 'from django import forms\n'), ((790, 890), 'django.forms.MultipleChoiceField', 'forms.MultipleChoiceField', ([], {'widget': 'forms.CheckboxSelectMultiple', 'choices': 'PROJECT_TYPES', 'label': '""""""'}), "(widget=forms.CheckboxSelectMultiple, choices=\n PROJECT_TYPES, label='')\n", (815, 890), False, 'from django import forms\n')]
|
'''
This script plots spectrograms for pre-ictal periods.
Then, it uses NMF to find subgraphs and expressions for pre-ictal periods.
Finally, it calculates states as the subgraph with maximal expression at each time point
and calculates the dissimilarity between states.
Inputs:
target-electrodes-{mode}.mat
bandpower-windows-pre-sz-{mode}.mat
Outputs:
'''
# %%
# %load_ext autoreload
# %autoreload 2
# Imports and environment setup
import numpy as np
import sys
import os
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
sys.path.append('tools')
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Get paths from config file and metadata
with open("config.json") as f:
config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
electrodes_opt = config['electrodes']
band_opt = config['bands']
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
metadata = json.load(f)['PATIENTS']
patient_cohort = pd.read_excel(ospj(data_path, "patient_cohort.xlsx"))
# flags
SAVE_PLOT = True
NMF_OPT_FLAG = True
SUBJ_LEVEL = False
FIXED_PREICTAL_SEC = 60 * config['preictal_window_min']
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
# %%
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))
for index, row in patient_cohort.iterrows():
if row['Ignore']:
continue
pt = row["Patient"]
print("Calculating pre-ictal NMF for {}".format(pt))
pt_data_path = ospj(data_path, pt)
pt_figure_path = ospj(figure_path, pt)
if not os.path.exists(pt_figure_path):
os.makedirs(pt_figure_path)
# pull and format electrode metadata
electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes_opt)))
target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]
pt_index = patients.index(pt)
sz_starts = pull_sz_starts(pt, metadata)
# get bandpower from pre-ictal period and log transform
# bandpower_mat_data = loadmat(ospj(pt_data_path, "bandpower-windows-pre-sz-{}.mat".format(electrodes_opt)))
# bandpower_data = 10*np.log10(bandpower_mat_data['allFeats'])
# t_sec = np.squeeze(bandpower_mat_data['entireT']) / 1e6
# sz_id = np.squeeze(bandpower_mat_data['szID'])
df = pd.read_pickle(ospj(pt_data_path, "bandpower_elec-{}_period-preictal.pkl".format(electrodes_opt)))
if band_opt == "all":
bandpower_data = df.filter(regex=("^((?!broad).)*$"), axis=1)
bandpower_data = bandpower_data.drop(['Seizure id'], axis=1)
elif band_opt == "broad":
bandpower_data = df.filter(regex=("broad"), axis=1)
else:
print("Band configuration not given properly")
        sys.exit(1)
sz_id = np.squeeze(df['Seizure id'])
t_sec = np.array(df.index / np.timedelta64(1, 's'))
n_sz = np.size(np.unique(sz_id))
# remove short inter-seizure intervals
    lead_sz = np.diff(np.insert(sz_starts, 0, [0])) > LEAD_SZ_WINDOW_SEC  # pre-ictal window + 15 min buffer (defined above)
remaining_sz_ids = np.where(lead_sz)[0]
remove_sz_ids = np.where(~lead_sz)[0]
print("\tremoving seizures {}".format(remove_sz_ids))
print(type(sz_id))
for remv in remove_sz_ids:
t_sec = np.delete(t_sec, np.where(sz_id == remv))
bandpower_data.drop(bandpower_data.index[np.where(sz_id == remv)[0]], inplace=True)
sz_id.drop(sz_id.index[np.where(sz_id == remv)[0]], inplace=True)
np.save(ospj(pt_data_path, "remaining_sz_ids.npy"), remaining_sz_ids)
# Apply NMF to pre-ictal period to find components (H) and expression (W)
n_remaining_sz = np.size(remaining_sz_ids)
n_components = range(2, 20)
for sz_idx in tqdm(range(n_remaining_sz)):
i_sz = remaining_sz_ids[sz_idx]
data = bandpower_data[sz_id == i_sz]
# print("\trunning NMF")
start_time = time.time()
reconstruction_err = np.zeros(np.size(n_components))
for ind, i_components in enumerate(n_components):
# print("\t\tTesting NMF with {} components".format(i_components))
model = NMF(n_components=i_components, init='nndsvd', random_state=0, max_iter=1000)
W = model.fit_transform(data - np.min(data))
reconstruction_err[ind] = model.reconstruction_err_
end_time = time.time()
# print("\tNMF took {} seconds".format(end_time - start_time))
kneedle = KneeLocator(n_components, reconstruction_err, curve="convex", direction="decreasing")
n_opt_components = kneedle.knee
# print("\t{} components was found as optimal, rerunning for final iteration".format(n_opt_components))
model = NMF(n_components=n_opt_components, init='nndsvd', random_state=0, max_iter=1000)
W = model.fit_transform(data - np.min(data))
H = model.components_
np.save(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(band_opt, electrodes_opt, i_sz)), W)
np.save(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz-{}.npy".format(band_opt, electrodes_opt, i_sz)), H)
np.save(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(band_opt, electrodes_opt)), t_sec)
np.save(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(band_opt, electrodes_opt)), sz_id)
##############################################
# %%
# # States are defined as the max expressed component at each time point
# states = np.argmax(movmean(W[:, 1:].T, k=100).T, axis=-1) + 1
# # take the dissimilarity in states, optionally using fast dynamic time warping
# if DTW_FLAG:
# states_dissim_mat = np.zeros((n_remaining_sz, n_remaining_sz))
# for ind1, i in enumerate(remaining_sz_ids):
# for ind2, j in enumerate(remaining_sz_ids):
# distance, path = fastdtw(states[sz_id == i], states[sz_id == j], dist=euclidean)
# states_dissim_mat[ind1, ind2] = distance
# else:
# # find how long pre-ictal segments are for each sz and take shortest one
# pre_ictal_lengths = np.zeros(remaining_sz_ids.shape, dtype=int)
# for ind, i_sz in enumerate(remaining_sz_ids):
# pre_ictal_lengths[ind] = np.size(states[sz_id == i_sz])
# pre_ictal_length = np.min(pre_ictal_lengths)
# # matrix of adjusted rand score for similar state occurences
# states_dissim_mat = np.zeros((n_remaining_sz, n_remaining_sz))
# for ind1, i in enumerate(remaining_sz_ids):
# for ind2, j in enumerate(remaining_sz_ids):
# rand = adjusted_rand_score(states[sz_id == i][-pre_ictal_length:], states[sz_id == j][-pre_ictal_length:])
# states_dissim_mat[ind1, ind2] = 1 - rand
# np.save(ospj(pt_data_path, "states_dissim_mat_{}.npy".format(mode)), states_dissim_mat)
# np.save(ospj(pt_data_path, "remaining_sz_ids.npy"), remaining_sz_ids)
# # Plot the NMF subgraphs and expression
# if PLOT:
# for i in remaining_sz_ids:
# fig, ax = plt.subplots()
# t_arr_min = (t_sec[sz_id == i] - t_sec[sz_id == i][-1]) / 60
# ax.plot(t_arr_min, movmean(W[sz_id == i, 1:].T, k=100, mode='same').T)
# ax.set_xlabel("Time from seizure onset (min)")
# ax.set_ylabel("Subgraph coefficient")
# ax.set_title("Seizure {}".format(i))
# ax.legend(np.arange(n_components - 1) + 2, title="Component")
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "subgraph_expression_sz_{}_{}.svg".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "subgraph_expression_sz_{}_{}.png".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# ax = plot_spectrogram(H, start_time=0, end_time=n_components)
# ax.set_title("{}".format(pt))
# ax.set_xlabel("Component")
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "subgraphs_{}.svg".format(mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "subgraphs_{}.png".format(mode)), bbox_inches='tight', transparent='true')
# plt.close()
# if PLOT:
# n_electrodes = soz_electrodes.shape[0]
# # plot all states
# component_arr = np.reshape(H, (n_components, -1, n_electrodes))
# # component_z = np.zeros(component_arr.shape)
# # for i_comp in range(n_components):
# # component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
# # sort to put non-soz first
# sort_soz_inds = np.argsort(soz_electrodes)
# n_soz = np.sum(soz_electrodes)
# n_non_soz = n_electrodes - n_soz
# for i_comp in range(n_components):
# fig, ax = plt.subplots()
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('right', size='5%', pad=0.05)
# im = ax.imshow(component_arr[i_comp, :, sort_soz_inds].T)
# ax.axvline(n_non_soz - 0.5, c='r', lw=2)
# ax.set_title("Subgraph {}, {}".format(i_comp, pt))
# ax.set_yticks(np.arange(6))
# ax.set_yticklabels([r'$\delta$', r'$\theta$', r'$\alpha$', r'$\beta$', r'low-$\gamma$', r'high-$\gamma$'])
# ax.set_xticks(np.arange(n_electrodes))
# ax.set_xticks([n_non_soz / 2, n_non_soz + n_soz / 2])
# ax.set_xticklabels(["Non SOZ", "SOZ"])
# ax.set_xlabel("Electrodes")
# ax.set_ylabel("Frequency band")
# cbar = fig.colorbar(im, cax=cax, orientation='vertical')
# cbar.ax.set_ylabel('Power (dB)', rotation=90)
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "soz_subgraph_{}_heatmap_{}.svg".format(i_comp, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "soz_subgraph_{}_heatmap_{}.png".format(i_comp, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# # plot soz state expression for all seizures
# for i in remaining_sz_ids:
# fig, ax = plt.subplots()
# t_arr_min = (t_sec[sz_id == i] - t_sec[sz_id == i][-1]) / 60
# ax.plot(t_arr_min, movmean(W[sz_id == i,pt_soz_state].T, k=100).T)
# ax.set_xlabel("Time from seizure onset (min)")
# ax.set_ylabel("SOZ subgraph coefficient")
# ax.set_title("Seizure {}".format(i))
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "soz_expression_sz_{}_{}.svg".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "soz_expression_sz_{}_{}.png".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# break
# # %%
# min_pre_ictal_size = min([W[sz_id == i,pt_soz_state].shape[0] for i in remaining_sz_ids])
# pre_ictal_soz_state = np.zeros((np.size(remaining_sz_ids), min_pre_ictal_size))
# for ind, i_sz in enumerate(remaining_sz_ids):
# pre_ictal_soz_state[ind, :] = W[sz_id == i_sz,pt_soz_state][-min_pre_ictal_size:]
# # %%
# # %%
|
[
"sys.path.append",
"kneed.KneeLocator",
"numpy.size",
"json.load",
"sklearn.decomposition.NMF",
"os.makedirs",
"warnings.filterwarnings",
"os.path.exists",
"time.time",
"numpy.insert",
"numpy.min",
"pull_sz_starts.pull_sz_starts",
"numpy.where",
"numpy.timedelta64",
"numpy.squeeze",
"os.path.join",
"numpy.unique"
] |
[((699, 723), 'sys.path.append', 'sys.path.append', (['"""tools"""'], {}), "('tools')\n", (714, 723), False, 'import sys\n'), ((1227, 1296), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'ConvergenceWarning'}), "(action='ignore', category=ConvergenceWarning)\n", (1250, 1296), False, 'import warnings\n'), ((1622, 1645), 'os.path.join', 'ospj', (['repo_path', '"""data"""'], {}), "(repo_path, 'data')\n", (1626, 1645), True, 'from os.path import join as ospj\n'), ((1660, 1686), 'os.path.join', 'ospj', (['repo_path', '"""figures"""'], {}), "(repo_path, 'figures')\n", (1664, 1686), True, 'from os.path import join as ospj\n'), ((1705, 1744), 'os.path.join', 'ospj', (['metadata_path', '"""DATA_MASTER.json"""'], {}), "(metadata_path, 'DATA_MASTER.json')\n", (1709, 1744), True, 'from os.path import join as ospj\n'), ((1384, 1396), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1393, 1396), False, 'import json\n'), ((1849, 1887), 'os.path.join', 'ospj', (['data_path', '"""patient_cohort.xlsx"""'], {}), "(data_path, 'patient_cohort.xlsx')\n", (1853, 1887), True, 'from os.path import join as ospj\n'), ((2288, 2341), 'os.path.join', 'ospj', (['metadata_path', '"""patient_localization_final.mat"""'], {}), "(metadata_path, 'patient_localization_final.mat')\n", (2292, 2341), True, 'from os.path import join as ospj\n'), ((2530, 2549), 'os.path.join', 'ospj', (['data_path', 'pt'], {}), '(data_path, pt)\n', (2534, 2549), True, 'from os.path import join as ospj\n'), ((2571, 2592), 'os.path.join', 'ospj', (['figure_path', 'pt'], {}), '(figure_path, pt)\n', (2575, 2592), True, 'from os.path import join as ospj\n'), ((2954, 2982), 'pull_sz_starts.pull_sz_starts', 'pull_sz_starts', (['pt', 'metadata'], {}), '(pt, metadata)\n', (2968, 2982), False, 'from pull_sz_starts import pull_sz_starts\n'), ((3795, 3823), 'numpy.squeeze', 'np.squeeze', (["df['Seizure id']"], {}), "(df['Seizure id'])\n", (3805, 3823), True, 'import numpy as np\n'), ((4663, 4688), 'numpy.size', 'np.size', (['remaining_sz_ids'], {}), '(remaining_sz_ids)\n', (4670, 4688), True, 'import numpy as np\n'), ((1792, 1804), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1801, 1804), False, 'import json\n'), ((2120, 2173), 'os.path.join', 'ospj', (['metadata_path', '"""patient_localization_final.mat"""'], {}), "(metadata_path, 'patient_localization_final.mat')\n", (2124, 2173), True, 'from os.path import join as ospj\n'), ((2604, 2634), 'os.path.exists', 'os.path.exists', (['pt_figure_path'], {}), '(pt_figure_path)\n', (2618, 2634), False, 'import os\n'), ((2644, 2671), 'os.makedirs', 'os.makedirs', (['pt_figure_path'], {}), '(pt_figure_path)\n', (2655, 2671), False, 'import os\n'), ((3899, 3915), 'numpy.unique', 'np.unique', (['sz_id'], {}), '(sz_id)\n', (3908, 3915), True, 'import numpy as np\n'), ((4089, 4106), 'numpy.where', 'np.where', (['lead_sz'], {}), '(lead_sz)\n', (4097, 4106), True, 'import numpy as np\n'), ((4130, 4148), 'numpy.where', 'np.where', (['(~lead_sz)'], {}), '(~lead_sz)\n', (4138, 4148), True, 'import numpy as np\n'), ((4501, 4543), 'os.path.join', 'ospj', (['pt_data_path', '"""remaining_sz_ids.npy"""'], {}), "(pt_data_path, 'remaining_sz_ids.npy')\n", (4505, 4543), True, 'from os.path import join as ospj\n'), ((4909, 4920), 'time.time', 'time.time', ([], {}), '()\n', (4918, 4920), False, 'import time\n'), ((5356, 5367), 'time.time', 'time.time', ([], {}), '()\n', (5365, 5367), False, 'import time\n'), ((5458, 5548), 'kneed.KneeLocator', 'KneeLocator', (['n_components', 
'reconstruction_err'], {'curve': '"""convex"""', 'direction': '"""decreasing"""'}), "(n_components, reconstruction_err, curve='convex', direction=\n 'decreasing')\n", (5469, 5548), False, 'from kneed import KneeLocator\n'), ((5714, 5799), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'n_opt_components', 'init': '"""nndsvd"""', 'random_state': '(0)', 'max_iter': '(1000)'}), "(n_components=n_opt_components, init='nndsvd', random_state=0, max_iter=1000\n )\n", (5717, 5799), False, 'from sklearn.decomposition import NMF\n'), ((3856, 3878), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (3870, 3878), True, 'import numpy as np\n'), ((3987, 4015), 'numpy.insert', 'np.insert', (['sz_starts', '(0)', '[0]'], {}), '(sz_starts, 0, [0])\n', (3996, 4015), True, 'import numpy as np\n'), ((4298, 4321), 'numpy.where', 'np.where', (['(sz_id == remv)'], {}), '(sz_id == remv)\n', (4306, 4321), True, 'import numpy as np\n'), ((4959, 4980), 'numpy.size', 'np.size', (['n_components'], {}), '(n_components)\n', (4966, 4980), True, 'import numpy as np\n'), ((5139, 5215), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'i_components', 'init': '"""nndsvd"""', 'random_state': '(0)', 'max_iter': '(1000)'}), "(n_components=i_components, init='nndsvd', random_state=0, max_iter=1000)\n", (5142, 5215), False, 'from sklearn.decomposition import NMF\n'), ((5834, 5846), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (5840, 5846), True, 'import numpy as np\n'), ((4372, 4395), 'numpy.where', 'np.where', (['(sz_id == remv)'], {}), '(sz_id == remv)\n', (4380, 4395), True, 'import numpy as np\n'), ((4446, 4469), 'numpy.where', 'np.where', (['(sz_id == remv)'], {}), '(sz_id == remv)\n', (4454, 4469), True, 'import numpy as np\n'), ((5259, 5271), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (5265, 5271), True, 'import numpy as np\n')]
|
import point
import math
class MonteCarloArea(object):
def __init__(self, problem, num_sample_points):
self.num_sample_points = num_sample_points
self.problem = problem
self.total_efficiency = 0.0
self.number_of_updates = 0
self.moving_average = 0.0
self.learning_rate = 1.0
def update_average_efficiency(self, quads):
num_in = 0
total = 0
        for _ in range(self.num_sample_points):
r_p = point.get_random_point(
self.problem.width, self.problem.height
)
for quad in quads:
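                # move the sample point into the quad's local frame, then rotate by -beta so its coverage ellipse is axis-aligned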
old_x = r_p.x - quad.x
old_y = r_p.y - quad.y
beta_r = math.radians(quad.beta)
X = old_x * math.cos(-beta_r) - old_y * math.sin(-beta_r)
Y = old_y * math.cos(-beta_r) + old_x * math.sin(-beta_r)
r_ma = quad.get_ellipse_major()
r_mi = quad.get_ellipse_minor()
h = quad.get_ellipse_center_dist()
k = 0
el_eval_x = pow(X - h, 2) / float(pow(r_ma, 2))
el_eval_y = pow(Y - k, 2) / float(pow(r_mi, 2))
if el_eval_x + el_eval_y <= 1:
num_in += 1
break
total += 1
self.total_efficiency += float(num_in) / total
self.number_of_updates += 1
self.moving_average = (1 - self.learning_rate) * self.moving_average +\
self.learning_rate * float(num_in) / total
return self
def get_moving_average_efficiency(self):
return self.moving_average
def get_average_efficiency(self):
return self.total_efficiency / float(self.number_of_updates)
class SensorQualityAverage(object):
def __init__(self, planner):
self.moving_average = 0.0
self.learning_rate = 1.0
self.planner = planner
def update_average_sq(self, quads):
sub_total = 0.0
for quad in quads:
sub_total += self.planner.sq(quad.z, quad.phi)
self.moving_average = (1 - self.learning_rate) *\
self.moving_average + self.learning_rate *\
sub_total / len(quads)
return self
def get_moving_average(self):
return self.moving_average
class RiskAverage(object):
def __init__(self, planner):
self.moving_average = 0.0
self.learning_rate = 1.0
self.planner = planner
def update_average_risk(self, quads):
sub_total = 0.0
for quad in quads:
sub_total += self.planner.risk(quad.x, quad.y, quad.z)
self.moving_average = (1 - self.learning_rate) *\
self.moving_average + self.learning_rate *\
sub_total / len(quads)
return self
def get_moving_average(self):
return self.moving_average
class AverageTimeDifference(object):
def __init__(self, grid):
self.avg = 0.0
self.grid = grid
def update_average_time_difference(self, current_time):
inner_sum = 0.0
        for x in range(self.grid.width):
            for y in range(self.grid.height):
t = self.grid.get_raw(x, y)
inner_sum += (current_time - t)
self.avg = inner_sum / (self.grid.width * self.grid.height)
def get_average(self):
return self.avg
class AverageMotionBlur(object):
def __init__(self):
self.avg = 0.0
self.learning_rate = 1.0
def update(self, val):
self.avg = (1 - self.learning_rate) * self.avg\
+ val * self.learning_rate
return self
def get_average(self):
return self.avg
|
[
"point.get_random_point",
"math.radians",
"math.cos",
"math.sin"
] |
[((484, 547), 'point.get_random_point', 'point.get_random_point', (['self.problem.width', 'self.problem.height'], {}), '(self.problem.width, self.problem.height)\n', (506, 547), False, 'import point\n'), ((714, 737), 'math.radians', 'math.radians', (['quad.beta'], {}), '(quad.beta)\n', (726, 737), False, 'import math\n'), ((766, 783), 'math.cos', 'math.cos', (['(-beta_r)'], {}), '(-beta_r)\n', (774, 783), False, 'import math\n'), ((794, 811), 'math.sin', 'math.sin', (['(-beta_r)'], {}), '(-beta_r)\n', (802, 811), False, 'import math\n'), ((840, 857), 'math.cos', 'math.cos', (['(-beta_r)'], {}), '(-beta_r)\n', (848, 857), False, 'import math\n'), ((868, 885), 'math.sin', 'math.sin', (['(-beta_r)'], {}), '(-beta_r)\n', (876, 885), False, 'import math\n')]
|
"""
https://rosettacode.org/wiki/Bitcoin/address_validation#Python
"""
# Libraries
from hashlib import sha256
# Constants
DIGITS_58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def decode_base58(addr, length):
n = 0
for char in addr:
n = n * 58 + DIGITS_58.index(char)
return n.to_bytes(length, 'big')
def check_addr(addr):
try:
# Get addr in bytes
addr_bytes = decode_base58(addr, 25)
# Retrieve checksum
cs = addr_bytes[-4:]
        # Base58Check: the checksum is the first 4 bytes of SHA-256(SHA-256(payload))
return cs == sha256(sha256(addr_bytes[:-4]).digest()).digest()[:4]
except Exception:
return False
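# Illustrative sanity check (added example, not part of the original snippet):
# the first address is the well-known genesis-block address; flipping its last
# character should break the Base58Check checksum.
if __name__ == '__main__':
    assert check_addr('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')
    assert not check_addr('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNb')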
|
[
"hashlib.sha256"
] |
[((559, 582), 'hashlib.sha256', 'sha256', (['addr_bytes[:-4]'], {}), '(addr_bytes[:-4])\n', (565, 582), False, 'from hashlib import sha256\n')]
|
from obvody import obvod_ctverce, obvod_trojuhelnika, obvod_obdelnika, obvod_kruhu
import obsahy as o
# supported shapes: ctverec (square), obdelnik (rectangle), trojuhelnik (equilateral triangle), kruh (circle)
typ = input("Enter the shape type (ctverec/obdelnik/trojuhelnik/kruh): ")
strana = int(input("Enter the side length / radius: "))
if typ == "ctverec":
obvod = obvod_ctverce(strana)
obsah = o.obsah_ctverce(strana)
elif typ == "obdelnik":
b = int(input("Zadej délku druhé strany: "))
obvod = obvod_obdelnika(strana,b)
obsah = o.obsah_obdelnika(strana,b)
elif typ == "trojuhelnik":
obvod = obvod_trojuhelnika(strana)
obsah = o.obsah_trojuhelnika(strana)
elif typ=="kruh":
obvod = obvod_kruhu(strana)
obsah = o.obsah_kruhu(strana)
else:
print("Zadal jsi neplatný tvar")
exit(1)
print(f"Obvod je {obvod} cm")
print(f"Obsah je {obsah} cm^2")
|
[
"obvody.obvod_kruhu",
"obsahy.obsah_trojuhelnika",
"obvody.obvod_trojuhelnika",
"obsahy.obsah_ctverce",
"obsahy.obsah_kruhu",
"obvody.obvod_obdelnika",
"obvody.obvod_ctverce",
"obsahy.obsah_obdelnika"
] |
[((277, 298), 'obvody.obvod_ctverce', 'obvod_ctverce', (['strana'], {}), '(strana)\n', (290, 298), False, 'from obvody import obvod_ctverce, obvod_trojuhelnika, obvod_obdelnika, obvod_kruhu\n'), ((311, 334), 'obsahy.obsah_ctverce', 'o.obsah_ctverce', (['strana'], {}), '(strana)\n', (326, 334), True, 'import obsahy as o\n'), ((420, 446), 'obvody.obvod_obdelnika', 'obvod_obdelnika', (['strana', 'b'], {}), '(strana, b)\n', (435, 446), False, 'from obvody import obvod_ctverce, obvod_trojuhelnika, obvod_obdelnika, obvod_kruhu\n'), ((458, 486), 'obsahy.obsah_obdelnika', 'o.obsah_obdelnika', (['strana', 'b'], {}), '(strana, b)\n', (475, 486), True, 'import obsahy as o\n'), ((525, 551), 'obvody.obvod_trojuhelnika', 'obvod_trojuhelnika', (['strana'], {}), '(strana)\n', (543, 551), False, 'from obvody import obvod_ctverce, obvod_trojuhelnika, obvod_obdelnika, obvod_kruhu\n'), ((564, 592), 'obsahy.obsah_trojuhelnika', 'o.obsah_trojuhelnika', (['strana'], {}), '(strana)\n', (584, 592), True, 'import obsahy as o\n'), ((623, 642), 'obvody.obvod_kruhu', 'obvod_kruhu', (['strana'], {}), '(strana)\n', (634, 642), False, 'from obvody import obvod_ctverce, obvod_trojuhelnika, obvod_obdelnika, obvod_kruhu\n'), ((655, 676), 'obsahy.obsah_kruhu', 'o.obsah_kruhu', (['strana'], {}), '(strana)\n', (668, 676), True, 'import obsahy as o\n')]
|
# This script compares reading from an array in a loop using the
# tables.Array.read method. In the first case, read is used without supplying
# an 'out' argument, which causes a new output buffer to be pre-allocated
# with each call. In the second case, the buffer is created once, and then
# reused.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import numpy as np
import tables
def create_file(array_size):
array = np.ones(array_size, dtype='i8')
with tables.open_file('test.h5', 'w') as fobj:
array = fobj.create_array('/', 'test', array)
print('file created, size: {0} MB'.format(array.size_on_disk / 1e6))
def standard_read(array_size):
N = 10
with tables.open_file('test.h5', 'r') as fobj:
array = fobj.get_node('/', 'test')
start = time.time()
for i in range(N):
output = array.read(0, array_size, 1)
end = time.time()
assert(np.all(output == 1))
print('standard read \t {0:5.5f}'.format((end - start) / N))
def pre_allocated_read(array_size):
N = 10
with tables.open_file('test.h5', 'r') as fobj:
array = fobj.get_node('/', 'test')
start = time.time()
output = np.empty(array_size, 'i8')
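        # reuse a single pre-allocated buffer for every read instead of allocating a new one per call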
for i in range(N):
array.read(0, array_size, 1, out=output)
end = time.time()
assert(np.all(output == 1))
print('pre-allocated read\t {0:5.5f}'.format((end - start) / N))
if __name__ == '__main__':
array_num_bytes = [int(x) for x in [1e5, 1e6, 1e7, 1e8]]
for array_bytes in array_num_bytes:
array_size = int(array_bytes // 8)
create_file(array_size)
standard_read(array_size)
pre_allocated_read(array_size)
print()
|
[
"numpy.empty",
"numpy.ones",
"time.time",
"tables.open_file",
"numpy.all"
] |
[((506, 537), 'numpy.ones', 'np.ones', (['array_size'], {'dtype': '"""i8"""'}), "(array_size, dtype='i8')\n", (513, 537), True, 'import numpy as np\n'), ((547, 579), 'tables.open_file', 'tables.open_file', (['"""test.h5"""', '"""w"""'], {}), "('test.h5', 'w')\n", (563, 579), False, 'import tables\n'), ((773, 805), 'tables.open_file', 'tables.open_file', (['"""test.h5"""', '"""r"""'], {}), "('test.h5', 'r')\n", (789, 805), False, 'import tables\n'), ((874, 885), 'time.time', 'time.time', ([], {}), '()\n', (883, 885), False, 'import time\n'), ((977, 988), 'time.time', 'time.time', ([], {}), '()\n', (986, 988), False, 'import time\n'), ((1004, 1023), 'numpy.all', 'np.all', (['(output == 1)'], {}), '(output == 1)\n', (1010, 1023), True, 'import numpy as np\n'), ((1154, 1186), 'tables.open_file', 'tables.open_file', (['"""test.h5"""', '"""r"""'], {}), "('test.h5', 'r')\n", (1170, 1186), False, 'import tables\n'), ((1255, 1266), 'time.time', 'time.time', ([], {}), '()\n', (1264, 1266), False, 'import time\n'), ((1284, 1310), 'numpy.empty', 'np.empty', (['array_size', '"""i8"""'], {}), "(array_size, 'i8')\n", (1292, 1310), True, 'import numpy as np\n'), ((1405, 1416), 'time.time', 'time.time', ([], {}), '()\n', (1414, 1416), False, 'import time\n'), ((1432, 1451), 'numpy.all', 'np.all', (['(output == 1)'], {}), '(output == 1)\n', (1438, 1451), True, 'import numpy as np\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_convert.ipynb (unless otherwise specified).
__all__ = ['code_cell', 'write_module_cell', 'init_nb', 'write_cell', 'write_nb', 'convert_lib']
# Cell
import json
from fastcore.basics import Path
from fastcore.xtras import is_listy
from fastcore.foundation import Config
from fastcore.script import call_parse
from fastprogress.fastprogress import progress_bar
from nbdev.export import nbglob, export_names, _re_class_func_def, _re_obj_def
from nbdev.sync import _split
from .generators import generate_settings, generate_ci, generate_doc_foundations, generate_setup
# Cell
def code_cell(code:str=None) -> str:
"""
Returns a Jupyter cell with potential `code`
"""
cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": []
}
if is_listy(code):
for i, c in enumerate(code):
if i < len(code)-1:
cell["source"].append(c+'\n')
else:
cell["source"].append(c)
elif code: cell["source"].append(code)
return cell
# Cell
def write_module_cell() -> str:
"""
Writes a template `Markdown` cell for the title and description of a notebook
"""
return {
"cell_type": "markdown",
"metadata": {},
"source": [
"# Default Title (change me)\n",
"> Default description (change me)"
]
}
# Cell
def init_nb(module_name:str) -> str:
"""
Initializes a complete blank notebook based on `module_name`
Also writes the first #default_exp cell and checks for a nested module (moduleA.moduleB)
"""
if module_name[0] == '.': module_name = module_name.split('.')[1]
if '.ipynb' in module_name: module_name = module_name.split('.ipynb')[0]
return {"cells":[code_cell(f"# default_exp {module_name}"), write_module_cell()],
"metadata":{
"jupytext":{"split_at_heading":True},
"kernelspec":{"display_name":"Python 3", "language": "python", "name": "python3"}
},
"nbformat":4,
"nbformat_minor":4}
# Cell
def write_cell(code:str, is_public:bool=False) -> str:
"""
Takes source `code`, adds an initial #export tag, and writes a Jupyter cell
"""
if is_public is None: export = ''
export = '#export' if is_public else '#exporti'
source = [f"{export}"] + code.split("\n")
return code_cell(source)
# Cell
def write_nb(cfg_path:str, cfg_name:str, splits:list, num:int, parent:str=None, private_list:list=[]) -> str:
"""
Writes a fully converted Jupyter Notebook based on `splits` and saves it in `Config`'s `nbs_path`.
The notebook number is based on `num`
`parent` denotes if the current notebook module is based on a parent module
such as `moduleA.moduleB`
`private_list` is a by-cell list of `True`/`False` for each block of code of whether it is private or public
"""
# Get filename
fname = splits[0][0]
if fname[0] == '.': fname = fname[1:]
if parent is not None: fname = f'{parent}.{fname}'
# Initialize and write notebook
nb = init_nb(fname)
for i, (_, code) in enumerate(splits):
c = write_cell(code, private_list[i])
nb["cells"].append(c)
# Figure out the notebook number
if num < 10:
fname = f'0{num}_{fname}'
else:
fname = f'{num}_{fname}'
# Save notebook in `nbs_path`
with open(f'{Config(cfg_path, cfg_name).path("nbs_path")/fname}', 'w+') as source_nb:
source_nb.write(json.dumps(nb))
# Internal Cell
def _not_private(n):
"Checks if a func is private or not, alternative to nbdev's"
for t in n.split('.'):
if (t.startswith('_') and not t.startswith('__')): return False
return '\\' not in t and '^' not in t and t != 'else'
# Cell
@call_parse
def convert_lib():
"""
Converts existing library to an nbdev one by autogenerating notebooks.
Optional prerequisites:
- Make a nbdev settings.ini file beforehand
- Optionally you can add `# Cell` and `# Internal Cell` tags in the source files where you would like specific cells to be
Run this command in the base of your repo
**Can only be run once**
"""
print('Checking for a settings.ini...')
cfg_path, cfg_name = '.', 'settings.ini'
generate_settings()
print('Gathering files...')
files = nbglob(extension='.py', config_key='lib_path', recursive=True)
if len(files) == 0: raise ValueError("No files were found, please ensure that `lib_path` is configured properly in `settings.ini`")
print(f'{len(files)} modules found in the library')
num_nbs = len(files)
nb_path = Config(cfg_path, cfg_name).path('nbs_path')
nb_path.mkdir(exist_ok=True)
print(f'Writing notebooks to {nb_path}...')
if nb_path.name == Config(cfg_path, cfg_name).lib_name:
nb_path = Path('')
slash = ''
else:
nb_path = Path(nb_path.name)
slash = '/'
for num, file in enumerate(progress_bar(files)):
if (file.parent.name != Config(cfg_path, cfg_name).lib_name) and slash is not None:
parent = file.parent.name
else:
parent = None
fname = file.name.split('.py')[0] + '.ipynb'
if fname[0] == '.': fname = fname[1:]
# Initial string in the .py
init_str = f"# AUTOGENERATED! DO NOT EDIT! File to edit: {nb_path}{slash}{fname} (unless otherwise specified).\n\n# Cell\n"
# Override existing code to include nbdev magic and one code cell
with open(file, encoding='utf8') as f: code = f.read()
if "AUTOGENERATED" not in code:
code = init_str + code
# Check to ensure we haven't tried exporting once yet
if "# Cell" and "# Internal Cell" not in code and '__all__' not in code:
split_code = code.split('\n')
private_list = [True]
_do_pass, _private, _public = False, '# Internal Cell\n', '# Cell\n'
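            # _do_pass skips the line immediately after a decorator, since that def/obj line was already tagged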
for row, line in enumerate(split_code):
if _do_pass: _do_pass = False; continue
# Deal with decorators
if '@' in line:
code = split_code[row+1]
if code[:4] == 'def ': code = code[4:]
if 'patch' in line or 'typedispatch' in line or not line[0].isspace():
is_private = _not_private(code.split('(')[0])
private_list.append(is_private)
split_code[row] = f'{_public}{line}' if is_private else f'{_private}{line}'
_do_pass = True
# Deal with objects
elif _re_obj_def.match(line) and not _do_pass:
is_private = _not_private(line.split('(')[0])
private_list.append(is_private)
split_code[row] = f'{_public}{line}' if is_private else f'{_private}{line}'
# Deal with classes or functions
elif _re_class_func_def.match(line) and not _do_pass:
is_private = _not_private(line.split(' ')[1].split('(')[0])
private_list.append(is_private)
split_code[row] = f'{_public}{line}' if is_private else f'{_private}{line}'
code = '\n'.join(split_code)
# Write to file
with open(file, 'w', encoding='utf8') as f: f.write(code)
# Build notebooks
splits = _split(code)
write_nb(cfg_path, cfg_name, splits, num, parent, private_list)
# Generate the `__all__` in the top of each .py
if '__all__' not in code:
c = code.split("(unless otherwise specified).")
code = c[0] + "(unless otherwise specified).\n" + f'\n__all__ = {export_names(code)}\n\n# Cell' + c[1]
with open(file, 'w', encoding='utf8') as f: f.write(code)
else:
print(f"{file.name} was already converted.")
generate_doc_foundations()
print(f"{Config(cfg_path, cfg_name).lib_name} successfully converted!")
_setup = int(input("Would you like to setup this project to be pip installable and configure a setup.py? (0/1)"))
if _setup:
generate_setup()
print('Project is configured for pypi, please see `setup.py` for any advanced configurations')
_workflow = int(input("Would you like to setup the automated Github workflow that nbdev provides? (0/1)"))
if _workflow:
generate_ci()
print("Github actions generated! Please make sure to include .github/actions/main.yml in your next commit!")
|
[
"nbdev.export._re_class_func_def.match",
"nbdev.export.nbglob",
"fastcore.xtras.is_listy",
"fastcore.basics.Path",
"fastprogress.fastprogress.progress_bar",
"json.dumps",
"fastcore.foundation.Config",
"nbdev.export._re_obj_def.match",
"nbdev.export.export_names",
"nbdev.sync._split"
] |
[((859, 873), 'fastcore.xtras.is_listy', 'is_listy', (['code'], {}), '(code)\n', (867, 873), False, 'from fastcore.xtras import is_listy\n'), ((4401, 4463), 'nbdev.export.nbglob', 'nbglob', ([], {'extension': '""".py"""', 'config_key': '"""lib_path"""', 'recursive': '(True)'}), "(extension='.py', config_key='lib_path', recursive=True)\n", (4407, 4463), False, 'from nbdev.export import nbglob, export_names, _re_class_func_def, _re_obj_def\n'), ((4899, 4907), 'fastcore.basics.Path', 'Path', (['""""""'], {}), "('')\n", (4903, 4907), False, 'from fastcore.basics import Path\n'), ((4956, 4974), 'fastcore.basics.Path', 'Path', (['nb_path.name'], {}), '(nb_path.name)\n', (4960, 4974), False, 'from fastcore.basics import Path\n'), ((5027, 5046), 'fastprogress.fastprogress.progress_bar', 'progress_bar', (['files'], {}), '(files)\n', (5039, 5046), False, 'from fastprogress.fastprogress import progress_bar\n'), ((3553, 3567), 'json.dumps', 'json.dumps', (['nb'], {}), '(nb)\n', (3563, 3567), False, 'import json\n'), ((4696, 4722), 'fastcore.foundation.Config', 'Config', (['cfg_path', 'cfg_name'], {}), '(cfg_path, cfg_name)\n', (4702, 4722), False, 'from fastcore.foundation import Config\n'), ((4844, 4870), 'fastcore.foundation.Config', 'Config', (['cfg_path', 'cfg_name'], {}), '(cfg_path, cfg_name)\n', (4850, 4870), False, 'from fastcore.foundation import Config\n'), ((7490, 7502), 'nbdev.sync._split', '_split', (['code'], {}), '(code)\n', (7496, 7502), False, 'from nbdev.sync import _split\n'), ((5081, 5107), 'fastcore.foundation.Config', 'Config', (['cfg_path', 'cfg_name'], {}), '(cfg_path, cfg_name)\n', (5087, 5107), False, 'from fastcore.foundation import Config\n'), ((8050, 8076), 'fastcore.foundation.Config', 'Config', (['cfg_path', 'cfg_name'], {}), '(cfg_path, cfg_name)\n', (8056, 8076), False, 'from fastcore.foundation import Config\n'), ((6694, 6717), 'nbdev.export._re_obj_def.match', '_re_obj_def.match', (['line'], {}), '(line)\n', (6711, 6717), False, 'from nbdev.export import nbglob, export_names, _re_class_func_def, _re_obj_def\n'), ((7020, 7050), 'nbdev.export._re_class_func_def.match', '_re_class_func_def.match', (['line'], {}), '(line)\n', (7044, 7050), False, 'from nbdev.export import nbglob, export_names, _re_class_func_def, _re_obj_def\n'), ((3456, 3482), 'fastcore.foundation.Config', 'Config', (['cfg_path', 'cfg_name'], {}), '(cfg_path, cfg_name)\n', (3462, 3482), False, 'from fastcore.foundation import Config\n'), ((7823, 7841), 'nbdev.export.export_names', 'export_names', (['code'], {}), '(code)\n', (7835, 7841), False, 'from nbdev.export import nbglob, export_names, _re_class_func_def, _re_obj_def\n')]
|
import torch
import config
import engine
import dataset_prep
import torch.nn as nn
import model
import metrics
import matplotlib.pyplot as plt
if __name__ == '__main__':
# DataLoaders
train_loader, val_loader = dataset_prep.tr_dataset(batch_size = config.BATCH_SIZE)
classes = val_loader.dataset.dataset.class_to_idx
print (classes)
# Model
# model_load = model.resnet_model_50()
model_load = model.resnet_model_101()
model_load.to(config.DEVICE)
print ('Model Loaded ---------- \n')
    # Loss, Optimizer and Scheduler
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model_load.parameters(), lr = config.LEARNING_RATE)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
print ('Training Started ---------- \n')
trained_model, train_losses, val_losses = engine.training_func(model_load, train_loader, val_loader, config.EPOCHS, config.DEVICE, optimizer, criterion)
torch.save(trained_model.state_dict(), config.OUT + 'resnet101_e5_0.0001.pth')
print ('Model Saved ---------- \n')
# Plotting the training and validation loss
plt.plot(train_losses, label='Training loss')
plt.plot(val_losses, label='Validation loss')
plt.legend(frameon=False)
plt.show()
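    # Hedged sketch (not part of the original script): reloading the checkpoint
    # saved above for inference. Assumes the same `model` and `config` modules;
    # the filename matches the torch.save() call earlier.
    #
    #   eval_model = model.resnet_model_101()
    #   eval_model.load_state_dict(torch.load(config.OUT + 'resnet101_e5_0.0001.pth'))
    #   eval_model.to(config.DEVICE)
    #   eval_model.eval()  # disable dropout / batch-norm updates before prediction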
|
[
"engine.training_func",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"torch.nn.CrossEntropyLoss",
"model.resnet_model_101",
"dataset_prep.tr_dataset"
] |
[((221, 274), 'dataset_prep.tr_dataset', 'dataset_prep.tr_dataset', ([], {'batch_size': 'config.BATCH_SIZE'}), '(batch_size=config.BATCH_SIZE)\n', (244, 274), False, 'import dataset_prep\n'), ((429, 453), 'model.resnet_model_101', 'model.resnet_model_101', ([], {}), '()\n', (451, 453), False, 'import model\n'), ((583, 604), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (602, 604), True, 'import torch.nn as nn\n'), ((854, 968), 'engine.training_func', 'engine.training_func', (['model_load', 'train_loader', 'val_loader', 'config.EPOCHS', 'config.DEVICE', 'optimizer', 'criterion'], {}), '(model_load, train_loader, val_loader, config.EPOCHS,\n config.DEVICE, optimizer, criterion)\n', (874, 968), False, 'import engine\n'), ((1151, 1196), 'matplotlib.pyplot.plot', 'plt.plot', (['train_losses'], {'label': '"""Training loss"""'}), "(train_losses, label='Training loss')\n", (1159, 1196), True, 'import matplotlib.pyplot as plt\n'), ((1201, 1246), 'matplotlib.pyplot.plot', 'plt.plot', (['val_losses'], {'label': '"""Validation loss"""'}), "(val_losses, label='Validation loss')\n", (1209, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1276), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (1261, 1276), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1289, 1291), True, 'import matplotlib.pyplot as plt\n')]
|
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""
Uploads an icon for the custom connector
"""
import os
import mimetypes
from urllib.parse import urlparse, urlunparse
from azure.storage.blob import ContentSettings, BlockBlobService
def upload_icon(sas_url, file_path):
# Break the SAS URL
(scheme, netloc, path, params, query, fragment) = urlparse(sas_url)
# Account is the first part of the netlocation upto the dot
account_name = netloc[0:netloc.index('.')]
# Container name is the path
container_name = path.strip('/')
# Create a block blob service
blockblob_service = BlockBlobService(
account_name=account_name,
sas_token=query)
# Get the file name of the icon
file_name = os.path.basename(file_path)
# Determine the content type and encoding for the file
(content_type, content_encoding) = mimetypes.guess_type(file_name)
content_settings = ContentSettings(
content_type=content_type,
content_encoding=content_encoding)
# Upload the icon
blockblob_service.create_blob_from_path(
container_name=container_name,
blob_name=file_name,
file_path=file_path,
content_settings=content_settings)
# Append the icon name to the path to generate the download link
path = path + '/' + file_name
urlparts = (scheme, netloc, path, params, query, fragment)
sas_download_url = urlunparse(urlparts)
return sas_download_url
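# Hedged usage sketch (not from the original module); the SAS URL and the
# local file path below are placeholders, not real values.
#
#   sas_url = 'https://myaccount.blob.core.windows.net/icons?sv=...&sig=...'
#   download_url = upload_icon(sas_url, 'icon.png')
#   print(download_url)  # SAS URL pointing at the uploaded blob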
|
[
"azure.storage.blob.BlockBlobService",
"azure.storage.blob.ContentSettings",
"os.path.basename",
"urllib.parse.urlunparse",
"urllib.parse.urlparse",
"mimetypes.guess_type"
] |
[((622, 639), 'urllib.parse.urlparse', 'urlparse', (['sas_url'], {}), '(sas_url)\n', (630, 639), False, 'from urllib.parse import urlparse, urlunparse\n'), ((880, 940), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', ([], {'account_name': 'account_name', 'sas_token': 'query'}), '(account_name=account_name, sas_token=query)\n', (896, 940), False, 'from azure.storage.blob import ContentSettings, BlockBlobService\n'), ((1011, 1038), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (1027, 1038), False, 'import os\n'), ((1137, 1168), 'mimetypes.guess_type', 'mimetypes.guess_type', (['file_name'], {}), '(file_name)\n', (1157, 1168), False, 'import mimetypes\n'), ((1192, 1269), 'azure.storage.blob.ContentSettings', 'ContentSettings', ([], {'content_type': 'content_type', 'content_encoding': 'content_encoding'}), '(content_type=content_type, content_encoding=content_encoding)\n', (1207, 1269), False, 'from azure.storage.blob import ContentSettings, BlockBlobService\n'), ((1685, 1705), 'urllib.parse.urlunparse', 'urlunparse', (['urlparts'], {}), '(urlparts)\n', (1695, 1705), False, 'from urllib.parse import urlparse, urlunparse\n')]
|
#! /usr/bin/env python
"""
Build an index that can be used to retrieve individual reads or contigs
by cDBG node ID; produce a SQLite database for fast retrieval.
Briefly, this script creates a sqlite database with a single table,
'sequences', where a query like this:
SELECT DISTINCT sequences.offset FROM sequences WHERE label ...
can be executed to return the offset of all sequences with the given
label. Here, 'label' is the cDBG ID to which the sequence belongs.
The script extract_reads_by_frontier_sqlite.py is a downstream script to
extract the reads with a frontier search.
Specifically,
* walk through the contigs assembled from the cDBG;
* build a DBG cover using khmer tags, such that every k-mer in the DBG
is within distance d=40 of a tag;
* label each tag with the cDBG node ID from the contig;
* save for later use.
"""
import sys
import os
import argparse
import screed
import khmer
import collections
import sqlite3
from spacegraphcats.search import search_utils
# graph settings
DEFAULT_KSIZE = 31
DEFAULT_MEMORY = 1e9
def main(argv=sys.argv[1:]):
p = argparse.ArgumentParser()
p.add_argument('catlas_prefix', help='catlas prefix')
p.add_argument('reads')
p.add_argument('savename')
p.add_argument('-k', '--ksize', default=DEFAULT_KSIZE, type=int)
p.add_argument('-M', '--memory', default=DEFAULT_MEMORY,
type=float)
args = p.parse_args(argv)
dbfilename = args.savename
if os.path.exists(dbfilename):
print('removing existing db {}'.format(dbfilename))
os.unlink(dbfilename)
db = sqlite3.connect(dbfilename)
cursor = db.cursor()
    cursor.execute('CREATE TABLE sequences (offset INTEGER, label INTEGER)')
db.commit()
# @CTB support different sizes.
graph_tablesize = int(args.memory * 8.0 / 4.0)
ng = khmer.Nodegraph(args.ksize, graph_tablesize, 4)
basename = os.path.basename(args.catlas_prefix)
contigfile = os.path.join(args.catlas_prefix, "contigs.fa.gz")
total_bp = 0
watermark_size = 1e6
watermark = watermark_size
print('walking catlas cDBG contigs: {}'.format(contigfile))
n = 0
tags_to_label = collections.defaultdict(int)
for contig in screed.open(contigfile):
n += 1
if total_bp >= watermark:
print('... {:5.2e} bp thru contigs'.format(int(watermark)),
file=sys.stderr)
watermark += watermark_size
total_bp += len(contig.sequence)
if len(contig.sequence) < args.ksize:
continue
cdbg_id = int(contig.name)
ng.consume_and_tag(contig.sequence)
tags = ng.get_tags_for_sequence(contig.sequence)
for t in tags:
tags_to_label[t] = cdbg_id
###
total_bp = 0
watermark_size = 1e7
watermark = watermark_size
print('walking read file: {}'.format(args.reads))
n = 0
cursor.execute('PRAGMA cache_size=1000000')
cursor.execute('PRAGMA synchronous = OFF')
cursor.execute('PRAGMA journal_mode = MEMORY')
# some sqlite installs start in transactions
try:
cursor.execute('BEGIN TRANSACTION')
except sqlite3.OperationalError:
pass
reader = search_utils.BgzfReader(args.reads)
for record, offset in search_utils.iterate_bgzf(reader):
n += 1
if total_bp >= watermark:
print('... {:5.2e} bp thru reads'.format(int(watermark)),
file=sys.stderr)
watermark += watermark_size
total_bp += len(record.sequence)
if len(record.sequence) < args.ksize:
continue
tags = ng.get_tags_for_sequence(record.sequence)
labels = set([ tags_to_label[t] for t in tags ])
for lb in labels:
cursor.execute('INSERT INTO sequences (offset, label) VALUES (?, ?)', (offset, lb))
db.commit()
db.close()
print('done!')
return 0
if __name__ == '__main__':
sys.exit(main())
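# Hedged sketch of the downstream lookup described in the module docstring
# (the real extraction lives in extract_reads_by_frontier_sqlite.py); the
# database filename and cdbg_id below are placeholders.
#
#   db = sqlite3.connect('index.sqlite')
#   cursor = db.cursor()
#   cursor.execute('SELECT DISTINCT sequences.offset FROM sequences'
#                  ' WHERE label = ?', (cdbg_id,))
#   offsets = [row[0] for row in cursor]   # BGZF offsets of the matching reads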
|
[
"argparse.ArgumentParser",
"os.unlink",
"os.path.basename",
"spacegraphcats.search.search_utils.BgzfReader",
"os.path.exists",
"collections.defaultdict",
"khmer.Nodegraph",
"sqlite3.connect",
"spacegraphcats.search.search_utils.iterate_bgzf",
"os.path.join",
"screed.open"
] |
[((1087, 1112), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1110, 1112), False, 'import argparse\n'), ((1469, 1495), 'os.path.exists', 'os.path.exists', (['dbfilename'], {}), '(dbfilename)\n', (1483, 1495), False, 'import os\n'), ((1597, 1624), 'sqlite3.connect', 'sqlite3.connect', (['dbfilename'], {}), '(dbfilename)\n', (1612, 1624), False, 'import sqlite3\n'), ((1841, 1888), 'khmer.Nodegraph', 'khmer.Nodegraph', (['args.ksize', 'graph_tablesize', '(4)'], {}), '(args.ksize, graph_tablesize, 4)\n', (1856, 1888), False, 'import khmer\n'), ((1905, 1941), 'os.path.basename', 'os.path.basename', (['args.catlas_prefix'], {}), '(args.catlas_prefix)\n', (1921, 1941), False, 'import os\n'), ((1959, 2008), 'os.path.join', 'os.path.join', (['args.catlas_prefix', '"""contigs.fa.gz"""'], {}), "(args.catlas_prefix, 'contigs.fa.gz')\n", (1971, 2008), False, 'import os\n'), ((2178, 2206), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (2201, 2206), False, 'import collections\n'), ((2225, 2248), 'screed.open', 'screed.open', (['contigfile'], {}), '(contigfile)\n', (2236, 2248), False, 'import screed\n'), ((3226, 3261), 'spacegraphcats.search.search_utils.BgzfReader', 'search_utils.BgzfReader', (['args.reads'], {}), '(args.reads)\n', (3249, 3261), False, 'from spacegraphcats.search import search_utils\n'), ((3288, 3321), 'spacegraphcats.search.search_utils.iterate_bgzf', 'search_utils.iterate_bgzf', (['reader'], {}), '(reader)\n', (3313, 3321), False, 'from spacegraphcats.search import search_utils\n'), ((1565, 1586), 'os.unlink', 'os.unlink', (['dbfilename'], {}), '(dbfilename)\n', (1574, 1586), False, 'import os\n')]
|
# Generated by Django 2.0.3 on 2020-02-18 03:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app1', '0005_flight_printed'),
]
operations = [
migrations.AddField(
model_name='flight',
name='squack',
field=models.CharField(max_length=20, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((327, 369), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)'}), '(max_length=20, null=True)\n', (343, 369), False, 'from django.db import migrations, models\n')]
|
from rest_flex_fields import FlexFieldsModelSerializer
from rest_framework import serializers
# from rest_framework_utils.key_related_field import key_related_field
from staff_management_models.staff_group_payments.class_models.staff_worker_payment import StaffWorkerPayment, \
StaffWorkerPaymentGroup
from staff_management_models.staff_group_payments.class_serializers.staff_worker_payment_serializer import \
StaffWorkerPaymentSerializer
class StaffWorkerPaymentGroupSerializer(FlexFieldsModelSerializer):
# staff_phone = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
staff_worker_payment = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
class Meta:
model = StaffWorkerPaymentGroup
fields = [
'id',
'url',
'pay_date',
'staff_worker_payment'
]
expandable_fields = {
# 'staff_phone': (StaffPhoneSerializer, {'many': True}),
'staff_worker_payment': (StaffWorkerPaymentSerializer, {'many': True})
}
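# Hedged usage note (not in the original module): rest_flex_fields typically
# lets a client opt into the nested representation declared in
# `expandable_fields` via an `expand` query parameter, e.g. (the endpoint
# path is a placeholder):
#
#   GET /staff-worker-payment-groups/?expand=staff_worker_payment
#
# Without `expand`, `staff_worker_payment` serializes as a list of primary keys.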
|
[
"rest_framework.serializers.PrimaryKeyRelatedField"
] |
[((627, 688), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'read_only': '(True)', 'many': '(True)'}), '(read_only=True, many=True)\n', (661, 688), False, 'from rest_framework import serializers\n')]
|
"""1337. The K Weakest Rows in a Matrix
https://leetcode.com/problems/the-k-weakest-rows-in-a-matrix/
"""
import collections
from typing import List
class Solution:
def kWeakestRows(self, mat: List[List[int]], k: int) -> List[int]:
def count(nums: List[int]) -> int:
cnt = 0
for num in nums:
if num == 0:
break
cnt += 1
return cnt
values = [False] * (len(mat[0]) + 1)
store = collections.defaultdict(list)
for i in range(len(mat)):
cur = count(mat[i])
store[cur].append(i)
values[cur] = True
ans = []
for i, value in enumerate(values):
if value:
rows = store[i]
for row in rows:
if k > 0:
ans.append(row)
k -= 1
return ans
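# Quick sanity check (not part of the original solution), using example 1 from
# the problem statement: soldier counts per row are [2, 4, 1, 2, 5], so the
# three weakest rows are [2, 0, 3].
if __name__ == '__main__':
    mat = [[1, 1, 0, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 0, 0, 0, 0],
           [1, 1, 0, 0, 0],
           [1, 1, 1, 1, 1]]
    print(Solution().kWeakestRows(mat, k=3))  # -> [2, 0, 3]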
|
[
"collections.defaultdict"
] |
[((495, 524), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (518, 524), False, 'import collections\n')]
|
# Generated by Django 2.2.13 on 2020-08-30 23:18
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('slug', models.SlugField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='blog.Category')),
],
options={
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('slug', models.SlugField(max_length=200)),
('category', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
),
]
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.SlugField",
"django.db.models.AutoField"
] |
[((357, 450), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (373, 450), False, 'from django.db import migrations, models\n'), ((474, 519), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'unique': '(True)'}), '(max_length=200, unique=True)\n', (490, 519), False, 'from django.db import migrations, models\n'), ((547, 602), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (563, 602), False, 'from django.db import migrations, models\n'), ((637, 676), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (653, 676), False, 'from django.db import migrations, models\n'), ((703, 746), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)'}), '(editable=False)\n', (730, 746), False, 'from django.db import migrations, models\n'), ((774, 817), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)'}), '(editable=False)\n', (801, 817), False, 'from django.db import migrations, models\n'), ((848, 906), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'db_index': '(True)', 'editable': '(False)'}), '(db_index=True, editable=False)\n', (875, 906), False, 'from django.db import migrations, models\n'), ((935, 978), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)'}), '(editable=False)\n', (962, 978), False, 'from django.db import migrations, models\n'), ((1367, 1460), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1383, 1460), False, 'from django.db import migrations, models\n'), ((1485, 1517), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1501, 1517), False, 'from django.db import migrations, models\n'), ((1545, 1577), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1561, 1577), False, 'from django.db import migrations, models\n')]
|
# -*- coding: UTF-8 -*-
__author__ = 'hunter'
from flask import Blueprint
from app.util.response_util import code_handle
from app.util.login_util import login
api = Blueprint('api', __name__)
api.after_app_request(code_handle)
api.before_app_request(login)
from . import views, errors
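# Hedged usage note (not in the original module): a blueprint like this is
# normally attached to the application elsewhere, e.g. in an app factory;
# the url_prefix is an assumption.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(api, url_prefix='/api')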
|
[
"flask.Blueprint"
] |
[((167, 193), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {}), "('api', __name__)\n", (176, 193), False, 'from flask import Blueprint\n')]
|
from game_files.character import Character
from game_files.menu import Menu
from game_files.game import Game
menu = Menu()
button = menu.main()
if button == "play":
    game = Game()
    game.maps_draw()
elif button == "saves":
    menu.saves()
elif button == "options":
    menu.options()
|
[
"game_files.menu.Menu",
"game_files.game.Game"
] |
[((117, 123), 'game_files.menu.Menu', 'Menu', ([], {}), '()\n', (121, 123), False, 'from game_files.menu import Menu\n'), ((180, 186), 'game_files.game.Game', 'Game', ([], {}), '()\n', (184, 186), False, 'from game_files.game import Game\n')]
|
# Django
from django.db import models
# Local Django
from exam import constants
class NewExam(models.Model):
exam_description = models.CharField(max_length=constants.DESC_TUSS_MAX_LENGTH)
|
[
"django.db.models.CharField"
] |
[((135, 194), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 'constants.DESC_TUSS_MAX_LENGTH'}), '(max_length=constants.DESC_TUSS_MAX_LENGTH)\n', (151, 194), False, 'from django.db import models\n')]
|
import sys, os
sys.path.append(os.getcwd()+'/d3m_ta2s_eval/ta3ta2api')
import pipeline_pb2 as pipeline__pb2
import primitive_pb2 as primitive__pb2
import problem_pb2 as problem__pb2
import value_pb2 as value__pb2
|
[
"os.getcwd"
] |
[((31, 42), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (40, 42), False, 'import sys, os\n')]
|
import numpy as np
def distance_from_region(label_mask, distance_mask=None, scale=1, ord=2):
"""Find the distance at every point in an image from a set of labeled points.
Parameters
==========
label_mask : ndarray
A mask designating the points to find the distance from. A True value
indicates that the pixel is in the region, a False value indicates it is not.
distance_mask : ndarray
A mask inidicating which regions to calculate the distance in
scale : int
Scale the calculated distance to another distance measure (eg. to millimeters)
ord : int
Order of norm to use when calculating distance. See np.linalg.norm for more details
Returns
=======
distances : ndarray
A masked array of the same size as label_mask.
If distance_mask is passed in then the output array is masked by it.
"""
if distance_mask is None:
distance_mask = np.ones(label_mask.shape, dtype=bool)
assert label_mask.shape == distance_mask.shape
scale = np.array(scale)
output = np.zeros(label_mask.shape)
indxs = np.indices(label_mask.shape)
X = indxs[:, distance_mask].T
Y = indxs[:, label_mask].T
for x in X:
output[tuple(x)] = np.linalg.norm(scale*(x-Y), ord=ord, axis=1).min()
return np.ma.array(output, mask=np.logical_not(distance_mask))
def contours(distances, contours=10):
amin,amax = distances.min(), distances.max()
edges,step = np.linspace(amin, amax, contours, retstep=True)
mask = np.logical_not(np.ma.getmaskarray(distances))
return [np.ma.getdata(mask & (distances >= cntr) & (distances < (cntr+step))) for cntr in edges[:-1]], edges
def plot_by_contours(arr, contour_masks, contour_vals, ax=None):
if ax is None:
import pylab as pl
_,ax = pl.subplots()
x = contour_vals[:-1]
y = np.array([arr[mask].mean() for mask in contour_masks])
ax.set_xlabel('Distance from surface (mm)')
ax.set_ylabel('Mean R2* value')
return ax.plot(x, y, 'o--')[0], x, y
def plot_by_distance(arr, distances, ax=None):
assert arr.shape == distances.shape
if ax is None:
import pylab
_,ax = pylab.subplots()
mask = np.logical_not(np.ma.getmaskarray(distances))
x = distances[mask].ravel()
y = arr[mask].ravel()
return ax.plot(x,y,'o')
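# Hedged usage sketch (not part of the original module): a tiny 2-D example
# with a single labeled pixel in the centre of a 5x5 grid.
if __name__ == '__main__':
    labels = np.zeros((5, 5), dtype=bool)
    labels[2, 2] = True
    d = distance_from_region(labels)
    print(d[0, 0])  # Euclidean distance from (0, 0) to (2, 2): ~2.8284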
|
[
"numpy.ma.getdata",
"numpy.ma.getmaskarray",
"numpy.zeros",
"numpy.ones",
"numpy.logical_not",
"numpy.indices",
"pylab.subplots",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace"
] |
[((1046, 1061), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (1054, 1061), True, 'import numpy as np\n'), ((1075, 1101), 'numpy.zeros', 'np.zeros', (['label_mask.shape'], {}), '(label_mask.shape)\n', (1083, 1101), True, 'import numpy as np\n'), ((1115, 1143), 'numpy.indices', 'np.indices', (['label_mask.shape'], {}), '(label_mask.shape)\n', (1125, 1143), True, 'import numpy as np\n'), ((1476, 1523), 'numpy.linspace', 'np.linspace', (['amin', 'amax', 'contours'], {'retstep': '(True)'}), '(amin, amax, contours, retstep=True)\n', (1487, 1523), True, 'import numpy as np\n'), ((945, 982), 'numpy.ones', 'np.ones', (['label_mask.shape'], {'dtype': 'bool'}), '(label_mask.shape, dtype=bool)\n', (952, 982), True, 'import numpy as np\n'), ((1550, 1579), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['distances'], {}), '(distances)\n', (1568, 1579), True, 'import numpy as np\n'), ((1822, 1835), 'pylab.subplots', 'pl.subplots', ([], {}), '()\n', (1833, 1835), True, 'import pylab as pl\n'), ((2195, 2211), 'pylab.subplots', 'pylab.subplots', ([], {}), '()\n', (2209, 2211), False, 'import pylab\n'), ((2239, 2268), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['distances'], {}), '(distances)\n', (2257, 2268), True, 'import numpy as np\n'), ((1339, 1368), 'numpy.logical_not', 'np.logical_not', (['distance_mask'], {}), '(distance_mask)\n', (1353, 1368), True, 'import numpy as np\n'), ((1593, 1662), 'numpy.ma.getdata', 'np.ma.getdata', (['(mask & (distances >= cntr) & (distances < cntr + step))'], {}), '(mask & (distances >= cntr) & (distances < cntr + step))\n', (1606, 1662), True, 'import numpy as np\n'), ((1252, 1300), 'numpy.linalg.norm', 'np.linalg.norm', (['(scale * (x - Y))'], {'ord': 'ord', 'axis': '(1)'}), '(scale * (x - Y), ord=ord, axis=1)\n', (1266, 1300), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Account authorization tests for Facebook and Google accounts.
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import json
import mock
import os
import time
import unittest
import urllib
from copy import deepcopy
from cStringIO import StringIO
from tornado import httpclient, options
from tornado.ioloop import IOLoop
from urlparse import urlparse
from viewfinder.backend.base import message, util
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.notification import Notification
from viewfinder.backend.db.settings import AccountSettings
from viewfinder.backend.db.user import User
from viewfinder.backend.op.fetch_contacts_op import FetchContactsOperation
from viewfinder.backend.www import auth
from viewfinder.backend.www.test import service_base_test
from viewfinder.backend.www.www_util import GzipEncode
@unittest.skipIf('NO_NETWORK' in os.environ, 'no network')
class AuthTestCase(service_base_test.ServiceBaseTestCase):
"""Initializes the test datastore and the viewfinder schema.
"""
def setUp(self):
super(AuthTestCase, self).setUp()
self._CreateSimpleTestAssets()
self._google_user_dict = {'family_name': 'Kimball', 'name': '<NAME>', 'locale': 'en',
'gender': 'male', 'email': '<EMAIL>',
'link': 'https://plus.google.com/id',
'given_name': 'Andrew', 'id': 'id', 'verified_email': True}
self._facebook_user_dict = {'first_name': 'Andrew', 'last_name': 'Kimball', 'name': '<NAME>',
'id': 'id', 'link': 'http://www.facebook.com/andrew.kimball.50',
'timezone':-7, 'locale': 'en_US', 'email': '<EMAIL>',
'picture': {'data': {'url': 'http://foo.com/pic.jpg',
'is_silhouette': False}},
'verified': True}
self._viewfinder_user_dict = {'name': '<NAME>', 'given_name': 'Andrew', 'email': '<EMAIL>'}
self._mobile_device_dict = {'name': 'Andy\'s IPhone', 'version': '1.0', 'platform': 'IPhone 4S',
'os': 'iOS 5.0.1', 'push_token': 'push_token',
'device_uuid': '926744AC-8540-4103-9F3F-C84AA2F6D648',
'test_udid': '7d527095d4e0539aba40c852547db5da00000000',
'country': 'US', 'language': 'en'}
self._prospective_user, _, _ = self._CreateProspectiveUser()
self._register_user_dict = {'email': self._prospective_user.email,
'name': '<NAME>',
'given_name': 'Jimmy',
'family_name': 'John'}
def tearDown(self):
super(AuthTestCase, self).tearDown()
options.options.freeze_new_accounts = False
def testRegisterWithCookie(self):
"""Register user, overriding current logged-in user."""
# Override registered user.
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict)
google_cookie = self._GetSecureUserCookie(user, device_id)
user2, _ = self._tester.RegisterFacebookUser(self._facebook_user_dict,
self._mobile_device_dict,
user_cookie=google_cookie)
self.assertNotEqual(user.user_id, user2.user_id)
# Override prospective user.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
user, _ = self._tester.RegisterViewfinderUser(self._viewfinder_user_dict, user_cookie=cookie)
self.assertNotEqual(self._prospective_user.user_id, user.user_id)
# Override with registration of prospective user.
user, _ = self._tester.RegisterViewfinderUser(self._register_user_dict, user_cookie=self._cookie)
self.assertNotEqual(user.user_id, self._user.user_id)
def testEmailAlertSettings(self):
"""Test that email/push alert settings are updated properly during registration."""
def _ValidateAlerts(email_alerts, push_alerts):
settings = self._RunAsync(AccountSettings.QueryByUser, self._client, self._prospective_user.user_id, None)
self.assertEqual(settings.email_alerts, email_alerts)
self.assertEqual(settings.sms_alerts, AccountSettings.SMS_NONE)
self.assertEqual(settings.push_alerts, push_alerts)
# Skip cleanup validation of alerts because a new device is created in this test that did not receive
# notifications sent as part of setUp() call.
self._skip_validation_for = ['Alerts']
# Register a prospective user using the web device.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
_ValidateAlerts(AccountSettings.EMAIL_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
user, device_id = self._tester.RegisterViewfinderUser(self._register_user_dict)
_ValidateAlerts(AccountSettings.EMAIL_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
# Update the user's email alert setting and validate the changed setting.
self._tester.UpdateUser(cookie, settings_dict={'email_alerts': 'none'})
_ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_NONE)
# Login and register a new mobile device and validate that email alerts were turned off
# and push alerts turned on.
self._tester.UpdateUser(cookie, settings_dict={'email_alerts': 'on_share_new'})
self._tester.LoginViewfinderUser(self._register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_ALL)
# Turn off push alerts, and then re-login, and validate that they were not turned back on.
self._tester.UpdateUser(cookie, settings_dict={'push_alerts': 'none'})
self._tester.LoginViewfinderUser(self._register_user_dict)
self._tester.LoginViewfinderUser(self._register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.EMAIL_NONE, AccountSettings.PUSH_NONE)
def testSmsAlertSettings(self):
"""Test that SMS/push alert settings are updated properly during registration."""
def _ValidateAlerts(sms_alerts, push_alerts):
settings = self._RunAsync(AccountSettings.QueryByUser, self._client, prospective_user.user_id, None)
self.assertEqual(settings.email_alerts, AccountSettings.EMAIL_NONE)
self.assertEqual(settings.sms_alerts, sms_alerts)
self.assertEqual(settings.push_alerts, push_alerts)
# Skip cleanup validation of alerts because a new device is created in this test that did not receive
# notifications sent as part of setUp() call.
self._skip_validation_for = ['Alerts']
# Create prospective user with mobile phone.
ident_key = 'Phone:+14251234567'
vp_id, ep_ids = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
[ident_key])
prospective_ident = self._RunAsync(Identity.Query, self._client, ident_key, None)
prospective_user = self._RunAsync(User.Query, self._client, prospective_ident.user_id, None)
register_user_dict = {'phone': prospective_user.phone,
'name': '<NAME>',
'given_name': 'Jimmy',
'family_name': 'John'}
# Register a prospective user using the web device.
cookie = self._GetSecureUserCookie(prospective_user, prospective_user.webapp_dev_id)
_ValidateAlerts(AccountSettings.SMS_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
user, device_id = self._tester.RegisterViewfinderUser(register_user_dict)
_ValidateAlerts(AccountSettings.SMS_ON_SHARE_NEW, AccountSettings.PUSH_NONE)
# Login and register a new mobile device and validate that SMS alerts were turned off
# and push alerts turned on.
self._tester.LoginViewfinderUser(register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.SMS_NONE, AccountSettings.PUSH_ALL)
# Turn off push alerts, and then re-login, and validate that they were not turned back on.
self._tester.UpdateUser(cookie, settings_dict={'push_alerts': 'none'})
self._tester.LoginViewfinderUser(register_user_dict)
self._tester.LoginViewfinderUser(register_user_dict, self._mobile_device_dict)
_ValidateAlerts(AccountSettings.SMS_NONE, AccountSettings.PUSH_NONE)
def testMultipleAuthorities(self):
"""Test multiple authorities that authenticate same identity."""
# Login as Google user, then as Viewfinder user with same email, then again as same Google user.
self._tester.RegisterGoogleUser({'name': '<NAME>', 'email': '<EMAIL>', 'verified_email': True})
self._tester.LoginViewfinderUser({'email': '<EMAIL>'},
self._mobile_device_dict)
identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
self.assertEqual(identity.authority, 'Viewfinder')
self.assertEqual(identity.expires, 0)
self._tester.LoginGoogleUser({'email': '<EMAIL>', 'verified_email': True})
identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
self.assertEqual(identity.authority, 'Google')
def testLoginWithCookie(self):
"""Test successful login override of current logged-in user."""
# Login with cookie from same user.
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
facebook_cookie = self._GetSecureUserCookie(user, device_id)
self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict, user_cookie=facebook_cookie)
# Login with cookie from different user.
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict)
google_cookie = self._GetSecureUserCookie(user, device_id)
self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict, user_cookie=google_cookie)
# Login with cookie from prospective user.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
self._tester.LoginFacebookUser(self._facebook_user_dict, user_cookie=cookie)
def testErrorFormat(self):
"""Test that error returned by the service handler is properly formed."""
ident_dict = {'key': 'Email:<EMAIL>', 'authority': 'FakeViewfinder'}
auth_info_dict = {'identity': ident_dict['key']}
url = self._tester.GetUrl('/login/viewfinder')
request_dict = _CreateRegisterRequest(self._mobile_device_dict, auth_info_dict, synchronous=False)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict, allow_errors=[403])
self.assertEqual(json.loads(response.body),
{'error': {'id': 'NO_USER_ACCOUNT',
'method': 'login',
'message': 'We can\'t find your Viewfinder account. Are you sure you used ' +
'<EMAIL> to sign up?'}})
def testLoginWithProspective(self):
"""ERROR: Try to log into a prospective user account."""
self.assertRaisesHttpError(403, self._tester.LoginViewfinderUser, self._register_user_dict)
def testLinkWithProspective(self):
"""ERROR: Try to link another identity to a prospective user."""
# Link with cookie from prospective user, using Facebook account that is not yet linked.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
self.assertRaisesHttpError(403, self._tester.LinkFacebookUser, self._facebook_user_dict, user_cookie=cookie)
def testLinkAlreadyLinked(self):
"""ERROR: Try to link a Google account that is already linked to a different Viewfinder account."""
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
facebook_cookie = self._GetSecureUserCookie(user, device_id)
self._tester.RegisterGoogleUser(self._google_user_dict)
self.assertRaisesHttpError(403, self._tester.LinkGoogleUser, self._google_user_dict,
self._mobile_device_dict, user_cookie=facebook_cookie)
def testUpdateFriendAttribute(self):
"""Update name of a user and ensure that each friend is notified."""
# Create a prospective user by sharing with an email.
vp_id, ep_ids = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
['Email:<EMAIL>', self._user2.user_id])
# Register the user and verify friends are notified.
self._tester.RegisterGoogleUser(self._google_user_dict)
response_dict = self._tester.QueryNotifications(self._cookie2, 1, scan_forward=False)
self.assertEqual(response_dict['notifications'][0]['invalidate'], {u'users': [5]})
def testRegisterContact(self):
"""Register an identity that is the target of a contact, which will
be bound to a user_id as a result.
"""
# Create a contact.
user_dict = {'name': '<NAME>', 'email': '<EMAIL>', 'verified_email': True}
identity_key = 'Email:%s' % user_dict['email']
contact_dict = Contact.CreateContactDict(self._user.user_id,
[(identity_key, None)],
util._TEST_TIME,
Contact.GMAIL,
name=user_dict['name'])
self._UpdateOrAllocateDBObject(Contact, **contact_dict)
# Register the new user.
user, device_id = self._tester.RegisterGoogleUser(user_dict)
response_dict = self._tester.QueryNotifications(self._cookie, 1, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['first register contact'])
def testRegisterProspectiveContact(self):
"""Register an identity that is the target of a contact (that is still a prospective user)."""
for user_id in [self._user.user_id, self._user2.user_id]:
# Create several contacts.
identity_key = 'Email:%s' % self._prospective_user.email
contact_dict = Contact.CreateContactDict(user_id,
[(identity_key, None)],
util._TEST_TIME,
Contact.GMAIL,
name='Mr. John')
self._UpdateOrAllocateDBObject(Contact, **contact_dict)
# Register the prospective user.
user, device_id = self._tester.RegisterViewfinderUser(self._register_user_dict)
# Expect friend & contact notifications.
response_dict = self._tester.QueryNotifications(self._cookie, 2, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['register friend', 'first register contact'])
# Expect only contact notification.
response_dict = self._tester.QueryNotifications(self._cookie2, 1, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['first register contact'])
# Expect only friend notification.
cookie = self._GetSecureUserCookie(self._prospective_user, self._prospective_user.webapp_dev_id)
response_dict = self._tester.QueryNotifications(cookie, 2, scan_forward=False)
self.assertEqual([notify_dict['name'] for notify_dict in response_dict['notifications']],
['register friend', 'share_new'])
def testNewIdentityOnly(self):
"""Register existing user and device, but create new identity via link."""
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
cookie = self._GetSecureUserCookie(user, device_id)
self._mobile_device_dict['device_id'] = device_id
self._tester.LinkFacebookUser(self._facebook_user_dict, self._mobile_device_dict, cookie)
def testNewDeviceOnly(self):
"""Register existing user and identity, but create new device as part of login."""
self._tester.RegisterGoogleUser(self._google_user_dict)
self._tester.LoginGoogleUser(self._google_user_dict, self._mobile_device_dict)
def testDuplicateToken(self):
"""Register device with push token that is already in use by another device."""
self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
def testAsyncRequest(self):
"""Send async register request."""
ident_dict = {'key': 'Email:<EMAIL>', 'authority': 'FakeViewfinder'}
auth_info_dict = {'identity': ident_dict['key']}
url = self._tester.GetUrl('/link/fakeviewfinder')
request_dict = _CreateRegisterRequest(self._mobile_device_dict, auth_info_dict, synchronous=False)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict, user_cookie=self._cookie)
response_dict = json.loads(response.body)
self._validate = False
# Wait until notification is written by the background fetch_contacts op.
while True:
notification = self._RunAsync(Notification.QueryLast, self._client, response_dict['user_id'])
if notification.name == 'fetch_contacts':
self.assertEqual(notification.op_id, response_dict['headers']['op_id'])
break
self._RunAsync(IOLoop.current().add_timeout, time.time() + .1)
def testDeviceNoUser(self):
"""ERROR: Try to register existing device without existing user."""
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
self._mobile_device_dict['device_id'] = device_id
self.assertRaisesHttpError(403, self._tester.RegisterFacebookUser, self._facebook_user_dict,
self._mobile_device_dict)
def testDeviceNotOwned(self):
"""ERROR: Try to register existing device that is not owned by the
existing user.
"""
self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict)
self._mobile_device_dict['device_id'] = 1000
self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict,
self._mobile_device_dict)
def testRegisterFreezeNewAccounts(self):
"""ERROR: Verify that attempt to register fails if --freeze_new_accounts
is true. This is the kill switch the server can throw to stop the tide
of incoming account registrations.
"""
options.options.freeze_new_accounts = True
exc = self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict,
self._mobile_device_dict)
error_dict = json.loads(exc.response.body)
self.assertEqual(error_dict['error']['message'], auth._FREEZE_NEW_ACCOUNTS_MESSAGE)
self.assertRaisesHttpError(403, self._tester.RegisterFacebookUser, self._facebook_user_dict)
def testLoginWithUnboundIdentity(self):
"""ERROR: Try to login with an identity that exists, but is not bound to a user."""
self._UpdateOrAllocateDBObject(Identity, key='Email:<EMAIL>')
self.assertRaisesHttpError(403,
self._tester.LoginViewfinderUser,
self._viewfinder_user_dict,
self._mobile_device_dict)
def testBadRequest(self):
"""ERROR: Verify that various malformed and missing register fields result
in a bad request (400) error.
"""
# Missing request dict.
url = self.get_url('/register/facebook') + '?' + urllib.urlencode({'access_token': 'dummy'})
self.assertRaisesHttpError(400, _SendAuthRequest, self._tester, url, 'POST', request_dict='')
# Malformed request dict.
self.assertRaisesHttpError(400, _SendAuthRequest, self._tester, url, 'POST', request_dict={'device': 'foo'})
def testRegisterExisting(self):
"""ERROR: Try to register a user that already exists."""
self._tester.RegisterViewfinderUser(self._viewfinder_user_dict)
self.assertRaisesHttpError(403,
self._tester.RegisterViewfinderUser,
self._viewfinder_user_dict,
self._mobile_device_dict)
def testLogout(self):
"""Ensure that logout sends back a cookie with an expiration time."""
url = self._tester.GetUrl('/logout')
response = _SendAuthRequest(self._tester, url, 'GET', user_cookie=self._cookie)
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['location'], '/')
self.assertIn('user', response.headers['Set-Cookie'])
self.assertIn('expires', response.headers['Set-Cookie'])
self.assertIn('Domain', response.headers['Set-Cookie'])
def testSessionCookie(self):
"""Test "use_session_cookie" option in auth request."""
# First register a user, requesting a session cookie.
auth_info_dict = {'identity': 'Email:<EMAIL>',
'name': '<NAME>',
'given_name': 'Andy',
'password': '<PASSWORD>'}
url = self._tester.GetUrl('/register/viewfinder')
request_dict = _CreateRegisterRequest(None, auth_info_dict)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertNotIn('Set-Cookie', response.headers)
identity = self._tester._RunAsync(Identity.Query, self._client, auth_info_dict['identity'], None)
url = self._tester.GetUrl('/verify/viewfinder')
request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION,
'synchronous': True},
'identity': identity.key,
'access_token': identity.access_token,
'use_session_cookie': True}
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertNotIn('expires', response.headers['Set-Cookie'])
cookie_user_dict = self._tester.DecodeUserCookie(self._tester.GetCookieFromResponse(response))
self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
# Now log in and request a session cookie.
del auth_info_dict['name']
del auth_info_dict['given_name']
url = self._tester.GetUrl('/login/viewfinder')
request_dict = _CreateRegisterRequest(None, auth_info_dict, synchronous=False)
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertIn('expires', response.headers['Set-Cookie'])
request_dict['use_session_cookie'] = True
response = _SendAuthRequest(self._tester, url, 'POST', request_dict=request_dict)
self.assertNotIn('expires', response.headers['Set-Cookie'])
cookie = self._tester.GetCookieFromResponse(response)
cookie_user_dict = self._tester.DecodeUserCookie(cookie)
self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
# Now use the session cookie to make a service request and verify it's preserved.
request_dict = {'headers': {'version': message.MAX_SUPPORTED_MESSAGE_VERSION, 'synchronous': True}}
headers = {'Content-Type': 'application/json',
'X-Xsrftoken': 'fake_xsrf',
'Cookie': '_xsrf=fake_xsrf;user=%s' % cookie}
response = self._RunAsync(self.http_client.fetch,
self._tester.GetUrl('/service/query_followed'),
method='POST',
body=json.dumps(request_dict),
headers=headers)
cookie_user_dict = self._tester.DecodeUserCookie(self._tester.GetCookieFromResponse(response))
self.assertTrue(cookie_user_dict.get('is_session_cookie', False))
def _CreateRegisterRequest(device_dict=None, auth_info_dict=None, synchronous=True,
version=message.MAX_SUPPORTED_MESSAGE_VERSION):
"""Returns a new AUTH_REQUEST dict that has been populated with information from the
specified dicts.
"""
request_dict = {'headers': {'version': version}}
util.SetIfNotNone(request_dict, 'device', device_dict)
util.SetIfNotNone(request_dict, 'auth_info', auth_info_dict)
if synchronous:
request_dict['headers']['synchronous'] = True
return request_dict
def _AddMockJSONResponse(mock_client, url, response_dict):
"""Add a mapping entry to the mock client such that requests to
"url" will return an HTTP response containing the JSON-formatted
"response_dict".
"""
def _CreateResponse(request):
return httpclient.HTTPResponse(request, 200,
headers={'Content-Type': 'application/json'},
buffer=StringIO(json.dumps(response_dict)))
mock_client.map(url, _CreateResponse)
def _SendAuthRequest(tester, url, http_method, user_cookie=None, request_dict=None, allow_errors=None):
"""Sends request to auth service. If "request_dict" is defined, dumps it as a JSON body.
If "user_cookie" is defined, automatically adds a "Cookie" header. Raises an HTTPError if
an HTTP error is returned, unless the error code is part of the "allow_errors" set. Returns
the HTTP response object on success.
"""
headers = {'Content-Type': 'application/json',
'Content-Encoding': 'gzip'}
if user_cookie is not None:
headers['Cookie'] = 'user=%s' % user_cookie
# All requests are expected to have xsrf cookie/header.
headers['X-Xsrftoken'] = 'fake_xsrf'
headers['Cookie'] = headers['Cookie'] + ';_xsrf=fake_xsrf' if headers.has_key('Cookie') else '_xsrf=fake_xsrf'
with mock.patch.object(FetchContactsOperation, '_SKIP_UPDATE_FOR_TEST', True):
response = tester._RunAsync(tester.http_client.fetch, url, method=http_method,
body=None if request_dict is None else GzipEncode(json.dumps(request_dict)),
headers=headers, follow_redirects=False)
if response.code >= 400:
if allow_errors is None or response.code not in allow_errors:
response.rethrow()
return response
def _AuthFacebookOrGoogleUser(tester, action, user_dict, ident_dict, device_dict, user_cookie):
"""Registers a user, identity, and device using the auth web service. The interface to Facebook
or Google is mocked, with the contents of "user_dict" returned in lieu of what the real service
would return. If "device_dict" is None, then simulates the web experience; else simulates the
mobile device experience. If "user_cookie" is not None, then simulates case where calling user
is already logged in when registering the new user. Returns the HTTP response that was returned
by the auth service.
"""
if device_dict is None:
# Web client.
url = tester.GetUrl('/%s/%s' % (action, ident_dict['authority'].lower()))
response = _SendAuthRequest(tester, url, 'GET', user_cookie=user_cookie)
assert response.code == 302, response.code
    # Invoke authentication again, this time sending code.
url = tester.GetUrl('/%s/%s?code=code' % (action, ident_dict['authority'].lower()))
response = _SendAuthRequest(tester, url, 'GET', user_cookie=user_cookie)
assert response.code == 302, response.code
assert response.headers['location'].startswith('/view')
else:
if ident_dict['authority'] == 'Facebook':
url = tester.GetUrl('/%s/facebook?access_token=access_token' % action)
else:
url = tester.GetUrl('/%s/google?refresh_token=refresh_token' % action)
request_dict = _CreateRegisterRequest(device_dict)
response = _SendAuthRequest(tester, url, 'POST', user_cookie=user_cookie, request_dict=request_dict)
return response
def _ValidateAuthUser(tester, action, user_dict, ident_dict, device_dict, user_cookie, auth_response):
"""Validates an auth action that has taken place and resulted in the HTTP response given
by "auth_response".
"""
validator = tester.validator
# Validate the response from a GET (device_dict is None) or POST to auth service.
if device_dict is None:
# Get the id of the user that should have been created by the registration.
actual_identity = tester._RunAsync(Identity.Query, validator.client, ident_dict['key'], None)
actual_user_id = actual_identity.user_id
else:
# Extract the user_id and device_id from the JSON response.
response_dict = json.loads(auth_response.body)
actual_op_id = response_dict['headers']['op_id']
actual_user_id = response_dict['user_id']
actual_device_id = response_dict.get('device_id', None)
# Verify that the cookie in the response contains the correct information.
cookie_user_dict = tester.DecodeUserCookie(tester.GetCookieFromResponse(auth_response))
assert cookie_user_dict['user_id'] == actual_user_id, (cookie_user_dict, actual_user_id)
assert device_dict is None or 'device_id' not in device_dict or \
cookie_user_dict['device_id'] == device_dict['device_id'], \
(cookie_user_dict, device_dict)
actual_user = tester._RunAsync(User.Query, validator.client, actual_user_id, None)
if device_dict is None:
# If no mobile device was used, then web device id is expected.
actual_device_id = actual_user.webapp_dev_id
# Get notifications that were created. There could be up to 2: a register_user notification and
# a fetch_contacts notification (in link case).
notification_list = tester._RunAsync(Notification.RangeQuery,
tester.validator.client,
actual_user_id,
range_desc=None,
limit=3,
col_names=None,
scan_forward=False)
if device_dict is None:
actual_op_id = notification_list[1 if action == 'link' else 0].op_id
# Determine what the registered user's id should have been.
if user_cookie is None or action != 'link':
expected_user_id = None
else:
expected_user_id, device_id = tester.GetIdsFromCookie(user_cookie)
expected_identity = validator.GetModelObject(Identity, ident_dict['key'], must_exist=False)
if expected_identity is not None:
# Identity already existed, so expect registered user's id to equal the user id of that identity.
expected_user_id = expected_identity.user_id
# Verify that identity is linked to expected user.
assert expected_user_id is None or expected_user_id == actual_user_id, \
(expected_user_id, actual_user_id)
# Validate the device if it should have been created.
if device_dict is None:
expected_device_dict = None
else:
expected_device_dict = deepcopy(device_dict)
if 'device_id' not in device_dict:
expected_device_dict['device_id'] = actual_device_id
# Re-map picture element for Facebook authority (Facebook changed format in Oct 2012).
scratch_user_dict = deepcopy(user_dict)
if ident_dict['authority'] == 'Facebook':
if device_dict is None:
scratch_user_dict['session_expires'] = ['3600']
if 'picture' in scratch_user_dict:
scratch_user_dict['picture'] = scratch_user_dict['picture']['data']['url']
elif ident_dict['authority'] == 'Viewfinder' and action != 'register':
# Only use name in registration case.
scratch_user_dict.pop('name', None)
# Validate the Identity object.
expected_ident_dict = deepcopy(ident_dict)
expected_ident_dict.pop('json_attrs', None)
if ident_dict['authority'] == 'Viewfinder':
identity = tester._RunAsync(Identity.Query, tester.validator.client, ident_dict['key'], None)
expected_ident_dict['access_token'] = identity.access_token
expected_ident_dict['expires'] = identity.expires
# Validate the User object.
expected_user_dict = {}
before_user = validator.GetModelObject(User, actual_user_id, must_exist=False)
before_user_dict = {} if before_user is None else before_user._asdict()
for k, v in scratch_user_dict.items():
user_key = auth.AuthHandler._AUTH_ATTRIBUTE_MAP.get(k, None)
if user_key is not None:
if before_user is None or getattr(before_user, user_key) is None:
expected_user_dict[auth.AuthHandler._AUTH_ATTRIBUTE_MAP[k]] = v
# Set facebook email if it has not yet been set.
if user_key == 'email' and ident_dict['authority'] == 'Facebook':
if before_user is None or getattr(before_user, 'facebook_email') is None:
expected_user_dict['facebook_email'] = v
expected_user_dict['user_id'] = actual_user_id
expected_user_dict['webapp_dev_id'] = actual_user.webapp_dev_id
op_dict = {'op_timestamp': util._TEST_TIME,
'op_id': notification_list[1 if action == 'link' else 0].op_id,
'user_id': actual_user_id,
'device_id': actual_device_id}
if expected_device_dict:
expected_device_dict.pop('device_uuid', None)
expected_device_dict.pop('test_udid', None)
is_prospective = before_user is None or not before_user.IsRegistered()
validator.ValidateUpdateUser('first register contact' if is_prospective else 'link contact',
op_dict,
expected_user_dict,
expected_ident_dict,
device_dict=expected_device_dict)
after_user_dict = validator.GetModelObject(User, actual_user_id)._asdict()
if expected_identity is not None:
expected_ident_dict['user_id'] = expected_identity.user_id
if action == 'link':
ignored_keys = ['user_id', 'webapp_dev_id']
if 'user_id' not in expected_ident_dict and all(k in ignored_keys for k in expected_user_dict.keys()):
# Only notify self if it hasn't been done through Friends.
validator.ValidateUserNotification('register friend self', actual_user_id, op_dict)
# Validate fetch_contacts notification.
op_dict['op_id'] = notification_list[0].op_id
invalidate = {'contacts': {'start_key': Contact.CreateSortKey(None, util._TEST_TIME)}}
validator.ValidateNotification('fetch_contacts', actual_user_id, op_dict, invalidate)
return actual_user, actual_device_id if device_dict is not None else None
|
[
"unittest.skipIf",
"copy.deepcopy",
"mock.patch.object",
"tornado.ioloop.IOLoop.current",
"json.loads",
"viewfinder.backend.base.util.SetIfNotNone",
"json.dumps",
"time.time",
"viewfinder.backend.db.contact.Contact.CreateSortKey",
"viewfinder.backend.www.auth.AuthHandler._AUTH_ATTRIBUTE_MAP.get",
"urllib.urlencode",
"viewfinder.backend.db.contact.Contact.CreateContactDict"
] |
[((1004, 1061), 'unittest.skipIf', 'unittest.skipIf', (["('NO_NETWORK' in os.environ)", '"""no network"""'], {}), "('NO_NETWORK' in os.environ, 'no network')\n", (1019, 1061), False, 'import unittest\n'), ((24276, 24330), 'viewfinder.backend.base.util.SetIfNotNone', 'util.SetIfNotNone', (['request_dict', '"""device"""', 'device_dict'], {}), "(request_dict, 'device', device_dict)\n", (24293, 24330), False, 'from viewfinder.backend.base import message, util\n'), ((24333, 24393), 'viewfinder.backend.base.util.SetIfNotNone', 'util.SetIfNotNone', (['request_dict', '"""auth_info"""', 'auth_info_dict'], {}), "(request_dict, 'auth_info', auth_info_dict)\n", (24350, 24393), False, 'from viewfinder.backend.base import message, util\n'), ((31107, 31126), 'copy.deepcopy', 'deepcopy', (['user_dict'], {}), '(user_dict)\n', (31115, 31126), False, 'from copy import deepcopy\n'), ((31587, 31607), 'copy.deepcopy', 'deepcopy', (['ident_dict'], {}), '(ident_dict)\n', (31595, 31607), False, 'from copy import deepcopy\n'), ((13320, 13450), 'viewfinder.backend.db.contact.Contact.CreateContactDict', 'Contact.CreateContactDict', (['self._user.user_id', '[(identity_key, None)]', 'util._TEST_TIME', 'Contact.GMAIL'], {'name': "user_dict['name']"}), "(self._user.user_id, [(identity_key, None)], util.\n _TEST_TIME, Contact.GMAIL, name=user_dict['name'])\n", (13345, 13450), False, 'from viewfinder.backend.db.contact import Contact\n'), ((17221, 17246), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (17231, 17246), False, 'import json\n'), ((18981, 19010), 'json.loads', 'json.loads', (['exc.response.body'], {}), '(exc.response.body)\n', (18991, 19010), False, 'import json\n'), ((25799, 25871), 'mock.patch.object', 'mock.patch.object', (['FetchContactsOperation', '"""_SKIP_UPDATE_FOR_TEST"""', '(True)'], {}), "(FetchContactsOperation, '_SKIP_UPDATE_FOR_TEST', True)\n", (25816, 25871), False, 'import mock\n'), ((28548, 28578), 'json.loads', 'json.loads', (['auth_response.body'], {}), '(auth_response.body)\n', (28558, 28578), False, 'import json\n'), ((30875, 30896), 'copy.deepcopy', 'deepcopy', (['device_dict'], {}), '(device_dict)\n', (30883, 30896), False, 'from copy import deepcopy\n'), ((32184, 32233), 'viewfinder.backend.www.auth.AuthHandler._AUTH_ATTRIBUTE_MAP.get', 'auth.AuthHandler._AUTH_ATTRIBUTE_MAP.get', (['k', 'None'], {}), '(k, None)\n', (32224, 32233), False, 'from viewfinder.backend.www import auth\n'), ((10864, 10889), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (10874, 10889), False, 'import json\n'), ((14336, 14447), 'viewfinder.backend.db.contact.Contact.CreateContactDict', 'Contact.CreateContactDict', (['user_id', '[(identity_key, None)]', 'util._TEST_TIME', 'Contact.GMAIL'], {'name': '"""Mr. John"""'}), "(user_id, [(identity_key, None)], util._TEST_TIME,\n Contact.GMAIL, name='Mr. 
John')\n", (14361, 14447), False, 'from viewfinder.backend.db.contact import Contact\n'), ((19841, 19884), 'urllib.urlencode', 'urllib.urlencode', (["{'access_token': 'dummy'}"], {}), "({'access_token': 'dummy'})\n", (19857, 19884), False, 'import urllib\n'), ((23707, 23731), 'json.dumps', 'json.dumps', (['request_dict'], {}), '(request_dict)\n', (23717, 23731), False, 'import json\n'), ((34146, 34190), 'viewfinder.backend.db.contact.Contact.CreateSortKey', 'Contact.CreateSortKey', (['None', 'util._TEST_TIME'], {}), '(None, util._TEST_TIME)\n', (34167, 34190), False, 'from viewfinder.backend.db.contact import Contact\n'), ((17633, 17649), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (17647, 17649), False, 'from tornado.ioloop import IOLoop\n'), ((17663, 17674), 'time.time', 'time.time', ([], {}), '()\n', (17672, 17674), False, 'import time\n'), ((24916, 24941), 'json.dumps', 'json.dumps', (['response_dict'], {}), '(response_dict)\n', (24926, 24941), False, 'import json\n'), ((26038, 26062), 'json.dumps', 'json.dumps', (['request_dict'], {}), '(request_dict)\n', (26048, 26062), False, 'import json\n')]
|
from nose.plugins.attrib import attr
from numpy.testing.utils import assert_equal, assert_allclose, assert_raises
import numpy as np
from brian2.spatialneuron import *
from brian2.units import um, second
@attr('codegen-independent')
def test_basicshapes():
morpho = Soma(diameter=30*um)
morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
morpho.right['nextone'] = Cylinder(length=2*um, diameter=1*um, n=3)
# Check total number of compartments
assert_equal(len(morpho),26)
assert_equal(len(morpho.L.main),10)
# Check that end point is at distance 15 um from soma
assert_allclose(morpho.LL.distance[-1],15*um)
@attr('codegen-independent')
def test_subgroup():
morpho = Soma(diameter=30*um)
morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
# Getting a single compartment by index
assert_allclose(morpho.L[2].distance,3*um)
# Getting a single compartment by position
assert_allclose(morpho.LL[0*um].distance,11*um)
assert_allclose(morpho.LL[1*um].distance,11*um)
assert_allclose(morpho.LL[1.5*um].distance,12*um)
assert_allclose(morpho.LL[5*um].distance,15*um)
# Getting a segment
assert_allclose(morpho.L[3*um:5.1*um].distance, [3, 4, 5]*um)
# Indices cannot be obtained at this stage
    assert_raises(AttributeError, lambda: morpho.L.indices[:])
# Compress the morphology and get absolute compartment indices
N = len(morpho)
morpho.compress(MorphologyData(N))
assert_equal(morpho.LL.indices[:], [11, 12, 13, 14, 15])
assert_equal(morpho.L.indices[3*um:5.1*um], [3, 4, 5])
assert_equal(morpho.L.indices[3*um:5.1*um],
morpho.L[3*um:5.1*um].indices[:])
assert_equal(morpho.L.indices[:5.1*um], [1, 2, 3, 4, 5])
assert_equal(morpho.L.indices[3*um:], [3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(morpho.L.indices[3.5*um], 4)
assert_equal(morpho.L.indices[3], 4)
assert_equal(morpho.L.indices[-1], 10)
assert_equal(morpho.L.indices[3:5], [4, 5])
assert_equal(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])
assert_equal(morpho.L.indices[:5], [1, 2, 3, 4, 5])
# Main branch
assert_equal(len(morpho.L.main), 10)
# Non-existing branch
assert_raises(AttributeError, lambda: morpho.axon)
# Incorrect indexing
# wrong units or mixing units
assert_raises(TypeError, lambda: morpho.indices[3*second:5*second])
assert_raises(TypeError, lambda: morpho.indices[3.4:5.3])
assert_raises(TypeError, lambda: morpho.indices[3:5*um])
assert_raises(TypeError, lambda: morpho.indices[3*um:5])
# providing a step
assert_raises(TypeError, lambda: morpho.indices[3*um:5*um:2*um])
assert_raises(TypeError, lambda: morpho.indices[3:5:2])
# incorrect type
assert_raises(TypeError, lambda: morpho.indices[object()])
if __name__ == '__main__':
test_basicshapes()
test_subgroup()
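    # A minimal extra sketch (assuming the same brian2 API exercised by the
    # tests above; the branch name 'dend' is an illustrative choice):
    # absolute compartment indices become available only after compress().
    morpho = Soma(diameter=30*um)
    morpho.dend = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.compress(MorphologyData(len(morpho)))
    print(morpho.dend.indices[:])  # absolute indices of the 'dend' branch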
|
[
"numpy.testing.utils.assert_equal",
"numpy.testing.utils.assert_allclose",
"numpy.testing.utils.assert_raises",
"nose.plugins.attrib.attr"
] |
[((207, 234), 'nose.plugins.attrib.attr', 'attr', (['"""codegen-independent"""'], {}), "('codegen-independent')\n", (211, 234), False, 'from nose.plugins.attrib import attr\n'), ((767, 794), 'nose.plugins.attrib.attr', 'attr', (['"""codegen-independent"""'], {}), "('codegen-independent')\n", (771, 794), False, 'from nose.plugins.attrib import attr\n'), ((719, 767), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL.distance[-1]', '(15 * um)'], {}), '(morpho.LL.distance[-1], 15 * um)\n', (734, 767), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1076, 1121), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.L[2].distance', '(3 * um)'], {}), '(morpho.L[2].distance, 3 * um)\n', (1091, 1121), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1170, 1222), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[0 * um].distance', '(11 * um)'], {}), '(morpho.LL[0 * um].distance, 11 * um)\n', (1185, 1222), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1222, 1274), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[1 * um].distance', '(11 * um)'], {}), '(morpho.LL[1 * um].distance, 11 * um)\n', (1237, 1274), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1274, 1328), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[1.5 * um].distance', '(12 * um)'], {}), '(morpho.LL[1.5 * um].distance, 12 * um)\n', (1289, 1328), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1328, 1380), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[5 * um].distance', '(15 * um)'], {}), '(morpho.LL[5 * um].distance, 15 * um)\n', (1343, 1380), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1404, 1471), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.L[3 * um:5.1 * um].distance', '([3, 4, 5] * um)'], {}), '(morpho.L[3 * um:5.1 * um].distance, [3, 4, 5] * um)\n', (1419, 1471), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1517, 1576), 'numpy.testing.utils.assert_raises', 'assert_raises', (['AttributeError', '(lambda : morpho.L.indices[:])'], {}), '(AttributeError, lambda : morpho.L.indices[:])\n', (1530, 1576), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1705, 1761), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.LL.indices[:]', '[11, 12, 13, 14, 15]'], {}), '(morpho.LL.indices[:], [11, 12, 13, 14, 15])\n', (1717, 1761), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1766, 1824), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3 * um:5.1 * um]', '[3, 4, 5]'], {}), '(morpho.L.indices[3 * um:5.1 * um], [3, 4, 5])\n', (1778, 1824), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1825, 1915), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3 * um:5.1 * um]', 'morpho.L[3 * um:5.1 * um].indices[:]'], {}), '(morpho.L.indices[3 * um:5.1 * um], morpho.L[3 * um:5.1 * um].\n indices[:])\n', (1837, 1915), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1924, 1982), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[:5.1 * um]', '[1, 2, 3, 4, 
5]'], {}), '(morpho.L.indices[:5.1 * um], [1, 2, 3, 4, 5])\n', (1936, 1982), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1985, 2051), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3 * um:]', '[3, 4, 5, 6, 7, 8, 9, 10]'], {}), '(morpho.L.indices[3 * um:], [3, 4, 5, 6, 7, 8, 9, 10])\n', (1997, 2051), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2054, 2097), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3.5 * um]', '(4)'], {}), '(morpho.L.indices[3.5 * um], 4)\n', (2066, 2097), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2100, 2136), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3]', '(4)'], {}), '(morpho.L.indices[3], 4)\n', (2112, 2136), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2141, 2179), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[-1]', '(10)'], {}), '(morpho.L.indices[-1], 10)\n', (2153, 2179), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2184, 2227), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3:5]', '[4, 5]'], {}), '(morpho.L.indices[3:5], [4, 5])\n', (2196, 2227), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2232, 2290), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3:]', '[4, 5, 6, 7, 8, 9, 10]'], {}), '(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])\n', (2244, 2290), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2295, 2346), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[:5]', '[1, 2, 3, 4, 5]'], {}), '(morpho.L.indices[:5], [1, 2, 3, 4, 5])\n', (2307, 2346), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2438, 2489), 'numpy.testing.utils.assert_raises', 'assert_raises', (['AttributeError', '(lambda : morpho.axon)'], {}), '(AttributeError, lambda : morpho.axon)\n', (2451, 2489), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2554, 2626), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3 * second:5 * second])'], {}), '(TypeError, lambda : morpho.indices[3 * second:5 * second])\n', (2567, 2626), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2626, 2684), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3.4:5.3])'], {}), '(TypeError, lambda : morpho.indices[3.4:5.3])\n', (2639, 2684), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2688, 2747), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3:5 * um])'], {}), '(TypeError, lambda : morpho.indices[3:5 * um])\n', (2701, 2747), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2749, 2808), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3 * um:5])'], {}), '(TypeError, lambda : morpho.indices[3 * um:5])\n', (2762, 2808), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2835, 2906), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3 * um:5 * 
um:2 * um])'], {}), '(TypeError, lambda : morpho.indices[3 * um:5 * um:2 * um])\n', (2848, 2906), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2904, 2960), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3:5:2])'], {}), '(TypeError, lambda : morpho.indices[3:5:2])\n', (2917, 2960), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n')]
|
# Using the os.walk function, create an HTML page with the name and size of every file
# in a given directory and its subdirectories
# Program 9.10 from the book, page 219
# Program 9.10 - Directory tree being walked
#
# import os
# import sys
#
# for raiz, diretorios, arquivos in os.walk(sys.argv[1]):
#     print(f'\nPath:', raiz)
#     for d in diretorios:
#         print(f'    {d}/')
#     for f in arquivos:
#         print(f'        {f}/')
#     print(f'{len(diretorios)} directory(ies), {len(arquivos)} file(s)')
import sys
import os
import os.path
import urllib.request
def generate_listing(page, directory):
for root, directories, files in os.walk(directory):
for file in files:
full_path = os.path.join(root, file)
size = os.path.getsize(full_path)
link = urllib.request.pathname2url(full_path)
page.write(f"<p><a href='{link}'>{file}</a> ({size} bytes)</p>")
if len(sys.argv) != 2:
    print('\n\nEnter the name of the directory to collect the files from!')
    print('Usage: ex35.py directory\n\n')
sys.exit(1)
directory = sys.argv[1]
page = open('diretorios-e-arquivos.html', 'w', encoding='utf-8')
page.write('''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
    <title>Directories and Files</title>
</head>
<body>
''')
page.write(f'<h1>Files found starting from directory: {directory}</h1>')
generate_listing(page, directory)
page.write('''
</body>
</html>
''')
page.close()
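# A small companion sketch (not part of the original exercise): the same
# os.walk traversal can also aggregate sizes, e.g. to report the total
# number of bytes found under a directory.
def total_size(start_directory):
    total = 0
    for root, _dirs, files in os.walk(start_directory):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total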
|
[
"os.path.getsize",
"os.walk",
"os.path.join",
"sys.exit"
] |
[((670, 688), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (677, 688), False, 'import os\n'), ((1087, 1098), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1095, 1098), False, 'import sys\n'), ((741, 765), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (753, 765), False, 'import os\n'), ((785, 811), 'os.path.getsize', 'os.path.getsize', (['full_path'], {}), '(full_path)\n', (800, 811), False, 'import os\n')]
|
# .env/bin/python
# coding: utf-8
from collections import namedtuple
from datetime import datetime
def pad_reading(_reading):
"""
Return a full binary representation of the individual bytes
:param _reading:
:return: binary
"""
prefix = 0
for i in range(len(_reading) - 1):
prefix += '0'
return prefix
def hex_to_dec(hex_str):
return int(str(hex_str), 16)
def bin_to_dec(binary_str):
return sum([int(binary_str[-i]) * 2 ** (i - 1) for i in range(1, len(binary_str) + 1)])
def inspect_header(h, n):
return [h[i:i+n] for i in range(0, len(h), n)]
def decode_header(header):
"""
Decode the payload header and return a named tuple as an OrderedDict
:param header: bytes representation of the first 32 bytes of data
:return: OrderedDict
"""
# define named tuple
DecodedHeader = namedtuple('DecodedHeader', 'product_type, hardware_rev, firmware_rev, contact_reason, '
'alarm_status, imei gsm_rssi, battery_status, message_type, '
'payload_len')
# start conversions: each byte has a different conversion method, so try this...
# optionally, use data.decode('utf-8') in hex_to_dec function
for idx, data in enumerate(header):
# print(idx, data)
if idx == 0:
product_type = int(str(data), 16)
elif idx == 1:
hardware_rev = int(data, 2)
elif idx == 2:
firmware = bin(int(data, 16)).replace('0b', '')
firmware_rev_minor = bin_to_dec(firmware[0:3])
firmware_rev_major = bin_to_dec(firmware[4:8])
firmware_rev = str(firmware_rev_major) + '.' + str(firmware_rev_minor)
elif idx == 3:
contact_reason = bin(int(data, 16))
elif idx == 4:
alarm_status = bin(int(data, 16))
elif idx == 5:
gsm_rssi = int(str(data), 16)
elif idx == 6:
battery_status = int(str(data), 16)
elif idx == 15:
message_type = int(str(data), 16)
elif idx == 16:
payload_len = int(str(data), 16)
# create imei from the middle of the string
imei_list = header[7:15]
# the list elements are bytes, re-encode to create string
imei = ''.join(str(i) for i in imei_list)
# print vars
print('Product Type: {}'.format(product_type))
print('Hardware Rev: {}'.format(hardware_rev))
print('Firmware Rev: {}'.format(firmware_rev))
print('Contact Reason: {}'.format(contact_reason))
print('Alarm Status: {}'.format(alarm_status))
print('RSSI: {}'.format(gsm_rssi))
print('Battery Status: {}'.format(battery_status))
print('IMEI: {}'.format(imei))
print('Message Type: {}'.format(message_type))
print('Payload Length: {}'.format(payload_len))
# set the variable to the decoded values
hdr = DecodedHeader(product_type=product_type, hardware_rev=hardware_rev, firmware_rev=firmware_rev,
contact_reason=contact_reason, alarm_status=alarm_status, gsm_rssi=gsm_rssi,
battery_status=battery_status, imei=imei, message_type=message_type, payload_len=payload_len)
# return hdr as an ordered dict
return hdr._asdict()
def decode_readings(reading):
"""
Decode the transmission readings from the payload
    :param reading: hex string for one transmission reading
:return: decoded values
"""
_reading = bin(int(reading, 16)).replace('0b', '')
if len(_reading) < 32:
_reading = pad_reading(_reading)
    distance = temperature = src = rssi = 0
    timestamp = datetime.now()  # decode time (not included in the returned dict)
# sample reading = 0A5B2877
byte1 = _reading[0:8] # 00001010
byte2 = _reading[8:16] # 01011011
byte3 = _reading[16:24] # 00101000
    byte4 = _reading[24:32]  # 01110111
# concatenate upper and lower bits from 3 and 4
    # convert the intermediate binary values to hex strings for decoding
_distance = byte3[:2] + byte4
_temp = hex(int(byte2, 2))
_rssi = hex(int(byte1, 2))
_src = byte3[2:6]
decoded_reading = {
'distance': bin_to_dec(_distance),
'temperature': float(hex_to_dec(_temp)) / 2 * 30,
'src': bin_to_dec(_src),
'rssi': float(hex_to_dec(_rssi))
}
return decoded_reading
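# A minimal usage sketch against the sample reading quoted in the comments
# above; the decoded dict values are illustrative, not verified on hardware.
if __name__ == '__main__':
    print(bin_to_dec('101'))   # 5
    print(hex_to_dec('1f'))    # 31
    print(decode_readings('0A5B2877'))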
|
[
"datetime.datetime.now",
"collections.namedtuple"
] |
[((866, 1034), 'collections.namedtuple', 'namedtuple', (['"""DecodedHeader"""', '"""product_type, hardware_rev, firmware_rev, contact_reason, alarm_status, imei gsm_rssi, battery_status, message_type, payload_len"""'], {}), "('DecodedHeader',\n 'product_type, hardware_rev, firmware_rev, contact_reason, alarm_status, imei gsm_rssi, battery_status, message_type, payload_len'\n )\n", (876, 1034), False, 'from collections import namedtuple\n'), ((3642, 3656), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3654, 3656), False, 'from datetime import datetime\n')]
|
# app.py
# python file to create database
from flask import Flask, render_template, request
from models import * # our file defined above to define the classes/tables
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = 'postgresql://postgres:pk8742@localhost:5432/airline2'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)  # bind this database to this Flask application
def main():
db.create_all()
if __name__ == "__main__":
    with app.app_context():  # an application context is required for database operations outside a request
main()
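# For reference, a hypothetical sketch of what the imported models.py could
# define (the real file is not shown here; 'Flight' and its columns are
# illustrative assumptions, not the actual schema):
#
#   from flask_sqlalchemy import SQLAlchemy
#   db = SQLAlchemy()
#
#   class Flight(db.Model):
#       __tablename__ = "flights"
#       id = db.Column(db.Integer, primary_key=True)
#       origin = db.Column(db.String, nullable=False)
#       destination = db.Column(db.String, nullable=False)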
|
[
"flask.Flask"
] |
[((175, 190), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (180, 190), False, 'from flask import Flask, render_template, request\n')]
|
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
import copy
from CanonicalIntervalSet import CanonicalIntervalSet
class MethodSet(CanonicalIntervalSet):
"""
A class for holding a set of HTTP methods
"""
all_methods_list = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']
def __init__(self, all_methods=False):
"""
:param bool all_methods: whether to create the object holding all methods
"""
super().__init__()
if all_methods: # the whole range
self.add_interval(self._whole_range_interval())
@staticmethod
def _whole_range_interval():
"""
:return: the interval representing the whole range (all methods)
"""
return CanonicalIntervalSet.Interval(0, len(MethodSet.all_methods_list) - 1)
@staticmethod
def _whole_range_interval_set():
"""
:return: the interval set representing the whole range (all methods)
"""
interval = MethodSet._whole_range_interval()
return CanonicalIntervalSet.get_interval_set(interval.start, interval.end)
def is_whole_range(self):
"""
:return: True if the MethodSet contains all methods, False otherwise
"""
return self == self._whole_range_interval_set()
@staticmethod
def _get_method_names_from_interval_set(interval_set):
"""
Returns names of methods represented by a given interval set
:param CanonicalIntervalSet interval_set: the interval set
:return: the list of method names
"""
res = []
for interval in interval_set:
assert interval.start >= 0 and interval.end < len(MethodSet.all_methods_list)
for index in range(interval.start, interval.end + 1):
res.append(MethodSet.all_methods_list[index])
return res
@staticmethod
def _get_compl_method_names_from_interval_set(interval_set):
"""
Returns names of methods not included in a given interval set
:param CanonicalIntervalSet interval_set: the interval set
:return: the list of complement method names
"""
res = MethodSet.all_methods_list.copy()
for method in MethodSet._get_method_names_from_interval_set(interval_set):
res.remove(method)
return res
def __str__(self):
"""
:return: Compact string representation of the MethodSet
"""
if self.is_whole_range():
return '*'
if not self:
return 'Empty'
method_names = self._get_method_names_from_interval_set(self)
compl_method_names = self._get_compl_method_names_from_interval_set(self)
if len(method_names) <= len(compl_method_names):
values_list = ', '.join(method for method in method_names)
else:
values_list = 'all but ' + ', '.join(method for method in compl_method_names)
return values_list
def copy(self):
new_copy = copy.copy(self)
return new_copy
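# A minimal usage sketch of the string logic above (assuming the inherited
# CanonicalIntervalSet is falsy when empty): the full range prints '*' and
# an empty set prints 'Empty'.
if __name__ == '__main__':
    print(MethodSet(all_methods=True))  # *
    print(MethodSet())                  # Empty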
|
[
"copy.copy",
"CanonicalIntervalSet.CanonicalIntervalSet.get_interval_set"
] |
[((1101, 1168), 'CanonicalIntervalSet.CanonicalIntervalSet.get_interval_set', 'CanonicalIntervalSet.get_interval_set', (['interval.start', 'interval.end'], {}), '(interval.start, interval.end)\n', (1138, 1168), False, 'from CanonicalIntervalSet import CanonicalIntervalSet\n'), ((3077, 3092), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (3086, 3092), False, 'import copy\n')]
|
import math
def num_string(x, n=3):
return ('{}'.format(x + 10**n))[1:]
class ChannelConfig(object):
def __init__(self, nchans, chans=None, addref=True, istart=0):
self.addref = addref
self.istart = istart
if chans is None:
nchars = math.ceil(math.log10(nchans+istart))
self.chans = ["CHAN{}".format(num_string(i+istart, nchars)) for i in range(nchans)]
else:
self.chans = [chans[i] for i in range(nchans)]
if addref:
self.chans.append("REF")
self.nconn = len(self.chans)
self.nch = nchans
self.selected = [False for i in range(self.nconn)]
def isavailable(self, chidx):
return not self.selected[chidx]
def check(self, chidx):
self.selected[chidx] = True
def uncheck(self, chidx):
self.selected[chidx] = False
def findfirst(self):
for i in range(self.nch):
if not self.selected[i]:
self.selected[i] = True
return i
return -1
def names(self):
return self.chans
def nchans(self):
return self.nch
def save_config(self):
return dict(kind='channel', nchans=self.nch, chans=self.chans, use=self.selected)
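# A minimal usage sketch: four channels plus the reference connection.
if __name__ == '__main__':
    cfg = ChannelConfig(4)
    print(cfg.names())         # ['CHAN0', 'CHAN1', 'CHAN2', 'CHAN3', 'REF']
    print(cfg.findfirst())     # 0 -- first free channel, now marked selected
    print(cfg.isavailable(0))  # False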
|
[
"math.log10"
] |
[((300, 327), 'math.log10', 'math.log10', (['(nchans + istart)'], {}), '(nchans + istart)\n', (310, 327), False, 'import math\n')]
|
from common.numpy_fast import clip
def rate_limit(new_value, last_value, dw_step, up_step):
return clip(new_value, last_value + dw_step, last_value + up_step)
def learn_angle_offset(lateral_control, v_ego, angle_offset, d_poly, y_des, steer_override):
  # simple integral controller that learns how much steering offset to apply to keep the car going straight
min_offset = -1. # deg
max_offset = 1. # deg
alpha = 1./36000. # correct by 1 deg in 2 mins, at 30m/s, with 50cm of error, at 20Hz
min_learn_speed = 1.
# learn less at low speed or when turning
alpha_v = alpha*(max(v_ego - min_learn_speed, 0.))/(1. + 0.5*abs(y_des))
# only learn if lateral control is active and if driver is not overriding:
if lateral_control and not steer_override:
angle_offset += d_poly[3] * alpha_v
angle_offset = clip(angle_offset, min_offset, max_offset)
return angle_offset
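# A minimal usage sketch (assuming clip clamps its first argument to the
# [lo, hi] range): a large jump in new_value is limited to one step of
# +/-0.1 around last_value.
if __name__ == '__main__':
  print(rate_limit(5.0, 0.0, -0.1, 0.1))   # 0.1
  print(rate_limit(-5.0, 0.0, -0.1, 0.1))  # -0.1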
|
[
"common.numpy_fast.clip"
] |
[((102, 161), 'common.numpy_fast.clip', 'clip', (['new_value', '(last_value + dw_step)', '(last_value + up_step)'], {}), '(new_value, last_value + dw_step, last_value + up_step)\n', (106, 161), False, 'from common.numpy_fast import clip\n'), ((827, 869), 'common.numpy_fast.clip', 'clip', (['angle_offset', 'min_offset', 'max_offset'], {}), '(angle_offset, min_offset, max_offset)\n', (831, 869), False, 'from common.numpy_fast import clip\n')]
|
import warnings
from pathlib import Path
import astropy.units as u
import matplotlib.pyplot as plt
import pandas as pd
from astropy.coordinates import SkyCoord
from sunpy.map import Map, MapSequence
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net import hek
from sunpy.util import SunpyUserWarning
__all__ = ['Sunspotter']
path = Path(__file__).parent.parent.parent / "data/all_clear"
class Sunspotter:
def __init__(self, *, timesfits: str = path / "lookup_timesfits.csv", get_all_timesfits_columns: bool = True,
properties: str = path / "lookup_properties.csv", get_all_properties_columns: bool = True,
timesfits_columns: list = ['#id'], properties_columns: list = ['#id'],
classifications=None, classifications_columns=None,
delimiter: str = ';', datetime_fmt: str = '%Y-%m-%d %H:%M:%S'):
"""
Parameters
----------
timesfits : str
filepath to `lookup_timesfits.csv`
by default points to the Timesfits file from All Clear Dataset
stored in `~pythia/data/all_clear`
get_all_timesfits_columns : bool, optional
Load all columns from the Timesfits CSV file, by default True
properties : str
filepath to `lookup_properties.csv`
by default points to the Properties file from All Clear Dataset
stored in `~pythia/data/all_clear`
get_all_properties_columns : bool, optional
Load all columns from the Properties CSV file, by default True
timesfits_columns : list, optional
Columns required from lookup_timesfits.csv, by default ['#id']
Will be overridden if `get_all_timesfits_columns` is True.
properties_columns : list, optional
Columns required from lookup_properties.csv, by default ['#id']
Will be overridden if `get_all_properties_columns` is True.
classifications : str, optional
filepath to `classifications.csv`
Default behaviour is not to load the file, hence by default None
classifications_columns : list, optional
Columns required from `classifications.csv`
Default behaviour is not to load the file, hence by default None
delimiter : str, optional
Delimiter for the CSV files, by default ';'
datetime_fmt : str, optional
Format for interpreting the observation datetimes in the CSV files,
by default '%Y-%m-%d %H:%M:%S'
"""
self.timesfits = timesfits
self.get_all_timesfits_columns = get_all_timesfits_columns
self.properties = properties
self.get_all_properties_columns = get_all_properties_columns
self.timesfits_columns = set(timesfits_columns)
self.properties_columns = set(properties_columns)
self.classifications = classifications
self.classifications_columns = classifications_columns
self.datetime_fmt = datetime_fmt
self._get_data(delimiter)
def _get_data(self, delimiter: str):
# Reading the Timesfits file
try:
if self.get_all_timesfits_columns:
self.timesfits = pd.read_csv(self.timesfits,
delimiter=delimiter)
else:
self.timesfits = pd.read_csv(self.timesfits,
delimiter=delimiter,
usecols=self.timesfits_columns)
except ValueError:
raise SunpyUserWarning("Sunspotter Object cannot be created."
" Either the Timesfits columns do not match, or the file is corrupted")
if not self.timesfits_columns.issubset(self.timesfits.columns):
missing_columns = self.timesfits_columns - self.timesfits_columns.intersection(self.timesfits.columns)
missing_columns = ", ".join(missing_columns)
raise SunpyUserWarning("Sunspotter Object cannot be created."
" The Timesfits CSV is missing the following columns: " +
missing_columns)
if 'obs_date' in self.timesfits.columns:
self.timesfits.obs_date = pd.to_datetime(self.timesfits.obs_date,
format=self.datetime_fmt)
self.timesfits.set_index("obs_date", inplace=True)
# Reading the Properties file
try:
if self.get_all_properties_columns:
self.properties = pd.read_csv(self.properties,
delimiter=delimiter)
else:
self.properties = pd.read_csv(self.properties,
delimiter=delimiter,
usecols=self.properties_columns)
except ValueError:
raise SunpyUserWarning("Sunspotter Object cannot be created."
" Either the Properties columns do not match, or the file is corrupted")
if not self.properties_columns.issubset(self.properties.columns):
missing_columns = self.properties_columns - self.properties_columns.intersection(self.properties.columns)
missing_columns = ", ".join(missing_columns)
raise SunpyUserWarning("Sunspotter Object cannot be created."
" The Properties CSV is missing the following columns: " +
missing_columns)
if '#id' in self.properties.columns:
self.properties.set_index("#id", inplace=True)
# Reading the Classification file
if self.classifications is not None:
if self.classifications_columns is None:
raise SunpyUserWarning("Classifications columns cannot be None"
" when classifications.csv is to be loaded.")
try:
self.classifications = pd.read_csv(self.classifications,
delimiter=delimiter,
usecols=self.classifications_columns)
except ValueError:
raise SunpyUserWarning("Sunspotter Object cannot be created."
" Either the Classifications columns do not match, or the file is corrupted")
self.classifications_columns = set(self.classifications_columns)
if not self.classifications_columns.issubset(self.classifications.columns):
missing_columns = self.classifications_columns - self.classifications_columns.intersection(self.classifications.columns)
missing_columns = ", ".join(missing_columns)
raise SunpyUserWarning("Sunspotter Object cannot be created."
" The Classifications CSV is missing the following columns: " +
missing_columns)
def get_timesfits_id(self, obsdate: str):
"""
Returns the Sunspotter observation id for the
        first observation at a given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
id : int
The Sunspotter observation id for the first observation
for the given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_timesfits_id(obsdate)
1
"""
obsdate = self.get_nearest_observation(obsdate)
return self.timesfits.loc[obsdate].get(key='#id').iloc[0]
def get_all_ids_for_observation(self, obsdate: str):
"""
Returns all the Sunspotter observation ids for the
given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
        ids : numpy.ndarray
All the Sunspotter observation ids for the
given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_all_ids_for_observation(obsdate)
array([1, 2, 3, 4, 5])
"""
obsdate = self.get_nearest_observation(obsdate)
return self.timesfits.loc[obsdate].get(key='#id').values
def get_properties(self, idx: int):
"""
Returns the observed properties for a given Sunspotter id.
Parameters
----------
idx : int
            The Sunspotter observation id for a particular observation.
Returns
-------
properties : pandas.Series
The observed properties for the given Sunspotter id.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> idx = 0
>>> sunspotter.get_properties(idx)
filename 530be1183ae74079c300000d.jpg
zooniverse_id ASZ000090y
angle 37.8021
area 34400
areafrac 0.12
areathesh 2890
bipolesep 3.72
c1flr24hr 0
id_filename 1
flux 2.18e+22
fluxfrac 0.01
hale beta
hcpos_x 452.27
hcpos_y 443.93
m1flr12hr 0
m5flr12hr 0
n_nar 1
noaa 8809
pxpos_x 229.193
pxpos_y 166.877
sszn 1
zurich bxo
Name: 1, dtype: object
"""
return self.properties.loc[idx]
def get_properties_from_obsdate(self, obsdate: str):
"""
Returns the observed properties for a given observation time and date.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
        properties : pandas.Series
The observed properties for the given observation time and date.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_properties_from_obsdate(obsdate)
filename 530be1183ae74079c300000d.jpg
zooniverse_id ASZ000090y
angle 37.8021
area 34400
areafrac 0.12
areathesh 2890
bipolesep 3.72
c1flr24hr 0
id_filename 1
flux 2.18e+22
fluxfrac 0.01
hale beta
hcpos_x 452.27
hcpos_y 443.93
m1flr12hr 0
m5flr12hr 0
n_nar 1
noaa 8809
pxpos_x 229.193
pxpos_y 166.877
sszn 1
zurich bxo
Name: 1, dtype: object
"""
return self.get_properties(self.get_timesfits_id(obsdate))
def number_of_observations(self, obsdate: str):
"""
Returns number of Sunspotter observations for the
given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
number_of_observations : int
Number of Sunspotter observations
for the given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.number_of_observations(obsdate)
5
"""
return self.timesfits.loc[obsdate].shape[0]
def get_nearest_observation(self, obsdate: str):
"""
Returns the observation time and date in the Timesfits that is
closest to the given observation time and date.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
closest_observation : str
Observation time and date in the Timesfits that is
closest to the given observation time and date.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 22:47:02'
>>> sunspotter.get_nearest_observation(obsdate)
'2000-01-01 12:47:02'
"""
unique_dates = self.timesfits.index.unique()
index = unique_dates.get_loc(obsdate, method='nearest')
nearest_date = str(unique_dates[index])
if nearest_date != str(obsdate): # casting to str because obsdate can be a pandas.Timestamp
warnings.warn(SunpyUserWarning("The given observation date isn't in the Timesfits file.\n"
"Using the observation nearest to the given obsdate instead."))
return nearest_date
def get_all_observations_ids_in_range(self, start: str, end: str):
"""
Returns all the observations ids in the given timerange.
The nearest start and end time in the Timesfits are used
to form the time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
ids : numpy.array
All the Sunspotter observation ids for the
given observation time range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-02 12:51:02'
>>> end = '2000-01-03 12:51:02'
>>> sunspotter.get_all_observations_ids_in_range(start, end)
array([ 6, 7, 8, 9, 10, 11, 12, 13])
"""
start = self.get_nearest_observation(start)
end = self.get_nearest_observation(end)
return self.timesfits[start:end]['#id'].values
def get_fits_filenames_from_range(self, start: str, end: str):
"""
Returns all the FITS filenames for observations in the given timerange.
The nearest start and end time in the Timesfits are used to form the
time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
filenames : pandas.Series
all the FITS filenames for observations in the given timerange.
Notes
-----
If start time is equal to end time, all the filenames corresponding to
that particular observation will be returned.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-02 12:51:02'
>>> end = '2000-01-03 12:51:02'
>>> sunspotter.get_fits_filenames_from_range(start, end)
obs_date
2000-01-02 12:51:02 20000102_1251_mdiB_1_8810.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8813.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8814.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8815.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8810.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8813.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8814.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8815.fits
Name: filename, dtype: object
"""
ids_in_range = self.get_all_observations_ids_in_range(start, end)
return self.timesfits[self.timesfits['#id'].isin(ids_in_range)]['filename']
def get_mdi_fulldisk_fits_file(self, obsdate: str, filepath: str = str(path) + "/fulldisk/"):
"""
Downloads the MDI Fulldisk FITS file corresponding to a particular observation.
Parameters
----------
obsdate : str
The observation time and date.
        filepath : str, optional
By default downloaded files are stored in `~pythia/data/fulldisk`
Returns
-------
filepath : str
Filepath to the downloaded FITS file.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_mdi_fulldisk_fits_file(obsdate)
'~pythia/data/all_clear/fulldisk/fd_m_96m_01d_2556_0008.fits'
"""
# TODO: Figure out a way to test the downloaded file.
obsdate = self.get_nearest_observation(obsdate)
search_results = Fido.search(a.Time(obsdate, obsdate), a.Instrument.mdi)
downloaded_file = Fido.fetch(search_results, path=filepath)
return downloaded_file[0]
def get_mdi_fulldisk_map(self, obsdate: str, filepath: str = str(path) + "/fulldisk/"):
"""
Downloads the MDI Fulldisk FITS file corresponding to a particular observation.
And returns a SunPy Map corresponding to the downloaded file.
Parameters
----------
obsdate : str
The observation time and date.
        filepath : str, optional
By default downloaded files are stored in `~pythia/data/fulldisk`
Returns
-------
        mdi_map : sunpy.map.Map
            SunPy Map created from the downloaded FITS file.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_mdi_fulldisk_map(obsdate)
<sunpy.map.sources.soho.MDIMap object at 0x7f6ca7aedc88>
SunPy Map
---------
Observatory: SOHO
Instrument: MDI
Detector: MDI
Measurement: magnetogram
Wavelength: 0.0 Angstrom
Observation Date: 2000-01-01 12:47:02
Exposure Time: 0.000000 s
Dimension: [1024. 1024.] pix
Coordinate System: helioprojective
Scale: [1.98083342 1.98083342] arcsec / pix
Reference Pixel: [511.36929067 511.76453018] pix
Reference Coord: [0. 0.] arcsec
array([[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
...,
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan]], dtype=float32)
"""
# TODO: Figure out the file naming convention to check if the file has been downloaded already.
# TODO: Test this!
obsdate = self.get_nearest_observation(obsdate)
search_results = Fido.search(a.Time(obsdate, obsdate), a.Instrument.mdi)
downloaded_file = Fido.fetch(search_results, path=filepath)
return Map(downloaded_file[0])
def get_available_obsdatetime_range(self, start: str, end: str):
"""
Returns all the observations datetimes in the given timerange.
The nearest start and end time in the Timesfits are used
to form the time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
obs_list : pandas.DatetimeIndex
All the Sunspotter observation datetimes for the
given observation time range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-01 12:47:02'
>>> end = '2000-01-15 12:47:02'
>>> sunspotter.get_available_obsdatetime_range(start, end)
DatetimeIndex(['2000-01-01 12:47:02', '2000-01-02 12:51:02',
'2000-01-03 12:51:02', '2000-01-04 12:51:02',
'2000-01-05 12:51:02', '2000-01-06 12:51:02',
'2000-01-11 12:51:02', '2000-01-12 12:51:02',
'2000-01-13 12:51:02', '2000-01-14 12:47:02',
'2000-01-15 12:47:02'],
dtype='datetime64[ns]', name='obs_date', freq=None)
"""
start = self.get_nearest_observation(start)
end = self.get_nearest_observation(end)
return self.timesfits[start: end].index.unique()
def get_mdi_map_sequence(self, start: str, end: str, filepath: str = str(path) + "/fulldisk/"):
"""
Get MDI Map Sequence for observations from given range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
filepath : str, optional
            Directory where downloaded FITS files are stored, by default str(path) + "/fulldisk/"
Returns
-------
mdi_mapsequence : sunpy.map.MapSequence
Map Sequece of the MDI maps in the given range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-01 12:47:02'
>>> end = '2000-01-05 12:51:02'
>>> sunspotter.get_mdi_map_sequence(start, end)
<sunpy.map.mapsequence.MapSequence object at 0x7f2c7b85cda0>
MapSequence of 5 elements, with maps from MDIMap
"""
# TODO: Test this!
obsrange = self.get_available_obsdatetime_range(start, end)
maplist = []
for obsdate in obsrange:
maplist.append(self.get_mdi_fulldisk_map(obsdate, filepath))
return MapSequence(maplist)
def get_observations_from_hek(self, obsdate: str, event_type: str = 'AR',
observatory: str = 'SOHO'):
"""
Gets the observation metadata from HEK for the given obsdate.
By default gets Active Region data recieved from SOHO.
Parameters
----------
obsdate : str
The observation time and date.
event_type : str, optional
The type of Event, by default 'AR'
observatory : str, optional
Observatory that observed the Event, by default 'SOHO'
Returns
-------
        result : sunpy.net.hek.HEKTable
            The table of results received from HEK.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_observations_from_hek(obsdate)
<HEKTable length=5>
SOL_standard absnetcurrenthelicity ... unsignedvertcurrent
str30 object ... object
------------------------------ --------------------- ... -------------------
SOL2000-01-01T09:35:02L054C117 None ... None
SOL2000-01-01T09:35:02L058C100 None ... None
SOL2000-01-01T09:35:02L333C106 None ... None
SOL2000-01-01T09:35:02L033C066 None ... None
SOL2000-01-01T09:35:02L012C054 None ... None
"""
obsdate = self.get_nearest_observation(obsdate)
client = hek.HEKClient()
result = client.search(hek.attrs.Time(obsdate, obsdate), hek.attrs.EventType(event_type))
obsdate = "T".join(str(obsdate).split())
        result = result[result['obs_observatory'] == observatory]
result = result[result['event_starttime'] <= obsdate]
result = result[result['event_endtime'] > obsdate]
return result
def plot_observations(self, obsdate: str, mdi_map: Map = None):
"""
Plots the Active Regions for a given observation on the
MDI map corresponding to that observation.
Parameters
----------
obsdate : str
The observation time and date.
mdi_map : Map, optional
The MDI map corresponding to the given observation,
If None, the Map will be downloaded first.
By default None.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.plot_observations(obsdate)
"""
obsdate = self.get_nearest_observation(obsdate)
if mdi_map is None:
mdi_map = self.get_mdi_fulldisk_map(obsdate)
hek_result = self.get_observations_from_hek(obsdate)
bottom_left_x = hek_result['boundbox_c1ll']
bottom_left_y = hek_result['boundbox_c2ll']
top_right_x = hek_result['boundbox_c1ur']
top_right_y = hek_result['boundbox_c2ur']
number_of_observations = len(hek_result)
bottom_left_coords = SkyCoord([(bottom_left_x[i], bottom_left_y[i]) * u.arcsec
for i in range(number_of_observations)],
frame=mdi_map.coordinate_frame)
top_right_coords = SkyCoord([(top_right_x[i], top_right_y[i]) * u.arcsec
for i in range(number_of_observations)],
frame=mdi_map.coordinate_frame)
fig = plt.figure(figsize=(12, 10), dpi=100)
mdi_map.plot()
for i in range(number_of_observations):
mdi_map.draw_rectangle(bottom_left_coords[i],
top_right=top_right_coords[i],
color='b', label="Active Regions")
hek_legend, = plt.plot([], color='b', label="Active Regions")
plt.legend(handles=[hek_legend])
plt.show()
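# A minimal usage sketch chaining the documented calls above (downloading
# the MDI map requires network access):
if __name__ == '__main__':
    sunspotter = Sunspotter()
    obsdate = sunspotter.get_nearest_observation('2000-01-01 12:47:02')
    print(sunspotter.get_timesfits_id(obsdate))
    sunspotter.plot_observations(obsdate)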
|
[
"matplotlib.pyplot.show",
"sunpy.map.Map",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"sunpy.net.hek.HEKClient",
"matplotlib.pyplot.legend",
"sunpy.map.MapSequence",
"sunpy.net.Fido.fetch",
"sunpy.net.attrs.Time",
"sunpy.net.hek.attrs.Time",
"matplotlib.pyplot.figure",
"sunpy.net.hek.attrs.EventType",
"sunpy.util.SunpyUserWarning",
"pandas.to_datetime",
"pathlib.Path"
] |
[((18413, 18454), 'sunpy.net.Fido.fetch', 'Fido.fetch', (['search_results'], {'path': 'filepath'}), '(search_results, path=filepath)\n', (18423, 18454), False, 'from sunpy.net import Fido\n'), ((20521, 20562), 'sunpy.net.Fido.fetch', 'Fido.fetch', (['search_results'], {'path': 'filepath'}), '(search_results, path=filepath)\n', (20531, 20562), False, 'from sunpy.net import Fido\n'), ((20578, 20601), 'sunpy.map.Map', 'Map', (['downloaded_file[0]'], {}), '(downloaded_file[0])\n', (20581, 20601), False, 'from sunpy.map import Map, MapSequence\n'), ((23319, 23339), 'sunpy.map.MapSequence', 'MapSequence', (['maplist'], {}), '(maplist)\n', (23330, 23339), False, 'from sunpy.map import Map, MapSequence\n'), ((25046, 25061), 'sunpy.net.hek.HEKClient', 'hek.HEKClient', ([], {}), '()\n', (25059, 25061), False, 'from sunpy.net import hek\n'), ((27059, 27096), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)', 'dpi': '(100)'}), '(figsize=(12, 10), dpi=100)\n', (27069, 27096), True, 'import matplotlib.pyplot as plt\n'), ((27386, 27433), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': '"""b"""', 'label': '"""Active Regions"""'}), "([], color='b', label='Active Regions')\n", (27394, 27433), True, 'import matplotlib.pyplot as plt\n'), ((27443, 27475), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[hek_legend]'}), '(handles=[hek_legend])\n', (27453, 27475), True, 'import matplotlib.pyplot as plt\n'), ((27484, 27494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27492, 27494), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4183), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (["('Sunspotter Object cannot be created. The Timesfits CSV is missing the following columns: '\n + missing_columns)"], {}), "(\n 'Sunspotter Object cannot be created. The Timesfits CSV is missing the following columns: '\n + missing_columns)\n", (4062, 4183), False, 'from sunpy.util import SunpyUserWarning\n'), ((4335, 4400), 'pandas.to_datetime', 'pd.to_datetime', (['self.timesfits.obs_date'], {'format': 'self.datetime_fmt'}), '(self.timesfits.obs_date, format=self.datetime_fmt)\n', (4349, 4400), True, 'import pandas as pd\n'), ((5452, 5590), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (["('Sunspotter Object cannot be created. The Properties CSV is missing the following columns: '\n + missing_columns)"], {}), "(\n 'Sunspotter Object cannot be created. 
The Properties CSV is missing the following columns: '\n + missing_columns)\n", (5468, 5590), False, 'from sunpy.util import SunpyUserWarning\n'), ((18343, 18367), 'sunpy.net.attrs.Time', 'a.Time', (['obsdate', 'obsdate'], {}), '(obsdate, obsdate)\n', (18349, 18367), True, 'from sunpy.net import attrs as a\n'), ((20451, 20475), 'sunpy.net.attrs.Time', 'a.Time', (['obsdate', 'obsdate'], {}), '(obsdate, obsdate)\n', (20457, 20475), True, 'from sunpy.net import attrs as a\n'), ((25093, 25125), 'sunpy.net.hek.attrs.Time', 'hek.attrs.Time', (['obsdate', 'obsdate'], {}), '(obsdate, obsdate)\n', (25107, 25125), False, 'from sunpy.net import hek\n'), ((25127, 25158), 'sunpy.net.hek.attrs.EventType', 'hek.attrs.EventType', (['event_type'], {}), '(event_type)\n', (25146, 25158), False, 'from sunpy.net import hek\n'), ((360, 374), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (364, 374), False, 'from pathlib import Path\n'), ((3258, 3306), 'pandas.read_csv', 'pd.read_csv', (['self.timesfits'], {'delimiter': 'delimiter'}), '(self.timesfits, delimiter=delimiter)\n', (3269, 3306), True, 'import pandas as pd\n'), ((3403, 3488), 'pandas.read_csv', 'pd.read_csv', (['self.timesfits'], {'delimiter': 'delimiter', 'usecols': 'self.timesfits_columns'}), '(self.timesfits, delimiter=delimiter, usecols=self.timesfits_columns\n )\n', (3414, 3488), True, 'import pandas as pd\n'), ((3619, 3753), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (['"""Sunspotter Object cannot be created. Either the Timesfits columns do not match, or the file is corrupted"""'], {}), "(\n 'Sunspotter Object cannot be created. Either the Timesfits columns do not match, or the file is corrupted'\n )\n", (3635, 3753), False, 'from sunpy.util import SunpyUserWarning\n'), ((4651, 4700), 'pandas.read_csv', 'pd.read_csv', (['self.properties'], {'delimiter': 'delimiter'}), '(self.properties, delimiter=delimiter)\n', (4662, 4700), True, 'import pandas as pd\n'), ((4799, 4886), 'pandas.read_csv', 'pd.read_csv', (['self.properties'], {'delimiter': 'delimiter', 'usecols': 'self.properties_columns'}), '(self.properties, delimiter=delimiter, usecols=self.\n properties_columns)\n', (4810, 4886), True, 'import pandas as pd\n'), ((5019, 5154), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (['"""Sunspotter Object cannot be created. Either the Properties columns do not match, or the file is corrupted"""'], {}), "(\n 'Sunspotter Object cannot be created. Either the Properties columns do not match, or the file is corrupted'\n )\n", (5035, 5154), False, 'from sunpy.util import SunpyUserWarning\n'), ((5923, 6034), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (['"""Classifications columns cannot be None when classifications.csv is to be loaded."""'], {}), "(\n 'Classifications columns cannot be None when classifications.csv is to be loaded.'\n )\n", (5939, 6034), False, 'from sunpy.util import SunpyUserWarning\n'), ((6123, 6220), 'pandas.read_csv', 'pd.read_csv', (['self.classifications'], {'delimiter': 'delimiter', 'usecols': 'self.classifications_columns'}), '(self.classifications, delimiter=delimiter, usecols=self.\n classifications_columns)\n', (6134, 6220), True, 'import pandas as pd\n'), ((6932, 7075), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (["('Sunspotter Object cannot be created. The Classifications CSV is missing the following columns: '\n + missing_columns)"], {}), "(\n 'Sunspotter Object cannot be created. 
The Classifications CSV is missing the following columns: '\n + missing_columns)\n", (6948, 7075), False, 'from sunpy.util import SunpyUserWarning\n'), ((14331, 14480), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (['"""The given observation date isn\'t in the Timesfits file.\nUsing the observation nearest to the given obsdate instead."""'], {}), '(\n """The given observation date isn\'t in the Timesfits file.\nUsing the observation nearest to the given obsdate instead."""\n )\n', (14347, 14480), False, 'from sunpy.util import SunpyUserWarning\n'), ((6371, 6511), 'sunpy.util.SunpyUserWarning', 'SunpyUserWarning', (['"""Sunspotter Object cannot be created. Either the Classifications columns do not match, or the file is corrupted"""'], {}), "(\n 'Sunspotter Object cannot be created. Either the Classifications columns do not match, or the file is corrupted'\n )\n", (6387, 6511), False, 'from sunpy.util import SunpyUserWarning\n')]
|
"""
"""
import turtle
class Pluma():
def __init__(self, color, grosor):
self.color = color
self.grosor = grosor
self.posicion = [0, 0]
self.previa = self.posicion
def mueve(self, x, y):
self.previa = self.posicion
self.posicion = [x, y]
turtle.goto(*self.posicion)
def regresa(self):
self.posicion, self.previa = self.previa, self.posicion
turtle.goto(*self.posicion)
# add to the Pluma class a function called 'regresa' that moves the pen back to its previous position
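# A minimal usage sketch (opens a turtle window): move the pen, then
# return to where it was.
if __name__ == '__main__':
    pluma = Pluma('black', 2)
    pluma.mueve(100, 50)  # pen travels to (100, 50)
    pluma.regresa()       # pen returns to (0, 0)
    turtle.done()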
|
[
"turtle.goto"
] |
[((305, 332), 'turtle.goto', 'turtle.goto', (['*self.posicion'], {}), '(*self.posicion)\n', (316, 332), False, 'import turtle\n'), ((429, 456), 'turtle.goto', 'turtle.goto', (['*self.posicion'], {}), '(*self.posicion)\n', (440, 456), False, 'import turtle\n')]
|